repo_name (stringlengths 5-92) | path (stringlengths 4-232) | copies (stringclasses, 19 values) | size (stringlengths 4-7) | content (stringlengths 721-1.04M) | license (stringclasses, 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
Hossein-Noroozpour/PyHDM | hml/classification/HNearestNeighborsClassifier.py | 1 | 2014 | #!/usr/bin/python3.3
# coding=utf-8
"""
Module for K nearest neighbours.
"""
__author__ = 'Hossein Noroozpour Thany Abady'
import numpy


class HNearestNeighboursClassifier():
    """
    Class for K nearest neighbors algorithm.
    """

    def __init__(self, n_neighbors=5, weight_function=lambda l: [1. / (d + .0001) for d in l], weight_name='i'):
        self.n_neighbors = n_neighbors
        self.weight_function = weight_function
        self.train = None
        self.target = None
        self.weight_name = weight_name

    def fit(self, train, target):
        """
        :param train: training feature vectors
        :param target: class labels for the training vectors
        """
        self.train = numpy.array(train)
        self.target = target
        return self

    def predict(self, test):
        """
        :param test: feature vectors to classify
        """
        result = []
        test = numpy.array(test)
        for t in test:
            distances = []
            for r in self.train:
                d = r - t
                distances.append(numpy.sqrt(d.dot(d)))
            weights = self.weight_function(distances)
            wc = [(weights[i], self.target[i]) for i in range(len(self.target))]
            wc.sort(key=lambda tup: tup[0], reverse=True)
            v = dict()
            for i in range(self.n_neighbors):
                if v.get(wc[i][1]) is None:
                    v[wc[i][1]] = 1
                else:
                    v[wc[i][1]] += 1
            # pick the class with the most votes among the k nearest neighbours
            vote = 0
            c = 0
            for k in v.keys():
                if v[k] >= vote:
                    vote = v[k]
                    c = k
            result.append(c)
        return result

    def __str__(self):
        return 'K nearest neighbors classifier with n=' + str(self.n_neighbors) + ' and weight=' + str(self.weight_name)

    def score(self, x, y):
        """
        :param x: feature vectors to classify
        :param y: expected class labels
        """
        p = self.predict(x)
        c = 0
        for i in range(len(y)):
            if p[i] == y[i]:
                c += 1
        return float(c) / float(len(y)) | mit | 3,813,939,173,336,904,000 | 25.866667 | 120 | 0.478153 | false |
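A minimal usage sketch for the classifier above (added for illustration; the data points and labels are made up):

train = [[0.0, 0.0], [0.1, 0.2], [5.0, 5.1], [5.2, 4.9]]
target = ['a', 'a', 'b', 'b']

clf = HNearestNeighboursClassifier(n_neighbors=3)
clf.fit(train, target)

print(clf.predict([[0.05, 0.1], [5.1, 5.0]]))           # expected: ['a', 'b']
print(clf.score([[0.0, 0.1], [5.0, 5.0]], ['a', 'b']))  # expected: 1.0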
ndexbio/ndex-enrich | Persistence_Unit_Tests.py | 1 | 5444 | __author__ = 'aarongary'
import unittest
from persistence import EnrichmentPersistence
class Dev_Uint_Tests(unittest.TestCase):
def test_persistence(self):
try:
my_json = {
'termClassification': [{
'status': 'unknown',
'geneSymbol': '',
'termId': 'RATTUS',
'probabilitiesMap': {
'icd10': '0.0',
'gene': '0.0',
'disease': '0.0',
'drug': '0.0',
'genome': '1.0'
},
'desc': ''
}, {
'status': 'success',
'geneSymbol': 'ENSG00000230855',
'termId': 'OR2J3',
'probabilitiesMap': {
'icd10': '0.0',
'gene': '1.0',
'disease': '0.0',
'drug': '0.0',
'genome': '0.0'
},
'desc': 'olfactory receptor, family 2, subfamily J, member 3 [Source:HGNC Symbol;Acc:HGNC:8261]'
}, {
'status': 'success',
'geneSymbol': 'ENSG00000129673',
'termId': 'AANAT',
'probabilitiesMap': {
'icd10': '0.0',
'gene': '1.0',
'disease': '0.0',
'drug': '0.0',
'genome': '0.0'
},
'desc': 'aralkylamine N-acetyltransferase [Source:HGNC Symbol;Acc:HGNC:19]'
}, {
'status': 'success',
'geneSymbol': '',
'termId': 'LYMPHATIC',
'probabilitiesMap': {
'icd10': '1.0',
'gene': '0.0',
'disease': '0.0',
'drug': '0.0',
'genome': '0.0'
},
'desc': ''
}, {
'status': 'success',
'geneSymbol': 'ENSG00000163749',
'termId': 'CCDC158',
'probabilitiesMap': {
'icd10': '0.0',
'gene': '1.0',
'disease': '0.0',
'drug': '0.0',
'genome': '0.0'
},
'desc': 'coiled-coil domain containing 158 [Source:HGNC Symbol;Acc:HGNC:26374]'
}, {
'status': 'success',
'geneSymbol': 'ENSG00000173261',
'termId': 'PLAC8L1',
'probabilitiesMap': {
'icd10': '0.0',
'gene': '1.0',
'disease': '0.0',
'drug': '0.0',
'genome': '0.0'
},
'desc': 'PLAC8-like 1 [Source:HGNC Symbol;Acc:HGNC:31746]'
}, {
'status': 'success',
'geneSymbol': '',
'termId': 'CAFFEINE',
'probabilitiesMap': {
'icd10': '0.5',
'gene': '0.0',
'disease': '0.0',
'drug': '0.5',
'genome': '0.0'
},
'desc': ''
}, {
'status': 'success',
'geneSymbol': '',
'termId': 'HUMAN',
'probabilitiesMap': {
'icd10': '1.0',
'gene': '0.0',
'disease': '0.0',
'drug': '0.0',
'genome': '0.0'
},
'desc': ''
}, {
'status': 'unknown',
'geneSymbol': '',
'termId': 'ASLFDKJDS',
'probabilitiesMap': {
'icd10': '0.0',
'gene': '0.0',
'disease': '0.0',
'drug': '0.0',
'genome': '0.0'
},
'desc': ''
}]
}
ep = EnrichmentPersistence()
ep.save_file(my_json, 'my_test_6')
ep.save_file(my_json, 'my_test_7')
ep.save_file(my_json, 'my_test_8')
ep.save_file(my_json, 'my_test_9')
ep.save_file(my_json, 'my_test_10')
print ep.get_file('my_test_8')
self.assertTrue(1 == 1)
except Exception as e:
print e.message
self.assertTrue(1 == 0)
if __name__ == '__main__':
unittest.main()
| bsd-2-clause | 8,271,728,539,399,308,000 | 39.325926 | 120 | 0.2759 | false |
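The EnrichmentPersistence class exercised above is imported from the package's persistence module and is not shown in this row. A minimal file-backed sketch that would satisfy the save_file/get_file calls could look like the following; the directory layout and the .json suffix are assumptions, not the actual ndex-enrich implementation:

import json
import os

class EnrichmentPersistence(object):
    """Hypothetical minimal persistence helper; stores each payload as a JSON file."""
    def __init__(self, base_dir='enrichment_files'):
        self.base_dir = base_dir
        if not os.path.isdir(base_dir):
            os.makedirs(base_dir)

    def _path(self, file_name):
        return os.path.join(self.base_dir, file_name + '.json')

    def save_file(self, data, file_name):
        with open(self._path(file_name), 'w') as f:
            json.dump(data, f)

    def get_file(self, file_name):
        if not os.path.exists(self._path(file_name)):
            return None
        with open(self._path(file_name)) as f:
            return json.load(f)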
ybonjour/nuus | services/indexing/Indexer.py | 1 | 3702 | __author__ = 'Yves Bonjour'
from Tokenizer import create_tokenizer
import redis
import uuid


def create_indexer(redis_host, redis_port):
    tokenizer = create_tokenizer()
    redis_db = redis.Redis(redis_host, redis_port)
    store = RedisIndexStore(redis_db)
    return Indexer(store, tokenizer)


class Indexer:

    def __init__(self, store, tokenizer):
        self.store = store
        self.tokenizer = tokenizer

    def index(self, text, document_id):
        tokens = self.tokenizer.tokenize(text)
        for token in tokens:
            self.store.add(document_id, token)

    def document_frequency_normalized(self, term):
        return float(self.store.document_frequency(term)) / float(self.store.num_documents())

    def term_document_frequency(self, document, term):
        return self.store.term_document_frequency(document, term)

    def get_posting_list(self, term):
        return self.store.posting_list(term)

    def get_terms(self, document):
        return self.store.get_terms(document)


class MemoryIndexStore(object):

    def __init__(self):
        self.posting_lists = {}
        self.documents = {}

    def posting_list(self, term):
        if term not in self.posting_lists:
            return {}
        return self.posting_lists[term]

    def get_terms(self, document):
        if document not in self.documents:
            return []
        return self.documents[document]

    def document_frequency(self, term):
        if term not in self.posting_lists:
            return 0
        return len(self.posting_lists[term])

    def num_documents(self):
        return len(self.documents)

    def term_document_frequency(self, document, term):
        if term not in self.posting_lists or document not in self.posting_lists[term]:
            return 0
        return self.posting_lists[term][document]

    def add(self, document, term):
        if term not in self.posting_lists:
            self.posting_lists[term] = {}
        if document not in self.posting_lists[term]:
            self.posting_lists[term][document] = 0
        self.posting_lists[term][document] += 1
        if document not in self.documents:
            self.documents[document] = set()
        self.documents[document].add(term)


class RedisIndexStore(object):

    def __init__(self, redis):
        self.redis = redis

    def posting_list(self, term):
        return {uuid.UUID(document): int(self.redis.get(self._posting_key(term, document)))
                for document in self.redis.smembers(self._term_key(term))}

    def document_frequency(self, term):
        return len(self.redis.smembers(self._term_key(term)))

    def get_terms(self, document):
        return self.redis.smembers(self._document_key(document))

    def num_documents(self):
        return len(self.redis.smembers(self._documents_key()))

    def term_document_frequency(self, document, term):
        tdf = self.redis.get(self._posting_key(term, document))
        return int(tdf) if tdf else 0

    def add(self, document, term):
        self.redis.sadd(self._documents_key(), document)
        self.redis.sadd(self._term_key(term), document)
        self.redis.sadd(self._document_key(document), term)
        self.redis.setnx(self._posting_key(term, document), 0)
        self.redis.incr(self._posting_key(term, document))

    def _documents_key(self):
        return "documents"

    def _document_key(self, document):
        return "document:{document}".format(document=document)

    def _term_key(self, term):
        return "term:{term}".format(term=term)

    def _posting_key(self, term, document):
        return "posting:{term}:{document}".format(term=term, document=document) | mit | 5,113,585,835,216,535,000 | 29.105691 | 93 | 0.642896 | false |
cria/microSICol | import_db.py | 1 | 7329 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Script to import XML data to current SICol database
# Note: this script must be executed from the root directory
# Author:Renato Arnellas Coelho renatoac at gmail dot com
import sys
import os
from xml.dom.minidom import Document,parse
def importSQLite(xml,sqlite_path='./db/sqlite.db'):
'''
xml = XML filename
sqlite_path = default is usually used
'''
from pysqlite2 import dbapi2 as sqlite
print "Connecting to SQLite database..."
if os.path.exists(sqlite_path):
#Connect
connect = sqlite.connect(sqlite_path,detect_types=sqlite.PARSE_COLNAMES,isolation_level=None)
cursor = connect.cursor()
print "Loading SQLite XML..."
doc = parse(xml)
tables = doc.getElementsByTagName('table')
for table in tables:
tablename = table.getAttribute('name')
print "Emptying table '%s'..." % tablename
rows = table.getElementsByTagName('row')
cursor.execute("DELETE FROM %s;" % tablename) #clear table first
print "Inserting values in table '%s'..." % tablename
### INSERT ITEM ###
for row in rows:
fields = row.getElementsByTagName('field')
colnames = []
colvalues = []
for field in fields:
colnames.append('`'+field.getAttribute('name')+'`')
coltype = field.getAttribute('type')
if coltype == 'integer':
colvalues.append(field.getAttribute('value'))
elif coltype == 'NULL':
colvalues.append("NULL")
else: #behaves as string
colvalues.append("'"+field.getAttribute('value').replace("'","\\'")+"'")
cursor.execute("INSERT INTO `%s` (%s) VALUES (%s);" % (tablename,",".join(colnames),",".join(colvalues) ) )
###################
#Close
cursor.close()
connect.close()
print "*** Import Finished ***"
raw_input()
else:
print "*** ERROR ***"
print "Unable to connect to SQLite database."
raw_input()
def importData(xml,host,user,pwd,dbname,port):
'''
xml = XML filename
host = MySQL host
user = MySQL root user
pwd = MySQL root password
dbname = MySQL database to be used
port = MySQL port number
'''
import MySQLdb as mysql
#Load file to Python XML object
print "Loading XML..."
doc = parse(xml)
print "Generating intermediate SQL import file..."
output = []
#Connect to database
output.append("USE %s;" % dbname)
#Set Global VARS
output.append("/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;")
output.append("/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;")
output.append("/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;")
output.append("/*!40101 SET NAMES utf8 */;")
output.append("/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;")
output.append("/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;")
output.append("/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;")
output.append("")
#Insert data in each table disabling key constrains
tables = doc.getElementsByTagName('table')
for table in tables:
tablename = table.getAttribute('name')
print "Reading table '%s'..." % tablename
rows = table.getElementsByTagName('row')
output.append("/*!40000 ALTER TABLE `%s` DISABLE KEYS */;" % tablename)
output.append("TRUNCATE TABLE `%s`;" % tablename) #clear table first
### INSERT ITEM ###
for row in rows:
fields = row.getElementsByTagName('field')
colnames = []
colvalues = []
for field in fields:
colnames.append('`'+field.getAttribute('name')+'`')
coltype = field.getAttribute('type')
if coltype == 'integer':
colvalues.append(field.getAttribute('value'))
elif coltype == 'NULL':
colvalues.append("NULL")
else: #behaves as string
colvalues.append("'"+field.getAttribute('value').replace("'","\\'")+"'")
output.append("INSERT INTO `%s`.`%s` (%s) VALUES (%s);" % (dbname,tablename,",".join(colnames),",".join(colvalues) ) )
###################
output.append("/*!40000 ALTER TABLE `%s` ENABLE KEYS */;" % tablename)
#Set Global VARS
output.append("")
output.append("/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;")
output.append("/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;")
output.append("/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;")
output.append("/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;")
output.append("/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;")
output.append("/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;")
output.append("/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;")
#Save SQL file
open('import.sql','w').write("\n".join(output).encode('utf-8'))
print "Running SQL import..."
sicol_path = os.getcwd()+os.sep+'db'+os.sep+'scripts'+os.sep
import platform
if platform.system() == "Windows" or platform.system() == "Microsoft":
mysql_path = [x for x in os.environ['PATH'].split(";") if x.lower().find('mysql') != -1]
else: #UNIX
pipe = os.popen("which mysql") #grab where MySQL is installed
mysql_path = pipe.read().strip()
if mysql_path == '' or mysql_path == []:
print "*********** ERROR ***********"
print "Please insert path to executable directory (mysql.exe) in OS 'PATH' variable."
raw_input() #Wait for user input...
else:
if platform.system() == "Windows" or platform.system() == "Microsoft":
#Ignore whether PATH ends with '\' or not
mysql_path = mysql_path[0]
if mysql_path[-1] != '\\': mysql_path += '\\'
mysql_path = '"' + mysql_path + 'mysql.exe"'
try:
bd_version = dbname.split("_")[1]
except Exception,e:
print "*********** ERROR ***********"
print "Please type \"sicol_v###\" where ### = version number."
raw_input() #Wait for user input...
return
try:
os.system("%s -h%s -u%s -p%s < %s" % (mysql_path,host,user,pwd,os.getcwd()+os.sep+"import.sql") )
except Exception,e:
print "*********** ERROR ***********"
print str(e)
raw_input() #Wait for user input...
return
print "*** Import Finished ***"
raw_input()
#If this script is called locally...
if __name__ == "__main__":
print "*** Import SICol Database ***"
opt = raw_input("Import MySQL data? (y/n)")[0].lower()
if opt == 'y':
import getpass
import os.path
host = raw_input("host=")
port = raw_input("port=")
root_login = raw_input("administrator login=")
root_pwd = getpass.getpass("administrator password=")
dbname = raw_input("database name=")
xml = raw_input("import XML filename=")
while not os.path.exists(xml) and xml != '':
print "*** ERROR ***"
print "Specified file does not exist!"
xml = raw_input("import XML filename=")
if xml != '':
importData(xml,host,root_login,root_pwd,dbname,port)
opt = raw_input("Import SQLite data? (y/n)")[0].lower()
if opt == 'y':
xml = raw_input("import XML filename=")
while not os.path.exists(xml) and xml != '':
print "*** ERROR ***"
print "Specified file does not exist!"
xml = raw_input("import XML filename=")
if xml != '':
importSQLite(xml)
| gpl-2.0 | -555,823,517,836,128,260 | 39.716667 | 127 | 0.617137 | false |
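For reference, importData() and importSQLite() above only look for table, row, and field elements (with name, type, and value attributes), so an input file is shaped roughly like the snippet below. The root element name, table name, and column values are made up for illustration:

example_xml = """<?xml version="1.0" encoding="utf-8"?>
<export>
  <table name="species">
    <row>
      <field name="id" type="integer" value="1"/>
      <field name="name" type="string" value="Escherichia coli"/>
      <field name="notes" type="NULL" value=""/>
    </row>
  </table>
</export>
"""
# open('example_import.xml', 'w').write(example_xml)
# importData('example_import.xml', host, root_login, root_pwd, dbname, port)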
longde123/MultiversePlatform | server/config/friendworld/extensions_proxy.py | 1 | 50308 | #
# The Multiverse Platform is made available under the MIT License.
#
# Copyright (c) 2012 The Multiverse Foundation
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
#
#
from java.util.concurrent import *
from java.util import *
from java.lang import *
from java.net import *
from java.sql import *
from multiverse.mars import *
from multiverse.mars.core import *
from multiverse.mars.objects import *
from multiverse.mars.util import *
from multiverse.mars.plugins import *
from multiverse.server.math import *
from multiverse.server.plugins import *
from multiverse.server.events import *
from multiverse.server.objects import *
from multiverse.server.engine import *
from multiverse.server.util import *
import time
import sys
driverName = "com.mysql.jdbc.Driver"
Class.forName(driverName)
# photo storage
places_url = "http://places.multiverse.net/"
# host running web database
webdb_host = "webdb.mv-places.com"
# for testing
#webdb_host = "localhost"
ProxyPlugin.MaxConcurrentUsers = 400
ROOM_PLAYER_LIMIT = 50
maxUsersProp = Engine.getProperty("places.max_concurrent_users")
if maxUsersProp != None:
ProxyPlugin.MaxConcurrentUsers = int(maxUsersProp)
roomLimitProp = Engine.getProperty("places.room_player_limit")
if roomLimitProp != None:
ROOM_PLAYER_LIMIT = int(roomLimitProp)
AGENT_NAME = Engine.getAgent().getName()
TOKEN_LIFE = 30000 # 30 seconds after which the token expires
def getDomainHost():
hostName = Engine.getMessageServerHostname()
if hostName == 'localhost':
try:
localMachine = InetAddress.getLocalHost()
hostName = localMachine.getHostName()
except UnknownHostException:
Log.error("getDomainHost: couldn't get host name from local IP address %s" % str(localMachine))
Log.debug("getDomainHost: hostname = %s" % hostName)
return hostName
domainHostName = getDomainHost()
class SetMeshCommand (ProxyPlugin.CommandParser):
def parse(self, cmdEvent):
cmd = cmdEvent.getCommand();
playerOid = cmdEvent.getObjectOid()
meshstring = cmd[cmd.index(' ')+1:]
submeshes = LinkedList()
meshlist = meshstring.split()
basemesh = meshlist[0]
for i in range(1, len(meshlist)-1, 2):
submesh = DisplayContext.Submesh(meshlist[i], meshlist[i+1])
submeshes.add(submesh)
Log.debug("/setmesh: oid=" + str(playerOid) + " to: " + meshstring)
WorldManagerClient.modifyDisplayContext(playerOid, WorldManagerClient.ModifyDisplayContextAction.REPLACE, basemesh, submeshes)
class PlayAnimationCommand (ProxyPlugin.CommandParser):
def parse(self, cmdEvent):
cmd = cmdEvent.getCommand();
playerOid = cmdEvent.getObjectOid()
animation = cmd[cmd.index(' ')+1:]
Log.debug("/playanimation: oid=" + str(playerOid) + " with: " + animation);
AnimationClient.playSingleAnimation(playerOid, animation)
class DanceCommand (ProxyPlugin.CommandParser):
def parse(self, cmdEvent):
cmd = cmdEvent.getCommand()
args = cmd.split()
playerOid = cmdEvent.getObjectOid()
Log.debug("/dance: oid=" + str(playerOid))
if len(args) == 1:
currentDanceState = EnginePlugin.getObjectProperty(playerOid, Namespace.WORLD_MANAGER, "dancestate")
newDanceState = 0
if currentDanceState == 0:
rand = Random()
newDanceState = int(rand.nextInt(6)) + 1
EnginePlugin.setObjectProperty(playerOid, Namespace.WORLD_MANAGER, "dancestate", newDanceState)
elif len(args) == 2:
if args[1] == "on":
newDanceState = int(rand.nextInt(6)) + 1
EnginePlugin.setObjectProperty(playerOid, Namespace.WORLD_MANAGER, "dancestate", newDanceState)
elif args[1] == "off" or args[1] == "0":
EnginePlugin.setObjectProperty(playerOid, Namespace.WORLD_MANAGER, "dancestate", 0)
else:
try:
newDanceState = int(args[1])
if newDanceState >= 1 and newDanceState <= 6:
EnginePlugin.setObjectProperty(playerOid, Namespace.WORLD_MANAGER, "dancestate", newDanceState)
except:
pass
class GestureCommand (ProxyPlugin.CommandParser):
def parse(self, cmdEvent):
cmd = cmdEvent.getCommand()
args = cmd.split()
playerOid = cmdEvent.getObjectOid()
Log.debug("/gesture: oid=" + str(playerOid))
if len(args) == 1:
EnginePlugin.setObjectProperty(playerOid, Namespace.WORLD_MANAGER, "gesturestate", Boolean(not EnginePlugin.getObjectProperty(playerOid, Namespace.WORLD_MANAGER, "gesturestate")))
elif len(args) == 2:
if args[1] == "on":
EnginePlugin.setObjectProperty(playerOid, Namespace.WORLD_MANAGER, "gesturestate", Boolean(True))
if args[1] == "off":
EnginePlugin.setObjectProperty(playerOid, Namespace.WORLD_MANAGER, "gesturestate", Boolean(False))
sitList = {
'low' : 'ntrl_sit_50cm',
'med' : 'ntrl_sit_75cm',
'high' : 'ntrl_sit_85cm',
'1' : 'ntrl_sit_50cm_attd_01_idle_01',
'2' : 'ntrl_sit_50cm_attd_02_idle_01',
'3' : 'ntrl_sit_50cm_attd_03_idle_01',
}
class SitCommand (ProxyPlugin.CommandParser):
def parse(self, cmdEvent):
cmd = cmdEvent.getCommand()
args = cmd.split()
playerOid = cmdEvent.getObjectOid()
Log.debug("/sit: oid=" + str(playerOid))
if len(args) == 1:
Log.debug("/sit: oid=" + str(playerOid))
if (not WorldManagerClient.getObjectProperty(playerOid, "sitstate")):
AnimationClient.playSingleAnimation(playerOid, "sit") # stand to sit
else:
# AnimationClient.playSingleAnimation(playerOid, "stand") # sit to stand
pass
EnginePlugin.setObjectProperty(playerOid, Namespace.WORLD_MANAGER, "sitstate", Boolean(not EnginePlugin.getObjectProperty(playerOid, Namespace.WORLD_MANAGER, "sitstate")))
elif len(args) == 2:
sitStyle = args[1]
Log.debug("/sit: oid=" + str(playerOid) + ", sit style=" + sitStyle)
if sitStyle == "on":
AnimationClient.playSingleAnimation(playerOid, "sit") # stand to sit
EnginePlugin.setObjectProperty(playerOid, Namespace.WORLD_MANAGER, "sitstate", Boolean(True))
return
elif sitStyle == "off":
# AnimationClient.playSingleAnimation(playerOid, "stand") # sit to stand
EnginePlugin.setObjectProperty(playerOid, Namespace.WORLD_MANAGER, "sitstate", Boolean(False))
return
animName = 'sit'
if sitStyle in sitList.keys():
animName = sitList[sitStyle]
if (not WorldManagerClient.getObjectProperty(playerOid, "sitstate")):
AnimationClient.playSingleAnimation(playerOid, animName) # stand to sit
else:
# AnimationClient.playSingleAnimation(playerOid, "stand") # sit to stand
pass
EnginePlugin.setObjectProperty(playerOid, Namespace.WORLD_MANAGER, "sitstate", Boolean(not EnginePlugin.getObjectProperty(playerOid, Namespace.WORLD_MANAGER, "sitstate")))
class GMCommand (ProxyPlugin.CommandParser):
def parse(self, cmdEvent):
cmd = cmdEvent.getCommand()
args = cmd.split()
playerOid = cmdEvent.getObjectOid()
accountId = EnginePlugin.getObjectProperty(playerOid, Namespace.WORLD_MANAGER, "AccountId")
if isAdmin(accountId):
Log.debug("/gmmode: oid=" + str(playerOid))
gmMode = EnginePlugin.getObjectProperty(playerOid, Namespace.WORLD_MANAGER, "GMMode")
if gmMode == None:
gmMode = False
EnginePlugin.setObjectProperty(playerOid, Namespace.WORLD_MANAGER, "GMMode", Boolean(not gmMode))
class PropertyCommand(ProxyPlugin.CommandParser):
def parse(self, cmdEvent):
cmd = cmdEvent.getCommand()
args = cmd.split()
if len(args) == 3:
playerOid = cmdEvent.getObjectOid()
Log.debug("/property: oid=" + str(playerOid) + " " + args[1] + " " + args[2])
propName = args[1]
propValue = args[2]
EnginePlugin.setObjectProperty(playerOid, Namespace.WORLD_MANAGER, propName, propValue)
if len(args) == 2:
playerOid = cmdEvent.getObjectOid()
Log.debug("/property: oid=" + str(playerOid) + " " + args[1])
propName = args[1]
propValue = EnginePlugin.getObjectProperty(playerOid, Namespace.WORLD_MANAGER, propName)
WorldManagerClient.sendObjChatMsg(playerOid, 0, str(propValue))
class IgnoreCommand(ProxyPlugin.CommandParser):
def parse(self, cmdEvent):
playerOid = cmdEvent.getObjectOid()
player = proxyPlugin.getPlayer(playerOid)
cmd = cmdEvent.getCommand()
args = cmd.split()
Log.debug("/ignore: oid=%s; cmd=%s; args=%s" % (str(playerOid), cmd, args))
# Rest for 2+ but only ignore the first.
# Additional args may be first name, last name, etc.,
# for greater ignore granularity in the future.
if len(args) >= 2:
result = proxyPlugin.matchingPlayers(player, args[1], True)
if result is not None:
oids = result[0]
if oids is not None and oids.size() > 0:
if playerOid in oids: # can't ignore self
# This is ugly, but remove(playerOid) doesn't
# work (playerOid is treated as an index), and
# indexOf(playerOid) returns -1.
for i in range(len(oids)):
if playerOid == oids[i]:
oids.remove(i)
break;
# Make sure removing playerOid didn't empty the list.
if oids.size() > 0:
proxyPlugin.updateIgnoredOids(player, oids, None)
WorldManagerClient.sendObjChatMsg(playerOid, 0, "You are now ignoring all characters named %s." % args[1])
else:
WorldManagerClient.sendObjChatMsg(playerOid, 0, "No matches found for %s." % args[1])
else:
WorldManagerClient.sendObjChatMsg(playerOid, 0, "No matches found for %s." % args[1])
else:
WorldManagerClient.sendObjChatMsg(playerOid, 0, "Usage: /ignore playername")
#
# places specific /sys command
# determine admin status of caller, then call into common/proxy.py
#
class FRW_SysCommand (ProxyPlugin.CommandParser):
def parse(self, cmdEvent):
playerOid = cmdEvent.getObjectOid()
accountId = EnginePlugin.getObjectProperty(playerOid, Namespace.WORLD_MANAGER, "AccountId")
if isAdmin(accountId):
handleSysCommand(cmdEvent)
proxyPlugin.registerCommand("/setmesh", SetMeshCommand())
proxyPlugin.registerCommand("/playanimation", PlayAnimationCommand())
proxyPlugin.registerCommand("/dance", DanceCommand())
proxyPlugin.registerCommand("/gesture", GestureCommand())
proxyPlugin.registerCommand("/sit", SitCommand())
proxyPlugin.registerCommand("/gmmode", GMCommand())
proxyPlugin.registerCommand("/property", PropertyCommand())
proxyPlugin.registerCommand("/ignore", IgnoreCommand())
proxyPlugin.registerCommand("/sys", FRW_SysCommand())
class YesCommand (ProxyPlugin.CommandParser):
def parse(self, cmdEvent):
playerOid = cmdEvent.getObjectOid()
Log.debug("/yes: oid=" + str(playerOid))
AnimationClient.playSingleAnimation(playerOid, "ntrl_nod")
class NoCommand (ProxyPlugin.CommandParser):
def parse(self, cmdEvent):
playerOid = cmdEvent.getObjectOid()
Log.debug("/no: oid=" + str(playerOid))
AnimationClient.playSingleAnimation(playerOid, "ntrl_headshake")
class ShrugCommand (ProxyPlugin.CommandParser):
def parse(self, cmdEvent):
playerOid = cmdEvent.getObjectOid()
Log.debug("/shrug: oid=" + str(playerOid))
AnimationClient.playSingleAnimation(playerOid, "ntrl_shrug")
class LaughCommand (ProxyPlugin.CommandParser):
def parse(self, cmdEvent):
playerOid = cmdEvent.getObjectOid()
Log.debug("/laugh: oid=" + str(playerOid))
AnimationClient.playSingleAnimation(playerOid, "ntrl_laugh")
class WaveCommand (ProxyPlugin.CommandParser):
def parse(self, cmdEvent):
playerOid = cmdEvent.getObjectOid()
Log.debug("/wave: oid=" + str(playerOid))
AnimationClient.playSingleAnimation(playerOid, "ntrl_wave")
class BowCommand (ProxyPlugin.CommandParser):
def parse(self, cmdEvent):
playerOid = cmdEvent.getObjectOid()
Log.debug("/bow: oid=" + str(playerOid))
AnimationClient.playSingleAnimation(playerOid, "ntrl_bow")
class PointCommand (ProxyPlugin.CommandParser):
def parse(self, cmdEvent):
playerOid = cmdEvent.getObjectOid()
Log.debug("/point: oid=" + str(playerOid))
AnimationClient.playSingleAnimation(playerOid, "ntrl_point")
class ClapCommand (ProxyPlugin.CommandParser):
def parse(self, cmdEvent):
playerOid = cmdEvent.getObjectOid()
Log.debug("/clap: oid=" + str(playerOid))
AnimationClient.playSingleAnimation(playerOid, "ntrl_clap")
class CheerCommand (ProxyPlugin.CommandParser):
def parse(self, cmdEvent):
playerOid = cmdEvent.getObjectOid()
Log.debug("/cheer: oid=" + str(playerOid))
AnimationClient.playSingleAnimation(playerOid, "ntrl_cheer")
class AttitudeCommand (ProxyPlugin.CommandParser):
def parse(self, cmdEvent):
playerOid = cmdEvent.getObjectOid()
cmd = cmdEvent.getCommand()
args = cmd.split()
animNum = None
if len(args) > 1:
try:
animNum = int(args[1])
except:
animNum = 1
else:
animNum = 1
if animNum > 3:
animNum = 1
Log.debug("/attitude: oid= %s; cmd=%s" % (str(playerOid), cmd))
AnimationClient.playSingleAnimation(playerOid, "ntrl_attd_%02d_idle_01" % animNum)
class SetTVUrlCommand(ProxyPlugin.CommandParser):
def parse(self, cmdEvent):
playerOid = cmdEvent.getObjectOid()
tvOid = cmdEvent.getTarget()
cmd = cmdEvent.getCommand()
splitCmd = cmd.split(" ")
url = splitCmd[1]
if url != None and (url.startswith("http://") or url.startswith("mms://")):
WorldManagerClient.setObjectProperty(tvOid,"tv_url", url)
WorldManagerClient.sendObjChatMsg(playerOid, 0, "TV set to: " + url)
else:
WorldManagerClient.sendObjChatMsg(playerOid, 0, "Please include http:// or mms:// in the address")
proxyPlugin.registerCommand("/yes", YesCommand())
proxyPlugin.registerCommand("/no", NoCommand())
proxyPlugin.registerCommand("/shrug", ShrugCommand())
proxyPlugin.registerCommand("/laugh", LaughCommand())
proxyPlugin.registerCommand("/wave", WaveCommand())
proxyPlugin.registerCommand("/bow", BowCommand())
proxyPlugin.registerCommand("/point", PointCommand())
proxyPlugin.registerCommand("/clap", ClapCommand())
proxyPlugin.registerCommand("/cheer", CheerCommand())
proxyPlugin.registerCommand("/attitude", AttitudeCommand())
proxyPlugin.registerCommand("/attd", AttitudeCommand())
proxyPlugin.registerCommand("/settvurl", SetTVUrlCommand())
def instanceSetObjectProperty(instanceOid, oid, namespace, key, value):
props = EnginePlugin.getObjectProperty(instanceOid, Namespace.INSTANCE, "RoomItemsProps")
objInfo = WorldManagerClient.getObjectInfo(oid)
objName = objInfo.name # objInfo.getProperty("name")
objProps = None
if props.containsKey(objName):
objProps = props[objName]
else:
objProps = HashMap()
objProps[key] = value
props[objName] = objProps
EnginePlugin.setObjectProperty(instanceOid, Namespace.INSTANCE, "RoomItemsProps", props)
######################
# Dynamic Instancing #
######################
class DynInstProxyExtHook (ProxyExtensionHook):
def processExtensionEvent(self, event, player, proxy):
props = event.getPropertyMap()
DynamicInstancing().handleRequest(props, player, proxy)
def setProfilePhotos(instanceOid):
roomItemsProps = EnginePlugin.getObjectProperty(instanceOid, Namespace.INSTANCE, "RoomItemsProps")
roomOwnerId = EnginePlugin.getObjectProperty(instanceOid, Namespace.INSTANCE, "AccountId")
roomStyle = EnginePlugin.getObjectProperty(instanceOid, Namespace.INSTANCE, "RoomStyle")
# get photo for room owner
photoURL = getDBProperty(roomOwnerId, "PhotoURL")
# get oid for profile_main
profileMain = roomStyle + "_profile_main"
profileMainOid = ObjectManagerClient.getNamedObject(instanceOid, profileMain, None)
Log.debug("[CYC] '%s' oid is %s" % (profileMain, profileMainOid))
if profileMainOid is None:
return
# set pic_url for profile
roomItemsProps = setObjectProperty(profileMainOid, Namespace.WORLD_MANAGER, "pic_url", photoURL, roomItemsProps)
# get friendlist
friendlist = getFriendlist(roomOwnerId)
i = 0
for friendId in friendlist:
# get photo
photoURL = getDBProperty(friendId, "PhotoURL")
# set pic_url for friendlist
i = i + 1
profileName = roomStyle + "_profile_%02d" % i
profileOid = ObjectManagerClient.getNamedObject(instanceOid, profileName, None)
Log.debug("[CYC] '%s' oid is %s" % (profileName, profileOid))
if profileOid is None:
return
roomItemsProps = setObjectProperty(profileOid, Namespace.WORLD_MANAGER, "pic_url", photoURL, roomItemsProps)
roomItemsProps = setObjectProperty(profileOid, Namespace.WORLD_MANAGER, "AccountId", friendId, roomItemsProps)
EnginePlugin.setObjectProperty(instanceOid, Namespace.INSTANCE, "RoomItemsProps", roomItemsProps)
#
# Separate class allows instancing to be called outside the hook
# (i.e. kicking a player to the default instance).
#
class DynamicInstancing:
def handleRequest(self, props, player, proxy):
cmd = None
if props.containsKey("command"):
cmd = props["command"]
if cmd == "collectible":
self.addCollectible(props, player, proxy)
if (cmd == "instance") or (cmd == "load"):
Log.debug("processExtensionEvent (dyninst): cmd =" + cmd)
markerName = ""
if props.containsKey("markerName"):
markerName = props["markerName"]
else:
markerName = "spawnPt"
instanceName = ""
if props.containsKey("instanceName"):
instanceName = props["instanceName"]
owner = None
if props.containsKey("owner"):
owner = props["owner"]
db = Engine.getDatabase()
try:
accountId = int(owner)
except:
ownerOid = db.getOidByName(owner, Namespace.WORLD_MANAGER)
accountId = EnginePlugin.getObjectProperty(ownerOid, Namespace.WORLD_MANAGER, "AccountId")
instanceName = "room-" + str(accountId)
instanceOid = self.loadInstance(props, player, proxy, instanceName)
if instanceOid == None:
WorldManagerClient.sendObjChatMsg(player.getOid(), 0, "Player does not have a room.")
return
if (cmd == "instance"):
success = self.enterInstance(props, player, proxy, instanceName, markerName)
if success:
playerOid = player.getOid()
roomOwnerId = None # default instance
if owner is not None: # room instance
roomOwnerId = EnginePlugin.getObjectProperty(instanceOid, Namespace.INSTANCE, "AccountId")
EnginePlugin.setObjectProperty(playerOid, Namespace.WORLD_MANAGER, "roomOwnerId", roomOwnerId)
def loadInstance(self, props, player, proxy, instanceName):
instanceOid = InstanceClient.getInstanceOid(instanceName)
if instanceOid is None:
Log.error("Error loading instance "+instanceName)
return None
while True:
result = InstanceClient.loadInstance(instanceOid)
if result != InstanceClient.RESULT_ERROR_RETRY:
break
time.sleep(1)
if result != InstanceClient.RESULT_OK:
Log.error("Error loading instance "+str(instanceOid)+", result "+str(result))
if instanceName.find("room-") == 0:
setProfilePhotos(instanceOid)
return instanceOid
def enterInstance(self, props, player, proxy, instanceName, markerName):
instanceOid = proxyPlugin.getInstanceEntryCallback().selectInstance(player,instanceName)
if instanceOid == None:
return False
if instanceName.find("room-") == 0:
setProfilePhotos(instanceOid)
if (instanceOid is not None):
loc = InstanceClient.getMarkerPoint(instanceOid, markerName)
wnode = BasicWorldNode()
wnode.setInstanceOid(instanceOid)
rand = Random()
newloc = Point((loc.getX() + (int(rand.nextFloat() * 4000.0) - 2000)),
(loc.getY()),
(loc.getZ() + (int(rand.nextFloat() * 4000.0) - 2000)))
wnode.setLoc(newloc)
wnode.setDir(MVVector(0,0,0))
return InstanceClient.objectInstanceEntry(player.getOid(),wnode,0)
return False
def addCollectible(self, props, player, proxy):
Log.debug("makeCollectible (dyninst): loveseat")
playerOid = player.getOid()
loc = Point(props["loc"])
dir = props["dir"]
meshname = props["mesh_name"]
itemname = props["item_name"]
pWNode = WorldManagerClient.getWorldNode(playerOid)
instanceOid = pWNode.getInstanceOid()
iInfo = InstanceClient.getInstanceInfo(instanceOid, InstanceClient.FLAG_NAME)
dc = DisplayContext(meshname, True)
ot = Template("furniture") # template name
ot.put(Namespace.WORLD_MANAGER, WorldManagerClient.TEMPL_DISPLAY_CONTEXT, dc)
ot.put(Namespace.WORLD_MANAGER, WorldManagerClient.TEMPL_NAME, itemname)
ot.put(Namespace.WORLD_MANAGER, WorldManagerClient.TEMPL_INSTANCE, Long(instanceOid)) # -- instance OID
ot.put(Namespace.WORLD_MANAGER, WorldManagerClient.TEMPL_LOC, loc) # player location + 2m in the Z-axis
ot.put(Namespace.WORLD_MANAGER, WorldManagerClient.TEMPL_ORIENT, dir) # player orientation
# ot.put(Namespace.WORLD_MANAGER, "Targetable", Boolean(True))
# ot.put(Namespace.WORLD_MANAGER, "ClickHookName", "furniture_menu")
ot.put(Namespace.OBJECT_MANAGER, ObjectManagerClient.TEMPL_PERSISTENT, Boolean(True))
objectOid = ObjectManagerClient.generateObject("furniture", ot) # template name
rv = WorldManagerClient.spawn(objectOid)
Log.debug("dynamic instance: generated obj oid = " + str(objectOid))
return objectOid
proxyPlugin.addProxyExtensionHook("proxy.DYNAMIC_INSTANCE", DynInstProxyExtHook())
class PlacesInstanceEntryCallback (InstanceEntryCallback):
def instanceEntryAllowed(self, playerOid, instanceOid, location):
Log.info("PlacesInstanceEntryCallback: playerOid="+str(playerOid)+" "+
"instanceOid="+str(instanceOid)+" loc="+str(location))
info = None
# Get the instance name. In the case of a room, we can extract
# the owner's account id.
instanceName = Engine.getDatabase().getObjectName(instanceOid, InstanceClient.NAMESPACE)
if instanceName == None:
info = InstanceClient.getInstanceInfo(instanceOid,
InstanceClient.FLAG_PLAYER_POPULATION | InstanceClient.FLAG_NAME)
if info == None or info.name == None:
Log.debug("PlacesInstanceEntryCallback: Could not get instance information for instanceOid="+str(instanceOid))
return False
instanceName = info.name
if instanceName.find("room-") != 0:
return True
ownerAccountId = instanceName[5:]
# Get the player's account id
playerAccountId = EnginePlugin.getObjectProperty(playerOid, Namespace.OBJECT_MANAGER, "AccountId")
# HACK for backward compatibility: if no AccountId, then allow
if playerAccountId == None:
return True
# Player can always enter their own room
if playerAccountId == int(ownerAccountId):
return True
if not self.playerAllowedEntry(ownerAccountId, playerAccountId):
Log.debug("PlacesInstanceEntryCallback: playerAllowed returned false for accountId " + str(playerAccountId))
WorldManagerClient.sendObjChatMsg(playerOid, 0, "Privacy settings for room '" + instanceName + "' don't allow you to enter")
return False
# Get instance population and check limit
if info == None:
info = InstanceClient.getInstanceInfo(instanceOid, InstanceClient.FLAG_PLAYER_POPULATION)
limit = EnginePlugin.getObjectProperty(instanceOid, InstanceClient.NAMESPACE, "populationLimit")
if limit == None:
limit = ROOM_PLAYER_LIMIT
if info.playerPopulation >= limit:
WorldManagerClient.sendObjChatMsg(playerOid, 0, "Room is full, try again later.")
Log.info("ProxyPlugin: INSTANCE_FULL playerOid=" + str(playerOid) +
" instanceOid=" + str(instanceOid) +
" ownerAccountId=" + str(ownerAccountId) +
" limit=" + str(limit))
return False
else:
return True
return True
def playerAllowedEntry(self, ownerAccountId, friendAccountId):
privacy_setting = "Anyone"
is_friend = 0
logPrefix = "playerAllowedEntry: For ownerAccountId " + str(ownerAccountId) + " and friendAccountId " + str(friendAccountId)
sql = "SELECT p.value, IF (EXISTS (SELECT 1 FROM friends AS f WHERE f.my_id = %d AND f.friend_id = %d) ,1,0) AS is_friend FROM profile AS p WHERE p.account_id = %d AND p.property = 'Privacy'" % (ownerAccountId, friendAccountId, ownerAccountId)
try:
url = "jdbc:mysql://"+webdb_host+"/friendworld?user=root&password=test"
# Get a row with two columns: the value of the 'Privacy' property for the profile table, and whether friendIs is a friend
con = DriverManager.getConnection(url)
stm = con.createStatement()
srs = stm.executeQuery(sql)
if (srs.next()):
privacy_setting = srs.getString("value")
is_friend = srs.getInt("is_friend")
#Log.debug(logPrefix + privacy_setting + " and is_friend = " + str(is_friend))
else:
# If there were no rows returned, that means we should use the default value of "Anyone"
#Log.debug(logPrefix + ", didn't find a 'Privacy' row in the properties table")
privacy_setting = "Anyone"
srs.close()
stm.close()
con.close()
except:
Log.debug("playerAllowedEntry: Got exception running database query to retrieve privacy permission for account " +
str(ownerAccountId) + ", sql is " + sql + ", exception " + str(sys.exc_info()[0]))
if privacy_setting == "Anyone":
Log.debug(logPrefix + ", allowing entry because the privacy setting is 'Anyone'")
return True
if (privacy_setting == "Friends"):
if is_friend == 1:
Log.debug(logPrefix + ", allowing entry because the privacy setting is 'Friends' and he is a friend")
return True
else:
Log.debug(logPrefix + ", not allowing entry because the privacy setting is 'Friends' and he is not a friend")
return False
else:
Log.debug(logPrefix + ", not allowing entry because the privacy setting is '" + privacy_setting + "'")
return False
def selectInstance(self,player,instanceName):
infos = InstanceClient.getInstanceInfoByName(instanceName,
InstanceClient.FLAG_PLAYER_POPULATION)
if infos.size() == 0:
Log.error("PlacesInstanceEntryCallback: unknown instance name " +
instanceName)
return None
if infos.size() == 1:
return infos.get(0).oid
selected = None
for info in infos:
if selected == None or info.playerPopulation > selected.playerPopulation:
limit = EnginePlugin.getObjectProperty(info.oid,
InstanceClient.NAMESPACE, "populationLimit")
if limit == None:
limit = ROOM_PLAYER_LIMIT
if info.playerPopulation < limit:
selected = info
if selected != None:
return selected.oid
else:
Log.error("PlacesInstanceEntryCallback: all instances full name=" +
instanceName)
return None
proxyPlugin.setInstanceEntryCallback(PlacesInstanceEntryCallback())
#####
#
#####
def setObjectProperty(oid, namespace, key, value, props):
objInfo = WorldManagerClient.getObjectInfo(oid)
objName = objInfo.name # objInfo.getProperty("name")
objProps = None
if props.containsKey(objName):
objProps = props[objName]
else:
objProps = HashMap()
objProps[key] = value
props[objName] = objProps
EnginePlugin.setObjectProperty(oid, namespace, key, value)
return props
class SetPropertyProxyExtHook (ProxyExtensionHook):
def processExtensionEvent(self, event, player, proxy):
playerOid = player.getOid()
pWNode = WorldManagerClient.getWorldNode(playerOid)
instanceOid = pWNode.getInstanceOid()
# security check -- check if player is instance owner
isOwner = False
accountId = EnginePlugin.getObjectProperty(playerOid, Namespace.WORLD_MANAGER, "AccountId")
instanceName = InstanceClient.getInstanceInfo(instanceOid, InstanceClient.FLAG_NAME).name
instanceOwnerStr = instanceName[instanceName.index('-')+1:]
instanceOwner = Integer.parseInt(instanceOwnerStr)
if instanceOwner == accountId:
isOwner = True
props = event.getPropertyMap()
roomItemsProps = EnginePlugin.getObjectProperty(instanceOid, Namespace.INSTANCE, "RoomItemsProps")
if 'tv_url' in props.keySet():
oid = props['oid']
url = props['tv_url']
roomItemsProps = setObjectProperty(oid, Namespace.WORLD_MANAGER, "tv_url", url, roomItemsProps)
if 'radio_url' in props.keySet():
oid = props['oid']
url = props['radio_url']
roomItemsProps = setObjectProperty(oid, Namespace.WORLD_MANAGER, "radio_url", url, roomItemsProps)
if 'pic_url' in props.keySet() and isOwner:
oid = props['oid']
url = props['pic_url']
roomItemsProps = setObjectProperty(oid, Namespace.WORLD_MANAGER, "pic_url", url, roomItemsProps)
if 'cd_url' in props.keySet() and isOwner:
oid = props['oid']
url = props['cd_url']
name = props['tooltip']
roomItemsProps = setObjectProperty(oid, Namespace.WORLD_MANAGER, "cd_url", url, roomItemsProps)
roomItemsProps = setObjectProperty(oid, Namespace.WORLD_MANAGER, "tooltip", name, roomItemsProps)
if 'subsurface' in props.keySet() and isOwner:
objOid = props['oid']
subsurfaceName = props['subsurface']
subsurface = props['value']
roomItemsProps = setObjectProperty(objOid, Namespace.WORLD_MANAGER, subsurfaceName, subsurface, roomItemsProps)
roomItemsProps = setObjectProperty(objOid, Namespace.WORLD_MANAGER, 'AppearanceOverride', 'coloredfurniture', roomItemsProps)
######
# if 'hide' in props.keySet():
# for pair in props['hide']:
# roomItemsProps = self.setObjectProperty(pair[0], Namespace.WORLD_MANAGER, 'Hide', Boolean(pair[1]), roomItemsProps)
# if 'style' in props.keySet():
# objOid = props['oid']
# style = props['style']
# roomItemsProps = self.setObjectProperty(objOid, Namespace.WORLD_MANAGER, 'RoomStyle', style, roomItemsProps)
######
EnginePlugin.setObjectProperty(instanceOid, Namespace.INSTANCE, "RoomItemsProps", roomItemsProps)
proxyPlugin.addProxyExtensionHook("mv.SET_PROPERTY", SetPropertyProxyExtHook())
#
# convenience function used solely to determine whether the SELECT
# finds a match - note we append "LIMIT 1" to the passed query, to
# return only a single match
#
# returns True (there was a match), or False (there were no matches)
#
def doesQueryMatch(sql):
result = False
url = "jdbc:mysql://%s/friendworld?user=root&password=test" % webdb_host
con = DriverManager.getConnection(url)
stm = con.createStatement()
sql = "%s LIMIT 1" % sql
res = stm.executeQuery(sql)
if res.next():
result = True
stm.close()
con.close()
return result
#
# convenience function used to perform an INSERT, UPDATE or DELETE
# on the web database
#
# returns number of rows affected by the update
#
def updateDatabase(sql):
result = 0
url = "jdbc:mysql://%s/friendworld?user=root&password=test" % webdb_host
con = DriverManager.getConnection(url)
stm = con.createStatement()
result = stm.executeUpdate(sql)
stm.close()
con.close()
return result
class AddFriendProxyExtHook (ProxyExtensionHook):
def processExtensionEvent(self, event, player, proxy):
Log.debug("[CYC] add friend proxy hook")
playerOid = player.getOid()
pWNode = WorldManagerClient.getWorldNode(playerOid)
instanceOid = pWNode.getInstanceOid()
props = event.getPropertyMap()
friendAccountId = None
if props.containsKey('friend_id'):
friendAccountId = props['friend_id']
friendOid = None
if props.containsKey('friend_oid'):
friendOid = props['friend_oid']
myAccountId = None
if props.containsKey('account_id'):
myAccountId = props['account_id']
Log.debug("[CYC] %s, %s, %s" % (friendAccountId, friendOid, myAccountId))
if friendAccountId is None or friendOid is None or myAccountId is None:
return
#
# so we can provide the player with useful feedback
#
friendName = proxyPlugin.getPlayer(friendOid).name
#
# don't add a friend invite if...
#
# we're already friends
if doesQueryMatch("SELECT friend_id FROM friends WHERE my_id = %d AND friend_id = %d" % (myAccountId, friendAccountId)):
WorldManagerClient.sendObjChatMsg(playerOid, 2, "You're already friends with %s." % friendName)
return
# i've already invited this person to become friends
haveInvited = doesQueryMatch("SELECT to_id, from_id FROM invitations WHERE to_id = %d AND from_id = %d" % (friendAccountId, myAccountId))
if haveInvited:
WorldManagerClient.sendObjChatMsg(playerOid, 2, "You've already sent %s a friend request." % friendName)
return
#
# if this person has previously invited me to become friends,
# treat 'add friend' as a confirmation - add as friend, and
# remove any mutual invitations
#
if doesQueryMatch("SELECT to_id, from_id FROM invitations WHERE to_id = %d AND from_id = %d" % (myAccountId, friendAccountId)):
result = updateDatabase("INSERT INTO friends (my_id, friend_id, timestamp) VALUES (%d, %d, NOW())" % (myAccountId, friendAccountId))
result = updateDatabase("INSERT INTO friends (my_id, friend_id, timestamp) VALUES (%d, %d, NOW())" % (friendAccountId, myAccountId))
result = updateDatabase("DELETE FROM invitations WHERE to_id = %d AND from_id = %d" % (myAccountId, friendAccountId))
if haveInvited:
result = updateDatabase("DELETE FROM invitations WHERE to_id = %d AND from_id = %d" % (friendAccountId, myAccountId))
WorldManagerClient.sendObjChatMsg(playerOid, 2, "You are now friends with %s." % friendName)
return
Log.debug("[CYC] adding friend ... db call")
# Add friend
message = ""
url = "jdbc:mysql://"+webdb_host+"/friendworld?user=root&password=test"
sql = "INSERT INTO invitations (to_id, from_id, message, timestamp) VALUES (%s, %s, '%s', NOW())" % (friendAccountId, myAccountId, message)
con = DriverManager.getConnection(url)
stm = con.createStatement()
res = stm.executeUpdate(sql)
Log.debug("[CYC] add friend insert result = %d" % res)
stm.close()
con.close()
Log.debug("[CYC] sending friend request message")
# Send friend message
WorldManagerClient.sendObjChatMsg(playerOid, 2, "You have sent a friend request to %s." % friendName)
WorldManagerClient.sendObjChatMsg(friendOid, 2, "You have a new friend request from %s." % player.name)
proxyPlugin.addProxyExtensionHook("mvp.ADD_FRIEND", AddFriendProxyExtHook())
class KickPlayerProxyExtHook (ProxyExtensionHook):
def processExtensionEvent(self, event, player, proxy):
playerOid = player.getOid()
# get player's accountId
accountId = EnginePlugin.getObjectProperty(playerOid, Namespace.WORLD_MANAGER, "AccountId")
Log.debug("KickHook: kick request from playerOid=%d, accountId=%d" % (playerOid, accountId))
# get room's ownerId
roomOwnerId = EnginePlugin.getObjectProperty(playerOid, Namespace.WORLD_MANAGER, "roomOwnerId")
# kicking player must own the room or be an admin
adminState = str(getDBProperty(accountId, "Admin"))
if accountId != roomOwnerId and adminState != "True":
WorldManagerClient.sendObjChatMsg(playerOid, 2, "Sorry, you don't have permission to kick that player.")
return
# validate kick target
props = event.getPropertyMap()
kickOid = None
if props.containsKey('oid'):
kickOid = props['oid']
if kickOid is None:
return
# don't let owner be kicked from their own room
kickAccountId = EnginePlugin.getObjectProperty(kickOid, Namespace.WORLD_MANAGER, "AccountId")
if kickAccountId == roomOwnerId:
WorldManagerClient.sendObjChatMsg(playerOid, 2, "Sorry, can't kick a player from their own room.")
return
# bad target, go away!
props = HashMap()
props.put("command", "instance")
props.put("instanceName", "default")
props.put("markerName", "spawnPt")
kickedPlayer = proxyPlugin.getPlayer(kickOid)
Log.debug("KickHook: kicking kickOid=%d (%s)" % (kickOid, kickedPlayer.name))
DynamicInstancing().handleRequest(props, kickedPlayer, proxy)
WorldManagerClient.sendObjChatMsg(playerOid, 2, "%s has been kicked from the room." % kickedPlayer.name)
WorldManagerClient.sendObjChatMsg(kickOid, 2, "You have been kicked from the room.")
proxyPlugin.addProxyExtensionHook("mvp.KICK_FROM_ROOM", KickPlayerProxyExtHook())
proxyPlugin.addProxyExtensionHook("proxy.INSTANCE_ENTRY", InstanceEntryProxyHook())
def getDBProperty(accountId, property):
value = None
try:
url = "jdbc:mysql://"+webdb_host+"/friendworld?user=root&password=test"
sql = "SELECT value FROM profile WHERE account_id = %d AND property = '%s'" % ( accountId, property)
con = DriverManager.getConnection(url)
stm = con.createStatement()
srs = stm.executeQuery(sql)
# _types = {Types.INTEGER:srs.getInt, Types.FLOAT:srs.getFloat}
while (srs.next()):
value = srs.getString(1)
srs.close()
stm.close()
con.close()
except:
Log.debug("getDBProperty(): Exception")
pass
if value is None:
if property == "PhotoURL":
# value = places_url + "images/missing.jpg"
value = places_url + "photos/%08d.jpg" % accountId
else:
value = "Unknown"
Log.debug("getDBProperty(): accountId=%d, property=%s, value=%s" % (accountId, property, value))
return value
#
# Simple test to see if the player is an admin.
#
def isAdmin(accountId):
result = False
state = getDBProperty(accountId, "Admin")
if state == "True":
result = True
return result
def getFriendlist(accountId):
friendList = LinkedList()
try:
url = "jdbc:mysql://"+webdb_host+"/friendworld?user=root&password=test"
sql = "SELECT friend_id FROM friends WHERE my_id = %d LIMIT 12" % accountId
con = DriverManager.getConnection(url)
stm = con.createStatement()
srs = stm.executeQuery(sql)
# _types = {Types.INTEGER:srs.getInt, Types.FLOAT:srs.getFloat}
while (srs.next()):
friend_id = srs.getInt(1)
friendList.add(str(friend_id))
srs.close()
stm.close()
con.close()
except:
friendList.add(1)
# friendList.add(2156)
# friendList.add(7811)
return friendList
def getPlaylist(accountId):
playList = LinkedList()
try:
url = "jdbc:mysql://"+webdb_host+"/friendworld?user=root&password=test"
sql = "SELECT name, URL FROM media WHERE account_id = %d AND media_type=1" % accountId
con = DriverManager.getConnection(url)
stm = con.createStatement()
srs = stm.executeQuery(sql)
# _types = {Types.INTEGER:srs.getInt, Types.FLOAT:srs.getFloat}
while (srs.next()):
name = srs.getString(1)
url = srs.getString(2)
nvpair = LinkedList()
nvpair.add(name)
nvpair.add(url)
playList.add(nvpair)
srs.close()
stm.close()
con.close()
except:
nvpair = LinkedList()
nvpair.add("Slick Rick 1")
nvpair.add("http://www.tradebit.com/usr/scheme05/pub/8/Chamillionaire-feat.-Slick-Rick---Hip-Hop-Police.mp3")
playList.add(nvpair)
return playList
class GetPropertyProxyExtHook (ProxyExtensionHook):
def processExtensionEvent(self, event, player, proxy):
props = event.getPropertyMap()
oid = None
if props.containsKey("oid"):
oid = props["oid"]
else:
oid = player.getOid()
accountId = None
if props.containsKey("account_id"):
accountId = props["account_id"]
else:
accountId = EnginePlugin.getObjectProperty(oid, Namespace.WORLD_MANAGER, "AccountId")
if accountId is None:
accountId = EnginePlugin.getObjectProperty(oid, Namespace.WORLD_MANAGER, "roomOwnerId")
propKey = None
if props.containsKey("property_name"):
propKey = props["property_name"]
else:
propKey = "PhotoURL"
cmd = None
if props.containsKey("cmd"):
cmd = props["cmd"]
if (accountId is not None) and (oid is not None):
if (cmd == "property"):
propValue = getDBProperty(accountId, propKey)
EnginePlugin.setObjectProperty(oid, Namespace.WORLD_MANAGER, propKey, propValue)
if (cmd == "friendlist"):
friend_list = getFriendlist(accountId)
EnginePlugin.setObjectProperty(oid, Namespace.WORLD_MANAGER, "friendlist", friend_list)
if (cmd == "playlist"):
play_list = getPlaylist(accountId)
EnginePlugin.setObjectProperty(oid, Namespace.WORLD_MANAGER, "playlist", play_list)
if (cmd == "roomstyle"):
room_style = EnginePlugin.getObjectProperty(Instance.currentOid(), Namespace.INSTANCE, "RoomStyle")
EnginePlugin.setObjectProperty(oid, Namespace.WORLD_MANAGER, "roomstyle", room_style)
if (cmd == "room_owner_id"):
roomOwnerId = EnginePlugin.getObjectProperty(Instance.currentOid(), Namespace.INSTANCE, "AccountId")
EnginePlugin.setObjectProperty(oid, Namespace.WORLD_MANAGER, "roomOwnerId", roomOwnerId)
proxyPlugin.addProxyExtensionHook("mv.GET_PROPERTY", GetPropertyProxyExtHook())
class UpdateObjectProxyExtHook (ProxyExtensionHook):
def processExtensionEvent(self, event, player, proxy):
props = event.getPropertyMap()
dir = None
if props.containsKey("dir"):
dir = props["dir"]
transition = None
if props.containsKey("transition"):
transition = props["transition"]
idle = None
if props.containsKey("idle"):
idle = props["idle"]
loc_start = None
if props.containsKey("loc_start"):
loc_start = props["loc_start"]
if (transition is not None) and (idle is not None) and (loc_start is not None):
wnode_start = BasicWorldNode()
wnode_start.setLoc(Point(loc_start))
wnode_start.setOrientation(dir)
playerOid = player.getOid()
WorldManagerClient.updateWorldNode(playerOid, wnode_start, True)
AnimationClient.playSingleAnimation(playerOid, transition)
# wnode_end = BasicWorldNode()
# wnode_end.setLoc(Point(loc_end))
# wnode_end.setOrientation(dir)
# WorldManagerClient.updateWorldNode(playerOid, wnode_end, True)
EnginePlugin.setObjectProperty(playerOid, Namespace.WORLD_MANAGER, "sitidle", idle)
EnginePlugin.setObjectProperty(playerOid, Namespace.WORLD_MANAGER, "sitstate", Boolean(True))
if props.containsKey("property"):
oid = None
if props.containsKey("oid"):
oid = props["oid"]
property = props["property"]
value = None
if props.containsKey("value"):
value = props["value"]
if (oid is not None) and (property is not None) and (value is not None):
EnginePlugin.setObjectProperty(oid, Namespace.WORLD_MANAGER, property, value)
proxyPlugin.addProxyExtensionHook("mv.UPDATE_OBJECT", UpdateObjectProxyExtHook())
class PlacesLoginCallback (ProxyLoginCallback):
def preLoad(self, player, conn):
pass
def postLoad(self, player, conn):
#
# setting "isAdmin" on the player object will let us appropriately
# update UI elements on the client where only admins should be able
# to perform an operation - note that this should only be used for
# UI, no to determine permission to perform an operation - admin
# requests should *ALWAYS* be confirmed on the world server
#
playerOid = player.getOid()
accountId = EnginePlugin.getObjectProperty(playerOid, Namespace.WORLD_MANAGER, "AccountId")
EnginePlugin.setObjectProperty(playerOid, Namespace.WORLD_MANAGER, "isAdmin", isAdmin(accountId))
def postSpawn(self, player, conn):
Log.debug("[CYC] postSpawn")
playerOid = player.getOid()
pWNode = WorldManagerClient.getWorldNode(playerOid)
instanceOid = pWNode.getInstanceOid()
iInfo = InstanceClient.getInstanceInfo(instanceOid, InstanceClient.FLAG_NAME)
instanceName = iInfo.name
if instanceName.find("room-") == 0:
setProfilePhotos(instanceOid)
roomOwnerId = EnginePlugin.getObjectProperty(instanceOid, Namespace.INSTANCE, "AccountId")
EnginePlugin.setObjectProperty(playerOid, Namespace.WORLD_MANAGER, "roomOwnerId", roomOwnerId)
proxyPlugin.setProxyLoginCallback(PlacesLoginCallback())
def generateToken(props=None):
expiry = System.currentTimeMillis() + TOKEN_LIFE
if props is None:
tokenSpec = SecureTokenSpec(SecureTokenSpec.TOKEN_TYPE_DOMAIN, AGENT_NAME, expiry)
else:
tokenSpec = SecureTokenSpec(SecureTokenSpec.TOKEN_TYPE_DOMAIN, AGENT_NAME, expiry, props)
token = SecureTokenManager.getInstance().generateToken(tokenSpec)
return token
class GenerateTokenProxyExtHook (ProxyExtensionHook):
def processExtensionEvent(self, event, player, proxy):
playerOid = player.getOid()
eventProps = event.getPropertyMap()
if not 'frameName' in eventProps or not 'jspArgs' in eventProps:
WorldManagerClient.sendObjChatMsg(playerOid, 0, "GTPExtHook request failed: Bad data passed to server.")
return
# get player's accountId
accountId = EnginePlugin.getObjectProperty(playerOid, Namespace.WORLD_MANAGER, "AccountId")
Log.debug("GenerateTokenHook: token requested by playerOid=%d, accountId=%d" % (playerOid, accountId))
props = HashMap()
props.put("accountId", accountId)
token = generateToken(props=props)
token64 = Base64.encodeBytes(token, Base64.URL_SAFE)
Log.debug("GenerateTokenHook: token64 = %s" % token64)
msg = WorldManagerClient.TargetedExtensionMessage("mvp.TOKEN_GENERATED", playerOid)
msgProps = msg.getPropertyMapRef()
# need to send these back to the client
jspArgs = eventProps['jspArgs']
jspArgs = "%s&host=%s&token=%s" % (jspArgs, domainHostName, token64)
msgProps.put("jspArgs", jspArgs)
msgProps.put("frameName", eventProps['frameName'])
Engine.getAgent().sendBroadcast(msg)
proxyPlugin.addProxyExtensionHook("mvp.GENERATE_TOKEN", GenerateTokenProxyExtHook())
| mit | 7,856,342,551,657,046,000 | 43.168569 | 244 | 0.654389 | false |
cwilkes/event_store_meta | tests/test_models.py | 1 | 1689 | # -*- coding: utf-8 -*-
"""Model unit tests."""
import datetime as dt
import pytest
from event_store_meta.user.models import User, Role
from .factories import UserFactory
@pytest.mark.usefixtures('db')
class TestUser:
def test_get_by_id(self):
user = User('foo', '[email protected]')
user.save()
retrieved = User.get_by_id(user.id)
assert retrieved == user
def test_created_at_defaults_to_datetime(self):
user = User(username='foo', email='[email protected]')
user.save()
assert bool(user.created_at)
assert isinstance(user.created_at, dt.datetime)
def test_password_is_nullable(self):
user = User(username='foo', email='[email protected]')
user.save()
assert user.password is None
def test_factory(self, db):
user = UserFactory(password="myprecious")
db.session.commit()
assert bool(user.username)
assert bool(user.email)
assert bool(user.created_at)
assert user.is_admin is False
assert user.active is True
assert user.check_password('myprecious')
def test_check_password(self):
user = User.create(username="foo", email="[email protected]",
password="foobarbaz123")
assert user.check_password('foobarbaz123') is True
assert user.check_password("barfoobaz") is False
def test_full_name(self):
user = UserFactory(first_name="Foo", last_name="Bar")
assert user.full_name == "Foo Bar"
def test_roles(self):
role = Role(name='admin')
role.save()
u = UserFactory()
u.roles.append(role)
u.save()
assert role in u.roles
| bsd-3-clause | 3,872,727,385,110,969,300 | 28.631579 | 63 | 0.612789 | false |
MG-group-tools/MGFunc | mgfunc_v2/cluster2fasta.py | 1 | 15574 | #!/usr/bin/env python2.7
import sys
import os
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 29 13:13:45 2013
CLASS-VERSION
@author: Kosai
"""
import cPickle as pickle
from datetime import datetime as dt
import time
import argparse
import gzip
class main:
'''
Class version of the cluster2fasta program
'''
def __init__(self):
self.start = time.time()
self.d_ = dt.today()
self.timestarted = self.d_.strftime("%d-%m-%Y %H:%M:%S")
self._D = {}
self.parseArgs()
def parseArgs(self):
parser = argparse.ArgumentParser(prog="cluster2fasta.py", usage="cluster2fasta.py -c mycluster.txt -o mycluster.output -num [-ui uniprot.index\/uniprot.index.p -uf uniprot.fasta] [-ki SAMPLE.index\/SAMPLE.index.p -kf SAMPLE.fasta]", epilog="Written by Kosai+Asli, oct 2013. Last modified apr 2014.")
parser.add_argument("-ui",metavar="uniprot_index_file",help="Uniprot index file",nargs="*")
parser.add_argument("-uf",metavar="uniprot_fasta",help="Fasta-file for all uniprot (from swiss2fasta)",nargs="*")
parser.add_argument("-ki",metavar="sample_index_file",help="Genecatalog index file",nargs=1)
parser.add_argument("-kf",metavar="sample_fasta",help="Fasta-file for all genecatalog sequences",nargs=1)
parser.add_argument("-sfi",metavar="sample_list",help="A list of genecatalog index files and fasta files",nargs=1)
#parser.add_argument("-sfl",metavar="sample_fasta_list",help="Fasta-files list for all genecatalog sequences",nargs=1)
parser.add_argument("-c",metavar="Cluster-name",help="Cluster-file",nargs=1,required=True)
parser.add_argument("-o",metavar="Output",help="Output name",nargs=1)
parser.add_argument("-num",help="Adds 2 coloumns to a new file, with cluster_id\'s, number of sample-genes and number of uniprot ID\'s",action="store_true")
parser.add_argument("-v",help="Verbose. Prints out progress and details to stdout output. Write \"-v\" with no arguments in commandline. Default is off.",action="store_true")
#return parser.parse_args("-o testcluster.argsO_tester".split()), parser #testing on windows
#return parser.parse_args("".split()), parser #testing on window
# self.args = parser.parse_args()
self.parser = parser
def fileIn(self,infile):
if infile[-3:] == ".gz":
return gzip.open(infile,"r")
else:
return open(infile,"r")
def fileOut(self,outfile):
return open(outfile, "w")
def fileClose(self,cfile):
cfile.close()
'''
def dictMaker(i,D_ID): #Create dictionary from index-text file
D = {}
if i[0].split(".")[-1] == "index":
indexline = ""
for el in D_ID:
indexline = el.rstrip().split("\t")
D[indexline[0]] = [indexline[1],indexline[2]]
self.printer("\n\nDICTIONARY DONE!!!!\n\n")
return D
else:
return pickle.load(D_ID)
'''
def dictMaker(self,i,D_ID, j): #Create dictionary from index-text file
if i.split(".")[-1] == "indexed":
indexline = ""
for el in D_ID:
indexline = el.rstrip().split("\t")
self._D[indexline[0]] = [indexline[1],indexline[2], j]
self.printer("\nDictionary done, time used (so far): "+str(round((time.time() - self.start) / 60,3))+" min\n")
return 1
# else:
# print "Check index file names. :" + i
# self._D = pickle.load(D_ID)
# self.printer("\nDictionary done, time used (so far): "+str(round((time.time() - self.start) / 60,3))+" min\n")
# return 1
def missingGeneLog(self,genecat,uniprot):
log = self.fileOut(self.args.o[0] + ".missingGenes.log")
for el in genecat:
log.write(el[0]+"\t"+el[1]+"\n")
for el in uniprot:
log.write(el[0]+"\t"+el[1]+"\n")
self.fileClose(log)
def seqExtracter3(self,ID,myD,uni): #Dictionary look-up, one big dictionary
if ID in myD:
start = int(myD[ID][0])
stop = int(myD[ID][1])
if uni == 1:
self.uniprotFasta.seek(start)
seq = self.uniprotFasta.read(stop-start)
seq = "".join(seq.split("\n"))
return seq,1
else:
fasta = self.fileIn(self._F[int(myD[ID][2])][1])
fasta.seek(start)
seq = fasta.read(stop-start)
seq = "".join(seq.split("\n"))
self.fileClose(fasta)
return seq,1
else:
return "",0
def seqExtracter(self,ID,myD,fasta,uni): #Dictionary look-up, one big dictionary
if ID in myD:
start = int(myD[ID][0])
stop = int(myD[ID][1])
fasta.seek(start)
seq = fasta.read(stop-start)
seq = "".join(seq.split("\n"))
return seq,1
else:
return "",0
def seqExtracter2(self,ID,myD,fasta): #Dictionary look-up, each key is first gene letter
start = int(myD[ID[0]][ID][0])
stop = int(myD[ID[0]][ID][1])
fasta.seek(start)
seq = fasta.read(stop-start)
seq = "".join(seq.split("\n"))
return seq
def genecat_list(self):
clusterID =self.fileIn(self.args.c[0])
output = self.fileOut(self.args.o[0]+".genecatalog.fasta")
self._F = {}
infiles=0
for line in file(self.args.sfi[0]):
index = line.split("\t")[0]
fasta = line.split("\t")[1].strip("\n")
self._F[infiles] = [index,fasta]
genecatID = self.fileIn(index)
a = self.dictMaker(index,genecatID,infiles) #takes time
if a ==1 : self.printer("DictMaker worked for " + index)
else: self.printer("DictMaker did not work, check index files " + index)
self.fileClose(genecatID)
infiles+=1
suc = 0
missing = []
seq = ""
for line in clusterID:
L = line.rstrip().split("\t")
C = str(L[0]) #clusterID
L2 = L[2].split(",")
for el in L2:
seq,suc = self.seqExtracter3(el,self._D,0)
if suc == 1:
output.write(">"+C+":"+el+"\n"+seq+"\n")
else:
missing.append([el,C])
#print self._D
self._D = {}
self.fileClose(output)
self.fileClose(clusterID)
return missing
def genecat(self,args,parser):
clusterID =self.fileIn(args.c[0])
genecatID = self.fileIn(args.ki[0])
genecatFasta = self.fileIn(args.kf[0])
output = self.fileOut(args.o[0]+".genecatalog.fasta")
a = self.dictMaker(args.ki[0],genecatID,0) #takes time
if a ==1 : self.printer("DictMaker worked for " + args.ki[0])
else: self.printer("DictMaker did not work, check index files " + args.ki[0])
self.fileClose(genecatID)
GenecatalogD = {}
cGenecatalog = 1
suc = 0
missing = []
seq = ""
for line in clusterID:
L = line.rstrip().split("\t")
C = str(L[0]) #clusterID
L2 = L[2].split(",")
for el in L2:
seq,suc = self.seqExtracter(el,self._D,genecatFasta,0)
if suc == 1:
if el not in GenecatalogD:
GenecatalogD[el] = el[0]+str(cGenecatalog)
cGenecatalog += 1
#output.write(">"+C+"_"+GenecatalogD[el]+"\n"+seq+"\n")
output.write(">"+C+":"+el+"\n"+seq+"\n")
else:
missing.append([el,C])
#print self._D
self._D = {}
# GenecatalogIDconversion(GenecatalogD)
self.fileClose(output)
self.fileClose(genecatFasta)
self.fileClose(clusterID)
return missing
def uniprot(self,args,parser):
clusterID = self.fileIn(args.c[0])
uniprotID = self.fileIn(args.ui[0])
self.uniprotFasta = self.fileIn(args.uf[0])
ctotfile = os.popen("wc -l "+args.c[0])
ctot = ctotfile.read()
ctotfile.close()
ctot = int(ctot.split(" ")[0])
rangelist = range(0,ctot,1)
output = self.fileOut(args.o[0]+".uniprotids.fasta")
D = self.dictMaker(args.ui[0],uniprotID,0) #takes time
if D ==1 : self.printer("DictMaker worked for " + args.ui[0])
else: self.printer("DictMaker did not work, check index files " + args.ui[0])
self.fileClose(uniprotID)
seq = ""
missing = []
suc = 1
c = 0
for line in clusterID:
c+=1
L = line.rstrip().split("\t")
C = str(L[0]) #clusterID
if L[1] == "N":
continue
L2 = L[3].split(",")
for el in L2:
el = el.split("|")[2]
seq,suc = self.seqExtracter3(el,self._D,1)
if suc == 1:
output.write(">"+C+":"+el+"\n"+seq+"\n")
else:
missing.append([el,C])
#if c in rangelist:
#self.printer("FINISHED "+str(c)+" ENTRIES out of "+str(ctot))
del D
self.fileClose(output)
self.fileClose(self.uniprotFasta)
self.fileClose(clusterID)
return missing
def GenecatalogIDconversion(self,D):
self.printer("\nPrinting GeneConversionTable....")
fout = self.fileOut("GeneConversionTable.txt")
for key in D:
fout.write(key+"\t"+D[key]+"\n")
fout.close()
self.printer("DONE!\n")
def numberCounter(self,args,parser):
clusterID = self.fileIn(args.c[0])
if self.args.o:
output = self.fileOut(args.o[0]+".genenumbers")
else:
output = self.fileOut(args.c[0]+".genenumbers")
t = "\t"
n = "\n"
for line in clusterID:
L = line.split("\t")
output.write(L[0]+t+str(len(L[1].split(",")))+t+str(len(set(L[2].split(","))))+n)
self.fileClose(clusterID)
self.fileClose(output)
    def printer(self,string): #only prints output if -v (verbose) is on
# if not self.args.quiet:
if self.args.v:
print string,
def read_columns(self, i, csv_file):
item=""
with open(csv_file, 'r') as csvfile:
for line in csvfile.readlines():
array = line.strip("\n").split('\t')
item = item + "\n" + array[i]
return item
def mainthing(self):
# self.printer("\n***cluster2fasta.py initialized at "\
# + self.d_.strftime("%H:%M %d/%m-%Y") + "***\n")
# self.printer("Arguments:\n")
# self.parseArgs()
no = 1
missing1 = []
missing2 = []
if bool(self.args.ki)^bool(self.args.kf):
self.printer("***ERROR!*** Only one of -ki and -kf was provided!\n")
elif bool(self.args.ui)^bool(self.args.uf):
self.printer("***ERROR!*** Only one of -ui and -uf was provided!\n")
elif not self.args.c:
self.printer("***ERROR!*** No cluster-files(s) provided!\n")
elif (self.args.ki or self.args.ui) and not self.args.o:
self.printer("***ERROR!*** No output-name provided!\n")
else:
if self.args.ki and self.args.kf and self.args.c and self.args.o:
self.printer("\tCluster-file: "+self.args.c[0] +"\n\tGenecatalog-index file: "+self.args.ki[0]+"\n\tGenecatalog fasta-file: "+self.args.kf[0]+"\n\tOutput file-name: "+self.args.o[0]+".genecatgenes.fasta\n")
no = 0
missing1 = self.genecat(self.args,self.parser)
self.printer("\nGenecatalog Genes Done! Time (so far): "+str(round((time.time() - self.start) / 60,3))+" min\n")
if self.args.sfi and self.args.c and self.args.o:
self.printer("\tCluster-file: \n\t\t"+self.args.c[0] +"\n\tGenecatalog-index files: \n\t\t"+self.read_columns(0, self.args.sfi[0])+"\n\tGenecatalog fasta-files: \n\t\t"+self.read_columns(1, self.args.sfi[0])+"\n\tOutput file-name: \n\t\t"+ self.args.o[0]+".genecatgenes.fasta.gz\n")
no = 0
missing1 = self.genecat_list()
self.printer("\nGenecatalog Genes Done! Time (so far): "+str(round((time.time() - self.start) / 60,3))+" min\n")
if self.args.ui and self.args.uf and self.args.c and self.args.o:
self.printer("\tCluster-file: "+self.args.c[0] +"\n\tUniprot-index file: "+self.args.ui[0]+"\n\tUniprot fasta-file: "+self.args.uf[0]+"\n\tOutput file-name: "+self.args.o[0]+".uniprotids.fasta\n")
no = 0
missing2 = self.uniprot(self.args,self.parser)
self.printer("\nUniprot ID\'s Done! Time (so far): "+str(round((time.time() - self.start) / 60,3))+" min\n")
if self.args.num and self.args.c:
if not self.args.o:
self.printer("\tCluster-file: "+self.args.c[0] +"\n\tOutput file-name: "+self.args.c[0][:-4]+".genenumbers\n")
else:
self.printer("\tCluster-file: "+self.args.c[0] +"\n\tOutput file-name: "+self.args.o[0]+".genenumbers\n")
no = 0
self.numberCounter(self.args,self.parser)
self.printer("\nNumber Calculations Done! Time (so far): "+str(round((time.time() - self.start) / 60,3))+" min\n")
if no == 1:
self.printer("none!\n")
self.missingGeneLog(missing1,missing2)
timeused = (time.time() - self.start) / 60
self.printer("Time used: "+str(round(timeused*60))\
+ " seconds ("+str(round(timeused)) + " min)\n")
def test(self,num):
self.printer("test")
'''
if __name__ == "__main__":
myclass = main
myclass.mainthing
myclass.test(2)
self.printer("yoyoooyooyoo")
'''
if __name__ == "__main__":
try:
myclass = main()
myclass.args = myclass.parser.parse_args(sys.argv[1:])
myclass.printer("\n### "+sys.argv[0]+" initialized at "+ myclass.timestarted + "\n")
myclass.printer("### OPTIONS: "+str(myclass.args)+"\n")
myclass.mainthing()
#except IOError as i:
# print "I/O error({0}): {1}".format(i.errno, i.strerror)
except Exception,e:
print str(e)
import traceback
traceback.print_exc()
##############################
'''
INPUT:
The User inputs an index-file and a fasta-file.
The index file indexes each entry in the fasta file. In the case of -ui and -uf,
-ui would a pickle-file which contains the start and end for the sequences in each
entry of the uniprot file (-uf).
if -num is toggled, the script will not create a fasta-output, but instead
show the number of genecat-genes (sample-genes) and uniprot ID's in each cluster.
OUTPUT:
The output is a fasta file containing the sequences of each uniprot/genecat-gene in the input
from the clusters.
OPTIONS LIST:
"-ui" "uniprot_index_file": Uniprot index file containing
"-uf" "uniprot_fasta": Fasta-file for all uniprot (from swiss2fasta)
"-ki" "sample_index_file": Sample index file
"-kf" "sample_fasta": Fasta-file for all sample sequences
"-c" "Cluster-name": Cluster-file
"-o" "Output fasta file": Output name
"-num": Adds 2 coloumns to a new file, with cluster_id's, number of sample-genes and number of uniprot ID's
'''
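# Illustrative invocation (file names below are hypothetical, not taken from the source):
#   cluster2fasta.py -c mycluster.txt -o myoutput -v \
#       -ki SAMPLE.indexed -kf SAMPLE.fasta -ui uniprot.indexed -uf uniprot.fasta
# which would write myoutput.genecatalog.fasta, myoutput.uniprotids.fasta and
# myoutput.missingGenes.log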
| gpl-3.0 | 5,104,992,305,668,626,000 | 39.557292 | 307 | 0.553679 | false |
GoogleCloudPlatform/professional-services | examples/bq-email-exports/export_query_results_function/main.py | 1 | 2698 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Cloud Function for exporting BigQuery results from an anonymous table to GCS.
Triggered after BigQuery query is complete.
"""
import base64
import json
import logging
import os
import google.api_core.client_info
from google.cloud import bigquery
CLIENT_INFO = google.api_core.client_info.ClientInfo(
user_agent="google-pso-example/bq-email-exports")
def main(event, context):
"""Entrypoint for Cloud Function"""
data = base64.b64decode(event['data'])
upstream_bq_dts_obj = json.loads(data)
error = upstream_bq_dts_obj.get('errorStatus')
if error:
logging.error(
RuntimeError(f"Error in upstream query job: {error['message']}."))
else:
project_id = get_env('PROJECT_ID')
dataset_id = upstream_bq_dts_obj['destinationDatasetId']
table_name = upstream_bq_dts_obj['params'][
'destination_table_name_template']
schedule_time = upstream_bq_dts_obj['scheduleTime']
bq_client = bigquery.Client(client_info=CLIENT_INFO)
dataset_ref = bigquery.DatasetReference.from_string(
dataset_id, default_project=project_id)
table_ref = dataset_ref.table(table_name)
destination_uri = get_destination_uri(schedule_time)
extract_config = bigquery.ExtractJobConfig(
compression=get_env('COMPRESSION'),
destination_format=get_env('DEST_FMT'),
            field_delimiter=get_env('FIELD_DELIMITER'),
use_avro_logical_types=get_env('USE_AVRO_TYPES'))
bq_client.extract_table(table_ref,
destination_uri,
job_id_prefix="email_export_",
job_config=extract_config)
print(
f"Exporting {project_id}:{dataset_id}.{table_name} to {destination_uri}"
)
def get_destination_uri(schedule_time):
"""Returns destination GCS URI for export"""
return (f"gs://{get_env('BUCKET_NAME')}/"
f"{schedule_time}/{get_env('OBJECT_NAME')}")
def get_env(name):
"""Returns environment variable"""
return os.environ[name]
| apache-2.0 | -1,615,927,224,140,515,800 | 34.973333 | 84 | 0.659748 | false |
hustodemon/spacewalk | backend/server/rhnServer/server_hardware.py | 1 | 34703 | #
# Copyright (c) 2008--2014 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
#
# This file contains all the logic necessary to manipulate Hardware
# items - load, reload, instanciate and save
#
import string
import sys
from rhn.UserDictCase import UserDictCase
from spacewalk.common.rhnLog import log_debug, log_error
from spacewalk.common.rhnException import rhnFault
from spacewalk.common.rhnTB import Traceback
from spacewalk.server import rhnSQL
def kudzu_mapping(dict=None):
""" this is a class we use to get the mapping for a kudzu entry """
# This is the generic mapping we need
mapping = {
'desc': 'description',
}
# error handling if we get passed weird stuff.
if not dict:
return mapping
if not type(dict) == type({}) and not isinstance(dict, UserDictCase):
return mapping
hw_bus = dict.get("bus")
# we need to have a bus type to be able to continue
if not hw_bus:
return mapping
hw_bus = string.lower(hw_bus)
extra = {}
if hw_bus == "ddc":
extra = {
"id": None,
"horizsyncmin": "prop1",
"horizsyncmax": "prop2",
"vertrefreshmin": "prop3",
"vertrefreshmax": "prop4",
"modes": None,
"mem": None,
}
elif hw_bus == "ide":
extra = {
"physical": "prop1",
"logical": "prop2",
}
elif hw_bus in ["isapnp", "isa"]:
extra = {
"pdeviceid": "prop1",
"deviceid": "prop2",
"compat": "prop3",
"native": None,
"active": None,
"cardnum": None, # XXX: fix me
"logdev": "prop4",
"io": "prop2",
"irq": "prop1",
"dma": "prop3",
"mem": "prop4",
}
elif hw_bus == "keyboard":
extra = {}
elif hw_bus == "psaux":
extra = {}
elif hw_bus == "parallel":
extra = {
'pnpmfr': 'prop1',
'pnpdesc': 'prop2',
'pnpmodel': 'prop3',
'pnpmodes': 'prop4',
'pinfo': None,
'pinfo.xres': None,
'pinfo.yres': None,
'pinfo.color': None,
'pinfo.ascii': None,
}
elif hw_bus == "pci":
extra = {
'vendorid': 'prop1',
'deviceid': 'prop2',
'subvendorid': 'prop3',
'subdeviceid': 'prop4',
'network.hwaddr': None,
'pcibus': None,
'pcidev': None,
'pcifn': None,
'pcidom': None,
}
elif hw_bus == "sbus":
extra = {
"monitor": "prop1",
"width": "prop2",
"height": "prop3",
"freq": "prop4",
}
elif hw_bus == "scsi":
extra = {
'host': 'prop1',
'id': 'prop2',
'channel': 'prop3',
'lun': 'prop4',
'generic': None,
}
elif hw_bus == "serial":
extra = {
'pnpmfr': 'prop1',
'pnpdesc': 'prop2',
'pnpmodel': 'prop3',
'pnpcompat': "prop4",
}
elif hw_bus == "usb":
extra = {
"vendorid": "prop1",
"deviceid": "prop2",
"usbclass": "prop3",
"usbbus": "prop4",
"usblevel": "pciType",
"usbdev": None,
"usbprod": None,
"usbsubclass": None,
"usbprotocol": None,
"usbport": None,
"usbmfr": None,
"productname": None,
"productrevision": None,
'network.hwaddr': None,
}
elif hw_bus == "firewire":
extra = {
'vendorid': 'prop1',
'deviceid': 'prop2',
'subvendorid': 'prop3',
'subdeviceid': 'prop4',
}
elif hw_bus == 'pcmcia':
extra = {
'vendorid': 'prop1',
'deviceid': 'prop2',
'function': 'prop3',
'slot': 'prop4',
'network.hwaddr': None,
}
mapping.update(extra)
return mapping
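# Illustrative (not part of the original module): for a PCI entry the generic
# {'desc': 'description'} mapping is extended with the PCI-specific keys, e.g.
#   kudzu_mapping({'bus': 'pci'})['vendorid'] == 'prop1'
#   kudzu_mapping({'bus': 'pci'})['deviceid'] == 'prop2'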
def cleanse_ip_addr(ip_addr):
""" Cleans up things like 127.00.00.01 """
if ip_addr is None:
return None
# Make sure it's a string
ip_addr = str(ip_addr)
    # If the ipaddr is empty, just return an empty string
if not len(ip_addr):
return ''
arr = ip_addr.split('.')
# lstrip will remove all leading zeros; if multiple zeros are present, it
# would remove too much, hence the or '0' here.
return '.'.join([x.lstrip('0') or '0' for x in arr])
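# Illustrative examples of the cleanup above (not in the original source):
#   cleanse_ip_addr('127.00.00.01') -> '127.0.0.1'
#   cleanse_ip_addr('010.000.1.1')  -> '10.0.1.1'
#   cleanse_ip_addr('')             -> ''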
class GenericDevice:
""" A generic device class """
table = "override-GenericDevice"
def __init__(self):
self.id = 0
self.status = 1 # just added
self.data = {}
# default to the hardware seq...
self.sequence = "rhn_hw_dev_id_seq"
self._autonull = ("description", "board")
def getid(self):
if self.id == 0:
self.id = rhnSQL.Sequence(self.sequence)()
return self.id
def must_save(self):
if self.id == 0 and self.status == 2: # deleted new item
return 0
if self.status == 0: # original item, unchanged
return 0
return 1
def save(self, sysid):
""" save data in the rhnDevice table """
log_debug(4, self.table, self.status, self.data)
if not self.must_save():
return 0
t = rhnSQL.Table(self.table, "id")
# check if we have to delete
if self.status == 2 and self.id:
# delete the entry
del t[self.id]
return 0
# set description to null if empty
self._null_columns([self.data], self._autonull)
# make sure we have a device id
devid = self.getid()
for k in self.data.keys():
if self.data[k] is None:
del self.data[k]
self.data["server_id"] = sysid
t[devid] = self.data
self.status = 0 # now it is saved
return 0
def reload(self, devid):
""" reload from rhnDevice table based on devid """
if not devid:
return -1
t = rhnSQL.Table(self.table, "id")
self.data = t[devid]
# clean up fields we don't want
if self.data:
for k in ["created", "modified"]:
if self.data.has_key(k):
del self.data[k]
self.id = devid
self.status = 0
return 0
def _null_columns(self, params, names=()):
""" Method searches for empty string in params dict with names
defined in names list and replaces them with None value which
is translated to NULL in SQL.
We do not allow empty strings in database for compatibility
reasons between Oracle and PostgreSQL.
"""
# list of dicts
for param in params:
for name in names:
if name in param and param[name] == '':
param[name] = None
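# Illustrative example (not in the original source): given
#   params = [{'description': '', 'board': 'ASUS'}]
# a call to _null_columns(params, ('description', 'board')) leaves
#   params == [{'description': None, 'board': 'ASUS'}]
# so empty strings are stored as NULL instead of '' in the database.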
class Device(GenericDevice):
""" This is the base Device class that supports instantiation from a
dictionarry. the __init__ takes the dictionary as its argument,
together with a list of valid fields to recognize and with a mapping
for dictionary keys into valid field names for self.data
The fields are required to know what fields we have in the
table. The mapping allows transformation from whatever comes in to
valid fields in the table Looks complicated but it isn't -- gafton
"""
def __init__(self, fields, dict=None, mapping=None):
GenericDevice.__init__(self)
x = {}
for k in fields:
x[k] = None
self.data = UserDictCase(x)
if not dict:
return
# make sure we get a UserDictCase to work with
if type(dict) == type({}):
dict = UserDictCase(dict)
if mapping is None or type(mapping) == type({}):
mapping = UserDictCase(mapping)
if not isinstance(dict, UserDictCase) or \
not isinstance(mapping, UserDictCase):
log_error("Argument passed is not a dictionary", dict, mapping)
raise TypeError("Argument passed is not a dictionary",
dict, mapping)
# make sure we have a platform
for k in dict.keys():
if dict[k] == '':
dict[k] = None
if self.data.has_key(k):
self.data[k] = dict[k]
continue
if mapping.has_key(k):
# the mapping dict might tell us to lose some fields
if mapping[k] is not None:
self.data[mapping[k]] = dict[k]
else:
log_error("Unknown HW key =`%s'" % k,
dict.dict(), mapping.dict())
# The try-except is added just so that we can send e-mails
try:
raise KeyError("Don't know how to parse key `%s''" % k,
dict.dict())
except:
Traceback(mail=1)
# Ignore this key
continue
# clean up this data
try:
for k in self.data.keys():
if type(self.data[k]) == type("") and len(self.data[k]):
self.data[k] = string.strip(self.data[k])
if not len(self.data[k]):
continue
if self.data[k][0] == '"' and self.data[k][-1] == '"':
self.data[k] = self.data[k][1:-1]
except IndexError:
raise IndexError, "Can not process data = %s, key = %s" % (
repr(self.data), k), sys.exc_info()[2]
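# Illustrative sketch (not part of the original file): a subclass only needs to
# supply the field list and a mapping, e.g.
#   fields = ['vendor', 'model']
#   mapping = {'type': 'vendor', 'desc': None}   # keys mapped to None are dropped
#   dev = Device(fields, {'type': 'Intel', 'model': 'X99', 'desc': 'ignored'}, mapping)
#   dev.data['vendor'] == 'Intel' and dev.data['model'] == 'X99'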
class HardwareDevice(Device):
""" A more specific device based on the Device class """
table = "rhnDevice"
def __init__(self, dict=None):
fields = ['class', 'bus', 'device', 'driver', 'detached',
'description', 'pcitype', 'prop1', 'prop2',
'prop3', 'prop4']
# get a processed mapping
mapping = kudzu_mapping(dict)
# ... and do little to no work
Device.__init__(self, fields, dict, mapping)
# use the hardware id sequencer
self.sequence = "rhn_hw_dev_id_seq"
class CPUDevice(Device):
""" A class for handling CPU - mirrors the rhnCPU structure """
table = "rhnCPU"
def __init__(self, dict=None):
fields = ['cpu_arch_id', 'architecture', 'bogomips', 'cache',
'family', 'mhz', 'stepping', 'flags', 'model',
'version', 'vendor', 'nrcpu', 'acpiVersion',
'apic', 'apmVersion', 'chipset', 'nrsocket']
mapping = {
"bogomips": "bogomips",
"cache": "cache",
"model": "model",
"platform": "architecture",
"type": "vendor",
"model_rev": "stepping",
"model_number": "family",
"model_ver": "version",
"model_version": "version",
"speed": "mhz",
"count": "nrcpu",
"socket_count": "nrsocket",
"other": "flags",
"desc": None,
'class': None,
}
# now instantiate this class
Device.__init__(self, fields, dict, mapping)
self.sequence = "rhn_cpu_id_seq"
if not dict:
return
if self.data.get("cpu_arch_id") is not None:
return # all fine, we have the arch
# if we don't have an architecture, guess it
if not self.data.has_key("architecture"):
log_error("hash does not have a platform member: %s" % dict)
raise AttributeError, "Expected a hash value for member `platform'"
# now extract the arch field, which has to come out of rhnCpuArch
arch = self.data["architecture"]
row = rhnSQL.Table("rhnCpuArch", "label")[arch]
if row is None or not row.has_key("id"):
log_error("Can not find arch %s in rhnCpuArch" % arch)
raise AttributeError, "Invalid architecture for CPU: `%s'" % arch
self.data["cpu_arch_id"] = row["id"]
del self.data["architecture"]
if self.data.has_key("nrcpu"): # make sure this is a number
try:
self.data["nrcpu"] = int(self.data["nrcpu"])
except:
self.data["nrcpu"] = 1
if self.data["nrcpu"] == 0:
self.data["nrcpu"] = 1
class NetworkInformation(Device):
""" This is a wrapper class for the Network Information (rhnServerNetwork) """
table = "rhnServerNetwork"
def __init__(self, dict=None):
fields = ["hostname", "ipaddr", "ip6addr"]
mapping = {'class': None}
Device.__init__(self, fields, dict, mapping)
self._autonull = ('ipaddr', 'ip6addr')
# use our own sequence
self.sequence = "rhn_server_net_id_seq"
# bugzilla: 129840 kudzu (rhpl) will sometimes pad octets
# with leading zeros, causing confusion; clean those up
self.data['ipaddr'] = cleanse_ip_addr(self.data['ipaddr'])
class NetIfaceInformation(Device):
key_mapping = {
'hwaddr': 'hw_addr',
'module': 'module',
}
def __init__(self, dict=None):
log_debug(4, dict)
self.ifaces = {}
self.db_ifaces = []
# parameters which are not allowed to be empty and set to NULL
self._autonull = ('hw_addr', 'module')
if not dict:
return
for name, info in dict.items():
if name == 'class':
# Ignore it
continue
if not isinstance(info, type({})):
raise rhnFault(53, "Unexpected format for interface %s" %
name)
vdict = {}
for key, mapping in self.key_mapping.items():
# Look at the mapping first; if not found, look for the key
if info.has_key(mapping):
k = mapping
else:
k = key
if not info.has_key(k):
raise rhnFault(53, "Unable to find required field %s"
% key)
val = info[k]
vdict[mapping] = val
if 'ipaddr' in info and info['ipaddr']:
vdict['ipv4'] = NetIfaceAddress4(
[{'ipaddr': info['ipaddr'], 'broadcast': info['broadcast'], 'netmask': info['netmask']}])
if 'ipv6' in info and info['ipv6']:
vdict['ipv6'] = NetIfaceAddress6(info["ipv6"])
self.ifaces[name] = vdict
def __str__(self):
return "<%s Class at %d: %s>\n" % (
self.__class__.__name__,
id(self), {
"self.ifaces": self.ifaces,
"self.db_ifaces": self.db_ifaces,
})
__repr__ = __str__
def save(self, server_id):
log_debug(4, self.ifaces)
self.reload(server_id)
log_debug(4, "Interfaces in DB", self.db_ifaces)
# Compute updates, deletes and inserts
inserts = []
updates = []
deletes = []
ifaces = self.ifaces.copy()
for iface in self.db_ifaces:
name = iface['name']
if not self.ifaces.has_key(name):
# To be deleted
deletes.append({'server_id': server_id, 'name': name})
continue
uploaded_iface = ifaces[name].copy()
del ifaces[name]
if _hash_eq(uploaded_iface, iface):
# Same value
continue
uploaded_iface.update({'name': name, 'server_id': server_id})
if 'ipv4' in uploaded_iface:
del(uploaded_iface['ipv4'])
if 'ipv6' in uploaded_iface:
del(uploaded_iface['ipv6'])
updates.append(uploaded_iface)
# Everything else in self.ifaces has to be inserted
for name, info in ifaces.items():
iface = {}
iface['name'] = name
iface['server_id'] = server_id
iface['hw_addr'] = info['hw_addr']
iface['module'] = info['module']
inserts.append(iface)
log_debug(4, "Deletes", deletes)
log_debug(4, "Updates", updates)
log_debug(4, "Inserts", inserts)
self._update(updates)
self._insert(inserts)
ifaces = self.ifaces.copy()
for name, info in ifaces.items():
if not 'ipv6' in info:
info['ipv6'] = NetIfaceAddress6()
info['ipv6'].save(self.get_server_id(server_id, name))
if not 'ipv4' in info:
info['ipv4'] = NetIfaceAddress4()
info['ipv4'].save(self.get_server_id(server_id, name))
# delete address (if any) of deleted interaces
for d in deletes:
interface = NetIfaceAddress6()
interface.save(self.get_server_id(server_id, d['name']))
interface = NetIfaceAddress4()
interface.save(self.get_server_id(server_id, d['name']))
self._delete(deletes)
return 0
def get_server_id(self, server_id, name):
""" retrieve id for given server_id and name """
h = rhnSQL.prepare("select id from rhnServerNetInterface where server_id=:server_id and name=:name")
h.execute(server_id=server_id, name=name)
row = h.fetchone_dict()
if row:
return row['id']
else:
return None
def _insert(self, params):
q = """insert into rhnServerNetInterface
(%s) values (%s)"""
self._null_columns(params, self._autonull)
columns = self.key_mapping.values() + ['server_id', 'name']
columns.sort()
bind_params = string.join(map(lambda x: ':' + x, columns), ", ")
h = rhnSQL.prepare(q % (string.join(columns, ", "), bind_params))
return _dml(h, params)
def _delete(self, params):
q = """delete from rhnServerNetInterface
where %s"""
columns = ['server_id', 'name']
wheres = map(lambda x: '%s = :%s' % (x, x), columns)
h = rhnSQL.prepare(q % string.join(wheres, " and "))
return _dml(h, params)
def _update(self, params):
q = """update rhnServerNetInterface
set %s
where %s"""
self._null_columns(params, self._autonull)
wheres = ['server_id', 'name']
wheres = map(lambda x: '%s = :%s' % (x, x), wheres)
wheres = string.join(wheres, " and ")
updates = self.key_mapping.values()
updates.sort()
updates = map(lambda x: '%s = :%s' % (x, x), updates)
updates = string.join(updates, ", ")
h = rhnSQL.prepare(q % (updates, wheres))
return _dml(h, params)
def reload(self, server_id):
h = rhnSQL.prepare("""
select *
from rhnServerNetInterface
where server_id = :server_id
""")
h.execute(server_id=server_id)
self.db_ifaces = []
while 1:
row = h.fetchone_dict()
if not row:
break
hval = {'primary_id': row['id'], 'name': row['name'], 'server_id': server_id}
for key in self.key_mapping.values():
hval[key] = row[key]
hval['ipv4'] = NetIfaceAddress4()
hval['ipv4'].reload(hval['primary_id'])
hval['ipv6'] = NetIfaceAddress6()
hval['ipv6'].reload(hval['primary_id'])
self.db_ifaces.append(hval)
self.status = 0
return 0
class NetIfaceAddress(Device):
key_mapping = {
'netmask': 'netmask',
'address': 'address',
}
unique = ['address'] # to be overriden by child
table = 'rhnServerNetAddress' # to be overriden by child
def __init__(self, list_ifaces=None):
log_debug(4, list_ifaces)
self.ifaces = {}
self.db_ifaces = []
# parameters which are not allowed to be empty and set to NULL
self._autonull = ('address', 'netmask')
self.sequence = "rhn_srv_net_iface_id_seq"
if not list_ifaces:
return
for info in list_ifaces:
if not isinstance(info, type({})):
raise rhnFault(53, "Unexpected format for interface %s" %
info)
vdict = {}
for key, mapping in self.key_mapping.items():
# Look at the mapping first; if not found, look for the key
if info.has_key(mapping):
k = mapping
else:
k = key
if not info.has_key(k):
raise rhnFault(53, "Unable to find required field %s"
% (key))
val = info[k]
if mapping in ['ip_addr', 'netmask', 'broadcast', 'address']:
# bugzilla: 129840 kudzu (rhpl) will sometimes pad octets
# with leading zeros, causing confusion; clean those up
val = self.cleanse_ip_addr(val)
vdict[mapping] = val
self.ifaces[vdict['address']] = vdict
def __str__(self):
return "<%s Class at %d: %s>\n" % (
self.__class__.__name__,
id(self), {
"self.ifaces": self.ifaces,
"self.db_ifaces": self.db_ifaces,
})
__repr__ = __str__
def cleanse_ip_addr(self, val):
""" to be overriden by child """
return val
def save(self, interface_id):
log_debug(4, self.ifaces)
self.reload(interface_id)
log_debug(4, "Net addresses in DB", self.db_ifaces)
# Compute updates, deletes and inserts
inserts = []
updates = []
deletes = []
ifaces = self.ifaces.copy()
for iface in self.db_ifaces:
address = iface['address']
if not self.ifaces.has_key(iface['address']):
# To be deleted
# filter out params, which are not used in query
iface = dict((column, iface[column]) for column in self.unique)
deletes.append(iface)
continue
uploaded_iface = ifaces[address]
del ifaces[address]
            # FIXME this is inefficient for IPv4: if the row is present it will always be updated
if _hash_eq(uploaded_iface, iface):
# Same value
continue
uploaded_iface.update({'interface_id': interface_id})
updates.append(uploaded_iface)
# Everything else in self.ifaces has to be inserted
for name, iface in ifaces.items():
iface['address'] = iface['address']
iface['interface_id'] = interface_id
inserts.append(iface)
log_debug(4, "Deletes", deletes)
log_debug(4, "Updates", updates)
log_debug(4, "Inserts", inserts)
self._delete(deletes)
self._update(updates)
self._insert(inserts)
def _insert(self, params):
q = """insert into %s
(%s) values (%s)"""
self._null_columns(params, self._autonull)
columns = self.key_mapping.values() + ['interface_id']
columns.sort()
bind_params = string.join(map(lambda x: ':' + x, columns), ", ")
h = rhnSQL.prepare(q % (self.table, string.join(columns, ", "), bind_params))
return _dml(h, params)
def _delete(self, params):
q = """delete from %s
where %s"""
columns = self.unique
wheres = map(lambda x: '%s = :%s' % (x, x), columns)
h = rhnSQL.prepare(q % (self.table, string.join(wheres, " and ")))
return _dml(h, params)
def _update(self, params):
q = """update %s
set %s
where %s"""
self._null_columns(params, self._autonull)
wheres = self.unique
wheres = map(lambda x: '%s = :%s' % (x, x), wheres)
wheres = string.join(wheres, " and ")
updates = self.key_mapping.values()
updates.sort()
updates = map(lambda x: '%s = :%s' % (x, x), updates)
updates = string.join(updates, ", ")
h = rhnSQL.prepare(q % (self.table, updates, wheres))
return _dml(h, params)
def reload(self, interface_id):
h = rhnSQL.prepare("""
select *
from %s
where interface_id = :interface_id
order by interface_id
""" % self.table)
h.execute(interface_id=interface_id)
self.db_ifaces = []
while 1:
row = h.fetchone_dict()
if not row:
break
hval = {'interface_id': row['interface_id']}
for key in self.key_mapping.values():
hval[key] = row[key]
self.db_ifaces.append(hval)
self.status = 0
return 0
class NetIfaceAddress6(NetIfaceAddress):
""" IPv6 Network interface """
key_mapping = {
'netmask': 'netmask',
'addr': 'address',
'scope': 'scope',
}
table = 'rhnServerNetAddress6'
unique = ['interface_id', 'address', 'scope']
def __init__(self, addr_dict=None):
NetIfaceAddress.__init__(self, addr_dict)
self._autonull = ('address', 'netmask', 'scope')
class NetIfaceAddress4(NetIfaceAddress):
""" IPv4 Network interface """
key_mapping = {
'netmask': 'netmask',
'ipaddr': 'address',
'broadcast': 'broadcast',
}
table = 'rhnServerNetAddress4'
unique = ['interface_id']
def __init__(self, addr_dict=None):
NetIfaceAddress.__init__(self, addr_dict)
self._autonull = ('address', 'netmask', 'broadcast')
def cleanse_ip_addr(self, val):
return cleanse_ip_addr(val)
def _hash_eq(h1, h2):
""" Compares two hashes and return 1 if the first is a subset of the second """
log_debug(5, h1, h2)
for k, v in h1.items():
if not h2.has_key(k):
return 0
if h2[k] != v:
return 0
return 1
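# Illustrative examples (not in the original source):
#   _hash_eq({'a': 1}, {'a': 1, 'b': 2}) -> 1   (h1 is a subset of h2)
#   _hash_eq({'a': 1}, {'a': 2})         -> 0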
def _dml(statement, params):
log_debug(5, params)
if not params:
return 0
params = _transpose(params)
rowcount = statement.executemany(**params)
log_debug(5, "Affected rows", rowcount)
return rowcount
def _transpose(hasharr):
""" Transpose the array of hashes into a hash of arrays """
if not hasharr:
return {}
keys = hasharr[0].keys()
result = {}
for k in keys:
result[k] = []
for hval in hasharr:
for k in keys:
if hval.has_key(k):
result[k].append(hval[k])
else:
result[k].append(None)
return result
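# Illustrative example (not in the original source):
#   _transpose([{'a': 1, 'b': 2}, {'a': 3}])
# returns {'a': [1, 3], 'b': [2, None]}, i.e. one bind list per column, as
# expected by executemany() in _dml() above.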
class MemoryInformation(Device):
""" Memory information """
table = "rhnRAM"
def __init__(self, dict=None):
fields = ["ram", "swap"]
mapping = {"class": None}
Device.__init__(self, fields, dict, mapping)
# use our own sequence
self.sequence = "rhn_ram_id_seq"
if not dict:
return
# Sometimes we get sent a NNNNL number and we need to strip the L
for k in fields:
if not self.data.has_key(k):
continue
if self.data[k] in [None, "None", ""]:
self.data[k] = -1
self.data[k] = str(self.data[k])
if self.data[k][-1] == 'L':
self.data[k] = self.data[k][:-1]
class DMIInformation(Device):
""" DMI information """
table = "rhnServerDMI"
def __init__(self, dict=None):
fields = ["vendor", "system", "product", "asset", "board",
"bios_vendor", "bios_version", "bios_release"]
mapping = {"class": None}
Device.__init__(self, fields, dict, mapping)
# use our own sequence
self.sequence = "rhn_server_dmi_id_seq"
self._autonull = ("vendor", "system", "product", "asset", "board",
"bios_vendor", "bios_version", "bios_release")
if not dict:
return
# deal with hardware with insanely long dmi strings...
for key, value in self.data.items():
# Some of the values may be None
if value and isinstance(value, type("")):
self.data[key] = value[:256]
class InstallInformation(Device):
""" Install information """
table = "rhnServerInstallInfo"
def __init__(self, dict=None):
fields = ['install_method', 'iso_status', 'mediasum']
mapping = {
'class': None,
'installmethod': 'install_method',
'isostatus': 'iso_status',
'mediasum': 'mediasum',
}
Device.__init__(self, fields, dict, mapping)
self.sequence = 'rhn_server_install_info_id_seq'
class Hardware:
""" Support for the hardware items """
def __init__(self):
self.__hardware = {}
self.__loaded = 0
self.__changed = 0
def hardware_by_class(self, device_class):
return self.__hardware[device_class]
def add_hardware(self, hardware):
""" add new hardware """
log_debug(4, hardware)
if not hardware:
return -1
if type(hardware) == type({}):
hardware = UserDictCase(hardware)
if not isinstance(hardware, UserDictCase):
log_error("argument type is not hash: %s" % hardware)
raise TypeError, "This function requires a hash as an argument"
# validation is important
hw_class = hardware.get("class")
if hw_class is None:
return -1
hw_class = string.lower(hw_class)
class_type = None
if hw_class in ["video", "audio", "audio_hd", "usb", "other", "hd", "floppy",
"mouse", "modem", "network", "cdrom", "scsi",
"unspec", "scanner", "tape", "capture", "raid",
"socket", "keyboard", "printer", "firewire", "ide"]:
class_type = HardwareDevice
elif hw_class == "cpu":
class_type = CPUDevice
elif hw_class == "netinfo":
class_type = NetworkInformation
elif hw_class == "memory":
class_type = MemoryInformation
elif hw_class == "dmi":
class_type = DMIInformation
elif hw_class == "installinfo":
class_type = InstallInformation
elif hw_class == "netinterfaces":
class_type = NetIfaceInformation
else:
log_error("UNKNOWN CLASS TYPE `%s'" % hw_class)
# Same trick: try-except and raise the exception so that Traceback
# can send the e-mail
try:
raise KeyError, "Unknwon class type `%s' for hardware '%s'" % (
hw_class, hardware)
except:
Traceback(mail=1)
return
# create the new device
new_dev = class_type(hardware)
if self.__hardware.has_key(class_type):
_l = self.__hardware[class_type]
else:
_l = self.__hardware[class_type] = []
_l.append(new_dev)
self.__changed = 1
return 0
def delete_hardware(self, sysid=None):
""" This function deletes all hardware. """
log_debug(4, sysid)
if not self.__loaded:
self.reload_hardware_byid(sysid)
hardware = self.__hardware
if hardware == {}:
# nothing to delete
return 0
self.__changed = 1
for device_type in hardware.keys():
for hw in hardware[device_type]:
hw.status = 2 # deleted
# filter out the hardware that was just added and then
# deleted before saving
hardware[device_type] = filter(lambda a:
not (a.status == 2 and hasattr(a, "id") and a.id == 0),
hardware[device_type])
return 0
def save_hardware_byid(self, sysid):
"""Save the hardware list """
log_debug(3, sysid, "changed = %s" % self.__changed)
hardware = self.__hardware
if hardware == {}: # nothing loaded
return 0
if not self.__changed:
return 0
for device_type, hw_list in hardware.items():
for hw in hw_list:
hw.save(sysid)
self.__changed = 0
return 0
def __load_from_db(self, DevClass, sysid):
""" Load a certain hardware class from the database """
if not self.__hardware.has_key(DevClass):
self.__hardware[DevClass] = []
h = rhnSQL.prepare("select id from %s where server_id = :sysid" % DevClass.table)
h.execute(sysid=sysid)
rows = h.fetchall_dict() or []
for device in rows:
dev_id = device['id']
dev = DevClass()
dev.reload(dev_id)
self.__hardware[DevClass].append(dev)
def reload_hardware_byid(self, sysid):
""" load all hardware devices for a server """
log_debug(4, sysid)
if not sysid:
return -1
self.__hardware = {} # discard what was already loaded
# load from all hardware databases
self.__load_from_db(HardwareDevice, sysid)
self.__load_from_db(CPUDevice, sysid)
self.__load_from_db(DMIInformation, sysid)
self.__load_from_db(NetworkInformation, sysid)
self.__load_from_db(MemoryInformation, sysid)
self.__load_from_db(InstallInformation, sysid)
net_iface_info = NetIfaceInformation()
net_iface_info.reload(sysid)
self.__hardware[NetIfaceInformation] = [net_iface_info]
# now set the flag
self.__changed = 0
self.__loaded = 1
return 0
| gpl-2.0 | -6,922,134,149,411,319,000 | 32.757782 | 109 | 0.51788 | false |
iffy/eliot | benchmarks/logwriter.py | 1 | 1041 | """
A benchmark for eliot.logwriter.
"""
import tempfile
import time
from twisted.internet.task import react
from twisted.python.filepath import FilePath
from eliot.logwriter import ThreadedFileWriter
LENGTH = 100
MESSAGES = 100000
def main(reactor):
print "Message size: %d bytes Num messages: %d" % (LENGTH, MESSAGES)
message = b"a" * LENGTH
fp = FilePath(tempfile.mktemp())
writer = ThreadedFileWriter(fp.open("ab"), reactor)
writer.startService()
start = time.time()
for i in range(MESSAGES):
writer(message)
d = writer.stopService()
def done(_):
elapsed = time.time() - start
kbSec = (LENGTH * MESSAGES) / (elapsed * 1024)
messagesSec = MESSAGES / elapsed
print "messages/sec: %s KB/sec: %s" % (messagesSec, kbSec)
d.addCallback(done)
def cleanup(result):
fp.restat()
print
print "File size: ", fp.getsize()
fp.remove()
d.addBoth(cleanup)
return d
if __name__ == '__main__':
react(main, [])
| apache-2.0 | -6,183,397,444,295,994,000 | 20.6875 | 74 | 0.622478 | false |
kennym/itools | test/test_ical.py | 1 | 36237 | # -*- coding: UTF-8 -*-
# Copyright (C) 2005-2008 Juan David Ibáñez Palomar <[email protected]>
# Copyright (C) 2006-2007 Nicolas Deram <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Import from the Standard Library
from cStringIO import StringIO
from datetime import datetime
from unittest import TestCase, main
# Import from itools
from itools.csv import Property
from itools.csv.table import encode_param_value
from itools.datatypes import String
from itools.ical import iCalendar, icalendarTable
# Example with 1 event
content = """
BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//Mozilla.org/NONSGML Mozilla Calendar V1.0//EN
METHOD:PUBLISH
BEGIN:VEVENT
UID:581361a0-1dd2-11b2-9a42-bd3958eeac9a
SUMMARY:Résumé
DESCRIPTION:all all all
LOCATION:France
STATUS:TENTATIVE
CLASS:PRIVATE
X-MOZILLA-RECUR-DEFAULT-INTERVAL:0
DTSTART;VALUE="DATE":20050530
DTEND;VALUE=DATE:20050531
DTSTAMP:20050601T074604Z
ATTENDEE;RSVP=TRUE;MEMBER="mailto:[email protected]":mailto:[email protected]
ATTENDEE;MEMBER="mailto:[email protected]":mailto:[email protected]
PRIORITY:1
SEQUENCE:0
END:VEVENT
END:VCALENDAR
"""
# Example with 2 events
content2 = """
BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//Mozilla.org/NONSGML Mozilla Calendar V1.0//EN
METHOD:PUBLISH
BEGIN:VEVENT
UID:581361a0-1dd2-11b2-9a42-bd3958eeac9a
SUMMARY:Refound
DESCRIPTION:all all all
LOCATION:France
STATUS:TENTATIVE
CLASS:PRIVATE
X-MOZILLA-RECUR-DEFAULT-INTERVAL:0
DTSTART;VALUE="DATE":20050530T000000
DTEND;VALUE=DATE:20050531T235959.999999
DTSTAMP:20050601T074604Z
ATTENDEE;RSVP=TRUE;MEMBER="mailto:[email protected]":mailto:[email protected]
PRIORITY:1
SEQUENCE:0
END:VEVENT
BEGIN:VEVENT
UID:581361a0-1dd2-11b2-9a42-bd3958eeac9b
SUMMARY:222222222
DTSTART;VALUE="DATE":20050701
DTEND;VALUE=DATE:20050701
ATTENDEE;RSVP=TRUE;MEMBER="mailto:[email protected]":mailto:[email protected]
PRIORITY:2
SEQUENCE:0
END:VEVENT
END:VCALENDAR
"""
def property_to_string(prop_name, prop):
"""Method only used by test_load and test_load2.
"""
value, params = prop.value, ''
for p_name in prop.parameters:
p_value = prop.parameters[p_name]
p_value = [ encode_param_value(p_name, x, String) for x in p_value ]
param = ';%s=%s' % (p_name, ','.join(p_value))
params = params + param
return u'%s%s:%s' % (prop_name, params, value)
class icalTestCase(TestCase):
def setUp(self):
self.cal1 = iCalendar(string=content)
self.cal2 = iCalendar(string=content2)
def test_new(self):
cal = iCalendar()
properties = []
for name in cal.properties:
params = cal.properties[name].parameters
value = cal.properties[name].value
property = '%s;%s:%s' % (name, params, value)
properties.append(property)
# Test properties
expected_properties = [
u'VERSION;{}:2.0',
u'PRODID;{}:-//itaapy.com/NONSGML ikaaro icalendar V1.0//EN']
self.assertEqual(properties, expected_properties)
# Test components
self.assertEqual(len(cal.get_components()), 0)
self.assertEqual(cal.get_components('VEVENT'), [])
def test_property(self):
"""Test to create, access and encode a property with or without
parameters.
"""
# Property without parameter
expected = ['SUMMARY:This is the summary\n']
property_value = Property('This is the summary')
output = self.cal1.encode_property('SUMMARY', property_value)
self.assertEqual(output, expected)
# Property with one parameter
expected = ['ATTENDEE;MEMBER="mailto:[email protected]":'
'mailto:[email protected]\n']
member = 'mailto:[email protected]'
value = Property('mailto:[email protected]', MEMBER=[member])
output = self.cal1.encode_property('ATTENDEE', value)
self.assertEqual(output, expected)
def test_get_property_values(self):
cal = self.cal1
# icalendar property
expected = '2.0'
property = cal.get_property_values('VERSION')
self.assertEqual(property.value, expected)
# Component property
events = cal.get_components('VEVENT')
properties = events[0].get_version()
expected = u'Résumé'
property = events[0].get_property_values('SUMMARY')
self.assertEqual(property.value, expected)
expected = 1
property = events[0].get_property_values('PRIORITY')
self.assertEqual(property.value, expected)
# Component properties
properties = {}
properties['MYADD'] = Property(u'Résumé à crêtes')
value = Property(u'Property added by calling add_property')
properties['DESCRIPTION'] = value
member = '"mailto:[email protected]"'
value = Property('mailto:[email protected]', MEMBER=[member])
properties['ATTENDEE'] = value
uid = cal.add_component('VEVENT', **properties)
event = cal.get_component_by_uid(uid)
properties = event.get_property_values()
self.assertEqual('MYADD' in properties, True)
self.assertEqual('DESCRIPTION' in properties, True)
self.assertEqual('ATTENDEE' in properties, True)
self.assertEqual('VERSION' in properties, False)
def test_add_to_calendar(self):
"""Test to add property and component to an empty icalendar object.
"""
cal = iCalendar()
cal.add_component('VEVENT')
self.assertEqual(len(cal.get_components('VEVENT')), 1)
value = Property('PUBLISH')
cal.set_property('METHOD', value)
self.assertEqual(cal.get_property_values('METHOD'), value)
def test_load(self):
"""Test loading a simple calendar.
"""
cal = self.cal1
# Test icalendar properties
properties = []
for name in cal.properties:
property_value = cal.properties[name]
# Only property METHOD can occur several times, we give only one
if isinstance(property_value, list):
property_value = property_value[0]
params = property_value.parameters
value = property_value.value
property = '%s;%s:%s' % (name, params, value)
properties.append(property)
expected_properties = [
u'VERSION;{}:2.0',
u'METHOD;{}:PUBLISH',
u'PRODID;{}:-//Mozilla.org/NONSGML Mozilla Calendar V1.0//EN' ]
self.assertEqual(properties, expected_properties)
# Test component properties
properties = []
event = cal.get_components('VEVENT')[0]
version = event.get_version()
for prop_name in version:
datatype = cal.get_record_datatype(prop_name)
if datatype.multiple is False:
prop = version[prop_name]
property = property_to_string(prop_name, prop)
properties.append(property)
else:
for prop in version[prop_name]:
property = property_to_string(prop_name, prop)
properties.append(property)
expected_event_properties = [
u'STATUS:TENTATIVE',
u'DTSTAMP:2005-06-01 07:46:04',
u'DESCRIPTION:all all all',
u'ATTENDEE;MEMBER="mailto:[email protected]"'
';RSVP=TRUE:mailto:[email protected]',
u'ATTENDEE;MEMBER="mailto:[email protected]"'
':mailto:[email protected]',
u'SUMMARY:Résumé',
u'PRIORITY:1',
u'LOCATION:France',
u'X-MOZILLA-RECUR-DEFAULT-INTERVAL:0',
u'DTEND;VALUE=DATE:2005-05-31 00:00:00',
u'DTSTART;VALUE=DATE:2005-05-30 00:00:00',
u'CLASS:PRIVATE']
self.assertEqual(event.uid, '581361a0-1dd2-11b2-9a42-bd3958eeac9a')
self.assertEqual(properties, expected_event_properties)
self.assertEqual(len(cal.get_components('VEVENT')), 1)
# Test journals
self.assertEqual(len(cal.get_components('VJOURNAL')), 0)
# Test todos
self.assertEqual(len(cal.get_components('TODO')), 0)
# Test freebusys
self.assertEqual(len(cal.get_components('FREEBUSY')), 0)
# Test timezones
self.assertEqual(len(cal.get_components('TIMEZONE')), 0)
# Test others
self.assertEqual(len(cal.get_components('others')), 0)
def test_load_2(self):
"""Test loading a 2 events calendar.
"""
cal = self.cal2
properties = []
for name in cal.properties:
params = cal.properties[name].parameters
value = cal.properties[name].value
property = '%s;%s:%s' % (name, params, value)
properties.append(property)
# Test properties
expected_properties = [
u'VERSION;{}:2.0',
u'METHOD;{}:PUBLISH',
u'PRODID;{}:-//Mozilla.org/NONSGML Mozilla Calendar V1.0//EN' ]
self.assertEqual(properties, expected_properties)
events = []
for event in cal.get_components('VEVENT'):
version = event.get_version()
properties = []
for prop_name in version:
if prop_name == 'DTSTAMP':
continue
datatype = cal.get_record_datatype(prop_name)
if datatype.multiple is False:
prop = version[prop_name]
property = property_to_string(prop_name, prop)
properties.append(property)
else:
for prop in version[prop_name]:
property = property_to_string(prop_name, prop)
properties.append(property)
events.append(properties)
# Test events
expected_events = [[
u'STATUS:TENTATIVE',
u'DESCRIPTION:all all all',
u'ATTENDEE;MEMBER="mailto:[email protected]"'
';RSVP=TRUE:mailto:[email protected]',
u'SUMMARY:Refound',
u'PRIORITY:1',
u'LOCATION:France',
u'X-MOZILLA-RECUR-DEFAULT-INTERVAL:0',
u'DTEND;VALUE=DATE:2005-05-31 23:59:59.999999',
u'DTSTART;VALUE=DATE:2005-05-30 00:00:00',
u'CLASS:PRIVATE'],
[
u'ATTENDEE;MEMBER="mailto:[email protected]";RSVP=TRUE'\
':mailto:[email protected]',
u'SUMMARY:222222222',
u'PRIORITY:2',
u'DTEND;VALUE=DATE:2005-07-01 00:00:00',
u'DTSTART;VALUE=DATE:2005-07-01 00:00:00'
]]
self.assertEqual(events, expected_events)
self.assertEqual(len(cal.get_components('VEVENT')), 2)
# Test journals
self.assertEqual(len(cal.get_components('VJOURNAL')), 0)
# Test todos
self.assertEqual(len(cal.get_components('TODO')), 0)
# Test freebusys
self.assertEqual(len(cal.get_components('FREEBUSY')), 0)
# Test timezones
self.assertEqual(len(cal.get_components('TIMEZONE')), 0)
# Test others
self.assertEqual(len(cal.get_components('others')), 0)
# Just call to_str method
def test_to_str(self):
"""Call to_str method.
"""
cal = self.cal2
cal.to_str()
def test_add_property(self):
"""Test adding a property to any component.
"""
cal = self.cal2
event = cal.get_components('VEVENT')[1]
# other property (MYADD)
name, value = 'MYADD', Property(u'Résumé à crêtes')
cal.update_component(event.uid, **{name: value})
property = event.get_property_values(name)
self.assertEqual(property[0], value)
self.assertEqual(property[0].value, value.value)
# property DESCRIPTION
name = 'DESCRIPTION'
value = Property(u'Property added by calling add_property')
cal.update_component(event.uid, **{name: value})
property = event.get_property_values(name)
self.assertEqual(property, value)
# property ATTENDEE
name = 'ATTENDEE'
value = event.get_property_values(name)
member = '"mailto:[email protected]"'
value.append(Property('mailto:[email protected]', MEMBER=[member]))
cal.update_component(event.uid, **{name: value})
property = event.get_property_values(name)
self.assertEqual(str(property[0].value), 'mailto:[email protected]')
self.assertEqual(property[1].parameters, {'MEMBER': [member]})
self.assertEqual(property[1], value[1])
def test_icalendar_set_property(self):
"""Test setting a new value to an existant icalendar property.
"""
cal = self.cal1
name, value = 'VERSION', Property('2.1')
cal.set_property(name, value)
self.assertEqual(cal.get_property_values(name), value)
cal.set_property(name, [value, ])
self.assertEqual(cal.get_property_values(name), value)
def test_component_set_property(self):
"""Test setting a new value to an existant component property.
"""
cal = self.cal1
event = cal.get_components('VEVENT')[0]
name, value = 'SUMMARY', Property('This is a new summary')
cal.update_component(event.uid, **{name: value})
self.assertEqual(event.get_property_values(name), value)
member = '"mailto:[email protected]"'
value = [
Property('mailto:[email protected]', MEMBER=[member]),
Property('mailto:[email protected]'),
Property('mailto:[email protected]')]
cal.update_component(event.uid, ATTENDEE=value)
self.assertEqual(event.get_property_values('ATTENDEE'), value)
def test_search_events(self):
"""Test get events filtered by arguments given.
"""
# Test with 1 event
cal = self.cal1
attendee_value = 'mailto:[email protected]'
events = cal.search_events(ATTENDEE=attendee_value)
self.assertEqual(len(events), 1)
events = cal.search_events(STATUS='CONFIRMED')
self.assertEqual(events, [])
events = cal.search_events(STATUS='TENTATIVE')
self.assertEqual(len(events), 1)
events = cal.search_events(ATTENDEE=attendee_value, STATUS='TENTATIVE')
self.assertEqual(len(events), 1)
events = cal.search_events(STATUS='TENTATIVE', PRIORITY=1)
self.assertEqual(len(events), 1)
events = cal.search_events(
ATTENDEE=[attendee_value, 'mailto:[email protected]'],
STATUS='TENTATIVE',
PRIORITY=1)
self.assertEqual(len(events), 1)
# Tests with 2 events
cal = self.cal2
attendee_value = 'mailto:[email protected]'
events = cal.search_events(ATTENDEE=attendee_value)
self.assertEqual(len(events), 2)
events = cal.search_events(STATUS='CONFIRMED')
self.assertEqual(events, [])
events = cal.search_events(STATUS='TENTATIVE')
self.assertEqual(len(events), 1)
events = cal.search_events(ATTENDEE=attendee_value, STATUS='TENTATIVE')
self.assertEqual(len(events), 1)
events = cal.search_events(STATUS='TENTATIVE', PRIORITY=1)
self.assertEqual(len(events), 1)
events = cal.search_events(
ATTENDEE=[attendee_value, 'mailto:[email protected]'],
STATUS='TENTATIVE',
PRIORITY=1)
self.assertEqual(len(events), 1)
def test_search_events_in_date(self):
"""Test search events by date.
"""
cal = self.cal1
date = datetime(2005, 5, 29)
events = cal.search_events_in_date(date)
self.assertEqual(len(events), 0)
self.assertEqual(cal.has_event_in_date(date), False)
date = datetime(2005, 5, 30)
events = cal.search_events_in_date(date)
self.assertEqual(len(events), 1)
self.assertEqual(cal.has_event_in_date(date), True)
events = cal.search_events_in_date(date, STATUS='TENTATIVE')
self.assertEqual(len(events), 1)
events = cal.search_events_in_date(date, STATUS='CONFIRMED')
self.assertEqual(len(events), 0)
attendee_value = 'mailto:[email protected]'
events = cal.search_events_in_date(date, ATTENDEE=attendee_value)
self.assertEqual(len(events), 1)
events = cal.search_events_in_date(date, ATTENDEE=attendee_value,
STATUS='TENTATIVE')
self.assertEqual(len(events), 1)
events = cal.search_events_in_date(date, ATTENDEE=attendee_value,
STATUS='CONFIRMED')
self.assertEqual(len(events), 0)
date = datetime(2005, 7, 30)
events = cal.search_events_in_date(date)
self.assertEqual(len(events), 0)
self.assertEqual(cal.has_event_in_date(date), False)
def test_search_events_in_range(self):
"""Test search events matching given dates range.
"""
cal = self.cal2
dtstart = datetime(2005, 1, 1)
dtend = datetime(2005, 1, 1, 20, 0)
events = cal.search_events_in_range(dtstart, dtend)
self.assertEqual(len(events), 0)
dtstart = datetime(2005, 5, 28)
dtend = datetime(2005, 5, 30, 0, 50)
events = cal.search_events_in_range(dtstart, dtend)
self.assertEqual(len(events), 1)
dtstart = datetime(2005, 5, 29)
dtend = datetime(2005, 5, 30, 0, 1)
events = cal.search_events_in_range(dtstart, dtend)
self.assertEqual(len(events), 1)
dtstart = datetime(2005, 5, 30, 23, 59, 59)
dtend = datetime(2005, 5, 31, 0, 0)
events = cal.search_events_in_range(dtstart, dtend)
self.assertEqual(len(events), 1)
dtstart = datetime(2005, 5, 1)
dtend = datetime(2005, 8, 1)
events = cal.search_events_in_range(dtstart, dtend)
self.assertEqual(len(events), 2)
dtstart = datetime(2005, 5, 30, 23)
dtend = datetime(2005, 6, 1)
events = cal.search_events_in_range(dtstart, dtend)
self.assertEqual(len(events), 1)
dtstart = datetime(2005, 5, 31, 0, 0, 1)
dtend = datetime(2005, 6, 1)
events = cal.search_events_in_range(dtstart, dtend)
self.assertEqual(len(events), 1)
events = cal.search_events_in_range(dtstart, dtend, STATUS='TENTATIVE')
self.assertEqual(len(events), 1)
events = cal.search_events_in_range(dtstart, dtend, STATUS='CONFIRMED')
self.assertEqual(len(events), 0)
attendee_value = 'mailto:[email protected]'
events = cal.search_events_in_range(dtstart, dtend,
ATTENDEE=attendee_value)
self.assertEqual(len(events), 1)
events = cal.search_events_in_range(dtstart, dtend,
ATTENDEE=attendee_value, STATUS='TENTATIVE')
self.assertEqual(len(events), 1)
events = cal.search_events_in_range(dtstart, dtend,
ATTENDEE=attendee_value, STATUS='CONFIRMED')
self.assertEqual(len(events), 0)
def test_get_conflicts(self):
"""Test get_conflicts method which returns uid couples of events
conflicting on a given date.
"""
cal = self.cal2
date = datetime(2005, 05, 30)
conflicts = cal.get_conflicts(date)
self.assertEqual(conflicts, None)
# Set a conflict
uid1 = '581361a0-1dd2-11b2-9a42-bd3958eeac9a'
uid2 = '581361a0-1dd2-11b2-9a42-bd3958eeac9b'
cal.update_component(uid2, DTSTART=Property(datetime(2005, 05, 30)),
DTEND=Property(datetime(2005, 05, 31)))
conflicts = cal.get_conflicts(date)
self.assertEqual(conflicts, [(uid1, uid2)])
class icalTableTestCase(TestCase):
def setUp(self):
src = iCalendar(string=content)
src = StringIO(src.to_str())
cal = icalendarTable()
cal.load_state_from_ical_file(src)
self.cal1 = cal
src = iCalendar(string=content2)
src = StringIO(src.to_str())
cal = icalendarTable()
cal.load_state_from_ical_file(src)
self.cal2 = cal
def test_new(self):
cal = icalendarTable()
# Test components
self.assertEqual(len(cal.get_components()), 0)
self.assertEqual(cal.get_components('VEVENT'), [])
def test_property(self):
"""Test to create, access and encode a property with or without
parameters.
"""
# Property without parameter
expected = ['SUMMARY:This is the summary\n']
property_value = Property('This is the summary')
output = self.cal1.encode_property('SUMMARY', property_value)
self.assertEqual(output, expected)
# Property with one parameter
expected = ['ATTENDEE;MEMBER="mailto:[email protected]":'
'mailto:[email protected]\n']
member = 'mailto:[email protected]'
value = Property('mailto:[email protected]', MEMBER=[member])
output = self.cal1.encode_property('ATTENDEE', value)
self.assertEqual(output, expected)
def test_get_property(self):
cal = self.cal1
# Component property
events = cal.get_components('VEVENT')
properties = events[0][-1]
expected = u'Résumé'
property = events[0].get_property('SUMMARY')
self.assertEqual(property.value, expected)
expected = 1
property = events[0].get_property('PRIORITY')
self.assertEqual(property.value, expected)
# Component properties
properties = {}
properties['MYADD'] = Property(u'Résumé à crêtes')
value = Property(u'Property added by calling add_property')
properties['DESCRIPTION'] = value
member = '"mailto:[email protected]"'
value = Property('mailto:[email protected]', MEMBER=[member])
properties['ATTENDEE'] = value
properties['type'] = 'VEVENT'
uid = cal.add_record(properties).UID
event = cal.get_component_by_uid(uid)[0]
properties = event.get_property()
self.assertEqual('MYADD' in properties, True)
self.assertEqual('DESCRIPTION' in properties, True)
self.assertEqual('ATTENDEE' in properties, True)
self.assertEqual('VERSION' in properties, False)
def test_add_to_calendar(self):
"""Test to add property and component to an empty icalendar object.
"""
cal = icalendarTable()
cal.add_record({'type': 'VEVENT'})
self.assertEqual(len(cal.get_components('VEVENT')), 1)
def test_load(self):
"""Test loading a simple calendar.
"""
cal = self.cal1
# Test component properties
properties = []
event = cal.get_components('VEVENT')[0]
version = event[-1]
for prop_name in version:
if prop_name in ('ts', 'id', 'type', 'UID', 'SEQUENCE'):
continue
datatype = cal.get_record_datatype(prop_name)
if getattr(datatype, 'multiple', False) is False:
prop = version[prop_name]
property = property_to_string(prop_name, prop)
properties.append(property)
else:
for prop in version[prop_name]:
property = property_to_string(prop_name, prop)
properties.append(property)
expected_event_properties = [
u'STATUS:TENTATIVE',
u'DTSTAMP:2005-06-01 07:46:04',
u'DESCRIPTION:all all all',
u'ATTENDEE;MEMBER="mailto:[email protected]"'
';RSVP=TRUE:mailto:[email protected]',
u'ATTENDEE;MEMBER="mailto:[email protected]"'
':mailto:[email protected]',
u'SUMMARY:Résumé',
u'PRIORITY:1',
u'LOCATION:France',
u'X-MOZILLA-RECUR-DEFAULT-INTERVAL:0',
u'DTEND;VALUE=DATE:2005-05-31 00:00:00',
u'DTSTART;VALUE=DATE:2005-05-30 00:00:00',
u'CLASS:PRIVATE']
self.assertEqual(event.UID, '581361a0-1dd2-11b2-9a42-bd3958eeac9a')
self.assertEqual(properties, expected_event_properties)
self.assertEqual(len(cal.get_components('VEVENT')), 1)
# Test journals
self.assertEqual(len(cal.get_components('VJOURNAL')), 0)
# Test todos
self.assertEqual(len(cal.get_components('TODO')), 0)
# Test freebusys
self.assertEqual(len(cal.get_components('FREEBUSY')), 0)
# Test timezones
self.assertEqual(len(cal.get_components('TIMEZONE')), 0)
# Test others
self.assertEqual(len(cal.get_components('others')), 0)
def test_load_2(self):
"""Test loading a 2 events calendar.
"""
cal = self.cal2
events = []
for event in cal.get_components('VEVENT'):
version = event[-1]
properties = []
for prop_name in version:
if prop_name in ('ts', 'id', 'type', 'UID', 'SEQUENCE'):
continue
if prop_name == 'DTSTAMP':
continue
datatype = cal.get_record_datatype(prop_name)
if getattr(datatype, 'multiple', False) is False:
prop = version[prop_name]
property = property_to_string(prop_name, prop)
properties.append(property)
else:
for prop in version[prop_name]:
property = property_to_string(prop_name, prop)
properties.append(property)
events.append(properties)
# Test events
expected_events = [[
u'ATTENDEE;MEMBER="mailto:[email protected]";RSVP=TRUE'\
':mailto:[email protected]',
u'SUMMARY:222222222',
u'PRIORITY:2',
u'DTEND;VALUE=DATE:2005-07-01 00:00:00',
u'DTSTART;VALUE=DATE:2005-07-01 00:00:00'
],
[
u'STATUS:TENTATIVE',
u'DESCRIPTION:all all all',
u'ATTENDEE;MEMBER="mailto:[email protected]"'
';RSVP=TRUE:mailto:[email protected]',
u'SUMMARY:Refound',
u'PRIORITY:1',
u'LOCATION:France',
u'X-MOZILLA-RECUR-DEFAULT-INTERVAL:0',
u'DTEND;VALUE=DATE:2005-05-31 23:59:59.999999',
u'DTSTART;VALUE=DATE:2005-05-30 00:00:00',
u'CLASS:PRIVATE']
]
self.assertEqual(events, expected_events)
self.assertEqual(len(cal.get_components('VEVENT')), 2)
# Test journals
self.assertEqual(len(cal.get_components('VJOURNAL')), 0)
# Test todos
self.assertEqual(len(cal.get_components('TODO')), 0)
# Test freebusys
self.assertEqual(len(cal.get_components('FREEBUSY')), 0)
# Test timezones
self.assertEqual(len(cal.get_components('TIMEZONE')), 0)
# Test others
self.assertEqual(len(cal.get_components('others')), 0)
# Just call to_ical method
def test_to_ical(self):
"""Call to_ical method.
"""
cal = self.cal2
cal.to_ical()
def test_add_property(self):
"""Test adding a property to any component.
"""
cal = self.cal2
event = cal.get_components('VEVENT')[1]
# other property (MYADD)
name, value = 'MYADD', Property(u'Résumé à crêtes')
cal.update_record(event.id, **{name: value})
property = event.get_property(name)
self.assertEqual(property[0], value)
self.assertEqual(property[0].value, value.value)
# property DESCRIPTION
name = 'DESCRIPTION'
value = Property(u'Property added by calling add_property')
cal.update_record(event.id, **{name: value})
property = event.get_property(name)
self.assertEqual(property, value)
# property ATTENDEE
name = 'ATTENDEE'
value = event.get_property(name)
member = '"mailto:[email protected]"'
value.append(Property('mailto:[email protected]', MEMBER=[member]))
cal.update_record(event.id, **{name: value})
property = event.get_property(name)
self.assertEqual(str(property[0].value), 'mailto:[email protected]')
self.assertEqual(property[1].parameters, {'MEMBER': [member]})
self.assertEqual(property[1], value[1])
def test_component_set_property(self):
"""Test setting a new value to an existant component property.
"""
cal = self.cal1
event = cal.get_components('VEVENT')[0]
name, value = 'SUMMARY', Property('This is a new summary')
cal.update_record(event.id, **{name: value})
self.assertEqual(event.get_property(name), value)
member = '"mailto:[email protected]"'
value = [
Property('mailto:[email protected]', MEMBER=[member]),
Property('mailto:[email protected]'),
Property('mailto:[email protected]')]
cal.update_record(event.id, ATTENDEE=value)
self.assertEqual(event.get_property('ATTENDEE'), value)
def test_search_events(self):
"""Test get events filtered by arguments given.
"""
cal = self.cal1
# Test with 1 event
attendee_value = 'mailto:[email protected]'
events = cal.search_events(ATTENDEE=attendee_value)
self.assertEqual(len(events), 1)
events = cal.search_events(STATUS='CONFIRMED')
self.assertEqual(events, [])
events = cal.search_events(STATUS='TENTATIVE')
self.assertEqual(len(events), 1)
events = cal.search_events(ATTENDEE=attendee_value, STATUS='TENTATIVE')
self.assertEqual(len(events), 1)
events = cal.search_events(STATUS='TENTATIVE', PRIORITY=1)
self.assertEqual(len(events), 1)
events = cal.search_events(
ATTENDEE=[attendee_value, 'mailto:[email protected]'],
STATUS='TENTATIVE',
PRIORITY=1)
self.assertEqual(len(events), 1)
# Tests with 2 events
cal = iCalendar(string=content2)
attendee_value = 'mailto:[email protected]'
events = cal.search_events(ATTENDEE=attendee_value)
self.assertEqual(len(events), 2)
events = cal.search_events(STATUS='CONFIRMED')
self.assertEqual(events, [])
events = cal.search_events(STATUS='TENTATIVE')
self.assertEqual(len(events), 1)
events = cal.search_events(ATTENDEE=attendee_value, STATUS='TENTATIVE')
self.assertEqual(len(events), 1)
events = cal.search_events(STATUS='TENTATIVE', PRIORITY=1)
self.assertEqual(len(events), 1)
events = cal.search_events(
ATTENDEE=[attendee_value, 'mailto:[email protected]'],
STATUS='TENTATIVE',
PRIORITY=1)
self.assertEqual(len(events), 1)
def test_search_events_in_date(self):
"""Test search events by date.
"""
cal = self.cal1
date = datetime(2005, 5, 29)
events = cal.search_events_in_date(date)
self.assertEqual(len(events), 0)
self.assertEqual(cal.has_event_in_date(date), False)
date = datetime(2005, 5, 30)
events = cal.search_events_in_date(date)
self.assertEqual(len(events), 1)
self.assertEqual(cal.has_event_in_date(date), True)
events = cal.search_events_in_date(date, STATUS='TENTATIVE')
self.assertEqual(len(events), 1)
events = cal.search_events_in_date(date, STATUS='CONFIRMED')
self.assertEqual(len(events), 0)
attendee_value = 'mailto:[email protected]'
events = cal.search_events_in_date(date, ATTENDEE=attendee_value)
self.assertEqual(len(events), 1)
events = cal.search_events_in_date(date, ATTENDEE=attendee_value,
STATUS='TENTATIVE')
self.assertEqual(len(events), 1)
events = cal.search_events_in_date(date, ATTENDEE=attendee_value,
STATUS='CONFIRMED')
self.assertEqual(len(events), 0)
date = datetime(2005, 7, 30)
events = cal.search_events_in_date(date)
self.assertEqual(len(events), 0)
self.assertEqual(cal.has_event_in_date(date), False)
def test_search_events_in_range(self):
"""Test search events matching given dates range.
"""
cal = self.cal2
dtstart = datetime(2005, 1, 1)
dtend = datetime(2005, 1, 1, 20, 0)
events = cal.search_events_in_range(dtstart, dtend)
self.assertEqual(len(events), 0)
dtstart = datetime(2005, 5, 28)
dtend = datetime(2005, 5, 30, 0, 50)
events = cal.search_events_in_range(dtstart, dtend)
self.assertEqual(len(events), 1)
dtstart = datetime(2005, 5, 29)
dtend = datetime(2005, 5, 30, 0, 1)
events = cal.search_events_in_range(dtstart, dtend)
self.assertEqual(len(events), 1)
dtstart = datetime(2005, 5, 30, 23, 59, 59)
dtend = datetime(2005, 5, 31, 0, 0)
events = cal.search_events_in_range(dtstart, dtend)
self.assertEqual(len(events), 1)
dtstart = datetime(2005, 5, 1)
dtend = datetime(2005, 8, 1)
events = cal.search_events_in_range(dtstart, dtend)
self.assertEqual(len(events), 2)
dtstart = datetime(2005, 5, 30, 23)
dtend = datetime(2005, 6, 1)
events = cal.search_events_in_range(dtstart, dtend)
self.assertEqual(len(events), 1)
dtstart = datetime(2005, 5, 31, 0, 0, 1)
dtend = datetime(2005, 6, 1)
events = cal.search_events_in_range(dtstart, dtend)
self.assertEqual(len(events), 1)
events = cal.search_events_in_range(dtstart, dtend, STATUS='TENTATIVE')
self.assertEqual(len(events), 1)
events = cal.search_events_in_range(dtstart, dtend, STATUS='CONFIRMED')
self.assertEqual(len(events), 0)
attendee_value = 'mailto:[email protected]'
events = cal.search_events_in_range(dtstart, dtend,
ATTENDEE=attendee_value)
self.assertEqual(len(events), 1)
events = cal.search_events_in_range(dtstart, dtend,
ATTENDEE=attendee_value, STATUS='TENTATIVE')
self.assertEqual(len(events), 1)
events = cal.search_events_in_range(dtstart, dtend,
ATTENDEE=attendee_value, STATUS='CONFIRMED')
self.assertEqual(len(events), 0)
def test_get_conflicts(self):
"""Test get_conflicts method which returns uid couples of events
conflicting on a given date.
"""
cal = self.cal2
date = datetime(2005, 05, 30)
conflicts = cal.get_conflicts(date)
self.assertEqual(conflicts, None)
# Set a conflict
uid1 = 0
uid2 = 1
cal.update_record(uid1, DTSTART=Property(datetime(2005, 05, 30)),
DTEND=Property(datetime(2005, 05, 31)))
conflicts = cal.get_conflicts(date)
self.assertEqual(conflicts, [(uid1, uid2)])
if __name__ == '__main__':
main()
| gpl-3.0 | -2,529,380,354,615,915,500 | 34.291423 | 79 | 0.597227 | false |
hirofumi0810/tensorflow_end2end_speech_recognition | utils/dataset/xe.py | 1 | 5444 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Base class for loading dataset for the frame-wise model.
In this class, all data will be loaded at each step.
You can use the multi-GPU version.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from utils.dataset.base import Base
class DatasetBase(Base):
def __init__(self, *args, **kwargs):
super(DatasetBase, self).__init__(*args, **kwargs)
def __getitem__(self, index):
input_i = np.array(self.input_paths[index])
label_i = np.array(self.label_paths[index])
return (input_i, label_i)
def __len__(self):
if self.data_type == 'train':
return 18088388
elif self.data_type == 'dev_clean':
return 968057
elif self.data_type == 'dev_other':
return 919980
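        # NOTE: these totals appear to be hard-coded per-split frame counts
        # (presumably precomputed for the train/dev_clean/dev_other splits)
        # rather than values derived from the loaded blocks.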
def __next__(self, batch_size=None):
"""Generate each mini-batch.
Args:
batch_size (int, optional): the size of mini-batch
Returns:
            A tuple of `((inputs, labels), is_new_epoch)`
                inputs: list of input data of size
                    `[num_gpu, B, input_size]`
                labels: list of target labels of size
                    `[num_gpu, B, num_classes]`
                is_new_epoch (bool): If True, 1 epoch is finished
"""
if self.max_epoch is not None and self.epoch >= self.max_epoch:
raise StopIteration
# NOTE: max_epoch = None means infinite loop
if batch_size is None:
batch_size = self.batch_size
# reset
if self.is_new_epoch:
self.is_new_epoch = False
# Load the first block at each epoch
if self.iteration == 0 or self.is_new_epoch:
# Randomly sample block
block_index = random.sample(list(self.rest_block), 1)
self.rest_block -= set(block_index)
# Load block
self.inputs_block = np.array(list(
map(lambda path: np.load(path),
self.input_paths[block_index])))
# NOTE: `[1, num_frames_per_block, input_dim]`
self.inputs_block = self.inputs_block.reshape(
-1, self.inputs_block.shape[-1])
self.labels_block = np.array(list(
map(lambda path: np.load(path),
self.label_paths[block_index])))
# NOTE: `[1, num_frames_per_block, num_classes]`
self.labels_block = self.labels_block.reshape(
-1, self.labels_block.shape[-1])
self.rest_frames = set(range(0, len(self.inputs_block), 1))
# Load block if needed
if len(self.rest_frames) < batch_size and len(self.rest_block) != 0:
# Randomly sample block
if len(self.rest_block) > 1:
block_index = random.sample(list(self.rest_block), 1)
else:
# Last block in each epoch
block_index = list(self.rest_block)
self.rest_block -= set(block_index)
# tmp
rest_inputs_pre_block = self.inputs_block[list(self.rest_frames)]
rest_labels_pre_block = self.labels_block[list(self.rest_frames)]
self.inputs_block = np.array(list(
map(lambda path: np.load(path),
self.input_paths[block_index]))).reshape(-1, self.inputs_block.shape[-1])
self.labels_block = np.array(list(
map(lambda path: np.load(path),
self.label_paths[block_index]))).reshape(-1, self.labels_block.shape[-1])
# Concatenate
self.inputs_block = np.concatenate(
(rest_inputs_pre_block, self.inputs_block), axis=0)
self.labels_block = np.concatenate(
(rest_labels_pre_block, self.labels_block), axis=0)
self.rest_frames = set(range(0, len(self.inputs_block), 1))
# Randomly sample frames
if len(self.rest_frames) > batch_size:
frame_indices = random.sample(
list(self.rest_frames), batch_size)
else:
# Last mini-batch in each block
frame_indices = list(self.rest_frames)
# Shuffle selected mini-batch
random.shuffle(frame_indices)
self.rest_frames -= set(frame_indices)
if len(self.rest_block) == 0 and len(self.rest_frames) == 0:
self.reset()
self.is_new_epoch = True
self.epoch += 1
self.rest_block = set(range(0, len(self.input_paths), 1))
# Set values of each data in mini-batch
inputs = self.inputs_block[frame_indices]
labels = self.labels_block[frame_indices]
###############
# Multi-GPUs
###############
if self.num_gpu > 1:
# Now we split the mini-batch data by num_gpu
inputs = np.array_split(inputs, self.num_gpu, axis=0)
labels = np.array_split(labels, self.num_gpu, axis=0)
else:
inputs = inputs[np.newaxis, :, :]
labels = labels[np.newaxis, :, :]
self.iteration += len(frame_indices)
return (inputs, labels), self.is_new_epoch
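# Minimal usage sketch (illustrative only; the constructor arguments are
# defined by the Base class and are assumed here):
#
#     dataset = DatasetBase(data_type='train', batch_size=32, num_gpu=1, ...)
#     (inputs, labels), is_new_epoch = next(dataset)
#     # inputs: [num_gpu, B, input_size], labels: [num_gpu, B, num_classes]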
| mit | -3,416,920,738,480,581,600 | 35.536913 | 93 | 0.549596 | false |
cs591B1-Project/Social-Media-Impact-on-Stock-Market-and-Price | data/25 verizon/getDataAutoQuery.py | 1 | 3761 | import webhose;
import time;
from datetime import datetime, timedelta
from lxml import html
import requests
import unirest
webhose.config(token='c6052904-f312-436b-a6d8-d915084ac866')
days_back = 30
date_days_ago = datetime.now() - timedelta(days=days_back)
organization = 'verizon'
lang = 'english'
country = 'US'
#set API Token
apiToken = 'c6052904-f312-436b-a6d8-d915084ac866'
# Build URL
#queryURL = 'https://webhose.io/search?token=' + apiToken + '&format=json&q=' + sentiment + '%3A%22' + organization + '%22&ts=1478565932339'
### UPDATE YOUR END POINT HERE - Verizon Positive
response = unirest.get("https://webhose.io/search?token=c6052904-f312-436b-a6d8-d915084ac866&format=json&q=language%3A(english)%20thread.country%3AUS%20organization.positive%3A%22Verizon%22&ts=1478579908230",
headers={
"Accept": "text/plain"
}
)
count = 1
results = response.body["totalResults"]
while results > 0:
fileName = 'verizon_pos_' + str(count) + '.json'
out0 = open(fileName, 'w')
out0.truncate()
out0.write(response.raw_body)
out0.write("\n")
out0.close()
count = count + 1
print response.body["next"]
url = 'https://webhose.io' + response.body["next"]
response = unirest.get(url,
headers={
"Accept": "text/plain"
}
)
results = response.body["totalResults"]
## UPDATE YOUR END POINT HERE - Verizon Neutral
response = unirest.get("https://webhose.io/search?token=c6052904-f312-436b-a6d8-d915084ac866&format=json&q=language%3A(english)%20thread.country%3AUS%20organization.neutral%3A%22Verizon%22&ts=1478579995010",
headers={
"Accept": "text/plain"
}
)
count = 1
results = response.body["totalResults"]
while results > 0:
fileName = 'verizon_neu_' + str(count) + '.json'
out0 = open(fileName, 'w')
out0.truncate()
out0.write(response.raw_body)
out0.write("\n")
out0.close()
count = count + 1
print response.body["next"]
url = 'https://webhose.io' + response.body["next"]
response = unirest.get(url,
headers={
"Accept": "text/plain"
}
)
results = response.body["totalResults"]
## UPDATE YOUR END POINT HERE - Verizon Negative
response = unirest.get("https://webhose.io/search?token=c6052904-f312-436b-a6d8-d915084ac866&format=json&q=language%3A(english)%20thread.country%3AUS%20organization.negative%3A%22Verizon%22&ts=1478580006047",
headers={
"Accept": "text/plain"
}
)
count = 1
results = response.body["totalResults"]
while results > 0:
fileName = 'verizon_neg_' + str(count) + '.json'
out0 = open(fileName, 'w')
out0.truncate()
out0.write(response.raw_body)
out0.write("\n")
out0.close()
count = count + 1
print response.body["next"]
url = 'https://webhose.io' + response.body["next"]
response = unirest.get(url,
headers={
"Accept": "text/plain"
}
)
results = response.body["totalResults"]
'''
postiveData = webhose.search("organization.positive:\"" + topic +
"\" language:\"" + lang +
"\" thread.country:\"" + country +
"\" domain_rank:<100000", since=int(time.mktime(date_days_ago.timetuple())) )
negativeData = webhose.search("organization.negative:\"" + topic +
"\" language:\"" + lang +
"\" thread.country:\"" + country +
"\" format:\"" + "json" +
"\" domain_rank:<100000", since=int(time.mktime(date_days_ago.timetuple())) )
neutralData = webhose.search("organization.negative:\"" + topic +
"\" language:\"" + lang +
"\" thread.country:\"" + country +
"\" domain_rank:<100000", since=int(time.mktime(date_days_ago.timetuple())) )
page = requests.get('https://webhose.io/search?token=c6052904-f312-436b-a6d8-d915084ac866&format=json&q=organization.positive%3A%22Microsoft%22&ts=1478565802902')
#print page
#print page.content
#print negativeData.next
#tree = html.fromstring(page.content)
'''
| mit | 3,822,441,865,177,565,000 | 26.253623 | 208 | 0.686785 | false |
jumpserver/jumpserver | apps/perms/serializers/asset/permission.py | 1 | 5347 | # -*- coding: utf-8 -*-
#
from rest_framework import serializers
from django.utils.translation import ugettext_lazy as _
from django.db.models import Prefetch, Q
from orgs.mixins.serializers import BulkOrgResourceModelSerializer
from perms.models import AssetPermission, Action
from assets.models import Asset, Node, SystemUser
from users.models import User, UserGroup
__all__ = [
'AssetPermissionSerializer',
'ActionsField',
]
class ActionsField(serializers.MultipleChoiceField):
def __init__(self, *args, **kwargs):
kwargs['choices'] = Action.CHOICES
super().__init__(*args, **kwargs)
def to_representation(self, value):
return Action.value_to_choices(value)
def to_internal_value(self, data):
if data is None:
return data
return Action.choices_to_value(data)
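# Round trip performed by ActionsField (illustrative; the concrete choice
# strings are defined by Action.CHOICES and are only assumed here):
#   to_internal_value(["connect", ...]) -> Action.choices_to_value(...) -> int bitmask
#   to_representation(int bitmask) -> Action.value_to_choices(...) -> ["connect", ...]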
class ActionsDisplayField(ActionsField):
def to_representation(self, value):
values = super().to_representation(value)
choices = dict(Action.CHOICES)
return [choices.get(i) for i in values]
class AssetPermissionSerializer(BulkOrgResourceModelSerializer):
actions = ActionsField(required=False, allow_null=True)
is_valid = serializers.BooleanField(read_only=True)
is_expired = serializers.BooleanField(read_only=True, label=_('Is expired'))
users_display = serializers.ListField(child=serializers.CharField(), label=_('Users name'), required=False)
user_groups_display = serializers.ListField(child=serializers.CharField(), label=_('User groups name'), required=False)
assets_display = serializers.ListField(child=serializers.CharField(), label=_('Assets name'), required=False)
nodes_display = serializers.ListField(child=serializers.CharField(), label=_('Nodes name'), required=False)
system_users_display = serializers.ListField(child=serializers.CharField(), label=_('System users name'), required=False)
class Meta:
model = AssetPermission
fields_mini = ['id', 'name']
fields_small = fields_mini + [
'is_active', 'is_expired', 'is_valid', 'actions',
'created_by', 'date_created', 'date_expired',
'date_start', 'comment'
]
fields_m2m = [
'users', 'users_display', 'user_groups', 'user_groups_display', 'assets', 'assets_display',
'nodes', 'nodes_display', 'system_users', 'system_users_display',
'users_amount', 'user_groups_amount', 'assets_amount',
'nodes_amount', 'system_users_amount',
]
fields = fields_small + fields_m2m
read_only_fields = ['created_by', 'date_created']
extra_kwargs = {
'is_expired': {'label': _('Is expired')},
'is_valid': {'label': _('Is valid')},
'actions': {'label': _('Actions')},
'users_amount': {'label': _('Users amount')},
'user_groups_amount': {'label': _('User groups amount')},
'assets_amount': {'label': _('Assets amount')},
'nodes_amount': {'label': _('Nodes amount')},
'system_users_amount': {'label': _('System users amount')},
}
@classmethod
def setup_eager_loading(cls, queryset):
""" Perform necessary eager loading of data. """
queryset = queryset.prefetch_related('users', 'user_groups', 'assets', 'nodes', 'system_users')
return queryset
def to_internal_value(self, data):
if 'system_users_display' in data:
            # Convert system_users_display into system_users
system_users = data.get('system_users', [])
system_users_display = data.pop('system_users_display')
for name in system_users_display:
system_user = SystemUser.objects.filter(name=name).first()
if system_user and system_user.id not in system_users:
system_users.append(system_user.id)
data['system_users'] = system_users
return super().to_internal_value(data)
def perform_display_create(self, instance, **kwargs):
        # Users
users_to_set = User.objects.filter(
Q(name__in=kwargs.get('users_display')) | Q(username__in=kwargs.get('users_display'))
).distinct()
instance.users.add(*users_to_set)
        # User groups
user_groups_to_set = UserGroup.objects.filter(name__in=kwargs.get('user_groups_display')).distinct()
instance.user_groups.add(*user_groups_to_set)
        # Assets
assets_to_set = Asset.objects.filter(
Q(ip__in=kwargs.get('assets_display')) | Q(hostname__in=kwargs.get('assets_display'))
).distinct()
instance.assets.add(*assets_to_set)
        # Nodes
nodes_to_set = Node.objects.filter(full_value__in=kwargs.get('nodes_display')).distinct()
instance.nodes.add(*nodes_to_set)
def create(self, validated_data):
display = {
'users_display' : validated_data.pop('users_display', ''),
'user_groups_display' : validated_data.pop('user_groups_display', ''),
'assets_display' : validated_data.pop('assets_display', ''),
'nodes_display' : validated_data.pop('nodes_display', '')
}
instance = super().create(validated_data)
self.perform_display_create(instance, **display)
return instance
| gpl-2.0 | -1,230,686,421,651,684,400 | 40.913386 | 125 | 0.622957 | false |
brainstorm/bcbio-nextgen | tests/bcbio_vm/test_docker.py | 1 | 3019 | import os
import subprocess
import pytest
from tests.conftest import make_workdir
from tests.conftest import get_post_process_yaml
@pytest.marks('docker')
def test_docker(install_test_files, data_dir):
"""Run an analysis with code and tools inside a docker container.
Requires https://github.com/chapmanb/bcbio-nextgen-vm
"""
with make_workdir() as workdir:
cl = [
"bcbio_vm.py",
"--datadir=%s" % data_dir,
"run",
"--systemconfig=%s" % get_post_process_yaml(data_dir, workdir),
"--fcdir=%s" % os.path.join(
data_dir, os.pardir, "100326_FC6107FAAXX"),
os.path.join(data_dir, "run_info-bam.yaml")
]
subprocess.check_call(cl)
@pytest.marks('docker_ipython', 'docker')
def test_docker_ipython(install_test_files, data_dir):
"""Run an analysis with code and tools inside a docker container,
driven via IPython.
Requires https://github.com/chapmanb/bcbio-nextgen-vm
"""
with make_workdir() as workdir:
cl = [
"bcbio_vm.py",
"--datadir=%s" % data_dir,
"ipython",
"--systemconfig=%s" % get_post_process_yaml(data_dir, workdir),
"--fcdir=%s" % os.path.join(
data_dir, os.pardir, "100326_FC6107FAAXX"),
os.path.join(data_dir, "run_info-bam.yaml"),
"lsf", "localrun"
]
subprocess.check_call(cl)
class TestCWL():
""" Run simple CWL workflows.
Requires https://github.com/chapmanb/bcbio-nextgen-vm
"""
@pytest.marks('cwl_docker', 'cwl', 'docker')
    def test_2_cwl_docker(self, install_test_files, data_dir):
"""Create a common workflow language description and run on a
Docker installation.
"""
with make_workdir() as workdir:
cl = ["bcbio_vm.py", "cwl", "../data/automated/run_info-cwl.yaml",
"--systemconfig", get_post_process_yaml(data_dir, workdir)]
subprocess.check_call(cl)
cl = ["bcbio_vm.py", "cwlrun", "cwltool", "run_info-cwl-workflow"]
subprocess.check_call(cl)
print
print "To run with a CWL tool, cd test_automated_output and:"
print " ".join(cl)
@pytest.marks('speed2', 'cwl', 'cwl_local', 'install_required')
def test_1_cwl_local(self, install_test_files, data_dir):
"""Create a common workflow language description and run on local installation.
"""
with make_workdir() as workdir:
cl = ["bcbio_vm.py", "cwl", "../data/automated/run_info-cwl.yaml",
"--systemconfig", get_post_process_yaml(data_dir, workdir)]
subprocess.check_call(cl)
cl = ["bcbio_vm.py", "cwlrun", "cwltool", "run_info-cwl-workflow",
"--no-container"]
subprocess.check_call(cl)
print
print "To run with a CWL tool, cd test_automated_output and:"
print " ".join(cl)
| mit | -4,401,909,337,369,498,600 | 34.940476 | 87 | 0.576019 | false |
commtrack/commtrack-old-to-del | apps/hq/tests/views.py | 1 | 3356 | from django.test import TestCase
from django.test.client import Client
from hq.models import ExtUser, Domain, Organization, ReporterProfile
from hq.tests.util import create_user_and_domain
from reporters.models import Reporter
class ViewsTestCase(TestCase):
def setUp(self):
user, domain = create_user_and_domain()
self.client.login(username='brian',password='test')
org = Organization(name='mockorg', domain=domain)
org.save()
def testBasicViews(self):
reporter = Reporter(alias="rapporteur")
reporter.save()
domain = Domain.objects.get(name='mockdomain')
profile = ReporterProfile(reporter=reporter, domain=domain)
profile.save()
response = self.client.get('/')
self.assertNotContains(response,"Error", status_code=200)
self.assertNotContains(response,"Exception", status_code=200)
response = self.client.get('/serverup.txt')
self.assertNotContains(response,"Error", status_code=200)
self.assertNotContains(response,"Exception", status_code=200)
response = self.client.get('/change_password/')
self.assertNotContains(response,"Error", status_code=200)
self.assertNotContains(response,"Exception", status_code=200)
response = self.client.get('/report/')
self.assertNotContains(response,"Error", status_code=200)
self.assertNotContains(response,"Exception", status_code=200)
response = self.client.get('/report/email/')
self.assertNotContains(response,"Error", status_code=200)
self.assertNotContains(response,"Exception", status_code=200)
response = self.client.get('/report/sms/')
self.assertNotContains(response,"Error", status_code=200)
self.assertNotContains(response,"Exception", status_code=200)
response = self.client.get('/reporters/add/')
self.assertNotContains(response,"Error", status_code=200)
self.assertNotContains(response,"Exception", status_code=200)
response = self.client.get('/reporters/%s/' % reporter.id)
self.assertNotContains(response,"Error", status_code=200)
self.assertNotContains(response,"Exception", status_code=200)
response = self.client.get('/charts/default/')
self.assertNotContains(response,"Error", status_code=200)
self.assertNotContains(response,"Exception", status_code=200)
response = self.client.get('/charts/')
self.assertNotContains(response,"Error", status_code=200)
self.assertNotContains(response,"Exception", status_code=200)
response = self.client.get('/stats/')
self.assertNotContains(response,"Error", status_code=200)
self.assertNotContains(response,"Exception", status_code=200)
# TODO - fix
"""
response = self.client.get('/stats/delinquents/')
self.assertNotContains(response,"Error", status_code=200)
self.assertNotContains(response,"Exception", status_code=200)
"""
# format url variables like so:
# response = self.client.get('/api/xforms/',{'format':'json'})
def tearDown(self):
user = ExtUser.objects.get(username='brian')
user.delete()
domain = Domain.objects.get(name='mockdomain')
domain.delete()
| bsd-3-clause | -5,212,102,433,399,762,000 | 42.025641 | 70 | 0.665375 | false |
chromium/chromium | buildtools/checkdeps/rules.py | 5 | 7044 | # Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Base classes to represent dependency rules, used by checkdeps.py"""
import os
import re
class Rule(object):
"""Specifies a single rule for an include, which can be one of
ALLOW, DISALLOW and TEMP_ALLOW.
"""
# These are the prefixes used to indicate each type of rule. These
# are also used as values for self.allow to indicate which type of
# rule this is.
ALLOW = '+'
DISALLOW = '-'
TEMP_ALLOW = '!'
def __init__(self, allow, directory, dependent_directory, source):
self.allow = allow
self._dir = directory
self._dependent_dir = dependent_directory
self._source = source
def __str__(self):
return '"%s%s" from %s.' % (self.allow, self._dir, self._source)
def AsDependencyTuple(self):
"""Returns a tuple (allow, dependent dir, dependee dir) for this rule,
which is fully self-sufficient to answer the question whether the dependent
is allowed to depend on the dependee, without knowing the external
context."""
return self.allow, self._dependent_dir or '.', self._dir or '.'
def ParentOrMatch(self, other):
"""Returns true if the input string is an exact match or is a parent
of the current rule. For example, the input "foo" would match "foo/bar"."""
return self._dir == other or self._dir.startswith(other + '/')
def ChildOrMatch(self, other):
"""Returns true if the input string would be covered by this rule. For
example, the input "foo/bar" would match the rule "foo"."""
return self._dir == other or other.startswith(self._dir + '/')
class MessageRule(Rule):
"""A rule that has a simple message as the reason for failing,
unrelated to directory or source.
"""
def __init__(self, reason):
super(MessageRule, self).__init__(Rule.DISALLOW, '', '', '')
self._reason = reason
def __str__(self):
return self._reason
def ParseRuleString(rule_string, source):
"""Returns a tuple of a character indicating what type of rule this
is, and a string holding the path the rule applies to.
"""
if not rule_string:
raise Exception('The rule string "%s" is empty\nin %s' %
(rule_string, source))
if not rule_string[0] in [Rule.ALLOW, Rule.DISALLOW, Rule.TEMP_ALLOW]:
raise Exception(
'The rule string "%s" does not begin with a "+", "-" or "!".' %
rule_string)
# If a directory is specified in a DEPS file with a trailing slash, then it
# will not match as a parent directory in Rule's [Parent|Child]OrMatch above.
# Ban them.
if rule_string[-1] == '/':
raise Exception(
'The rule string "%s" ends with a "/" which is not allowed.'
' Please remove the trailing "/".' % rule_string)
return rule_string[0], rule_string[1:]
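# Example, following the checks above: ParseRuleString('+base/memory', 'src/DEPS')
# returns ('+', 'base/memory'), while an empty string, a missing '+'/'-'/'!'
# prefix, or a trailing '/' raises an Exception.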
class Rules(object):
"""Sets of rules for files in a directory.
By default, rules are added to the set of rules applicable to all
dependee files in the directory. Rules may also be added that apply
only to dependee files whose filename (last component of their path)
matches a given regular expression; hence there is one additional
set of rules per unique regular expression.
"""
def __init__(self):
"""Initializes the current rules with an empty rule list for all
files.
"""
# We keep the general rules out of the specific rules dictionary,
# as we need to always process them last.
self._general_rules = []
# Keys are regular expression strings, values are arrays of rules
# that apply to dependee files whose basename matches the regular
# expression. These are applied before the general rules, but
# their internal order is arbitrary.
self._specific_rules = {}
def __str__(self):
result = ['Rules = {\n (apply to all files): [\n%s\n ],' % '\n'.join(
' %s' % x for x in self._general_rules)]
for regexp, rules in list(self._specific_rules.items()):
result.append(' (limited to files matching %s): [\n%s\n ]' % (
regexp, '\n'.join(' %s' % x for x in rules)))
result.append(' }')
return '\n'.join(result)
def AsDependencyTuples(self, include_general_rules, include_specific_rules):
"""Returns a list of tuples (allow, dependent dir, dependee dir) for the
specified rules (general/specific). Currently only general rules are
supported."""
def AddDependencyTuplesImpl(deps, rules, extra_dependent_suffix=""):
for rule in rules:
(allow, dependent, dependee) = rule.AsDependencyTuple()
tup = (allow, dependent + extra_dependent_suffix, dependee)
deps.add(tup)
deps = set()
if include_general_rules:
AddDependencyTuplesImpl(deps, self._general_rules)
if include_specific_rules:
for regexp, rules in list(self._specific_rules.items()):
AddDependencyTuplesImpl(deps, rules, "/" + regexp)
return deps
def AddRule(self, rule_string, dependent_dir, source, dependee_regexp=None):
"""Adds a rule for the given rule string.
Args:
rule_string: The include_rule string read from the DEPS file to apply.
source: A string representing the location of that string (filename, etc.)
so that we can give meaningful errors.
dependent_dir: The directory to which this rule applies.
dependee_regexp: The rule will only be applied to dependee files
whose filename (last component of their path)
matches the expression. None to match all
dependee files.
"""
rule_type, rule_dir = ParseRuleString(rule_string, source)
if not dependee_regexp:
rules_to_update = self._general_rules
else:
if dependee_regexp in self._specific_rules:
rules_to_update = self._specific_rules[dependee_regexp]
else:
rules_to_update = []
# Remove any existing rules or sub-rules that apply. For example, if we're
# passed "foo", we should remove "foo", "foo/bar", but not "foobar".
rules_to_update = [x for x in rules_to_update
if not x.ParentOrMatch(rule_dir)]
rules_to_update.insert(0, Rule(rule_type, rule_dir, dependent_dir, source))
if not dependee_regexp:
self._general_rules = rules_to_update
else:
self._specific_rules[dependee_regexp] = rules_to_update
def RuleApplyingTo(self, include_path, dependee_path):
"""Returns the rule that applies to |include_path| for a dependee
file located at |dependee_path|.
"""
dependee_filename = os.path.basename(dependee_path)
for regexp, specific_rules in list(self._specific_rules.items()):
if re.match(regexp, dependee_filename):
for rule in specific_rules:
if rule.ChildOrMatch(include_path):
return rule
for rule in self._general_rules:
if rule.ChildOrMatch(include_path):
return rule
return MessageRule('no rule applying.')
| bsd-3-clause | 3,845,343,916,866,982,000 | 36.870968 | 80 | 0.66113 | false |
anhaidgroup/py_entitymatching | py_entitymatching/dask/dask_extract_features.py | 1 | 9597 | import logging
import os
import pandas as pd
import multiprocessing
import numpy as np
import dask
from dask.diagnostics import ProgressBar
from dask import delayed
from cloudpickle import cloudpickle
import tempfile
import py_entitymatching.catalog.catalog_manager as cm
import py_entitymatching.utils.catalog_helper as ch
import py_entitymatching.utils.generic_helper as gh
from py_entitymatching.utils.validation_helper import validate_object_type
from py_entitymatching.feature.extractfeatures import get_feature_vals_by_cand_split
from py_entitymatching.utils.validation_helper import validate_object_type
from py_entitymatching.dask.utils import validate_chunks, get_num_partitions, \
get_num_cores, wrap
logger = logging.getLogger(__name__)
def dask_extract_feature_vecs(candset, attrs_before=None, feature_table=None,
attrs_after=None, verbose=False,
show_progress=True, n_chunks=1):
"""
WARNING THIS COMMAND IS EXPERIMENTAL AND NOT TESTED. USE AT YOUR OWN RISK
This function extracts feature vectors from a DataFrame (typically a
labeled candidate set).
Specifically, this function uses feature
table, ltable and rtable (that is present in the `candset`'s
metadata) to extract feature vectors.
Args:
candset (DataFrame): The input candidate set for which the features
vectors should be extracted.
attrs_before (list): The list of attributes from the input candset,
that should be added before the feature vectors (defaults to None).
feature_table (DataFrame): A DataFrame containing a list of
features that should be used to compute the feature vectors (
defaults to None).
attrs_after (list): The list of attributes from the input candset
that should be added after the feature vectors (defaults to None).
verbose (boolean): A flag to indicate whether the debug information
should be displayed (defaults to False).
show_progress (boolean): A flag to indicate whether the progress of
extracting feature vectors must be displayed (defaults to True).
n_chunks (int): The number of partitions to split the candidate set. If it
is set to -1, the number of partitions will be set to the
number of cores in the machine.
Returns:
A pandas DataFrame containing feature vectors.
The DataFrame will have metadata ltable and rtable, pointing
to the same ltable and rtable as the input candset.
Also, the output
DataFrame will have three columns: key, foreign key ltable, foreign
key rtable copied from input candset to the output DataFrame. These
three columns precede the columns mentioned in `attrs_before`.
Raises:
AssertionError: If `candset` is not of type pandas
DataFrame.
AssertionError: If `attrs_before` has attributes that
are not present in the input candset.
        AssertionError: If `attrs_after` has attributes that
are not present in the input candset.
AssertionError: If `feature_table` is set to None.
AssertionError: If `n_chunks` is not of type
int.
Examples:
>>> import py_entitymatching as em
>>> from py_entitymatching.dask.dask_extract_features import dask_extract_feature_vecs
>>> A = em.read_csv_metadata('path_to_csv_dir/table_A.csv', key='ID')
>>> B = em.read_csv_metadata('path_to_csv_dir/table_B.csv', key='ID')
>>> match_f = em.get_features_for_matching(A, B)
>>> # G is the labeled dataframe which should be converted into feature vectors
        >>> H = dask_extract_feature_vecs(G, feature_table=match_f, attrs_before=['title'], attrs_after=['gold_labels'])
"""
logger.warning(
"WARNING THIS COMMAND IS EXPERIMENTAL AND NOT TESTED. USE AT YOUR OWN RISK.")
# Validate input parameters
# # We expect the input candset to be of type pandas DataFrame.
validate_object_type(candset, pd.DataFrame, error_prefix='Input cand.set')
# # If the attrs_before is given, Check if the attrs_before are present in
# the input candset
    if attrs_before is not None:
if not ch.check_attrs_present(candset, attrs_before):
logger.error(
'The attributes mentioned in attrs_before is not present '
'in the input table')
raise AssertionError(
'The attributes mentioned in attrs_before is not present '
'in the input table')
# # If the attrs_after is given, Check if the attrs_after are present in
# the input candset
    if attrs_after is not None:
if not ch.check_attrs_present(candset, attrs_after):
logger.error(
'The attributes mentioned in attrs_after is not present '
'in the input table')
raise AssertionError(
'The attributes mentioned in attrs_after is not present '
'in the input table')
# We expect the feature table to be a valid object
if feature_table is None:
logger.error('Feature table cannot be null')
raise AssertionError('The feature table cannot be null')
# Do metadata checking
# # Mention what metadata is required to the user
ch.log_info(logger, 'Required metadata: cand.set key, fk ltable, '
'fk rtable, '
'ltable, rtable, ltable key, rtable key', verbose)
# # Get metadata
ch.log_info(logger, 'Getting metadata from catalog', verbose)
key, fk_ltable, fk_rtable, ltable, rtable, l_key, r_key = \
cm.get_metadata_for_candset(
candset, logger, verbose)
# # Validate metadata
ch.log_info(logger, 'Validating metadata', verbose)
cm._validate_metadata_for_candset(candset, key, fk_ltable, fk_rtable,
ltable, rtable, l_key, r_key,
logger, verbose)
# Extract features
# id_list = [(row[fk_ltable], row[fk_rtable]) for i, row in
# candset.iterrows()]
# id_list = [tuple(tup) for tup in candset[[fk_ltable, fk_rtable]].values]
# # Set index for convenience
l_df = ltable.set_index(l_key, drop=False)
r_df = rtable.set_index(r_key, drop=False)
# # Apply feature functions
ch.log_info(logger, 'Applying feature functions', verbose)
col_names = list(candset.columns)
fk_ltable_idx = col_names.index(fk_ltable)
fk_rtable_idx = col_names.index(fk_rtable)
validate_object_type(n_chunks, int, 'Parameter n_chunks')
validate_chunks(n_chunks)
n_chunks = get_num_partitions(n_chunks, len(candset))
c_splits = np.array_split(candset, n_chunks)
pickled_obj = cloudpickle.dumps(feature_table)
feat_vals_by_splits = []
for i in range(len(c_splits)):
partial_result = delayed(get_feature_vals_by_cand_split)(pickled_obj,
fk_ltable_idx,
fk_rtable_idx, l_df,
r_df, c_splits[i],
False)
feat_vals_by_splits.append(partial_result)
feat_vals_by_splits = delayed(wrap)(feat_vals_by_splits)
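    # Execution model (sketch): each candset chunk becomes one dask.delayed
    # task computing feature values for its rows, delayed(wrap) gathers the
    # per-chunk results into a single delayed object, and the .compute(
    # scheduler="processes", num_workers=get_num_cores()) call below fans the
    # tasks out over local worker processes before the per-chunk lists are
    # summed into one flat list.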
if show_progress:
with ProgressBar():
feat_vals_by_splits = feat_vals_by_splits.compute(scheduler="processes",
num_workers=get_num_cores())
else:
feat_vals_by_splits = feat_vals_by_splits.compute(scheduler="processes",
num_workers=get_num_cores())
feat_vals = sum(feat_vals_by_splits, [])
# Construct output table
feature_vectors = pd.DataFrame(feat_vals, index=candset.index.values)
# # Rearrange the feature names in the input feature table order
feature_names = list(feature_table['feature_name'])
feature_vectors = feature_vectors[feature_names]
ch.log_info(logger, 'Constructing output table', verbose)
# print(feature_vectors)
# # Insert attrs_before
if attrs_before:
if not isinstance(attrs_before, list):
attrs_before = [attrs_before]
attrs_before = gh.list_diff(attrs_before, [key, fk_ltable, fk_rtable])
attrs_before.reverse()
for a in attrs_before:
feature_vectors.insert(0, a, candset[a])
# # Insert keys
feature_vectors.insert(0, fk_rtable, candset[fk_rtable])
feature_vectors.insert(0, fk_ltable, candset[fk_ltable])
feature_vectors.insert(0, key, candset[key])
# # insert attrs after
if attrs_after:
if not isinstance(attrs_after, list):
attrs_after = [attrs_after]
attrs_after = gh.list_diff(attrs_after, [key, fk_ltable, fk_rtable])
attrs_after.reverse()
col_pos = len(feature_vectors.columns)
for a in attrs_after:
feature_vectors.insert(col_pos, a, candset[a])
col_pos += 1
# Reset the index
# feature_vectors.reset_index(inplace=True, drop=True)
# # Update the catalog
cm.init_properties(feature_vectors)
cm.copy_properties(candset, feature_vectors)
# Finally, return the feature vectors
return feature_vectors
| bsd-3-clause | 4,681,506,796,973,360,000 | 38.012195 | 115 | 0.624153 | false |
NuAoA/mopidy-alcd | mopidy_AdafruitLCD/Adafruit_LCD_frontend.py | 1 | 3199 | #!/usr/bin/env python
import logging
import traceback
import pykka
import mopidy
import sys
import re #todo: remove
import threading
from time import sleep
from mopidy import core
from .Adafruit_player import AdafruitPlayer
logger = logging.getLogger(__name__)
class AdafruitLCD(pykka.ThreadingActor, core.CoreListener):
def __init__(self,config,core):
super(AdafruitLCD,self).__init__()
self.core = core
self.player = AdafruitPlayer(core)
self.startup = threading.Thread(target=self.media_scan)
#self.player.run()
def media_scan(self):
media_list = []
timeout = 0
self.player.plate.smessage("Loading Media...")
sleep(2)
while self.player.running:
if timeout>=50 or self.player.inMenus:
if not self.player.inMenus:
if len(media_list)==0:
self.player.plate.smessage("No Media Found",line=1)
elif self.player.track!=None:
self.player.displaySongInfo()
break
update = False
list = self.core.library.browse(None).get()
for media in list:
if media.name in media_list:
pass
else:
media_list.append(media.name)
update = True
break
if not self.player.inMenus:
if len(media_list) > 0:
if update:
str = ""
for item in media_list:
if str != "":
str = item+", "+str
else:
str = item
self.player.plate.smessage(str.ljust(16),line=1)
sleep(1)
else:
sleep(5)
else:
sleep(5)
timeout+=1
def on_start(self):
logger.info("[ALCD] Starting AdafruitLCD")
self.player.start()
self.startup.start()
def on_stop(self):
logger.info("[ALCD] Stopping AdafruitLCD")
self.player.stop()
def track_playback_ended(self,tl_track, time_position):
logger.info("[ALCD] track playback ended")
self.player.track_playback_ended(tl_track.track)
def track_playback_started(self,tl_track):
try:
logger.info("[ALCD] Now playing:")
try:
for artist in tl_track.track.artists:
logger.info("[ALCD] >"+tl_track.track.name+ " by " +artist.name)
except:
traceback.print_exc()
self.player.updateCurrentTrack(tl_track.track)
except:
traceback.print_exc()
def playback_state_changed(self,old_state,new_state):
try:
#logger.info("[ALCD] Playback state changed from " + old_state + " to " + new_state)
self.player.updatePlaybackState(old_state,new_state)
except:
traceback.print_exc()
def print_tracks(self,tl_track_list):
for tltrack in tl_track_list:
logger.info("[ALCD] " + tltrack.track.name)
"""
def playlists_loaded(self):
logger.info("[ALCD] Playlists:")
try:
for playlist in self.core.playlists.playlists.get():
if re.search("spotify:user:spotify",playlist.uri):
self.core.tracklist.add(tracks=playlist.tracks)
self.core.playback.play()
except:
traceback.print_exc()
def tracklist_changed(self):
logger.info("[ALCD] Tracklist updated")
print(" Total: "+str(len(self.core.tracklist.tl_tracks.get())))
#self.print_tracks(self.core.tracklist.tl_tracks.get())
def track_playback_ended(self,tl_track,time_position):
logger.info("[ALCD] Playback Ended")
"""
| apache-2.0 | -27,827,435,575,448,490 | 24.388889 | 87 | 0.65708 | false |
chilitechno/barrioSquare | setup.py | 1 | 1178 | from py2deb import Py2deb
from glob import glob
version = "0.1.20"
p=Py2deb("barriosquare")
p.author="Chris J. Burris"
p.mail="[email protected]"
p.description="Maemo application to access foursquare.com api functionality"
p["/opt/barrioSquare"] = ["barriosq.py|barriosq","barrioConfig.py","barrioStyles.py","get-location.py","oauth.py","oauthclient.py","loading.gif","loading2.gif","loading.html","refreshing.gif","friendsIcon.png","myInfoIcon.png","placesIcon.png","refreshIcon.png","searchIcon.png","settingsIcon.png","signOutIcon.png","CHANGELOG","README","LICENSE.txt","SignInFourSquare.png","powerbyfoursquare2.png","historyIcon.png",]
p["/usr/share/applications/hildon"] = ["barrioSquare.desktop",]
p["/usr/share/icons/hicolor/48x48/apps"] = ["barrioSquare.png",]
p["/usr/share/icons/hicolor/64x64/apps"] = ["barrioSquare64.png",]
p.url = "http://www.chilitechno.com/fster"
p.depends="python2.5, python-osso, python2.5-qt4-common, python2.5-qt4-core, python2.5-qt4-gui, python2.5-qt4-network, python2.5-qt4-webkit, python-location"
p.license="gpl"
p.arch="all"
p.section="net"
# p.postinstall="gtk-update-icon-cache -f /usr/share/icons/hicolor"
p.generate(version)
| gpl-3.0 | -1,532,249,144,477,468,200 | 55.095238 | 418 | 0.745331 | false |
EdDev/vdsm | tests/virttests/vmstats_test.py | 1 | 18952 | #
# Copyright 2015 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from __future__ import absolute_import
import copy
import uuid
import six
from vdsm.virt import vmstats
from testlib import VdsmTestCase as TestCaseBase
from testlib import permutations, expandPermutations
_FAKE_BULK_STATS = {
'f3243a90-2e9e-4061-b7b3-a6c585e14857': (
{
'state.state': 1,
'state.reason': 1,
'cpu.time': 13755069120,
'cpu.user': 3370000000,
'cpu.system': 6320000000,
'balloon.current': 4194304,
'balloon.maximum': 4194304,
'vcpu.current': 2,
'vcpu.maximum': 16,
'vcpu.0.state': 1,
'vcpu.0.time': 10910000000,
'vcpu.1.state': 1,
'vcpu.1.time': 0,
'net.count': 1,
'net.0.name': 'vnet0',
'net.0.rx.bytes': 0,
'net.0.rx.pkts': 0,
'net.0.rx.errs': 0,
'net.0.rx.drop': 0,
'net.0.tx.bytes': 0,
'net.0.tx.pkts': 0,
'net.0.tx.errs': 0,
'net.0.tx.drop': 0,
'block.count': 2,
'block.0.name': 'hdc',
'block.0.rd.reqs': 0,
'block.0.rd.bytes': 0,
'block.0.rd.times': 0,
'block.0.wr.reqs': 0,
'block.0.wr.bytes': 0,
'block.0.wr.times': 0,
'block.0.fl.reqs': 0,
'block.0.fl.times': 0,
'block.0.allocation': 0,
'block.1.name': 'vda',
'block.1.path': (
'/rhev'
'/data-center'
'/00000001-0001-0001-0001-0000000001e8'
'/bbed5784-b0ee-4a0a-aff2-801da0bcf39e'
'/images'
'/cbe82d1f-a0ba-4af2-af2f-788d15eef043'
'/7ba49d31-4fa7-49df-8df4-37a22de79f62'
),
'block.1.rd.reqs': 1,
'block.1.rd.bytes': 512,
'block.1.rd.times': 58991,
'block.1.wr.reqs': 0,
'block.1.wr.bytes': 0,
'block.1.wr.times': 0,
'block.1.fl.reqs': 0,
'block.1.fl.times': 0,
'block.1.allocation': 0,
'block.1.capacity': 42949672960,
},
{
'state.state': 1,
'state.reason': 1,
'cpu.time': 13755069120,
'cpu.user': 3370000000,
'cpu.system': 6320000000,
'balloon.current': 4194304,
'balloon.maximum': 4194304,
'vcpu.current': 2,
'vcpu.maximum': 16,
'vcpu.0.state': 1,
'vcpu.0.time': 10910000000,
'vcpu.1.state': 1,
'vcpu.1.time': 0,
'net.count': 2,
'net.0.name': 'vnet1',
'net.0.rx.bytes': 0,
'net.0.rx.pkts': 0,
'net.0.rx.errs': 0,
'net.0.rx.drop': 0,
'net.0.tx.bytes': 0,
'net.0.tx.pkts': 0,
'net.0.tx.errs': 0,
'net.0.tx.drop': 0,
'net.1.name': 'vnet0',
'net.1.rx.bytes': 1024,
'net.1.rx.pkts': 128,
'net.1.rx.errs': 0,
'net.1.rx.drop': 0,
'net.1.tx.bytes': 2048,
'net.1.tx.pkts': 256,
'net.1.tx.errs': 0,
'net.1.tx.drop': 0,
'block.count': 3,
'block.0.name': 'hdd',
'block.0.rd.reqs': 0,
'block.0.rd.bytes': 0,
'block.0.rd.times': 0,
'block.0.wr.reqs': 0,
'block.0.wr.bytes': 0,
'block.0.wr.times': 0,
'block.0.fl.reqs': 0,
'block.0.fl.times': 0,
'block.0.allocation': 0,
'block.1.name': 'vda',
'block.1.path': (
'/rhev'
'/data-center'
'/00000001-0001-0001-0001-0000000001e8'
'/bbed5784-b0ee-4a0a-aff2-801da0bcf39e'
'/images'
'/cbe82d1f-a0ba-4af2-af2f-788d15eef043'
'/7ba49d31-4fa7-49df-8df4-37a22de79f62'
),
'block.1.rd.reqs': 1,
'block.1.rd.bytes': 512,
'block.1.rd.times': 58991,
'block.1.wr.reqs': 0,
'block.1.wr.bytes': 0,
'block.1.wr.times': 0,
'block.1.fl.reqs': 0,
'block.1.fl.times': 0,
'block.1.allocation': 0,
'block.1.capacity': 42949672960,
'block.2.name': 'hdc',
'block.2.rd.reqs': 0,
'block.2.rd.bytes': 0,
'block.2.rd.times': 0,
'block.2.wr.reqs': 0,
'block.2.wr.bytes': 0,
'block.2.wr.times': 0,
'block.2.fl.reqs': 0,
'block.2.fl.times': 0,
'block.2.allocation': 0,
},
),
}
# On SR-IOV we have seen an unexpected net.count == 2 with data reported for only one NIC.
_FAKE_BULK_STATS_SRIOV = {
'f3243a90-2e9e-4061-b7b3-a6c585e14857': (
{
'state.state': 1,
'state.reason': 1,
'cpu.time': 13755069120,
'cpu.user': 3370000000,
'cpu.system': 6320000000,
'balloon.current': 4194304,
'balloon.maximum': 4194304,
'vcpu.current': 2,
'vcpu.maximum': 16,
'vcpu.0.state': 1,
'vcpu.0.time': 10910000000,
'vcpu.1.state': 1,
'vcpu.1.time': 0,
'net.count': 2,
'net.1.name': 'vnet1',
'net.1.rx.bytes': 0,
'net.1.rx.pkts': 0,
'net.1.rx.errs': 0,
'net.1.rx.drop': 0,
'net.1.tx.bytes': 0,
'net.1.tx.pkts': 0,
'net.1.tx.errs': 0,
'net.1.tx.drop': 0,
'block.count': 2,
'block.0.name': 'hdc',
'block.0.rd.reqs': 0,
'block.0.rd.bytes': 0,
'block.0.rd.times': 0,
'block.0.wr.reqs': 0,
'block.0.wr.bytes': 0,
'block.0.wr.times': 0,
'block.0.fl.reqs': 0,
'block.0.fl.times': 0,
'block.0.allocation': 0,
'block.1.name': 'vda',
'block.1.path': (
'/rhev'
'/data-center'
'/00000001-0001-0001-0001-0000000001e8'
'/bbed5784-b0ee-4a0a-aff2-801da0bcf39e'
'/images'
'/cbe82d1f-a0ba-4af2-af2f-788d15eef043'
'/7ba49d31-4fa7-49df-8df4-37a22de79f62'
),
'block.1.rd.reqs': 1,
'block.1.rd.bytes': 512,
'block.1.rd.times': 58991,
'block.1.wr.reqs': 0,
'block.1.wr.bytes': 0,
'block.1.wr.times': 0,
'block.1.fl.reqs': 0,
'block.1.fl.times': 0,
'block.1.allocation': 0,
'block.1.capacity': 42949672960,
},
)
}
class VmStatsTestCase(TestCaseBase):
def setUp(self):
# just pick one sampling
self.samples = next(six.itervalues(_FAKE_BULK_STATS))
self.bulk_stats = self.samples[0]
self.interval = 10 # seconds
def assertNameIsAt(self, stats, group, idx, name):
self.assertEqual(stats['%s.%d.name' % (group, idx)], name)
def assertStatsHaveKeys(self, stats, keys):
for key in keys:
self.assertIn(key, stats)
def assertRepeatedStatsHaveKeys(self, items, stats, keys):
for item in items:
self.assertStatsHaveKeys(stats[item.name], keys)
@expandPermutations
class UtilsFunctionsTests(VmStatsTestCase):
# we should not test private functions, but this one is
# the cornerstone of bulk stats translation, so we make
    # one exception for the sake of practicality.
@permutations([['block', 'hdc'], ['net', 'vnet0']])
def test_find_existing(self, group, name):
indexes = vmstats._find_bulk_stats_reverse_map(
self.bulk_stats, group)
self.assertNameIsAt(
self.bulk_stats, group, indexes[name], name)
@permutations([['block'], ['net']])
def test_find_bogus(self, group):
name = 'inexistent'
indexes = vmstats._find_bulk_stats_reverse_map(
self.bulk_stats, group)
self.assertNotIn(name, indexes)
@permutations([['block', 'hdc'], ['net', 'vnet0']])
def test_index_can_change(self, group, name):
all_indexes = []
for bulk_stats in self.samples:
indexes = vmstats._find_bulk_stats_reverse_map(
bulk_stats, group)
self.assertNameIsAt(bulk_stats, group, indexes[name], name)
all_indexes.append(indexes)
# and indeed indexes must change
self.assertEqual(len(all_indexes), len(self.samples))
def test_network_missing(self):
# seen using SR-IOV
bulk_stats = next(six.itervalues(_FAKE_BULK_STATS_SRIOV))
indexes = vmstats._find_bulk_stats_reverse_map(
bulk_stats[0], 'net')
self.assertTrue(indexes)
@expandPermutations
class NetworkStatsTests(VmStatsTestCase):
# TODO: grab them from the schema
_EXPECTED_KEYS = (
'macAddr',
'name',
'speed',
'state',
'rxErrors',
'rxDropped',
'txErrors',
'txDropped',
'rx',
'tx',
'sampleTime',
)
def test_nic_have_all_keys(self):
nic = FakeNic(name='vnet0', model='virtio',
mac_addr='00:1a:4a:16:01:51')
testvm = FakeVM(nics=(nic,))
stats = vmstats._nic_traffic(
testvm,
nic.name, nic.nicModel, nic.macAddr,
self.bulk_stats, 0,
self.bulk_stats, 0,
self.interval)
self.assertStatsHaveKeys(stats, self._EXPECTED_KEYS)
def test_networks_have_all_keys(self):
nics = (
FakeNic(name='vnet0', model='virtio',
mac_addr='00:1a:4a:16:01:51'),
)
vm = FakeVM(nics=nics)
stats = {}
vmstats.networks(vm, stats,
self.bulk_stats, self.bulk_stats,
self.interval)
self.assertRepeatedStatsHaveKeys(nics, stats['network'],
self._EXPECTED_KEYS)
def test_networks_good_interval(self):
nics = (
FakeNic(name='vnet0', model='virtio',
mac_addr='00:1a:4a:16:01:51'),
)
vm = FakeVM(nics=nics)
stats = {}
self.assertTrue(
vmstats.networks(vm, stats,
self.bulk_stats, self.bulk_stats,
1)
)
@permutations([[-42], [0]])
def test_networks_bad_interval(self, interval):
nics = (
FakeNic(name='vnet0', model='virtio',
mac_addr='00:1a:4a:16:01:51'),
)
vm = FakeVM(nics=nics)
stats = {}
self.assertTrue(
vmstats.networks(vm, stats,
self.bulk_stats, self.bulk_stats,
                             interval) is None
)
@permutations([
['net.0.rx.bytes'], ['net.0.rx.pkts'],
['net.0.rx.errs'], ['net.0.rx.drop'], ['net.0.tx.bytes'],
['net.0.tx.pkts'], ['net.0.tx.errs'], ['net.0.tx.drop'],
])
def test_networks_missing_key(self, key):
nics = (
FakeNic(name='vnet0', model='virtio',
mac_addr='00:1a:4a:16:01:51'),
)
vm = FakeVM(nics=nics)
vm.migrationPending = True
faulty_bulk_stats = {}
faulty_bulk_stats.update(self.bulk_stats)
del faulty_bulk_stats[key]
stats = {}
self.assertTrue(
vmstats.networks(vm, stats,
self.bulk_stats, faulty_bulk_stats,
1)
)
class DiskStatsTests(VmStatsTestCase):
# TODO: grab them from the schema
# Note: these are the minimal set Vdsm exported,
# no clear rationale for this subset.
_EXPECTED_KEYS = (
'truesize',
'apparentsize',
'readLatency',
'writeLatency',
'flushLatency',
'imageID',
# TODO: add test for 'lunGUID'
'readRate',
'writeRate',
'readOps',
'writeOps',
'readBytes',
'writtenBytes',
)
def test_disk_all_keys_present(self):
interval = 10 # seconds
drives = (FakeDrive(name='hdc', size=700 * 1024 * 1024),)
testvm = FakeVM(drives=drives)
stats = {}
stats_before = copy.deepcopy(self.bulk_stats)
stats_after = copy.deepcopy(self.bulk_stats)
_ensure_delta(stats_before, stats_after,
'block.0.rd.reqs', 1024)
_ensure_delta(stats_before, stats_after,
'block.0.rd.bytes', 128 * 1024)
vmstats.disks(testvm, stats,
stats_before, stats_after,
interval)
self.assertRepeatedStatsHaveKeys(drives, stats['disks'],
self._EXPECTED_KEYS)
def test_interval_zero(self):
interval = 0 # seconds
# with zero interval, we won't have {read,write}Rate
expected_keys = tuple(k for k in self._EXPECTED_KEYS
if k not in ('readRate', 'writeRate'))
drives = (FakeDrive(name='hdc', size=700 * 1024 * 1024),)
testvm = FakeVM(drives=drives)
stats = {}
self.assertNotRaises(vmstats.disks,
testvm, stats,
self.bulk_stats, self.bulk_stats,
interval)
self.assertRepeatedStatsHaveKeys(drives,
stats['disks'],
expected_keys)
def test_disk_missing_rate(self):
partial_stats = self._drop_stats(
('block.0.rd.bytes', 'block.1.rd.bytes',
'block.0.wr.bytes', 'block.1.wr.bytes'))
interval = 10 # seconds
drives = (FakeDrive(name='hdc', size=700 * 1024 * 1024),)
testvm = FakeVM(drives=drives)
stats = {}
self.assertNotRaises(vmstats.disks,
testvm, stats,
partial_stats, partial_stats,
interval)
def test_disk_missing_latency(self):
partial_stats = self._drop_stats(
('block.0.rd.times', 'block.1.rd.times',
'block.0.wr.reqs', 'block.1.wr.reqs'))
interval = 10 # seconds
drives = (FakeDrive(name='hdc', size=700 * 1024 * 1024),)
testvm = FakeVM(drives=drives)
stats = {}
self.assertNotRaises(vmstats.disks,
testvm, stats,
partial_stats, partial_stats,
interval)
def _drop_stats(self, keys):
partial_stats = copy.deepcopy(self.bulk_stats)
for key in keys:
del partial_stats[key]
return partial_stats
FIRST_CPU_SAMPLE = {'cpu.user': 4740000000, 'cpu.system': 6490000000}
LAST_CPU_SAMPLE = {'cpu.user': 4760000000, 'cpu.system': 6500000000}
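# Note: the 'cpuUsage' string asserted in the tests below is simply
# cpu.user + cpu.system of the last sample
# (4760000000 + 6500000000 = 11260000000 with these fixtures).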
@expandPermutations
class CpuStatsTests(VmStatsTestCase):
# all data stolen from Vdsm and/or virsh -r domstats
INTERVAL = 15. # seconds.
# [first, last]
# intentionally use only one sample, the other empty
@permutations([[{}, {}],
[{}, FIRST_CPU_SAMPLE],
[FIRST_CPU_SAMPLE, {}]])
def test_empty_samples(self, first, last):
stats = {}
res = vmstats.cpu(stats, {}, {}, self.INTERVAL)
self.assertEqual(stats,
{'cpuUser': 0.0, 'cpuSys': 0.0})
self.assertEqual(res, None)
def test_only_cpu_user_system(self):
stats = {}
res = vmstats.cpu(stats, FIRST_CPU_SAMPLE, LAST_CPU_SAMPLE,
self.INTERVAL)
self.assertEqual(stats, {
'cpuUser': 0.0,
'cpuSys': 0.2,
'cpuUsage': '11260000000',
})
self.assertEqual(res, None)
def test_update_all_keys(self):
stats = {}
first_sample = {'cpu.time': 24345584838}
first_sample.update(FIRST_CPU_SAMPLE)
last_sample = {'cpu.time': 24478198023}
last_sample.update(LAST_CPU_SAMPLE)
res = vmstats.cpu(stats, first_sample, last_sample,
self.INTERVAL)
self.assertEqual(stats, {
'cpuUser': 0.6840879,
'cpuSys': 0.2,
'cpuUsage': '11260000000',
})
self.assertNotEquals(res, None)
# helpers
def _ensure_delta(stats_before, stats_after, key, delta):
"""
Set stats_before[key] and stats_after[key] so that
stats_after[key] - stats_before[key] == abs(delta).
"""
stats_before[key] = 0
stats_after[key] = abs(delta)
class FakeNic(object):
def __init__(self, name, model, mac_addr):
self.name = name
self.nicModel = model
self.macAddr = mac_addr
class FakeDrive(object):
def __init__(self, name, size):
self.name = name
self.apparentsize = size
self.truesize = size
self.GUID = str(uuid.uuid4())
self.imageID = str(uuid.uuid4())
self.domainID = str(uuid.uuid4())
self.poolID = str(uuid.uuid4())
self.volumeID = str(uuid.uuid4())
def __contains__(self, item):
# isVdsmImage support
return item in ('imageID', 'domainID', 'poolID', 'volumeID')
class FakeVM(object):
def __init__(self, nics=None, drives=None):
self.id = str(uuid.uuid4())
self.nics = nics if nics is not None else []
self.drives = drives if drives is not None else []
self.migrationPending = False
@property
def monitorable(self):
return not self.migrationPending
def getNicDevices(self):
return self.nics
def getDiskDevices(self):
return self.drives
| gpl-2.0 | -2,565,157,551,980,856,000 | 31.067682 | 79 | 0.503271 | false |
pankajk/MasterThesis | Code/Graph_Kernels/SKG/KroneckerGenerator.py | 1 | 2725 | import numpy as np
import networkx as nx
import math
import random
def convert(something):#use networkx conversion from numpy array
#g = nx.from_numpy_matrix(someNPMat)
g = nx.to_networkx_graph(something)
return g
def deleteSelfLoops(graph, nNodes): #used to take away self loops in final graph for stat purposes
nNodes = int(nNodes)
for i in range(nNodes):
for j in range(nNodes):
if(i == j):
graph[i, j] = 0
return graph
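# Note: numpy offers a one-call alternative to the loop above,
# e.g. np.fill_diagonal(graph, 0), which zeroes the diagonal in place.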
def generateStochasticKron(initMat, k, deleteSelfLoopsForStats=False, directed=False, customEdges=False, edges=0):
initN = initMat.getNumNodes()
    nNodes = int(math.pow(initN, k))  # get final size and make empty 'kroned' matrix
mtxDim = initMat.getNumNodes()
mtxSum = initMat.getMtxSum()
if(customEdges == True):
nEdges = edges
if(nEdges > (nNodes*nNodes)):
raise ValueError("More edges than possible with number of Nodes")
else:
nEdges = math.pow(mtxSum, k) #get number of predicted edges
collisions = 0
print ("Edges: ")
print (nEdges)
print ("Nodes: ")
print (nNodes)
#create vector for recursive matrix probability
probToRCPosV = []
cumProb = 0.0
for i in range(mtxDim):
for j in range(mtxDim):
prob = initMat.getValue(i, j)
if(prob > 0.0):
cumProb += prob
probToRCPosV.append((cumProb/mtxSum, i, j))
#print "Prob Vector Value:" #testing
#print cumProb/mtxSum #testing
#add Nodes
finalGraph = np.zeros((nNodes, nNodes))
#add Edges
e = 0
#print nEdges #testing
while(e < nEdges):
rng = nNodes
row = 0
col = 0
for t in range(k):
prob = random.uniform(0, 1)
#print "prob:" #testing
#print prob #testing
n = 0
while(prob > probToRCPosV[n][0]):
n += 1
mrow = probToRCPosV[n][1]
mcol = probToRCPosV[n][2]
            rng //= mtxDim  # integer division keeps row/col usable as array indices
            row += mrow * rng
            col += mcol * rng
if(finalGraph[row, col] == 0): #if there is no edge
finalGraph[row, col] = 1
e += 1
if(not directed): #symmetry if not directed
if(row != col):
finalGraph[col, row] = 1
e += 1
else:
collisions += 1
print ("Collisions: ")
print (collisions) #testing
#delete self loops if needed for stats
if(deleteSelfLoopsForStats):
finalGraph = deleteSelfLoops(finalGraph, nNodes)
finalGraph = convert(finalGraph)
return finalGraph
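# --- Hedged usage sketch (illustration only; not part of the original module) ---
# generateStochasticKron() only relies on an initiator-matrix object exposing
# getNumNodes(), getMtxSum() and getValue(i, j). The stand-in class below is an
# assumption made for this example; the real SKG InitMatrix class can be used instead.
class _ExampleInitMatrix(object):
    def __init__(self, probs):
        self._p = probs  # square list of lists with per-cell link probabilities
    def getNumNodes(self):
        return len(self._p)
    def getMtxSum(self):
        return sum(sum(row) for row in self._p)
    def getValue(self, i, j):
        return self._p[i][j]
if __name__ == "__main__":
    # a 2x2 initiator with k=3 gives 8 nodes and about 8 expected edges
    init = _ExampleInitMatrix([[0.9, 0.5], [0.5, 0.1]])
    g = generateStochasticKron(init, 3, deleteSelfLoopsForStats=True)
    print ("nodes: %d edges: %d" % (g.number_of_nodes(), g.number_of_edges()))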
| mit | -5,153,844,839,685,211,000 | 28.945055 | 114 | 0.557798 | false |
eniac/faas | factor/linalg.py | 1 | 5126 | import sys
import os
import re
import math
import utils
import logging
logger = logging.getLogger('Linalg')
cmd_logger = logging.getLogger('cmd')
# Exact configuration here will depend on the instance/hardware type.
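# A minimal sketch of the rankfile entries run_rankfile() writes (hostnames are
# hypothetical), matching the "rank {n}={mach} slot={sock}:{start}-{end}" format
# strings used below:
#   rank 0=node-01 slot=0:0-7
#   rank 1=node-01 slot=1:0-7
#   rank 2=node-02 slot=0:0-3,1:0-3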
def run_rankfile(linalg_params):
logger.info("--- Generating rankfile ---")
machines = linalg_params['machines']
num_of_mpi = linalg_params['mpi_rows'] * linalg_params['mpi_cols']
num_of_mach = len(machines)
num_of_sock = linalg_params['phys_socks_per_machine']
num_of_cores_per_sock = linalg_params['phys_core_per_sock']
jobs_assigned_to_mach = 0
with open(linalg_params['rankfile'], 'wt', encoding='utf-8') as rfile:
for mach_no in range(0, num_of_mach):
if mach_no < num_of_mpi % num_of_mach:
num_of_jobs = num_of_mpi // num_of_mach + 1
else:
num_of_jobs = num_of_mpi // num_of_mach
cores_unassigned = num_of_cores_per_sock * num_of_sock
socket_counter = {}
for sock in range(0, num_of_sock):
socket_counter[sock] = 0
for job_id in range(0, num_of_jobs):
rank_no = jobs_assigned_to_mach + job_id
sock_no = job_id % num_of_sock
start_core = socket_counter[sock_no]
                cores_to_use = int(math.ceil(float(cores_unassigned) / (num_of_jobs - job_id)))  # true division so the ceil can round up
end_core = socket_counter[sock_no] + cores_to_use - 1
# Case for socket splitting
if end_core >= num_of_cores_per_sock:
core_needed = cores_to_use
slot_str = ""
while core_needed > 0:
sock = min(socket_counter, key=socket_counter.get)
core_use = (num_of_cores_per_sock - socket_counter[sock] if core_needed >= num_of_cores_per_sock - socket_counter[sock] else core_needed)
core_needed -= core_use
start_core = socket_counter[sock]
end_core = socket_counter[sock] + core_use - 1
slot_str += ("{sock}:{start}-{end},"
.format(sock=sock, start=socket_counter[sock], end=end_core))
socket_counter[sock] += core_use
slot_str = slot_str[0:-1]
rfile.write("rank {n}={mach} slot={slot}\n"
.format(n=rank_no, mach=machines[mach_no], slot=slot_str))
cores_unassigned -= cores_to_use
continue
rfile.write("rank {n}={mach} slot={sock}:{start}-{end}\n"
.format(n=rank_no, mach=machines[mach_no], sock=sock_no, start=start_core, end=end_core))
socket_counter[sock_no] += cores_to_use
cores_unassigned -= cores_to_use
jobs_assigned_to_mach += num_of_jobs
logger.info("--- End of generating rankfile ---")
def run_linalg(linalg_params):
logger.info("--- Beginning MSieve linear algebra ---")
linalg_cmd = "mpirun -np " + str(linalg_params['mpi_rows'] * linalg_params['mpi_cols'])
linalg_cmd += " -H " + ",".join(linalg_params['machines'])
linalg_cmd += " -rf " + linalg_params['rankfile']
linalg_cmd += " " + os.path.join(linalg_params['msievedir'], 'msieve')
linalg_cmd += " -nf " + linalg_params['fb_path']
linalg_cmd += (" -nc2 \"mpi_nrows={rows} mpi_ncols={cols} target_density={td}\""
.format(rows=linalg_params['mpi_rows'], cols=linalg_params['mpi_cols'], td=linalg_params['target_density']))
linalg_cmd += " -v -t " + str(linalg_params['threads'])
linalg_cmd += " -l " + linalg_params['log_path']
linalg_cmd += " -s " + linalg_params['dat_path']
linalg_cmd += " " + str(linalg_params['N'])
cmd_logger.info(linalg_cmd)
stdout, stderr, ret = utils.run_command(linalg_cmd, include_stdout=True, include_stderr=True, include_returncode=True, logger=logger)
if ret != 0:
logger.error("Received error code " + str(ret) + " from Msieve linear algebra. Exiting...")
sys.exit(1)
logger.info("--- End of MSieve linear algebra ---")
def run(parameters):
linalg_paths = ['tasks', 'msieve', 'linalg']
linalg_keys = {
"N": int,
"msievedir": str,
"mpi": str,
"hosts": str,
"target_density": int,
"phys_socks_per_machine": int,
"phys_core_per_sock": int,
"threads_per_core": int,
"threads": int,
"rankfile": str,
"fb_path": str,
"log_path": str,
"dat_path": str,
}
linalg_params = parameters.myparams(linalg_keys, linalg_paths)
linalg_params['machines'] = [ m.strip() for m in linalg_params['hosts'].split(',') if len(m) > 0 ]
linalg_params['mpi_rows'], linalg_params['mpi_cols'] = [ int(x) for x in linalg_params['mpi'].split("x") ]
# Create a rankfile based on current mpi configuration
run_rankfile(linalg_params)
# Run linear algebra
run_linalg(linalg_params)
| lgpl-3.0 | 764,551,282,516,274,000 | 41.716667 | 161 | 0.552282 | false |
timevortexproject/timevortex | timevortex/utils/timeserieslogger.py | 1 | 1057 | #!/usr/bin/python3
# -*- coding: utf8 -*-
# -*- Mode: Python; py-indent-offset: 4 -*-
"""TSL functions"""
from timevortex.utils.globals import ERROR_TIMESERIES_NOT_DEFINED
KEY_TSL_BAD_JSON = "ts_without_json_message"
KEY_TSL_NO_SITE_ID = "ts_without_site_id"
KEY_TSL_NO_VARIABLE_ID = "ts_without_variable_id"
KEY_TSL_NO_VALUE = "ts_without_message"
KEY_TSL_NO_DATE = "ts_without_date"
KEY_TSL_NO_DST_TIMEZONE = "ts_without_dst_timezone"
KEY_TSL_NO_NON_DST_TIMEZONE = "ts_without_non_dst_timezone"
INCORRECT_MESSAGE = "Receive incorrect message => %s"
ERROR_TSL = {
KEY_TSL_BAD_JSON: ERROR_TIMESERIES_NOT_DEFINED,
KEY_TSL_NO_SITE_ID: INCORRECT_MESSAGE % "missing siteID in %s",
KEY_TSL_NO_VARIABLE_ID: INCORRECT_MESSAGE % "missing variableID in %s",
KEY_TSL_NO_VALUE: INCORRECT_MESSAGE % "missing value in %s",
KEY_TSL_NO_DATE: INCORRECT_MESSAGE % "missing date in %s",
KEY_TSL_NO_DST_TIMEZONE: INCORRECT_MESSAGE % "missing dstTimezone in %s",
KEY_TSL_NO_NON_DST_TIMEZONE: INCORRECT_MESSAGE % "missing nonDstTimezone in %s",
}
| mit | 4,012,301,638,480,330,000 | 41.28 | 84 | 0.703879 | false |
fos/fos-legacy | fos/actor/odfslicer.py | 1 | 2054 | import numpy as np
class ODF_Slice(object):
def __init__(self,odfs,vertices,faces,noiso,batch,group=None):
J=0
self.odfs_no=J
self.vertex_list=(odfs.shape[0]*odfs.shape[1])*[None]
for index in np.ndindex(odfs.shape[:2]):
values=odfs[index]
if noiso:
values=np.interp(values,[values.min(),values.max()],[0,.5])
inds=faces.ravel().tolist()
shift=index+(0,)
print J,odfs.shape[0]*odfs.shape[1]
points=np.dot(np.diag(values),vertices)
points=points+np.array(shift)
verx=points.ravel().tolist()
normals=np.zeros((len(vertices),3))
ones_=np.ones(len(values))
colors=np.vstack((values,ones_,ones_)).T
colors=colors.ravel().tolist()
p=vertices
l=faces
trinormals=np.cross(p[l[:,0]]-p[l[:,1]],\
p[l[:,1]]-p[l[:,2]],\
axisa=1,axisb=1)
for (i,lp) in enumerate(faces):
normals[lp]+=trinormals[i]
div=np.sqrt(np.sum(normals**2,axis=1))
div=div.reshape(len(div),1)
normals=(normals/div)
norms=np.array(normals).ravel().tolist()
            self.vertex_list[J] = batch.add_indexed(len(vertices),\
GL_TRIANGLES,\
group,\
inds,\
('v3d/static',verx),\
('n3d/static',norms),\
('c3d/static',colors))
J+=1
def update(self):
pass
def delete(self):
for i in range(self.odfs_no):
            self.vertex_list[i].delete()
| bsd-3-clause | -3,123,293,143,917,437,000 | 32.672131 | 75 | 0.399708 | false |
kkoksvik/FreeCAD | src/Mod/Start/StartPage/StartPage.py | 2 | 26929 | #***************************************************************************
#* *
#* Copyright (c) 2012 *
#* Yorik van Havre <[email protected]> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
# This is the start page template
import os,FreeCAD,FreeCADGui,tempfile,time,zipfile,urllib,re,cStringIO
from PySide import QtGui
from xml.etree.ElementTree import parse
FreeCADGui.addLanguagePath(":/translations")
FreeCADGui.updateLocale()
def translate(context,text):
"convenience function for the Qt translator"
# return str(QtGui.QApplication.translate(context, text, None, QtGui.QApplication.UnicodeUTF8).toUtf8())
u = QtGui.QApplication.translate(context, text, None,
QtGui.QApplication.UnicodeUTF8).encode("utf8")
s = cStringIO.StringIO()
for i in u:
if ord(i) == 39:
s.write("\\'")
else:
s.write(i)
t = s.getvalue()
s.close()
return t
# texts to be translated
text01 = translate("StartPage","FreeCAD Start Center")
text02 = translate("StartPage","Start a new project")
text03 = translate("StartPage","Recent Files")
text04 = translate("StartPage","Latest videos")
text05 = translate("StartPage","Latest commits")
text06 = translate("StartPage","On the web")
text07 = translate("StartPage","This is the FreeCAD Homepage. Here you will be able to find a lot of information about FreeCAD, including tutorials, examples and user documentation.")
text08 = translate("StartPage","FreeCAD Homepage")
text09 = translate("StartPage","Example projects")
text10 = translate("StartPage","Schenkel STEP file")
text11 = translate("StartPage","Load a PartDesign example")
text12 = translate("StartPage","Load a Drawing extraction")
text13 = translate("StartPage","Load a Robot simulation example")
text14 = translate("StartPage","Projects from the Web")
text15 = translate("StartPage","Schenkel STEP")
text16 = translate("StartPage","Complex Part")
text17 = translate("StartPage","Close this window after opening or creating a file")
text18 = translate("StartPage","Don't show me this window again next time")
text19 = translate("StartPage","Designing parts")
text20 = translate("StartPage","The <b>Part Design</b> workbench is designed to create complex pieces based on constrained 2D sketches. Use it to draw 2D shapes, constrain some of their elements and extrude them to form 3D pieces.")
text21 = translate("StartPage","Example workflow")
text22 = translate("StartPage","Part Design")
text23 = translate("StartPage","Designing architectural elements")
text24 = translate("StartPage","The <b>Architectural Design</b> workbench is specially designed for working with architectural elements such as walls or windows. Start by drawing 2D shapes, and use them as guides to build architecutral objects.")
text25 = translate("StartPage","Architectural Design")
text26 = translate("StartPage","Working with Meshes")
text27 = translate("StartPage","The <b>Mesh Workbench</b> is used to work with Mesh objects. Meshes are simpler 3D objects than Part objects, but they are often easier to import and export to/from other applications.")
text28 = translate("StartPage","FreeCAD offers you several tools to convert between Mesh and Part objects.")
text29 = translate("StartPage","Work with Meshes")
text30 = translate("StartPage","The complete workbench")
text31 = translate("StartPage","FreeCAD Complete workbench")
text32 = translate("StartPage","populated with some of the most commonly used tools.")
text33 = translate("StartPage","file size:")
text34 = translate("StartPage","creation time:")
text35 = translate("StartPage","last modified:")
text36 = translate("StartPage","location:")
text37 = translate("StartPage","User manual")
text38 = translate("StartPage","http://www.freecadweb.org/wiki/index.php?title=Online_Help_Toc")
text39 = translate("StartPage","Tutorials")
text40 = translate("StartPage","Python resources")
text41 = translate("StartPage","File not found")
text42 = translate("StartPage","from <a href=http://twitter.com/FreeCADNews>@FreeCADNews</a>")
text43 = translate("StartPage","The FreeCAD-tutorial blog")
text44 = translate("StartPage","from <a href=http://www.youtube.com/user/FreeCADNews?feature=mhee>FreeCADNews channel</a>")
text45 = translate("StartPage","This is the official user manual of FreeCAD, built, maintained and translated by the FreeCAD community.")
text46 = translate("StartPage","The tutorials section on the FreeCAD website")
text47 = translate("StartPage","The section of the FreeCAD website dedicated to python scripting, with examples, explanations, and API commands.")
text48 = translate("StartPage","A blog dedicated to teaching FreeCAD, maintained by members of the FreeCAD community")
text49 = translate("StartPage","Getting started")
text50 = translate("StartPage","The FreeCAD interface is divided in workbenches, which are sets of tools suited for a specific task. You can start with one of the workbenches in this list, or with the complete workbench, which presents you with some of the most used tools gathered from other workbenches. Click to read more about workbenches on the FreeCAD website.")
text51 = translate("StartPage","http://www.freecadweb.org/wiki/index.php?title=Workbenches")
text52 = translate("StartPage","Ship Design")
text53 = translate("StartPage","Designing and calculating ships")
text54 = translate("StartPage","The <b>Ship Design</b> module offers several tools to help ship designers to view, model and calculate profiles and other specific properties of ship hulls.")
text55 = translate("StartPage","Load an Architectural example model")
text56 = translate("StartPage","http://www.freecadweb.org/wiki/index.php?title=Tutorials")
text57 = translate("StartPage","http://www.freecadweb.org/wiki/index.php?title=Power_users_hub")
text58 = translate("StartPage","Your version of FreeCAD is up to date.")
text59 = translate("StartPage","There is a new release of FreeCAD available.")
text60 = translate("StartPage","Load an FEM example analysis")
text61 = translate("StartPage","Obtain a development version")
text62 = translate("StartPage","<b>Development versions</b> are made available by community members from time to time and usually contain the latest changes, but are more likely to contain bugs.")
text63 = translate("StartPage","See all commits")
# get FreeCAD version
v = FreeCAD.Version()
vmajor = v[0]
vminor = v[1]
vbuild = v[2].split(" ")[0]
# here is the html page skeleton
page = """
<html>
<head>
<title>FreeCAD - Start page</title>
<script language="javascript">
var linkDescriptions = [];
function JSONscriptRequest(fullUrl) {
// REST request path
this.fullUrl = fullUrl;
// Get the DOM location to put the script tag
this.headLoc = document.getElementsByTagName("head").item(0);
// Generate a unique script tag id
this.scriptId = 'JscriptId' + JSONscriptRequest.scriptCounter++;
}
// Static script ID counter
JSONscriptRequest.scriptCounter = 1;
JSONscriptRequest.prototype.buildScriptTag = function () {
// Create the script tag
this.scriptObj = document.createElement("script");
// Add script object attributes
this.scriptObj.setAttribute("type", "text/javascript");
this.scriptObj.setAttribute("charset", "utf-8");
this.scriptObj.setAttribute("src", this.fullUrl);
this.scriptObj.setAttribute("id", this.scriptId);
}
JSONscriptRequest.prototype.removeScriptTag = function () {
// Destroy the script tag
this.headLoc.removeChild(this.scriptObj);
}
JSONscriptRequest.prototype.addScriptTag = function () {
// Create the script tag
this.headLoc.appendChild(this.scriptObj);
}
function show(theText) {
ddiv = document.getElementById("description");
if (theText == "") theText = " ";
ddiv.innerHTML = theText;
}
function checkVersion(data) {
vdiv = document.getElementById("versionbox");
var cmajor = """ + vmajor + """;
var cminor = """ + vminor + """;
var cbuild = """ + vbuild + """;
var amajor = data[0]['major'];
var aminor = data[0]['minor'];
var abuild = data[0]['build'];
if (cmajor >= amajor && cminor >= aminor && cbuild >= abuild) {
vdiv.innerHTML=" """ + text58 + """: """ + vmajor + """.""" + vminor + """.""" + vbuild + """";
} else {
vdiv.innerHTML="<a href=exthttp://github.com/FreeCAD/FreeCAD/releases/latest> """ + text59 + """:"+amajor+"."+aminor+"."+abuild+"</a>";
}
}
function load() {
// load latest news
ddiv = document.getElementById("news");
ddiv.innerHTML = "Connecting...";
var tobj=new JSONscriptRequest('https://api.github.com/repos/FreeCAD/FreeCAD/commits?callback=showTweets');
tobj.buildScriptTag(); // Build the script tag
tobj.addScriptTag(); // Execute (add) the script tag
ddiv.innerHTML = "Downloading latest news...";
// load version
var script = document.createElement('script');
script.src = 'http://www.freecadweb.org/version.php?callback=checkVersion';
document.body.appendChild(script);
}
function stripTags(text) {
// from http://www.pagecolumn.com/tool/all_about_html_tags.htm /<\s*\/?\s*span\s*.*?>/g
stripped = text.replace("<table", "<div");
stripped = stripped.replace("</table", "</div");
stripped = stripped.replace("<tr", "<tr");
stripped = stripped.replace("</tr", "</tr");
stripped = stripped.replace("<td", "<td");
stripped = stripped.replace("</td", "</td");
stripped = stripped.replace("555px", "auto");
stripped = stripped.replace("border:1px", "border:0px");
stripped = stripped.replace("color:#000000;","");
return stripped;
}
function showTweets(data) {
ddiv = document.getElementById('news');
ddiv.innerHTML = "Received";
var html = ['<ul>'];
for (var i = 0; i < 15; i++) {
html.push('<li><img src="web.png"> <a href="ext', data.data[i].commit.url, '" onMouseOver="showDescr(', i+1, ')" onMouseOut="showDescr()">', data.data[i].commit.message, '</a></li>');
if ("message" in data.data[i].commit) {
linkDescriptions.push(stripTags(data.data[i].commit.message)+'<br/>'+data.data[i].commit.author.name+'<br/>'+data.data[i].commit.author.date);
} else {
linkDescriptions.push("");
}
}
html.push('</ul>');
html.push('<a href="exthttp://github.com/FreeCAD/FreeCAD/commits/master">""" + text63 + """<a/>');
ddiv.innerHTML = html.join('');
}
function showDescr(d) {
if (d) {
show(linkDescriptions[d-1]);
} else {
show("");
}
}
function scroller() {
desc = document.getElementById("description");
base = document.getElementById("column").offsetTop;
scro = window.scrollY;
if (scro > base) {
desc.className = "stick";
} else {
desc.className = "";
}
}
document.onmousemove=scroller;
</script>
<style type="text/css">
body {
background: #basecolor;
color: #textcolor;
font-family: Arial, Helvetica, Sans;
font-size: 11px;
}
a {
color: #linkcolor;
font-weight: bold;
text-decoration: none;
padding: 2px;
}
a:hover {
color: white;
background: #linkcolor;
border-radius: 5px;
}
p {
text-align: justify;
}
.left {
text-align: left;
}
h1 {
font-size: 3em;
letter-spacing: 2px;
padding: 20px 0 0 80px;
align: bottom;
color: #ffffff;
}
h2 {
font-size: 1.2em;
}
ul {
list-style-type: none;
padding: 0;
}
#column {
margin: 0 350px 0 10px;
}
#column img {
max-width: 14px;
}
.block {
background: #windowcolor;
border-radius: 5px;
padding: 8px;
margin-bottom: 10px;
color: #windowtextcolor;
width: auto;
}
.options {
clear: both;
}
.from {
font-size: 0.7em;
font-weight: normal;
}
#versionbox {
float: right;
text-align: right;
font-size: 0.33em;
font-weight: normal;
padding-right: 20px;
letter-spacing: 0;
color: #ffffff;
}
#description {
background: #windowcolor;
border-radius: 5px;
padding: 8px;
color: #windowtextcolor;
float: right;
width: 316px;
right: 10px;
height: 100%;
position: relative;
}
#description img {
max-width: 300px;
clear: both;
}
pre {
width: 300px !important;
white-space: pre-wrap;
}
.stick {
position: fixed !important;
top: 0px;
right: 18px !important;
}
</style>
</head>
<body onload="load()">
<h1><img src="FreeCAD.png"> """ + text01 + """<div id=versionbox> </div></h1>
<div id="description">
</div>
<div id="column">
<div class="block">
<h2>""" + text02 + """</h2>
defaultworkbenches
</div>
<div class="block">
<h2>""" + text03 + """</h2>
recentfiles
</div>
<div class="block">
<h2>""" + text05 + """</h2>
<div id="news">news feed</div>
</div>
<div class="block">
<h2>""" + text06 + """</h2>
defaultlinks
</div>
<div class="block">
<h2>""" + text09 + """</h2>
defaultexamples
</div>
customblocks
</div>
<!--
<form class="options">
<input type="checkbox" name="closeThisDialog">
""" + text17 + """<br/>
<input type="checkbox" name="dontShowAgain">
""" + text18 + """
</form>
-->
</body>
</html>
"""
def getWebExamples():
return """
<ul>
<li><a href="http://freecad-project.de/svn/ExampleData/FileFormates/Schenkel.stp">""" + text15 + """</a></li>
<li><a href="http://freecad-project.de/svn/ExampleData/Examples/CAD/Complex.FCStd">""" + text16 + """</a></li>
</ul>"""
def getExamples():
return """
<ul>
<li><img src="FreeCAD.png" style="width: 16px"> <a href="LoadSchenkel.py">""" + text10 + """</a></li>
<li><img src="FreeCAD.png" style="width: 16px"> <a href="LoadPartDesignExample.py">""" + text11 + """</a></li>
<li><img src="FreeCAD.png" style="width: 16px"> <a href="LoadDrawingExample.py">""" + text12 + """</a></li>
<li><img src="FreeCAD.png" style="width: 16px"> <a href="LoadRobotExample.py">""" + text13 + """</a></li>
<li><img src="FreeCAD.png" style="width: 16px"> <a href="LoadArchExample.py">""" + text55 + """</a></li>
<li><img src="FreeCAD.png" style="width: 16px"> <a href="LoadFemExample.py">""" + text60 + """</a></li>
</ul>"""
def getLinks():
return """
<ul>
<li><img src="web.png">
<a onMouseover="show('<p>""" + text07 + """</p>')"
onMouseout="show('')"
href="exthttp://www.freecadweb.org/">""" + text08 + """</a></li>
<li><img src="web.png">
<a onMouseover="show('<p>""" + text45 + """</p>')"
onMouseout="show('')"
href=ext""" + text38 + """>""" + text37 + """</a></li>
<li><img src="web.png">
<a onMouseover="show('<p>""" + text46 + """</p>')"
onMouseout="show('')"
href=ext""" + text56 + """>""" + text39 + """</a></li>
<li><img src="web.png">
<a onMouseover="show('<p>""" + text47 + """</p>')"
onMouseout="show('')"
href=ext""" + text57 + """>""" + text40 + """</a></li>
<li><img src="web.png">
<a onMouseover="show('<p>""" + text48 + """</p>')"
onMouseout="show('')"
href="exthttp://freecad-tutorial.blogspot.com/">""" + text43 + """</a></li>
<li><img src="web.png">
<a href="exthttp://github.com/FreeCAD/FreeCAD/releases"
onMouseOver="show('<p>""" + text62 + """</p>')"
onMouseOut="show('')">""" + text61 + """</a></li>
</ul>"""
def getWorkbenches():
return """
<ul>
<li><img src="blank.png">
<a onMouseover="show('<h3>""" + text49 + """</h3> \
<p>""" + text50 + """</p>')"
onMouseout="show('')"
href=""" + text51 + """>""" + text49 + """</a>
</li>
<li><img src="PartDesign.png">
<a onMouseover="show('<h3>""" + text19 + """</h3> \
<p>""" + text20 + """</p><p><small>""" + text21 + """ \
:</small></p><img src=PartDesignExample.png>')"
onMouseout="show('')"
href="PartDesign.py">""" + text22 + """</a>
</li>
<li><img src="ArchDesign.png">
<a onMouseover="show('<h3>""" + text23 + """</h3> \
<p>""" + text24 + """</p><p><small>""" + text21 + """ \
:</small></p><img src=ArchExample.png>')"
onMouseout="show('')"
href="ArchDesign.py">""" + text25 + """</a>
</li>
<li><img src="Ship.png">
<a onMouseover="show('<h3>""" + text53 + """</h3> \
<p>""" + text54 + """</p><p><small>""" + text21 + """ \
:</small></p><img src=ShipExample.png>')"
onMouseout="show('')"
href="Ship.py">""" + text52 + """</a>
</li>
<li><img src="Mesh.png">
<a onMouseover="show('<h3>""" + text26 + """</h3> \
<p>""" + text27 + """</p><p>""" + text28 + """</p>')"
onMouseout="show('')"
href="Mesh.py">""" + text29 + """</a>
</li>
</ul>"""
def getInfo(filename):
"returns available file information"
def getLocalTime(timestamp):
"returns a local time from a timestamp"
return time.strftime("%m/%d/%Y %H:%M:%S",time.localtime(timestamp))
def getSize(size):
"returns a human-readable size"
if size > 1024*1024:
hsize = str(size/(1024*1024)) + "Mb"
elif size > 1024:
hsize = str(size/1024) + "Kb"
else:
hsize = str(size) + "b"
return hsize
html = '<h3>'+os.path.basename(filename)+'</h3>'
if os.path.exists(filename):
# get normal file info
s = os.stat(filename)
html += "<p>" + text33 + " " + getSize(s.st_size) + "<br/>"
html += text34 + " " + getLocalTime(s.st_ctime) + "<br/>"
html += text35 + " " + getLocalTime(s.st_mtime) + "<br/>"
html += "<span>" + text36 + " " + filename + "</span></p>"
# get additional info from fcstd files
if os.path.splitext(filename)[1].upper() in [".FCSTD"]:
zfile=zipfile.ZipFile(filename)
files=zfile.namelist()
# check for meta-file if it's really a FreeCAD document
if files[0] == "Document.xml":
html += "<p>FreeCAD Standard File</p>"
image="thumbnails/Thumbnail.png"
if image in files:
image=zfile.read(image)
thumbfile = tempfile.mkstemp(suffix='.png')[1]
thumb = open(thumbfile,"wb")
thumb.write(image)
thumb.close()
html += '<img src=file://'
html += thumbfile + '><br/>'
else:
html += "<p>" + text41 + "</p>"
return html
def getRecentFiles():
"returns a list of 3 latest recent files"
rf = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/RecentFiles")
ct = rf.GetInt("RecentFiles")
html = '<ul>'
for i in range(3):
if i < ct:
mr = rf.GetString("MRU%d" % (i))
if os.path.exists(mr):
fn = os.path.basename(mr)
html += '<li>'
if mr[-5:].upper() == "FCSTD":
html += '<img src="freecad-doc.png" style="width: 16px"> '
else:
html += '<img src="blank.png" style="width: 16px"> '
html += '<a '
html += 'onMouseover="show(\''+getInfo(mr)+'\')" '
html += 'onMouseout="show(\'\')" '
html += 'href="LoadMRU'+str(i)+'.py">'
html += fn
html += '</a></li>'
html += '</ul>'
return html
def getFeed(url,numitems=3):
"returns a html list with links from the given RSS feed url"
xml = parse(urllib.urlopen(url)).getroot()
items = []
channel = xml.find('channel')
for element in channel.findall('item'):
items.append({'title': element.find('title').text,
'description': element.find('description').text,
'link': element.find('link').text})
if len(items) > numitems:
items = items[:numitems]
resp = '<ul>'
for item in items:
descr = re.compile("style=\".*?\"").sub('',item['description'])
descr = re.compile("alt=\".*?\"").sub('',descr)
descr = re.compile("\"").sub('',descr)
d1 = re.findall("<img.*?>",descr)[0]
d2 = re.findall("<span>.*?</span>",descr)[0]
descr = "<h3>" + item['title'] + "</h3>"
descr += d1 + "<br/>"
descr += d2
resp += '<li><a onMouseover="show(\''
resp += descr
resp += '\')" onMouseout="show(\'\')" href="'
resp += item['link']
resp += '">'
resp += item['title']
resp += '</a></li>'
resp += '</ul>'
print resp
return resp
def getCustomBlocks():
"fetches custom html files in FreeCAD user dir"
output = ""
return output
def setColors(html):
"gets theme colors from the system, and sets appropriate styles"
defaults = {"#basecolor":"#191B26",
"#linkcolor":"#0092E8",
"#textcolor":"#FFFFFF",
"#windowcolor":"#FFFFFF",
"#windowtextcolor":"#000000"}
try:
palette = QtGui.qApp.palette()
except:
pass
else:
#defaults["#basecolor"] = palette.base().color().name()
defaults["#basecolor"] = "#171A2B url(Background.jpg)"
#defaults["#linkcolor"] = palette.link().color().name() # UGLY!!
defaults["#textcolor"] = palette.text().color().name()
defaults["#windowcolor"] = palette.window().color().name()
defaults["#windowtextcolor"] = palette.windowText().color().name()
for k,v in defaults.iteritems():
html = html.replace(k,str(v))
return html
def handle():
"returns the complete html startpage"
# add recent files
recentfiles = getRecentFiles()
html = page.replace("recentfiles",recentfiles)
# add default workbenches
html = html.replace("defaultworkbenches",getWorkbenches())
# add default web links
html = html.replace("defaultlinks",getLinks())
# add default examples
html = html.replace("defaultexamples",getExamples())
# add web examples
#html = html.replace("webexamples",getWebExamples())
# add custom blocks
html = html.replace("customblocks",getCustomBlocks())
# fetches system colors
html = setColors(html)
return html
def exportTestFile():
f = open(os.path.expanduser("~")+os.sep+"freecad-startpage.html","wb")
f.write(handle())
f.close()
| lgpl-2.1 | -881,293,649,298,753,800 | 38.252616 | 368 | 0.527461 | false |
Nikita1710/ANUFifty50-Online-Mentoring-Platform | project/fifty_fifty/webcore/views.py | 1 | 4115 | from django.shortcuts import render, get_object_or_404
from django.core.mail import send_mail, BadHeaderError
from django.contrib import messages
from django.conf import settings
from django.contrib.auth.decorators import login_required
from content.models import Mentee, Mentor, Content_Summary
from blog.models import Post
from webcore.models import Profile
from feedback.forms import FeedbackForm
from feedback.models import Feedback_contact
from django.utils import timezone
#from content
# Create your views here.
def home(request):
context = locals()
template = 'index.html'
return render(request,template,context)
@login_required
def userProfile(request):
user = request.user
context = {'user':user, 'summary_list':Content_Summary.objects.all()}
template = 'menteelogin.html'
return render(request,template,context)
@login_required
def userProfileNews(request):
user = request.user
posts = Post.objects.filter(published_date__lte=timezone.now()).order_by('published_date')
template = 'blog/post_list.html'
return render(request,template, {'posts': posts})
## post_detail views the blog posts individually
@login_required
def post_detail(request, pk):
post = get_object_or_404(Post, pk=pk)
template = 'blog/post_detail.html'
return render(request, template, {'post': post})
@login_required
def userProfileMentor(request):
user = request.user
template = 'mentor.html'
return render(request,template)
@login_required
def userProfileResources(request):
user = request.user
context = {'user':user, 'post_list':Post.objects.all(), 'mentee_list':Mentee.objects.all(), 'mentor_list':Mentor.objects.all(), 'Content_Summary_list':Content_Summary.objects.all()}
template = 'resources.html'
return render(request,template,context)
@login_required
def userProfileFAQ(request):
user = request.user
context = {'user':user}
template = 'FAQ.html'
return render(request,template,context)
@login_required
def userProfileProfile(request):
user = request.user
context = {'user':user}
template = 'profile.html'
return render(request,template,context)
@login_required
def userProfileContent(request):
user = request.user
context = {'user':user, 'mentee_list':Mentee.objects.all(), 'mentor_list':Mentor.objects.all()}
template = 'content.html'
return render(request,template,context)
@login_required
def userProfileSettings(request):
user = request.user
context = {'user':user}
template = 'settings.html'
return render(request,template,context)
@login_required
def feedback_process(request):
User = get_object_or_404(Profile, pk=request.user.pk)
contact_template = 'feedback/feedback_contact.html'
    # success_template = 'thanks.html'
# if this is a POST request we need to process the form data
if request.method == 'POST':
# create a form instance and populate it with data from the request:
form = FeedbackForm(request.POST)
# check whether it's valid:
if form.is_valid():
receiver_email = settings.EMAIL_HOST_USER
subject = form.subject(User.role)
message = form.cleaned_data['message']
# handle email eceptions
try:
send_mail(subject, message, request.user.email, [receiver_email])
except Exception as ex:
data = messages.add_message(request, messages.ERROR,'An error occurred. {}'.format(str(ex)))
else:
feedback_form = form.save(commit=False)
# feedback_form.receiver_email = receiver_email
feedback_form.user = User
feedback_form.save()
data = messages.add_message(request, messages.INFO, 'Thanks for sending a feedback.')
# render thank you message
return render(request, contact_template, {'message': data})
# if a GET (or any other method) we'll create a blank form
else:
form = FeedbackForm(user=User.user)
return render(request, contact_template, {'form': form})
| apache-2.0 | 8,039,884,220,214,451,000 | 33.579832 | 185 | 0.687728 | false |
NicoVarg99/daf-recipes | ckan/ckan/ckan/ckan/tests/logic/action/test_delete.py | 1 | 20446 | # encoding: utf-8
import nose.tools
import ckan.tests.helpers as helpers
import ckan.tests.factories as factories
import ckan.logic as logic
import ckan.model as model
import ckan.plugins as p
import ckan.lib.search as search
assert_equals = nose.tools.assert_equals
assert_raises = nose.tools.assert_raises
class TestDelete:
def setup(self):
helpers.reset_db()
def test_resource_delete(self):
user = factories.User()
sysadmin = factories.Sysadmin()
resource = factories.Resource(user=user)
context = {}
params = {'id': resource['id']}
helpers.call_action('resource_delete', context, **params)
# Not even a sysadmin can see it now
assert_raises(logic.NotFound, helpers.call_action, 'resource_show',
{'user': sysadmin['name']}, **params)
# It is still there but with state=deleted
res_obj = model.Resource.get(resource['id'])
assert_equals(res_obj.state, 'deleted')
class TestDeleteResourceViews(object):
@classmethod
def setup_class(cls):
if not p.plugin_loaded('image_view'):
p.load('image_view')
helpers.reset_db()
@classmethod
def teardown_class(cls):
p.unload('image_view')
def test_resource_view_delete(self):
resource_view = factories.ResourceView()
params = {'id': resource_view['id']}
helpers.call_action('resource_view_delete', context={}, **params)
assert_raises(logic.NotFound, helpers.call_action,
'resource_view_show',
context={}, **params)
# The model object is actually deleted
resource_view_obj = model.ResourceView.get(resource_view['id'])
assert_equals(resource_view_obj, None)
def test_delete_no_id_raises_validation_error(self):
params = {}
assert_raises(logic.ValidationError, helpers.call_action,
'resource_view_delete',
context={}, **params)
def test_delete_wrong_id_raises_not_found_error(self):
params = {'id': 'does_not_exist'}
assert_raises(logic.NotFound, helpers.call_action,
'resource_view_delete',
context={}, **params)
class TestClearResourceViews(object):
@classmethod
def setup_class(cls):
if not p.plugin_loaded('image_view'):
p.load('image_view')
if not p.plugin_loaded('recline_view'):
p.load('recline_view')
helpers.reset_db()
@classmethod
def teardown_class(cls):
p.unload('image_view')
p.unload('recline_view')
def test_resource_view_clear(self):
factories.ResourceView(view_type='image_view')
factories.ResourceView(view_type='image_view')
factories.ResourceView(view_type='recline_view')
factories.ResourceView(view_type='recline_view')
count = model.Session.query(model.ResourceView).count()
assert_equals(count, 4)
helpers.call_action('resource_view_clear', context={})
count = model.Session.query(model.ResourceView).count()
assert_equals(count, 0)
def test_resource_view_clear_with_types(self):
factories.ResourceView(view_type='image_view')
factories.ResourceView(view_type='image_view')
factories.ResourceView(view_type='recline_view')
factories.ResourceView(view_type='recline_view')
count = model.Session.query(model.ResourceView).count()
assert_equals(count, 4)
helpers.call_action('resource_view_clear', context={},
view_types=['image_view'])
view_types = model.Session.query(model.ResourceView.view_type).all()
assert_equals(len(view_types), 2)
for view_type in view_types:
assert_equals(view_type[0], 'recline_view')
class TestDeleteTags(object):
def test_tag_delete_with_unicode_returns_unicode_error(self):
# There is not a lot of call for it, but in theory there could be
# unicode in the ActionError error message, so ensure that comes
# through in NotFound as unicode.
try:
helpers.call_action('tag_delete', id=u'Delta symbol: \u0394')
except logic.NotFound, e:
assert u'Delta symbol: \u0394' in unicode(e)
else:
assert 0, 'Should have raised NotFound'
class TestGroupPurge(object):
def setup(self):
helpers.reset_db()
def test_a_non_sysadmin_cant_purge_group(self):
user = factories.User()
group = factories.Group(user=user)
assert_raises(logic.NotAuthorized,
helpers.call_action,
'group_purge',
context={'user': user['name'], 'ignore_auth': False},
id=group['name'])
def test_purged_group_does_not_show(self):
group = factories.Group()
helpers.call_action('group_purge', id=group['name'])
assert_raises(logic.NotFound, helpers.call_action, 'group_show',
context={}, id=group['name'])
def test_purged_group_is_not_listed(self):
group = factories.Group()
helpers.call_action('group_purge', id=group['name'])
assert_equals(helpers.call_action('group_list', context={}), [])
def test_dataset_in_a_purged_group_no_longer_shows_that_group(self):
group = factories.Group()
dataset = factories.Dataset(groups=[{'name': group['name']}])
helpers.call_action('group_purge', id=group['name'])
dataset_shown = helpers.call_action('package_show', context={},
id=dataset['id'])
assert_equals(dataset_shown['groups'], [])
def test_purged_group_is_not_in_search_results_for_its_ex_dataset(self):
search.clear_all()
group = factories.Group()
dataset = factories.Dataset(groups=[{'name': group['name']}])
def get_search_result_groups():
results = helpers.call_action('package_search',
q=dataset['title'])['results']
return [g['name'] for g in results[0]['groups']]
assert_equals(get_search_result_groups(), [group['name']])
helpers.call_action('group_purge', id=group['name'])
assert_equals(get_search_result_groups(), [])
def test_purged_group_leaves_no_trace_in_the_model(self):
factories.Group(name='parent')
user = factories.User()
group1 = factories.Group(name='group1',
extras=[{'key': 'key1', 'value': 'val1'}],
users=[{'name': user['name']}],
groups=[{'name': 'parent'}])
factories.Dataset(name='ds', groups=[{'name': 'group1'}])
factories.Group(name='child', groups=[{'name': 'group1'}])
num_revisions_before = model.Session.query(model.Revision).count()
helpers.call_action('group_purge', id=group1['name'])
num_revisions_after = model.Session.query(model.Revision).count()
# the Group and related objects are gone
assert_equals(sorted([g.name for g in
model.Session.query(model.Group).all()]),
['child', 'parent'])
assert_equals(model.Session.query(model.GroupExtra).all(), [])
# the only members left are the users for the parent and child
assert_equals(sorted([
(m.table_name, m.group.name)
for m in model.Session.query(model.Member).join(model.Group)]),
[('user', 'child'), ('user', 'parent')])
# the dataset is still there though
assert_equals([p.name for p in model.Session.query(model.Package)],
['ds'])
# the group's object revisions were purged too
assert_equals(sorted(
[gr.name for gr in model.Session.query(model.GroupRevision)]),
['child', 'parent'])
assert_equals(model.Session.query(model.GroupExtraRevision).all(),
[])
# Member is not revisioned
# No Revision objects were purged, in fact 1 is created for the purge
assert_equals(num_revisions_after - num_revisions_before, 1)
def test_missing_id_returns_error(self):
assert_raises(logic.ValidationError,
helpers.call_action, 'group_purge')
def test_bad_id_returns_404(self):
assert_raises(logic.NotFound,
helpers.call_action, 'group_purge', id='123')
class TestOrganizationPurge(object):
def setup(self):
helpers.reset_db()
def test_a_non_sysadmin_cant_purge_org(self):
user = factories.User()
org = factories.Organization(user=user)
assert_raises(logic.NotAuthorized,
helpers.call_action,
'organization_purge',
context={'user': user['name'], 'ignore_auth': False},
id=org['name'])
def test_purged_org_does_not_show(self):
org = factories.Organization()
helpers.call_action('organization_purge', id=org['name'])
assert_raises(logic.NotFound, helpers.call_action, 'organization_show',
context={}, id=org['name'])
def test_purged_org_is_not_listed(self):
org = factories.Organization()
helpers.call_action('organization_purge', id=org['name'])
assert_equals(helpers.call_action('organization_list', context={}), [])
def test_dataset_in_a_purged_org_no_longer_shows_that_org(self):
org = factories.Organization()
dataset = factories.Dataset(owner_org=org['id'])
helpers.call_action('organization_purge', id=org['name'])
dataset_shown = helpers.call_action('package_show', context={},
id=dataset['id'])
assert_equals(dataset_shown['owner_org'], None)
def test_purged_org_is_not_in_search_results_for_its_ex_dataset(self):
search.clear_all()
org = factories.Organization()
dataset = factories.Dataset(owner_org=org['id'])
def get_search_result_owner_org():
results = helpers.call_action('package_search',
q=dataset['title'])['results']
return results[0]['owner_org']
assert_equals(get_search_result_owner_org(), org['id'])
helpers.call_action('organization_purge', id=org['name'])
assert_equals(get_search_result_owner_org(), None)
def test_purged_organization_leaves_no_trace_in_the_model(self):
factories.Organization(name='parent')
user = factories.User()
org1 = factories.Organization(
name='org1',
extras=[{'key': 'key1', 'value': 'val1'}],
users=[{'name': user['name']}],
groups=[{'name': 'parent'}])
factories.Dataset(name='ds', owner_org=org1['id'])
factories.Organization(name='child', groups=[{'name': 'org1'}])
num_revisions_before = model.Session.query(model.Revision).count()
helpers.call_action('organization_purge', id=org1['name'])
num_revisions_after = model.Session.query(model.Revision).count()
# the Organization and related objects are gone
assert_equals(sorted([o.name for o in
model.Session.query(model.Group).all()]),
['child', 'parent'])
assert_equals(model.Session.query(model.GroupExtra).all(), [])
# the only members left are the users for the parent and child
assert_equals(sorted([
(m.table_name, m.group.name)
for m in model.Session.query(model.Member).join(model.Group)]),
[('user', 'child'), ('user', 'parent')])
# the dataset is still there though
assert_equals([p.name for p in model.Session.query(model.Package)],
['ds'])
# the organization's object revisions were purged too
assert_equals(sorted(
[gr.name for gr in model.Session.query(model.GroupRevision)]),
['child', 'parent'])
assert_equals(model.Session.query(model.GroupExtraRevision).all(),
[])
# Member is not revisioned
# No Revision objects were purged, in fact 1 is created for the purge
assert_equals(num_revisions_after - num_revisions_before, 1)
def test_missing_id_returns_error(self):
assert_raises(logic.ValidationError,
helpers.call_action, 'organization_purge')
def test_bad_id_returns_404(self):
assert_raises(logic.NotFound,
helpers.call_action, 'organization_purge', id='123')
class TestDatasetPurge(object):
def setup(self):
helpers.reset_db()
def test_a_non_sysadmin_cant_purge_dataset(self):
user = factories.User()
dataset = factories.Dataset(user=user)
assert_raises(logic.NotAuthorized,
helpers.call_action,
'dataset_purge',
context={'user': user['name'], 'ignore_auth': False},
id=dataset['name'])
def test_purged_dataset_does_not_show(self):
dataset = factories.Dataset()
helpers.call_action('dataset_purge',
context={'ignore_auth': True},
id=dataset['name'])
assert_raises(logic.NotFound, helpers.call_action, 'package_show',
context={}, id=dataset['name'])
def test_purged_dataset_is_not_listed(self):
dataset = factories.Dataset()
helpers.call_action('dataset_purge', id=dataset['name'])
assert_equals(helpers.call_action('package_list', context={}), [])
def test_group_no_longer_shows_its_purged_dataset(self):
group = factories.Group()
dataset = factories.Dataset(groups=[{'name': group['name']}])
helpers.call_action('dataset_purge', id=dataset['name'])
dataset_shown = helpers.call_action('group_show', context={},
id=group['id'],
include_datasets=True)
assert_equals(dataset_shown['packages'], [])
def test_purged_dataset_is_not_in_search_results(self):
search.clear_all()
dataset = factories.Dataset()
def get_search_results():
results = helpers.call_action('package_search',
q=dataset['title'])['results']
return [d['name'] for d in results]
assert_equals(get_search_results(), [dataset['name']])
helpers.call_action('dataset_purge', id=dataset['name'])
assert_equals(get_search_results(), [])
def test_purged_dataset_leaves_no_trace_in_the_model(self):
factories.Group(name='group1')
org = factories.Organization()
dataset = factories.Dataset(
tags=[{'name': 'tag1'}],
groups=[{'name': 'group1'}],
owner_org=org['id'],
extras=[{'key': 'testkey', 'value': 'testvalue'}])
factories.Resource(package_id=dataset['id'])
num_revisions_before = model.Session.query(model.Revision).count()
helpers.call_action('dataset_purge',
context={'ignore_auth': True},
id=dataset['name'])
num_revisions_after = model.Session.query(model.Revision).count()
# the Package and related objects are gone
assert_equals(model.Session.query(model.Package).all(), [])
assert_equals(model.Session.query(model.Resource).all(), [])
assert_equals(model.Session.query(model.PackageTag).all(), [])
# there is no clean-up of the tag object itself, just the PackageTag.
assert_equals([t.name for t in model.Session.query(model.Tag).all()],
['tag1'])
assert_equals(model.Session.query(model.PackageExtra).all(), [])
# the only member left is for the user created in factories.Group() and
# factories.Organization()
assert_equals(sorted(
[(m.table_name, m.group.name)
for m in model.Session.query(model.Member).join(model.Group)]),
[('user', 'group1'), ('user', org['name'])])
# all the object revisions were purged too
assert_equals(model.Session.query(model.PackageRevision).all(), [])
assert_equals(model.Session.query(model.ResourceRevision).all(), [])
assert_equals(model.Session.query(model.PackageTagRevision).all(), [])
assert_equals(model.Session.query(model.PackageExtraRevision).all(),
[])
# Member is not revisioned
# No Revision objects were purged or created
assert_equals(num_revisions_after - num_revisions_before, 0)
def test_purged_dataset_removed_from_relationships(self):
child = factories.Dataset()
parent = factories.Dataset()
grandparent = factories.Dataset()
helpers.call_action('package_relationship_create',
subject=child['id'],
type='child_of',
object=parent['id'])
helpers.call_action('package_relationship_create',
subject=parent['id'],
type='child_of',
object=grandparent['id'])
assert_equals(len(
model.Session.query(model.PackageRelationship).all()), 2)
helpers.call_action('dataset_purge',
context={'ignore_auth': True},
id=parent['name'])
assert_equals(model.Session.query(model.PackageRelationship).all(), [])
def test_missing_id_returns_error(self):
assert_raises(logic.ValidationError,
helpers.call_action, 'dataset_purge')
def test_bad_id_returns_404(self):
assert_raises(logic.NotFound,
helpers.call_action, 'dataset_purge', id='123')
class TestUserDelete(object):
def setup(self):
helpers.reset_db()
def test_user_delete(self):
user = factories.User()
context = {}
params = {u'id': user[u'id']}
helpers.call_action(u'user_delete', context, **params)
# It is still there but with state=deleted
user_obj = model.User.get(user[u'id'])
assert_equals(user_obj.state, u'deleted')
def test_user_delete_removes_memberships(self):
user = factories.User()
factories.Organization(
users=[{u'name': user[u'id'], u'capacity': u'admin'}])
factories.Group(
users=[{u'name': user[u'id'], u'capacity': u'admin'}])
user_memberships = model.Session.query(model.Member).filter(
model.Member.table_id == user[u'id']).all()
assert_equals(len(user_memberships), 2)
assert_equals([m.state for m in user_memberships],
[u'active', u'active'])
context = {}
params = {u'id': user[u'id']}
helpers.call_action(u'user_delete', context, **params)
user_memberships = model.Session.query(model.Member).filter(
model.Member.table_id == user[u'id']).all()
# Member objects are still there, but flagged as deleted
assert_equals(len(user_memberships), 2)
assert_equals([m.state for m in user_memberships],
[u'deleted', u'deleted'])
def test_user_delete_removes_memberships_when_using_name(self):
user = factories.User()
factories.Organization(
users=[{u'name': user[u'id'], u'capacity': u'admin'}])
factories.Group(
users=[{u'name': user[u'id'], u'capacity': u'admin'}])
context = {}
params = {u'id': user[u'name']}
helpers.call_action(u'user_delete', context, **params)
user_memberships = model.Session.query(model.Member).filter(
model.Member.table_id == user[u'id']).all()
# Member objects are still there, but flagged as deleted
assert_equals(len(user_memberships), 2)
assert_equals([m.state for m in user_memberships],
[u'deleted', u'deleted'])
| gpl-3.0 | -774,451,038,322,788,900 | 36.039855 | 79 | 0.581923 | false |
arnaldog12/Manual-Pratico-Deep-Learning | utils/samples_generator.py | 1 | 1868 | import numpy as np
def make_cubic(n_samples, x_min, x_max, a=1, b=0, c=0, d=0, noise=0.0, random_state=None):
np.random.seed(random_state)
x = np.linspace(x_min, x_max, n_samples)
y = a*x**3 + b*x**2 + c*x + d + (2*noise*np.random.random(n_samples) - noise)
return x.reshape(-1,1), y.reshape(-1,1)
def make_exp(n_samples, x_min, x_max, noise=0.0, random_state=None):
np.random.seed(random_state)
x = np.linspace(x_min, x_max, n_samples)
y = np.exp(x) + 2*noise*np.random.random(n_samples) - noise
return x.reshape(-1,1), y.reshape(-1,1)
def make_log10(n_samples, x_min, x_max, noise=0.0, random_state=None):
np.random.seed(random_state)
x = np.logspace(np.log10(x_min), np.log10(x_max), n_samples)
y = np.log10(x) + 2*noise*np.random.random(n_samples) - noise
return x.reshape(-1,1), y.reshape(-1,1)
def make_spiral(n_samples, n_class=2, radius=1, laps=1.0, noise=0.0, random_state=None):
np.random.seed(random_state)
x = np.zeros((n_samples * n_class, 2))
y = np.zeros((n_samples * n_class))
pi_2 = 2 * np.math.pi
points = np.linspace(0, 1, n_samples)
r = points * radius
t = points * pi_2 * laps
for label, delta_t in zip(range(n_class), np.arange(0, pi_2, pi_2/n_class)):
random_noise = (2 * np.random.rand(n_samples) - 1) * noise
index = np.arange(label*n_samples, (label+1)*n_samples)
x[index] = np.c_[r * np.sin(t + delta_t) + random_noise,
r * np.cos(t + delta_t) + random_noise]
y[index] = label
return x, y.reshape(-1, 1)
def make_square(n_samples, x_min, x_max, a=1, b=0, c=0, noise=0.0, random_state=None):
np.random.seed(random_state)
x = np.linspace(x_min, x_max, n_samples)
y = a*x**2 + b*x + c + (2*noise*np.random.random(n_samples) - noise)
return x.reshape(-1,1), y.reshape(-1,1)
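# Illustrative usage sketch (not part of the original module): exercises two of
# the generators above with a fixed seed and prints the resulting array shapes.
if __name__ == '__main__':
    x_sq, y_sq = make_square(100, x_min=-3, x_max=3, a=1, b=0, c=0,
                             noise=0.5, random_state=42)
    x_sp, y_sp = make_spiral(100, n_class=2, radius=1, laps=1.0,
                             noise=0.1, random_state=42)
    print(x_sq.shape, y_sq.shape)   # (100, 1) (100, 1)
    print(x_sp.shape, y_sp.shape)   # (200, 2) (200, 1)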
| mit | 8,612,343,678,604,408,000 | 43.47619 | 90 | 0.600107 | false |
sthyme/ZFSchizophrenia | BehaviorAnalysis/HSMovieAnalysis/setResolutionWidget.py | 1 | 5960 | #-----------------------
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'selectUI.ui'
# Created: Thu Feb 26 13:45:32 2015 by: PyQt4 UI code generator 4.11.3
#
# Created by Emily Conklin
# February 2015
# This program is connected to the main widget (NeuroGUI.py) and is a sub-user interface
# Called from imageTools.setCameraResolution
# Allows the user to specify:
# 1) default resolution
# 2) fit-to-screen resolution
# 3) fit-to-projector resolution
#-----------------------
from PyQt4 import QtCore, QtGui
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import sys
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_setResolutionWidget(QtGui.QDialog):
'''
sub-window class - QDialog type
'''
def __init__(self):
'''
initializes the dialog, data member
'''
QtGui.QDialog.__init__(self)
self.setupUi(self)
self.videoType=0
def setupUi(self, setResolutionWidget):
'''
called in the initialization method
sets up each layout, labels, buttons, etc.
'''
setResolutionWidget.setObjectName(_fromUtf8("setResolutionWidget"))
setResolutionWidget.resize(404, 300)
self.verticalLayout_2 = QtGui.QVBoxLayout(setResolutionWidget)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
#line 1: label for desired resolution
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.desiredResolutionLabel = QtGui.QLabel(setResolutionWidget)
self.desiredResolutionLabel.setObjectName(_fromUtf8("desiredResolutionLabel"))
self.horizontalLayout.addWidget(self.desiredResolutionLabel)
#lines 2,3,4: resolution options
self.verticalLayout_3 = QtGui.QVBoxLayout()
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.defaultResRB = QtGui.QRadioButton(setResolutionWidget)
self.defaultResRB.setObjectName(_fromUtf8("defaultResRB"))
self.verticalLayout_3.addWidget(self.defaultResRB)
self.fitToScreenLE = QtGui.QRadioButton(setResolutionWidget)
self.fitToScreenLE.setObjectName(_fromUtf8("fitToScreenLE"))
self.verticalLayout_3.addWidget(self.fitToScreenLE)
self.fitToProjectorLE = QtGui.QRadioButton(setResolutionWidget)
self.fitToProjectorLE.setObjectName(_fromUtf8("fitToProjectorLE"))
self.verticalLayout_3.addWidget(self.fitToProjectorLE)
self.horizontalLayout.addLayout(self.verticalLayout_3)
self.verticalLayout_2.addLayout(self.horizontalLayout)
self.defaultResRB.setChecked(True) #defaults default resolution
#sets up button group with the three options
self.buttonGroup = QtGui.QButtonGroup()
self.buttonGroup.addButton(self.defaultResRB,0)
self.buttonGroup.addButton(self.fitToScreenLE,1)
self.buttonGroup.addButton(self.fitToProjectorLE,2)
#line 5: submit button
self.horizontalLayout_4 = QtGui.QHBoxLayout()
self.horizontalLayout_4.setObjectName(_fromUtf8("horizontalLayout_4"))
spacerItem4 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_4.addItem(spacerItem4)
self.Submit = QtGui.QPushButton(setResolutionWidget)
self.Submit.setObjectName(_fromUtf8("Submit"))
self.horizontalLayout_4.addWidget(self.Submit)
spacerItem5 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_4.addItem(spacerItem5)
self.verticalLayout_2.addLayout(self.horizontalLayout_4)
self.retranslateUi(setResolutionWidget)
QtCore.QMetaObject.connectSlotsByName(setResolutionWidget)
def retranslateUi(self, setResolutionWidget):
'''
called in the setup method
sets label/button text and window titles
links buttons to other methods
'''
setResolutionWidget.setWindowTitle(_translate("setResolutionWidget", "Resolution Options", None))
self.desiredResolutionLabel.setText(_translate("setResolutionWidget", "Choose desired resolution:", None))
self.defaultResRB.setText(_translate("setResolutionWidget", "Default resolution", None))
self.fitToScreenLE.setText(_translate("setResolutionWidget", "Fit to screen (~720p)", None))
self.fitToProjectorLE.setText(_translate("setResolutionWidget", "Fit to projector (~480p)", None))
self.Submit.setText(_translate("setResolutionWidget", "Submit",None))
#finds out which radio button was pressed
self.defaultResRB.clicked.connect(self.readSignal)
self.fitToScreenLE.clicked.connect(self.readSignal)
self.fitToProjectorLE.clicked.connect(self.readSignal)
self.Submit.clicked.connect(self.submitClose) #connects submit button to submitClose
def readSignal(self):
'''
checks button group signal to determine radio button clicked
'''
self.videoType = self.buttonGroup.checkedId() #checks radio button signal
def submitClose(self):
'''
closes window when user hits submit, passes videoType
'''
self.accept()
if __name__=='__main__':
'''
main function to test widget as a standalone
'''
app=QtGui.QApplication(sys.argv)
ex=Ui_setResolutionWidget()
ex.show()
sys.exit(app.exec_())
| mit | -8,555,149,342,993,067,000 | 41.571429 | 114 | 0.69245 | false |
xin1195/smartSearch | setting.py | 1 | 1207 | #!/usr/bin/env python3
# _*_coding:utf-8_*_
import os
import motor.motor_tornado
import redis
from pymongo import MongoClient
from common.logManageLib import get_logger
settings = dict(
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=os.path.join(os.path.dirname(__file__), "static"),
cookie_secret="bZJc2sWbQLKoscdGkHn/VytuyfgXwQt8S0R0kRvJ5/xJ89E=",
login_url="/login",
xsrf_cookies=True,
debug=True,
)
# Set up the MongoDB connection (asynchronous, via Motor)
client = motor.motor_tornado.MotorClient('mongodb://112.74.204.250:27017')
# Get a synchronous database connection (PyMongo)
g_py_client = MongoClient("mongodb://112.74.204.250:27017")
# Set up the Redis connection
g_redis_db = redis.StrictRedis(host='112.74.204.250', port=6379, password=None, db=1)
g_redis_time_5m = 5 * 60
g_redis_time_10m = 10 * 60
g_redis_time_30m = 30 * 60
g_redis_time_1h = 1 * 60 * 60
g_redis_time_2h = 2 * 60 * 60
g_redis_time_5h = 5 * 60 * 60
g_redis_time_1d = 24 * 60 * 60
g_redis_time_1w = 7 * 24 * 60 * 60
# Logging configuration
logger = get_logger(strFileName="smartSearch.log", debug=20, showStreamLog=True, saveLogPath=None)
# Domain (site URL) configuration
domain = "http://www.liuhub.com/"
# domain = "http://127.0.0.1:8000/"
| apache-2.0 | 8,254,905,785,951,247,000 | 23.617021 | 98 | 0.685393 | false |
Detry322/map-creator | app/random.py | 1 | 1453 | from app.models import all_models
from app.utils import mkdir_p
from app import GENERATED_TILES_FOLDER, RANDOM_FOLDER, BACKPROPS_FOLDER
from scipy import misc
import glob
import numpy as np
import os
from keras.models import load_model, Model
from keras.optimizers import Adam, SGD, Adagrad
from keras.layers import LocallyConnected1D, Input, Reshape
from app import BACKPROPS_FOLDER, FORWARDPROPS_FOLDER, RANDOM_FOLDER
from app.utils import mkdir_p
from app.forwardprop import forwardprop_single_image
NOISE_SIZE = 100
import time
def random(model_file):
model = load_model(model_file)
generator = model.layers[0]
generator.trainable = False
for layer in generator.layers:
layer.trainable = False
api_key_water = [np.loadtxt(filename) for filename in glob.glob(os.path.join(BACKPROPS_FOLDER, 'api_key', 'water', '*.txt'))]
no_api_key_water = [np.loadtxt(filename) for filename in glob.glob(os.path.join(BACKPROPS_FOLDER, 'no_api_key', 'water', '*.txt'))]
no_api_key_trees = np.loadtxt(os.path.join(BACKPROPS_FOLDER, 'no_api_key', 'trees', '3391.png.txt'))
folder = os.path.join(RANDOM_FOLDER, '{}'.format(time.time()))
mkdir_p(folder)
for a in api_key_water:
for na in no_api_key_water:
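      # Assumed intent (word2vec-style latent arithmetic): subtract a "water
      # without API key" code from a "water with API key" code and add the
      # difference to a "trees without API key" code, approximating "trees
      # with API key" before decoding it with the generator.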
api_key_trees = a - na + no_api_key_trees
image = forwardprop_single_image(generator, api_key_trees)
misc.imsave(os.path.join(folder, 'land-{}.png'.format(time.time())), ((image + 1)*128).astype('uint8'))
| mit | -2,502,770,354,397,162,500 | 32.022727 | 133 | 0.722643 | false |
prefetchnta/questlab | bin/x64bin/python/36/Lib/calendar.py | 1 | 23926 | """Calendar printing functions
Note when comparing these calendars to the ones printed by cal(1): By
default, these calendars have Monday as the first day of the week, and
Sunday as the last (the European convention). Use setfirstweekday() to
set the first day of the week (0=Monday, 6=Sunday)."""
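# Illustrative usage (comment added for clarity; not part of the upstream module):
#     >>> import calendar
#     >>> calendar.setfirstweekday(calendar.SUNDAY)
#     >>> print(calendar.month(2017, 2))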
import sys
import datetime
import locale as _locale
from itertools import repeat
__all__ = ["IllegalMonthError", "IllegalWeekdayError", "setfirstweekday",
"firstweekday", "isleap", "leapdays", "weekday", "monthrange",
"monthcalendar", "prmonth", "month", "prcal", "calendar",
"timegm", "month_name", "month_abbr", "day_name", "day_abbr",
"Calendar", "TextCalendar", "HTMLCalendar", "LocaleTextCalendar",
"LocaleHTMLCalendar", "weekheader"]
# Exception raised for bad input (with string parameter for details)
error = ValueError
# Exceptions raised for bad input
class IllegalMonthError(ValueError):
def __init__(self, month):
self.month = month
def __str__(self):
return "bad month number %r; must be 1-12" % self.month
class IllegalWeekdayError(ValueError):
def __init__(self, weekday):
self.weekday = weekday
def __str__(self):
return "bad weekday number %r; must be 0 (Monday) to 6 (Sunday)" % self.weekday
# Constants for months referenced later
January = 1
February = 2
# Number of days per month (except for February in leap years)
mdays = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
# This module used to have hard-coded lists of day and month names, as
# English strings. The classes following emulate a read-only version of
# that, but supply localized names. Note that the values are computed
# fresh on each call, in case the user changes locale between calls.
class _localized_month:
_months = [datetime.date(2001, i+1, 1).strftime for i in range(12)]
_months.insert(0, lambda x: "")
def __init__(self, format):
self.format = format
def __getitem__(self, i):
funcs = self._months[i]
if isinstance(i, slice):
return [f(self.format) for f in funcs]
else:
return funcs(self.format)
def __len__(self):
return 13
class _localized_day:
# January 1, 2001, was a Monday.
_days = [datetime.date(2001, 1, i+1).strftime for i in range(7)]
def __init__(self, format):
self.format = format
def __getitem__(self, i):
funcs = self._days[i]
if isinstance(i, slice):
return [f(self.format) for f in funcs]
else:
return funcs(self.format)
def __len__(self):
return 7
# Full and abbreviated names of weekdays
day_name = _localized_day('%A')
day_abbr = _localized_day('%a')
# Full and abbreviated names of months (1-based arrays!!!)
month_name = _localized_month('%B')
month_abbr = _localized_month('%b')
# Constants for weekdays
(MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY) = range(7)
def isleap(year):
"""Return True for leap years, False for non-leap years."""
return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
def leapdays(y1, y2):
"""Return number of leap years in range [y1, y2).
Assume y1 <= y2."""
y1 -= 1
y2 -= 1
return (y2//4 - y1//4) - (y2//100 - y1//100) + (y2//400 - y1//400)
def weekday(year, month, day):
"""Return weekday (0-6 ~ Mon-Sun) for year (1970-...), month (1-12),
day (1-31)."""
return datetime.date(year, month, day).weekday()
def monthrange(year, month):
"""Return weekday (0-6 ~ Mon-Sun) and number of days (28-31) for
year, month."""
if not 1 <= month <= 12:
raise IllegalMonthError(month)
day1 = weekday(year, month, 1)
ndays = mdays[month] + (month == February and isleap(year))
return day1, ndays
class Calendar(object):
"""
Base calendar class. This class doesn't do any formatting. It simply
provides data to subclasses.
"""
def __init__(self, firstweekday=0):
self.firstweekday = firstweekday # 0 = Monday, 6 = Sunday
def getfirstweekday(self):
return self._firstweekday % 7
def setfirstweekday(self, firstweekday):
self._firstweekday = firstweekday
firstweekday = property(getfirstweekday, setfirstweekday)
def iterweekdays(self):
"""
Return an iterator for one week of weekday numbers starting with the
configured first one.
"""
for i in range(self.firstweekday, self.firstweekday + 7):
yield i%7
def itermonthdates(self, year, month):
"""
Return an iterator for one month. The iterator will yield datetime.date
values and will always iterate through complete weeks, so it will yield
dates outside the specified month.
"""
date = datetime.date(year, month, 1)
# Go back to the beginning of the week
days = (date.weekday() - self.firstweekday) % 7
date -= datetime.timedelta(days=days)
oneday = datetime.timedelta(days=1)
while True:
yield date
try:
date += oneday
except OverflowError:
# Adding one day could fail after datetime.MAXYEAR
break
if date.month != month and date.weekday() == self.firstweekday:
break
def itermonthdays2(self, year, month):
"""
Like itermonthdates(), but will yield (day number, weekday number)
tuples. For days outside the specified month the day number is 0.
"""
for i, d in enumerate(self.itermonthdays(year, month), self.firstweekday):
yield d, i % 7
def itermonthdays(self, year, month):
"""
Like itermonthdates(), but will yield day numbers. For days outside
the specified month the day number is 0.
"""
day1, ndays = monthrange(year, month)
days_before = (day1 - self.firstweekday) % 7
yield from repeat(0, days_before)
yield from range(1, ndays + 1)
days_after = (self.firstweekday - day1 - ndays) % 7
yield from repeat(0, days_after)
def monthdatescalendar(self, year, month):
"""
Return a matrix (list of lists) representing a month's calendar.
Each row represents a week; week entries are datetime.date values.
"""
dates = list(self.itermonthdates(year, month))
return [ dates[i:i+7] for i in range(0, len(dates), 7) ]
def monthdays2calendar(self, year, month):
"""
Return a matrix representing a month's calendar.
Each row represents a week; week entries are
(day number, weekday number) tuples. Day numbers outside this month
are zero.
"""
days = list(self.itermonthdays2(year, month))
return [ days[i:i+7] for i in range(0, len(days), 7) ]
def monthdayscalendar(self, year, month):
"""
Return a matrix representing a month's calendar.
Each row represents a week; days outside this month are zero.
"""
days = list(self.itermonthdays(year, month))
return [ days[i:i+7] for i in range(0, len(days), 7) ]
def yeardatescalendar(self, year, width=3):
"""
Return the data for the specified year ready for formatting. The return
value is a list of month rows. Each month row contains up to width months.
Each month contains between 4 and 6 weeks and each week contains 1-7
days. Days are datetime.date objects.
"""
months = [
self.monthdatescalendar(year, i)
for i in range(January, January+12)
]
return [months[i:i+width] for i in range(0, len(months), width) ]
def yeardays2calendar(self, year, width=3):
"""
Return the data for the specified year ready for formatting (similar to
yeardatescalendar()). Entries in the week lists are
(day number, weekday number) tuples. Day numbers outside this month are
zero.
"""
months = [
self.monthdays2calendar(year, i)
for i in range(January, January+12)
]
return [months[i:i+width] for i in range(0, len(months), width) ]
def yeardayscalendar(self, year, width=3):
"""
Return the data for the specified year ready for formatting (similar to
yeardatescalendar()). Entries in the week lists are day numbers.
Day numbers outside this month are zero.
"""
months = [
self.monthdayscalendar(year, i)
for i in range(January, January+12)
]
return [months[i:i+width] for i in range(0, len(months), width) ]
class TextCalendar(Calendar):
"""
Subclass of Calendar that outputs a calendar as a simple plain text
similar to the UNIX program cal.
"""
def prweek(self, theweek, width):
"""
Print a single week (no newline).
"""
print(self.formatweek(theweek, width), end=' ')
def formatday(self, day, weekday, width):
"""
Returns a formatted day.
"""
if day == 0:
s = ''
else:
s = '%2i' % day # right-align single-digit days
return s.center(width)
def formatweek(self, theweek, width):
"""
Returns a single week in a string (no newline).
"""
return ' '.join(self.formatday(d, wd, width) for (d, wd) in theweek)
def formatweekday(self, day, width):
"""
Returns a formatted week day name.
"""
if width >= 9:
names = day_name
else:
names = day_abbr
return names[day][:width].center(width)
def formatweekheader(self, width):
"""
Return a header for a week.
"""
return ' '.join(self.formatweekday(i, width) for i in self.iterweekdays())
def formatmonthname(self, theyear, themonth, width, withyear=True):
"""
Return a formatted month name.
"""
s = month_name[themonth]
if withyear:
s = "%s %r" % (s, theyear)
return s.center(width)
def prmonth(self, theyear, themonth, w=0, l=0):
"""
Print a month's calendar.
"""
print(self.formatmonth(theyear, themonth, w, l), end='')
def formatmonth(self, theyear, themonth, w=0, l=0):
"""
Return a month's calendar string (multi-line).
"""
w = max(2, w)
l = max(1, l)
s = self.formatmonthname(theyear, themonth, 7 * (w + 1) - 1)
s = s.rstrip()
s += '\n' * l
s += self.formatweekheader(w).rstrip()
s += '\n' * l
for week in self.monthdays2calendar(theyear, themonth):
s += self.formatweek(week, w).rstrip()
s += '\n' * l
return s
def formatyear(self, theyear, w=2, l=1, c=6, m=3):
"""
Returns a year's calendar as a multi-line string.
"""
w = max(2, w)
l = max(1, l)
c = max(2, c)
colwidth = (w + 1) * 7 - 1
v = []
a = v.append
a(repr(theyear).center(colwidth*m+c*(m-1)).rstrip())
a('\n'*l)
header = self.formatweekheader(w)
for (i, row) in enumerate(self.yeardays2calendar(theyear, m)):
# months in this row
months = range(m*i+1, min(m*(i+1)+1, 13))
a('\n'*l)
names = (self.formatmonthname(theyear, k, colwidth, False)
for k in months)
a(formatstring(names, colwidth, c).rstrip())
a('\n'*l)
headers = (header for k in months)
a(formatstring(headers, colwidth, c).rstrip())
a('\n'*l)
# max number of weeks for this row
height = max(len(cal) for cal in row)
for j in range(height):
weeks = []
for cal in row:
if j >= len(cal):
weeks.append('')
else:
weeks.append(self.formatweek(cal[j], w))
a(formatstring(weeks, colwidth, c).rstrip())
a('\n' * l)
return ''.join(v)
def pryear(self, theyear, w=0, l=0, c=6, m=3):
"""Print a year's calendar."""
print(self.formatyear(theyear, w, l, c, m))
class HTMLCalendar(Calendar):
"""
This calendar returns complete HTML pages.
"""
# CSS classes for the day <td>s
cssclasses = ["mon", "tue", "wed", "thu", "fri", "sat", "sun"]
def formatday(self, day, weekday):
"""
Return a day as a table cell.
"""
if day == 0:
return '<td class="noday"> </td>' # day outside month
else:
return '<td class="%s">%d</td>' % (self.cssclasses[weekday], day)
def formatweek(self, theweek):
"""
Return a complete week as a table row.
"""
s = ''.join(self.formatday(d, wd) for (d, wd) in theweek)
return '<tr>%s</tr>' % s
def formatweekday(self, day):
"""
Return a weekday name as a table header.
"""
return '<th class="%s">%s</th>' % (self.cssclasses[day], day_abbr[day])
def formatweekheader(self):
"""
Return a header for a week as a table row.
"""
s = ''.join(self.formatweekday(i) for i in self.iterweekdays())
return '<tr>%s</tr>' % s
def formatmonthname(self, theyear, themonth, withyear=True):
"""
Return a month name as a table row.
"""
if withyear:
s = '%s %s' % (month_name[themonth], theyear)
else:
s = '%s' % month_name[themonth]
return '<tr><th colspan="7" class="month">%s</th></tr>' % s
def formatmonth(self, theyear, themonth, withyear=True):
"""
Return a formatted month as a table.
"""
v = []
a = v.append
a('<table border="0" cellpadding="0" cellspacing="0" class="month">')
a('\n')
a(self.formatmonthname(theyear, themonth, withyear=withyear))
a('\n')
a(self.formatweekheader())
a('\n')
for week in self.monthdays2calendar(theyear, themonth):
a(self.formatweek(week))
a('\n')
a('</table>')
a('\n')
return ''.join(v)
def formatyear(self, theyear, width=3):
"""
Return a formatted year as a table of tables.
"""
v = []
a = v.append
width = max(width, 1)
a('<table border="0" cellpadding="0" cellspacing="0" class="year">')
a('\n')
a('<tr><th colspan="%d" class="year">%s</th></tr>' % (width, theyear))
for i in range(January, January+12, width):
# months in this row
months = range(i, min(i+width, 13))
a('<tr>')
for m in months:
a('<td>')
a(self.formatmonth(theyear, m, withyear=False))
a('</td>')
a('</tr>')
a('</table>')
return ''.join(v)
def formatyearpage(self, theyear, width=3, css='calendar.css', encoding=None):
"""
Return a formatted year as a complete HTML page.
"""
if encoding is None:
encoding = sys.getdefaultencoding()
v = []
a = v.append
a('<?xml version="1.0" encoding="%s"?>\n' % encoding)
a('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">\n')
a('<html>\n')
a('<head>\n')
a('<meta http-equiv="Content-Type" content="text/html; charset=%s" />\n' % encoding)
if css is not None:
a('<link rel="stylesheet" type="text/css" href="%s" />\n' % css)
a('<title>Calendar for %d</title>\n' % theyear)
a('</head>\n')
a('<body>\n')
a(self.formatyear(theyear, width))
a('</body>\n')
a('</html>\n')
return ''.join(v).encode(encoding, "xmlcharrefreplace")
class different_locale:
def __init__(self, locale):
self.locale = locale
def __enter__(self):
self.oldlocale = _locale.getlocale(_locale.LC_TIME)
_locale.setlocale(_locale.LC_TIME, self.locale)
def __exit__(self, *args):
_locale.setlocale(_locale.LC_TIME, self.oldlocale)
class LocaleTextCalendar(TextCalendar):
"""
This class can be passed a locale name in the constructor and will return
month and weekday names in the specified locale. If this locale includes
an encoding all strings containing month and weekday names will be returned
as unicode.
"""
def __init__(self, firstweekday=0, locale=None):
TextCalendar.__init__(self, firstweekday)
if locale is None:
locale = _locale.getdefaultlocale()
self.locale = locale
def formatweekday(self, day, width):
with different_locale(self.locale):
if width >= 9:
names = day_name
else:
names = day_abbr
name = names[day]
return name[:width].center(width)
def formatmonthname(self, theyear, themonth, width, withyear=True):
with different_locale(self.locale):
s = month_name[themonth]
if withyear:
s = "%s %r" % (s, theyear)
return s.center(width)
class LocaleHTMLCalendar(HTMLCalendar):
"""
This class can be passed a locale name in the constructor and will return
month and weekday names in the specified locale. If this locale includes
an encoding all strings containing month and weekday names will be returned
as unicode.
"""
def __init__(self, firstweekday=0, locale=None):
HTMLCalendar.__init__(self, firstweekday)
if locale is None:
locale = _locale.getdefaultlocale()
self.locale = locale
def formatweekday(self, day):
with different_locale(self.locale):
s = day_abbr[day]
return '<th class="%s">%s</th>' % (self.cssclasses[day], s)
def formatmonthname(self, theyear, themonth, withyear=True):
with different_locale(self.locale):
s = month_name[themonth]
if withyear:
s = '%s %s' % (s, theyear)
return '<tr><th colspan="7" class="month">%s</th></tr>' % s
# Support for old module level interface
c = TextCalendar()
firstweekday = c.getfirstweekday
def setfirstweekday(firstweekday):
if not MONDAY <= firstweekday <= SUNDAY:
raise IllegalWeekdayError(firstweekday)
c.firstweekday = firstweekday
monthcalendar = c.monthdayscalendar
prweek = c.prweek
week = c.formatweek
weekheader = c.formatweekheader
prmonth = c.prmonth
month = c.formatmonth
calendar = c.formatyear
prcal = c.pryear
# Spacing of month columns for multi-column year calendar
_colwidth = 7*3 - 1 # Amount printed by prweek()
_spacing = 6 # Number of spaces between columns
def format(cols, colwidth=_colwidth, spacing=_spacing):
"""Prints multi-column formatting for year calendars"""
print(formatstring(cols, colwidth, spacing))
def formatstring(cols, colwidth=_colwidth, spacing=_spacing):
"""Returns a string formatted from n strings, centered within n columns."""
spacing *= ' '
return spacing.join(c.center(colwidth) for c in cols)
EPOCH = 1970
_EPOCH_ORD = datetime.date(EPOCH, 1, 1).toordinal()
def timegm(tuple):
"""Unrelated but handy function to calculate Unix timestamp from GMT."""
year, month, day, hour, minute, second = tuple[:6]
days = datetime.date(year, month, 1).toordinal() - _EPOCH_ORD + day - 1
hours = days*24 + hour
minutes = hours*60 + minute
seconds = minutes*60 + second
return seconds
def main(args):
import argparse
parser = argparse.ArgumentParser()
textgroup = parser.add_argument_group('text only arguments')
htmlgroup = parser.add_argument_group('html only arguments')
textgroup.add_argument(
"-w", "--width",
type=int, default=2,
help="width of date column (default 2)"
)
textgroup.add_argument(
"-l", "--lines",
type=int, default=1,
help="number of lines for each week (default 1)"
)
textgroup.add_argument(
"-s", "--spacing",
type=int, default=6,
help="spacing between months (default 6)"
)
textgroup.add_argument(
"-m", "--months",
type=int, default=3,
help="months per row (default 3)"
)
htmlgroup.add_argument(
"-c", "--css",
default="calendar.css",
help="CSS to use for page"
)
parser.add_argument(
"-L", "--locale",
default=None,
help="locale to be used from month and weekday names"
)
parser.add_argument(
"-e", "--encoding",
default=None,
help="encoding to use for output"
)
parser.add_argument(
"-t", "--type",
default="text",
choices=("text", "html"),
help="output type (text or html)"
)
parser.add_argument(
"year",
nargs='?', type=int,
help="year number (1-9999)"
)
parser.add_argument(
"month",
nargs='?', type=int,
help="month number (1-12, text only)"
)
options = parser.parse_args(args[1:])
if options.locale and not options.encoding:
parser.error("if --locale is specified --encoding is required")
sys.exit(1)
locale = options.locale, options.encoding
if options.type == "html":
if options.locale:
cal = LocaleHTMLCalendar(locale=locale)
else:
cal = HTMLCalendar()
encoding = options.encoding
if encoding is None:
encoding = sys.getdefaultencoding()
optdict = dict(encoding=encoding, css=options.css)
write = sys.stdout.buffer.write
if options.year is None:
write(cal.formatyearpage(datetime.date.today().year, **optdict))
elif options.month is None:
write(cal.formatyearpage(options.year, **optdict))
else:
parser.error("incorrect number of arguments")
sys.exit(1)
else:
if options.locale:
cal = LocaleTextCalendar(locale=locale)
else:
cal = TextCalendar()
optdict = dict(w=options.width, l=options.lines)
if options.month is None:
optdict["c"] = options.spacing
optdict["m"] = options.months
if options.year is None:
result = cal.formatyear(datetime.date.today().year, **optdict)
elif options.month is None:
result = cal.formatyear(options.year, **optdict)
else:
result = cal.formatmonth(options.year, options.month, **optdict)
write = sys.stdout.write
if options.encoding:
result = result.encode(options.encoding)
write = sys.stdout.buffer.write
write(result)
if __name__ == "__main__":
main(sys.argv)
| lgpl-2.1 | 7,043,734,647,202,540,000 | 31.556802 | 124 | 0.557302 | false |
linsalrob/EdwardsLab | phage_protein_blast_genera/tax_violin_plots.py | 1 | 2239 | """
"""
import os
import sys
import argparse
import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="")
parser.add_argument('-f', help='Genome average output file (from genera_per_phage_protein.py', default='/home/redwards/Desktop/gav_all_host.out')
parser.add_argument('-n', help='taxonomy name one of: kingdom / phylum / genus / species', default='genus')
parser.add_argument('-v', help='verbose output', action="store_true")
args = parser.parse_args()
ynames = {'kingdom' : 'kingdoms', 'phylum' : 'phyla', 'genus' : 'genera', 'species' : 'species'}
col = None
colkey = {'kingdom' : 3, 'phylum' : 4, 'genus' : 5, 'species' : 6}
if args.n not in colkey:
sys.stderr.write("Sorry, taxonomy name must be one of {}\n".format("|".join(list(colkey.keys()))))
sys.exit(-1)
col = colkey[args.n]
want = {'Gut', 'Mouth', 'Nose', 'Skin', 'Lungs'}
data = {}
with open(args.f, 'r') as fin:
for l in fin:
p=l.strip().split("\t")
if p[2] not in want:
p[2] = 'All phages'
#continue ## comment or uncomment this to include/exclude all data
if p[2] not in data:
data[p[2]] = []
data[p[2]].append(float(p[col]))
labels = sorted(data.keys())
scores = []
count = 1
ticks = []
for l in labels:
scores.append(data[l])
ticks.append(count)
count += 1
fig = plt.figure()
ax = fig.add_subplot(111)
# ax.boxplot(alldata)
vp = ax.violinplot(scores, showmeans=True)
for i, j in enumerate(vp['bodies']):
if i == 0:
j.set_color('gray')
elif i == 1:
j.set_color('sandybrown')
else:
j.set_color('lightpink')
ax.set_xlabel("Body Site")
ax.set_ylabel("Average number of {}".format(ynames[args.n]))
ax.set_xticks(ticks)
ax.set_xticklabels(labels, rotation='vertical')
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
fig.set_facecolor('white')
plt.tight_layout()
#plt.show()
fig.savefig("/home/redwards/Desktop/bodysites.png")
| mit | 952,300,114,054,625,500 | 28.853333 | 149 | 0.571237 | false |
euccas/CodingPuzzles-Python | leet/source/searchDFS/permutations.py | 1 | 1421 | class Solution():
def permute(self, nums):
if nums is None:
return [[]]
elif len(nums) <= 1:
return [nums]
result = []
for i, item in enumerate(nums):
#print("i={0}, item={1}".format(i, item))
            for p in self.permute(nums[:i] + nums[i + 1:]):
#print("p={0}, item={1}, append {2}".format(p, item, p + [item]))
result.append([item] + p)
#print("now result is ... {0}".format(result))
return result
class Solution1(object):
def permute(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
if nums is None:
return []
if len(nums) == 0:
return [[]]
self.result = []
visited = [False for i in nums]
self.dfs(nums, visited, [])
return self.result
def dfs(self, nums, visited, permutation):
if len(nums) == len(permutation):
self.result.append(permutation[:])
for i in range(0, len(nums)):
if visited[i] == True:
continue
permutation.append(nums[i])
visited[i] = True
self.dfs(nums, visited, permutation)
visited[i] = False
permutation.pop()
if __name__ == "__main__":
sln = Solution1()
result = sln.permute([1, 5, 9])
print(result)
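    # expected: the 6 permutations of [1, 5, 9], e.g. [[1, 5, 9], [1, 9, 5], ...]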
| mit | 405,264,178,974,588,860 | 26.326923 | 81 | 0.474314 | false |
ganeti-github-testing/ganeti-test-1 | lib/client/gnt_instance.py | 1 | 62250 | #
#
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2014 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Instance related commands"""
# pylint: disable=W0401,W0614,C0103
# W0401: Wildcard import ganeti.cli
# W0614: Unused import %s from wildcard import (since we need cli)
# C0103: Invalid name gnt-instance
import copy
import itertools
import simplejson
import logging
from ganeti.cli import *
from ganeti import opcodes
from ganeti import constants
from ganeti import compat
from ganeti import utils
from ganeti import errors
from ganeti import netutils
from ganeti import ssh
from ganeti import objects
from ganeti import ht
_EXPAND_CLUSTER = "cluster"
_EXPAND_NODES_BOTH = "nodes"
_EXPAND_NODES_PRI = "nodes-pri"
_EXPAND_NODES_SEC = "nodes-sec"
_EXPAND_NODES_BOTH_BY_TAGS = "nodes-by-tags"
_EXPAND_NODES_PRI_BY_TAGS = "nodes-pri-by-tags"
_EXPAND_NODES_SEC_BY_TAGS = "nodes-sec-by-tags"
_EXPAND_INSTANCES = "instances"
_EXPAND_INSTANCES_BY_TAGS = "instances-by-tags"
_EXPAND_NODES_TAGS_MODES = compat.UniqueFrozenset([
_EXPAND_NODES_BOTH_BY_TAGS,
_EXPAND_NODES_PRI_BY_TAGS,
_EXPAND_NODES_SEC_BY_TAGS,
])
#: default list of options for L{ListInstances}
_LIST_DEF_FIELDS = [
"name", "hypervisor", "os", "pnode", "status", "oper_ram",
]
_MISSING = object()
_ENV_OVERRIDE = compat.UniqueFrozenset(["list"])
_INST_DATA_VAL = ht.TListOf(ht.TDict)
def _ExpandMultiNames(mode, names, client=None):
"""Expand the given names using the passed mode.
For _EXPAND_CLUSTER, all instances will be returned. For
_EXPAND_NODES_PRI/SEC, all instances having those nodes as
primary/secondary will be returned. For _EXPAND_NODES_BOTH, all
instances having those nodes as either primary or secondary will be
returned. For _EXPAND_INSTANCES, the given instances will be
returned.
@param mode: one of L{_EXPAND_CLUSTER}, L{_EXPAND_NODES_BOTH},
L{_EXPAND_NODES_PRI}, L{_EXPAND_NODES_SEC} or
L{_EXPAND_INSTANCES}
@param names: a list of names; for cluster, it must be empty,
and for node and instance it must be a list of valid item
names (short names are valid as usual, e.g. node1 instead of
node1.example.com)
@rtype: list
@return: the list of names after the expansion
@raise errors.ProgrammerError: for unknown selection type
@raise errors.OpPrereqError: for invalid input parameters
"""
# pylint: disable=W0142
if client is None:
client = GetClient()
if mode == _EXPAND_CLUSTER:
if names:
raise errors.OpPrereqError("Cluster filter mode takes no arguments",
errors.ECODE_INVAL)
idata = client.QueryInstances([], ["name"], False)
inames = [row[0] for row in idata]
elif (mode in _EXPAND_NODES_TAGS_MODES or
mode in (_EXPAND_NODES_BOTH, _EXPAND_NODES_PRI, _EXPAND_NODES_SEC)):
if mode in _EXPAND_NODES_TAGS_MODES:
if not names:
raise errors.OpPrereqError("No node tags passed", errors.ECODE_INVAL)
ndata = client.QueryNodes([], ["name", "pinst_list",
"sinst_list", "tags"], False)
ndata = [row for row in ndata if set(row[3]).intersection(names)]
else:
if not names:
raise errors.OpPrereqError("No node names passed", errors.ECODE_INVAL)
ndata = client.QueryNodes(names, ["name", "pinst_list", "sinst_list"],
False)
ipri = [row[1] for row in ndata]
pri_names = list(itertools.chain(*ipri))
isec = [row[2] for row in ndata]
sec_names = list(itertools.chain(*isec))
if mode in (_EXPAND_NODES_BOTH, _EXPAND_NODES_BOTH_BY_TAGS):
inames = pri_names + sec_names
elif mode in (_EXPAND_NODES_PRI, _EXPAND_NODES_PRI_BY_TAGS):
inames = pri_names
elif mode in (_EXPAND_NODES_SEC, _EXPAND_NODES_SEC_BY_TAGS):
inames = sec_names
else:
raise errors.ProgrammerError("Unhandled shutdown type")
elif mode == _EXPAND_INSTANCES:
if not names:
raise errors.OpPrereqError("No instance names passed",
errors.ECODE_INVAL)
idata = client.QueryInstances(names, ["name"], False)
inames = [row[0] for row in idata]
elif mode == _EXPAND_INSTANCES_BY_TAGS:
if not names:
raise errors.OpPrereqError("No instance tags passed",
errors.ECODE_INVAL)
idata = client.QueryInstances([], ["name", "tags"], False)
inames = [row[0] for row in idata if set(row[1]).intersection(names)]
else:
raise errors.OpPrereqError("Unknown mode '%s'" % mode, errors.ECODE_INVAL)
return inames
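# Illustrative expansions (assumptions about typical output, for clarity only):
#   _ExpandMultiNames(_EXPAND_CLUSTER, [])           -> names of all instances
#   _ExpandMultiNames(_EXPAND_NODES_PRI, ["node1"])  -> instances whose primary node is node1
#   _ExpandMultiNames(_EXPAND_INSTANCES, ["inst1"])  -> ["inst1"], if the instance exists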
def _EnsureInstancesExist(client, names):
"""Check for and ensure the given instance names exist.
This function will raise an OpPrereqError in case they don't
exist. Otherwise it will exit cleanly.
@type client: L{ganeti.luxi.Client}
@param client: the client to use for the query
@type names: list
@param names: the list of instance names to query
@raise errors.OpPrereqError: in case any instance is missing
"""
  # TODO: change LUInstanceQuery so that it actually returns None
# instead of raising an exception, or devise a better mechanism
result = client.QueryInstances(names, ["name"], False)
for orig_name, row in zip(names, result):
if row[0] is None:
raise errors.OpPrereqError("Instance '%s' does not exist" % orig_name,
errors.ECODE_NOENT)
def GenericManyOps(operation, fn):
"""Generic multi-instance operations.
  This will return a wrapper that processes the options and arguments
given, and uses the passed function to build the opcode needed for
the specific operation. Thus all the generic loop/confirmation code
is abstracted into this function.
"""
def realfn(opts, args):
if opts.multi_mode is None:
opts.multi_mode = _EXPAND_INSTANCES
cl = GetClient()
inames = _ExpandMultiNames(opts.multi_mode, args, client=cl)
if not inames:
if opts.multi_mode == _EXPAND_CLUSTER:
ToStdout("Cluster is empty, no instances to shutdown")
return 0
raise errors.OpPrereqError("Selection filter does not match"
" any instances", errors.ECODE_INVAL)
multi_on = opts.multi_mode != _EXPAND_INSTANCES or len(inames) > 1
if not (opts.force_multi or not multi_on
or ConfirmOperation(inames, "instances", operation)):
return 1
jex = JobExecutor(verbose=multi_on, cl=cl, opts=opts)
for name in inames:
op = fn(name, opts)
jex.QueueJob(name, op)
results = jex.WaitOrShow(not opts.submit_only)
rcode = compat.all(row[0] for row in results)
return int(not rcode)
return realfn
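# Illustrative wiring (assumption; the actual command table appears later in this
# module): a multi-instance shutdown command could be built as
#   shutdown_fn = GenericManyOps("shutdown", _ShutdownInstance)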
def ListInstances(opts, args):
"""List instances and their properties.
@param opts: the command line options selected by the user
@type args: list
@param args: should be an empty list
@rtype: int
@return: the desired exit code
"""
selected_fields = ParseFields(opts.output, _LIST_DEF_FIELDS)
fmtoverride = dict.fromkeys(["tags", "disk.sizes", "nic.macs", "nic.ips",
"nic.modes", "nic.links", "nic.bridges",
"nic.networks",
"snodes", "snodes.group", "snodes.group.uuid"],
(lambda value: ",".join(str(item)
for item in value),
False))
cl = GetClient()
return GenericList(constants.QR_INSTANCE, selected_fields, args, opts.units,
opts.separator, not opts.no_headers,
format_override=fmtoverride, verbose=opts.verbose,
force_filter=opts.force_filter, cl=cl)
def ListInstanceFields(opts, args):
"""List instance fields.
@param opts: the command line options selected by the user
@type args: list
@param args: fields to list, or empty for all
@rtype: int
@return: the desired exit code
"""
return GenericListFields(constants.QR_INSTANCE, args, opts.separator,
not opts.no_headers)
def AddInstance(opts, args):
"""Add an instance to the cluster.
This is just a wrapper over L{GenericInstanceCreate}.
"""
return GenericInstanceCreate(constants.INSTANCE_CREATE, opts, args)
def BatchCreate(opts, args):
"""Create instances using a definition file.
This function reads a json file with L{opcodes.OpInstanceCreate}
serialisations.
@param opts: the command line options selected by the user
@type args: list
@param args: should contain one element, the json filename
@rtype: int
@return: the desired exit code
"""
(json_filename,) = args
cl = GetClient()
try:
instance_data = simplejson.loads(utils.ReadFile(json_filename))
except Exception, err: # pylint: disable=W0703
ToStderr("Can't parse the instance definition file: %s" % str(err))
return 1
if not _INST_DATA_VAL(instance_data):
ToStderr("The instance definition file is not %s" % _INST_DATA_VAL)
return 1
instances = []
possible_params = set(opcodes.OpInstanceCreate.GetAllSlots())
for (idx, inst) in enumerate(instance_data):
unknown = set(inst.keys()) - possible_params
if unknown:
# TODO: Suggest closest match for more user friendly experience
raise errors.OpPrereqError("Unknown fields in definition %s: %s" %
(idx, utils.CommaJoin(unknown)),
errors.ECODE_INVAL)
op = opcodes.OpInstanceCreate(**inst) # pylint: disable=W0142
op.Validate(False)
instances.append(op)
op = opcodes.OpInstanceMultiAlloc(iallocator=opts.iallocator,
instances=instances)
result = SubmitOrSend(op, opts, cl=cl)
# Keep track of submitted jobs
jex = JobExecutor(cl=cl, opts=opts)
for (status, job_id) in result[constants.JOB_IDS_KEY]:
jex.AddJobId(None, status, job_id)
results = jex.GetResults()
bad_cnt = len([row for row in results if not row[0]])
if bad_cnt == 0:
ToStdout("All instances created successfully.")
rcode = constants.EXIT_SUCCESS
else:
ToStdout("There were %s errors during the creation.", bad_cnt)
rcode = constants.EXIT_FAILURE
return rcode
def ReinstallInstance(opts, args):
"""Reinstall an instance.
@param opts: the command line options selected by the user
@type args: list
@param args: should contain only one element, the name of the
instance to be reinstalled
@rtype: int
@return: the desired exit code
"""
# first, compute the desired name list
if opts.multi_mode is None:
opts.multi_mode = _EXPAND_INSTANCES
inames = _ExpandMultiNames(opts.multi_mode, args)
if not inames:
raise errors.OpPrereqError("Selection filter does not match any instances",
errors.ECODE_INVAL)
# second, if requested, ask for an OS
if opts.select_os is True:
op = opcodes.OpOsDiagnose(output_fields=["name", "variants"], names=[])
result = SubmitOpCode(op, opts=opts)
if not result:
ToStdout("Can't get the OS list")
return 1
ToStdout("Available OS templates:")
number = 0
choices = []
for (name, variants) in result:
for entry in CalculateOSNames(name, variants):
ToStdout("%3s: %s", number, entry)
choices.append(("%s" % number, entry, entry))
number += 1
choices.append(("x", "exit", "Exit gnt-instance reinstall"))
selected = AskUser("Enter OS template number (or x to abort):",
choices)
if selected == "exit":
ToStderr("User aborted reinstall, exiting")
return 1
os_name = selected
os_msg = "change the OS to '%s'" % selected
else:
os_name = opts.os
if opts.os is not None:
os_msg = "change the OS to '%s'" % os_name
else:
os_msg = "keep the same OS"
# third, get confirmation: multi-reinstall requires --force-multi,
# single-reinstall either --force or --force-multi (--force-multi is
# a stronger --force)
multi_on = opts.multi_mode != _EXPAND_INSTANCES or len(inames) > 1
if multi_on:
warn_msg = ("Note: this will remove *all* data for the"
" below instances! It will %s.\n" % os_msg)
if not (opts.force_multi or
ConfirmOperation(inames, "instances", "reinstall", extra=warn_msg)):
return 1
else:
if not (opts.force or opts.force_multi):
usertext = ("This will reinstall the instance '%s' (and %s) which"
" removes all data. Continue?") % (inames[0], os_msg)
if not AskUser(usertext):
return 1
jex = JobExecutor(verbose=multi_on, opts=opts)
for instance_name in inames:
op = opcodes.OpInstanceReinstall(instance_name=instance_name,
os_type=os_name,
force_variant=opts.force_variant,
osparams=opts.osparams,
osparams_private=opts.osparams_private,
osparams_secret=opts.osparams_secret)
jex.QueueJob(instance_name, op)
results = jex.WaitOrShow(not opts.submit_only)
if compat.all(map(compat.fst, results)):
return constants.EXIT_SUCCESS
else:
return constants.EXIT_FAILURE
def RemoveInstance(opts, args):
"""Remove an instance.
@param opts: the command line options selected by the user
@type args: list
@param args: should contain only one element, the name of
the instance to be removed
@rtype: int
@return: the desired exit code
"""
instance_name = args[0]
force = opts.force
cl = GetClient()
if not force:
_EnsureInstancesExist(cl, [instance_name])
usertext = ("This will remove the volumes of the instance %s"
" (including mirrors), thus removing all the data"
" of the instance. Continue?") % instance_name
if not AskUser(usertext):
return 1
op = opcodes.OpInstanceRemove(instance_name=instance_name,
ignore_failures=opts.ignore_failures,
shutdown_timeout=opts.shutdown_timeout)
SubmitOrSend(op, opts, cl=cl)
return 0
def RenameInstance(opts, args):
"""Rename an instance.
@param opts: the command line options selected by the user
@type args: list
@param args: should contain two elements, the old and the
new instance names
@rtype: int
@return: the desired exit code
"""
if not opts.name_check:
if not AskUser("As you disabled the check of the DNS entry, please verify"
" that '%s' is a FQDN. Continue?" % args[1]):
return 1
op = opcodes.OpInstanceRename(instance_name=args[0],
new_name=args[1],
ip_check=opts.ip_check,
name_check=opts.name_check)
result = SubmitOrSend(op, opts)
if result:
ToStdout("Instance '%s' renamed to '%s'", args[0], result)
return 0
def ActivateDisks(opts, args):
"""Activate an instance's disks.
This serves two purposes:
- it allows (as long as the instance is not running)
mounting the disks and modifying them from the node
- it repairs inactive secondary drbds
@param opts: the command line options selected by the user
@type args: list
@param args: should contain only one element, the instance name
@rtype: int
@return: the desired exit code
"""
instance_name = args[0]
op = opcodes.OpInstanceActivateDisks(instance_name=instance_name,
ignore_size=opts.ignore_size,
wait_for_sync=opts.wait_for_sync)
disks_info = SubmitOrSend(op, opts)
for host, iname, nname in disks_info:
ToStdout("%s:%s:%s", host, iname, nname)
return 0
def DeactivateDisks(opts, args):
"""Deactivate an instance's disks.
This function takes the instance name, looks for its primary node
  and then tries to shut down its block devices on that node.
@param opts: the command line options selected by the user
@type args: list
@param args: should contain only one element, the instance name
@rtype: int
@return: the desired exit code
"""
instance_name = args[0]
op = opcodes.OpInstanceDeactivateDisks(instance_name=instance_name,
force=opts.force)
SubmitOrSend(op, opts)
return 0
def RecreateDisks(opts, args):
"""Recreate an instance's disks.
@param opts: the command line options selected by the user
@type args: list
@param args: should contain only one element, the instance name
@rtype: int
@return: the desired exit code
"""
instance_name = args[0]
disks = []
if opts.disks:
for didx, ddict in opts.disks:
didx = int(didx)
if not ht.TDict(ddict):
msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
if constants.IDISK_SIZE in ddict:
try:
ddict[constants.IDISK_SIZE] = \
utils.ParseUnit(ddict[constants.IDISK_SIZE])
except ValueError, err:
raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
(didx, err), errors.ECODE_INVAL)
if constants.IDISK_SPINDLES in ddict:
try:
ddict[constants.IDISK_SPINDLES] = \
int(ddict[constants.IDISK_SPINDLES])
except ValueError, err:
raise errors.OpPrereqError("Invalid spindles for disk %d: %s" %
(didx, err), errors.ECODE_INVAL)
disks.append((didx, ddict))
# TODO: Verify modifyable parameters (already done in
# LUInstanceRecreateDisks, but it'd be nice to have in the client)
if opts.node:
if opts.iallocator:
msg = "At most one of either --nodes or --iallocator can be passed"
raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
pnode, snode = SplitNodeOption(opts.node)
nodes = [pnode]
if snode is not None:
nodes.append(snode)
else:
nodes = []
op = opcodes.OpInstanceRecreateDisks(instance_name=instance_name,
disks=disks, nodes=nodes,
iallocator=opts.iallocator)
SubmitOrSend(op, opts)
return 0
def GrowDisk(opts, args):
"""Grow an instance's disks.
@param opts: the command line options selected by the user
@type args: list
@param args: should contain three elements, the target instance name,
the target disk id, and the target growth
@rtype: int
@return: the desired exit code
"""
instance = args[0]
disk = args[1]
try:
disk = int(disk)
except (TypeError, ValueError), err:
raise errors.OpPrereqError("Invalid disk index: %s" % str(err),
errors.ECODE_INVAL)
try:
amount = utils.ParseUnit(args[2])
except errors.UnitParseError:
raise errors.OpPrereqError("Can't parse the given amount '%s'" % args[2],
errors.ECODE_INVAL)
op = opcodes.OpInstanceGrowDisk(instance_name=instance,
disk=disk, amount=amount,
wait_for_sync=opts.wait_for_sync,
absolute=opts.absolute)
SubmitOrSend(op, opts)
return 0
def _StartupInstance(name, opts):
"""Startup instances.
This returns the opcode to start an instance, and its decorator will
wrap this into a loop starting all desired instances.
@param name: the name of the instance to act on
@param opts: the command line options selected by the user
@return: the opcode needed for the operation
"""
op = opcodes.OpInstanceStartup(instance_name=name,
force=opts.force,
ignore_offline_nodes=opts.ignore_offline,
no_remember=opts.no_remember,
startup_paused=opts.startup_paused)
# do not add these parameters to the opcode unless they're defined
if opts.hvparams:
op.hvparams = opts.hvparams
if opts.beparams:
op.beparams = opts.beparams
return op
def _RebootInstance(name, opts):
"""Reboot instance(s).
This returns the opcode to reboot an instance, and its decorator
will wrap this into a loop rebooting all desired instances.
@param name: the name of the instance to act on
@param opts: the command line options selected by the user
@return: the opcode needed for the operation
"""
return opcodes.OpInstanceReboot(instance_name=name,
reboot_type=opts.reboot_type,
ignore_secondaries=opts.ignore_secondaries,
shutdown_timeout=opts.shutdown_timeout)
def _ShutdownInstance(name, opts):
"""Shutdown an instance.
This returns the opcode to shutdown an instance, and its decorator
will wrap this into a loop shutting down all desired instances.
@param name: the name of the instance to act on
@param opts: the command line options selected by the user
@return: the opcode needed for the operation
"""
return opcodes.OpInstanceShutdown(instance_name=name,
force=opts.force,
timeout=opts.timeout,
ignore_offline_nodes=opts.ignore_offline,
no_remember=opts.no_remember)
def ReplaceDisks(opts, args):
"""Replace the disks of an instance
@param opts: the command line options selected by the user
@type args: list
@param args: should contain only one element, the instance name
@rtype: int
@return: the desired exit code
"""
new_2ndary = opts.dst_node
iallocator = opts.iallocator
if opts.disks is None:
disks = []
else:
try:
disks = [int(i) for i in opts.disks.split(",")]
except (TypeError, ValueError), err:
raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
errors.ECODE_INVAL)
cnt = [opts.on_primary, opts.on_secondary, opts.auto,
new_2ndary is not None, iallocator is not None].count(True)
if cnt != 1:
raise errors.OpPrereqError("One and only one of the -p, -s, -a, -n and -I"
" options must be passed", errors.ECODE_INVAL)
elif opts.on_primary:
mode = constants.REPLACE_DISK_PRI
elif opts.on_secondary:
mode = constants.REPLACE_DISK_SEC
elif opts.auto:
mode = constants.REPLACE_DISK_AUTO
if disks:
raise errors.OpPrereqError("Cannot specify disks when using automatic"
" mode", errors.ECODE_INVAL)
elif new_2ndary is not None or iallocator is not None:
# replace secondary
mode = constants.REPLACE_DISK_CHG
op = opcodes.OpInstanceReplaceDisks(instance_name=args[0], disks=disks,
remote_node=new_2ndary, mode=mode,
iallocator=iallocator,
early_release=opts.early_release,
ignore_ipolicy=opts.ignore_ipolicy)
SubmitOrSend(op, opts)
return 0
def FailoverInstance(opts, args):
"""Failover an instance.
The failover is done by shutting it down on its present node and
starting it on the secondary.
@param opts: the command line options selected by the user
@type args: list
@param args: should contain only one element, the instance name
@rtype: int
@return: the desired exit code
"""
cl = GetClient()
instance_name = args[0]
force = opts.force
iallocator = opts.iallocator
target_node = opts.dst_node
if iallocator and target_node:
raise errors.OpPrereqError("Specify either an iallocator (-I), or a target"
" node (-n) but not both", errors.ECODE_INVAL)
if not force:
_EnsureInstancesExist(cl, [instance_name])
usertext = ("Failover will happen to image %s."
" This requires a shutdown of the instance. Continue?" %
(instance_name,))
if not AskUser(usertext):
return 1
op = opcodes.OpInstanceFailover(instance_name=instance_name,
ignore_consistency=opts.ignore_consistency,
shutdown_timeout=opts.shutdown_timeout,
iallocator=iallocator,
target_node=target_node,
ignore_ipolicy=opts.ignore_ipolicy)
SubmitOrSend(op, opts, cl=cl)
return 0
def MigrateInstance(opts, args):
"""Migrate an instance.
The migrate is done without shutdown.
@param opts: the command line options selected by the user
@type args: list
@param args: should contain only one element, the instance name
@rtype: int
@return: the desired exit code
"""
cl = GetClient()
instance_name = args[0]
force = opts.force
iallocator = opts.iallocator
target_node = opts.dst_node
if iallocator and target_node:
raise errors.OpPrereqError("Specify either an iallocator (-I), or a target"
" node (-n) but not both", errors.ECODE_INVAL)
if not force:
_EnsureInstancesExist(cl, [instance_name])
if opts.cleanup:
usertext = ("Instance %s will be recovered from a failed migration."
" Note that the migration procedure (including cleanup)" %
(instance_name,))
else:
usertext = ("Instance %s will be migrated. Note that migration" %
(instance_name,))
usertext += (" might impact the instance if anything goes wrong"
" (e.g. due to bugs in the hypervisor). Continue?")
if not AskUser(usertext):
return 1
# this should be removed once --non-live is deprecated
if not opts.live and opts.migration_mode is not None:
raise errors.OpPrereqError("Only one of the --non-live and "
"--migration-mode options can be passed",
errors.ECODE_INVAL)
if not opts.live: # --non-live passed
mode = constants.HT_MIGRATION_NONLIVE
else:
mode = opts.migration_mode
op = opcodes.OpInstanceMigrate(instance_name=instance_name, mode=mode,
cleanup=opts.cleanup, iallocator=iallocator,
target_node=target_node,
allow_failover=opts.allow_failover,
allow_runtime_changes=opts.allow_runtime_chgs,
ignore_ipolicy=opts.ignore_ipolicy,
ignore_hvversions=opts.ignore_hvversions)
SubmitOrSend(op, cl=cl, opts=opts)
return 0
def MoveInstance(opts, args):
"""Move an instance.
@param opts: the command line options selected by the user
@type args: list
@param args: should contain only one element, the instance name
@rtype: int
@return: the desired exit code
"""
cl = GetClient()
instance_name = args[0]
force = opts.force
if not force:
usertext = ("Instance %s will be moved."
" This requires a shutdown of the instance. Continue?" %
(instance_name,))
if not AskUser(usertext):
return 1
op = opcodes.OpInstanceMove(instance_name=instance_name,
target_node=opts.node,
compress=opts.compress,
shutdown_timeout=opts.shutdown_timeout,
ignore_consistency=opts.ignore_consistency,
ignore_ipolicy=opts.ignore_ipolicy)
SubmitOrSend(op, opts, cl=cl)
return 0
def ConnectToInstanceConsole(opts, args):
"""Connect to the console of an instance.
@param opts: the command line options selected by the user
@type args: list
@param args: should contain only one element, the instance name
@rtype: int
@return: the desired exit code
"""
instance_name = args[0]
cl = GetClient()
try:
cluster_name = cl.QueryConfigValues(["cluster_name"])[0]
idata = cl.QueryInstances([instance_name], ["console", "oper_state"], False)
if not idata:
raise errors.OpPrereqError("Instance '%s' does not exist" % instance_name,
errors.ECODE_NOENT)
finally:
# Ensure client connection is closed while external commands are run
cl.Close()
del cl
((console_data, oper_state), ) = idata
if not console_data:
if oper_state:
# Instance is running
raise errors.OpExecError("Console information for instance %s is"
" unavailable" % instance_name)
else:
raise errors.OpExecError("Instance %s is not running, can't get console" %
instance_name)
return _DoConsole(objects.InstanceConsole.FromDict(console_data),
opts.show_command, cluster_name)
def _DoConsole(console, show_command, cluster_name, feedback_fn=ToStdout,
_runcmd_fn=utils.RunCmd):
"""Acts based on the result of L{opcodes.OpInstanceConsole}.
@type console: L{objects.InstanceConsole}
@param console: Console object
@type show_command: bool
@param show_command: Whether to just display commands
@type cluster_name: string
@param cluster_name: Cluster name as retrieved from master daemon
"""
console.Validate()
if console.kind == constants.CONS_MESSAGE:
feedback_fn(console.message)
elif console.kind == constants.CONS_VNC:
feedback_fn("Instance %s has VNC listening on %s:%s (display %s),"
" URL <vnc://%s:%s/>",
console.instance, console.host, console.port,
console.display, console.host, console.port)
elif console.kind == constants.CONS_SPICE:
feedback_fn("Instance %s has SPICE listening on %s:%s", console.instance,
console.host, console.port)
elif console.kind == constants.CONS_SSH:
# Convert to string if not already one
if isinstance(console.command, basestring):
cmd = console.command
else:
cmd = utils.ShellQuoteArgs(console.command)
srun = ssh.SshRunner(cluster_name=cluster_name)
ssh_cmd = srun.BuildCmd(console.host, console.user, cmd,
port=console.port,
batch=True, quiet=False, tty=True)
if show_command:
feedback_fn(utils.ShellQuoteArgs(ssh_cmd))
else:
result = _runcmd_fn(ssh_cmd, interactive=True)
if result.failed:
logging.error("Console command \"%s\" failed with reason '%s' and"
" output %r", result.cmd, result.fail_reason,
result.output)
raise errors.OpExecError("Connection to console of instance %s failed,"
" please check cluster configuration" %
console.instance)
else:
raise errors.GenericError("Unknown console type '%s'" % console.kind)
return constants.EXIT_SUCCESS
def _FormatDiskDetails(dev_type, dev, roman):
"""Formats the logical_id of a disk.
"""
if dev_type == constants.DT_DRBD8:
drbd_info = dev["drbd_info"]
data = [
("nodeA", "%s, minor=%s" %
(drbd_info["primary_node"],
compat.TryToRoman(drbd_info["primary_minor"],
convert=roman))),
("nodeB", "%s, minor=%s" %
(drbd_info["secondary_node"],
compat.TryToRoman(drbd_info["secondary_minor"],
convert=roman))),
("port", str(compat.TryToRoman(drbd_info["port"], roman))),
("auth key", str(drbd_info["secret"])),
]
elif dev_type == constants.DT_PLAIN:
vg_name, lv_name = dev["logical_id"]
data = ["%s/%s" % (vg_name, lv_name)]
else:
data = [str(dev["logical_id"])]
return data
def _FormatBlockDevInfo(idx, top_level, dev, roman):
"""Show block device information.
This is only used by L{ShowInstanceConfig}, but it's too big to be
left for an inline definition.
@type idx: int
@param idx: the index of the current disk
@type top_level: boolean
@param top_level: if this a top-level disk?
@type dev: dict
@param dev: dictionary with disk information
@type roman: boolean
@param roman: whether to try to use roman integers
@return: a list of either strings, tuples or lists
(which should be formatted at a higher indent level)
"""
def helper(dtype, status):
"""Format one line for physical device status.
@type dtype: str
@param dtype: a constant from the L{constants.DTS_BLOCK} set
@type status: tuple
@param status: a tuple as returned from L{backend.FindBlockDevice}
@return: the string representing the status
"""
if not status:
return "not active"
txt = ""
(path, major, minor, syncp, estt, degr, ldisk_status) = status
if major is None:
major_string = "N/A"
else:
major_string = str(compat.TryToRoman(major, convert=roman))
if minor is None:
minor_string = "N/A"
else:
minor_string = str(compat.TryToRoman(minor, convert=roman))
txt += ("%s (%s:%s)" % (path, major_string, minor_string))
if dtype in (constants.DT_DRBD8, ):
if syncp is not None:
sync_text = "*RECOVERING* %5.2f%%," % syncp
if estt:
sync_text += " ETA %ss" % compat.TryToRoman(estt, convert=roman)
else:
sync_text += " ETA unknown"
else:
sync_text = "in sync"
if degr:
degr_text = "*DEGRADED*"
else:
degr_text = "ok"
if ldisk_status == constants.LDS_FAULTY:
ldisk_text = " *MISSING DISK*"
elif ldisk_status == constants.LDS_UNKNOWN:
ldisk_text = " *UNCERTAIN STATE*"
else:
ldisk_text = ""
txt += (" %s, status %s%s" % (sync_text, degr_text, ldisk_text))
elif dtype == constants.DT_PLAIN:
if ldisk_status == constants.LDS_FAULTY:
ldisk_text = " *FAILED* (failed drive?)"
else:
ldisk_text = ""
txt += ldisk_text
return txt
# the header
if top_level:
if dev["iv_name"] is not None:
txt = dev["iv_name"]
else:
txt = "disk %s" % compat.TryToRoman(idx, convert=roman)
else:
txt = "child %s" % compat.TryToRoman(idx, convert=roman)
if isinstance(dev["size"], int):
nice_size = utils.FormatUnit(dev["size"], "h", roman)
else:
nice_size = str(dev["size"])
data = [(txt, "%s, size %s" % (dev["dev_type"], nice_size))]
if top_level:
if dev["spindles"] is not None:
data.append(("spindles", dev["spindles"]))
data.append(("access mode", dev["mode"]))
if dev["logical_id"] is not None:
try:
l_id = _FormatDiskDetails(dev["dev_type"], dev, roman)
except ValueError:
l_id = [str(dev["logical_id"])]
if len(l_id) == 1:
data.append(("logical_id", l_id[0]))
else:
data.extend(l_id)
if dev["pstatus"]:
data.append(("on primary", helper(dev["dev_type"], dev["pstatus"])))
if dev["sstatus"]:
data.append(("on secondary", helper(dev["dev_type"], dev["sstatus"])))
data.append(("name", dev["name"]))
data.append(("UUID", dev["uuid"]))
if dev["children"]:
data.append(("child devices", [
_FormatBlockDevInfo(c_idx, False, child, roman)
for c_idx, child in enumerate(dev["children"])
]))
return data
def _FormatInstanceNicInfo(idx, nic, roman=False):
"""Helper function for L{_FormatInstanceInfo()}"""
(name, uuid, ip, mac, mode, link, vlan, _, netinfo) = nic
network_name = None
if netinfo:
network_name = netinfo["name"]
return [
("nic/%s" % str(compat.TryToRoman(idx, roman)), ""),
("MAC", str(mac)),
("IP", str(ip)),
("mode", str(mode)),
("link", str(link)),
("vlan", str(compat.TryToRoman(vlan, roman))),
("network", str(network_name)),
("UUID", str(uuid)),
("name", str(name)),
]
def _FormatInstanceNodesInfo(instance):
"""Helper function for L{_FormatInstanceInfo()}"""
pgroup = ("%s (UUID %s)" %
(instance["pnode_group_name"], instance["pnode_group_uuid"]))
secs = utils.CommaJoin(("%s (group %s, group UUID %s)" %
(name, group_name, group_uuid))
for (name, group_name, group_uuid) in
zip(instance["snodes"],
instance["snodes_group_names"],
instance["snodes_group_uuids"]))
return [
[
("primary", instance["pnode"]),
("group", pgroup),
],
[("secondaries", secs)],
]
def _GetVncConsoleInfo(instance):
"""Helper function for L{_FormatInstanceInfo()}"""
vnc_bind_address = instance["hv_actual"].get(constants.HV_VNC_BIND_ADDRESS,
None)
if vnc_bind_address:
port = instance["network_port"]
display = int(port) - constants.VNC_BASE_PORT
if display > 0 and vnc_bind_address == constants.IP4_ADDRESS_ANY:
vnc_console_port = "%s:%s (display %s)" % (instance["pnode"],
port,
display)
elif display > 0 and netutils.IP4Address.IsValid(vnc_bind_address):
vnc_console_port = ("%s:%s (node %s) (display %s)" %
(vnc_bind_address, port,
instance["pnode"], display))
else:
# vnc bind address is a file
vnc_console_port = "%s:%s" % (instance["pnode"],
vnc_bind_address)
ret = "vnc to %s" % vnc_console_port
else:
ret = None
return ret
def _FormatInstanceInfo(instance, roman_integers):
"""Format instance information for L{cli.PrintGenericInfo()}"""
istate = "configured to be %s" % instance["config_state"]
if instance["run_state"]:
istate += ", actual state is %s" % instance["run_state"]
info = [
("Instance name", instance["name"]),
("UUID", instance["uuid"]),
("Serial number",
str(compat.TryToRoman(instance["serial_no"], convert=roman_integers))),
("Creation time", utils.FormatTime(instance["ctime"])),
("Modification time", utils.FormatTime(instance["mtime"])),
("State", istate),
("Nodes", _FormatInstanceNodesInfo(instance)),
("Operating system", instance["os"]),
("Operating system parameters",
FormatParamsDictInfo(instance["os_instance"], instance["os_actual"],
roman_integers)),
]
if "network_port" in instance:
info.append(("Allocated network port",
str(compat.TryToRoman(instance["network_port"],
convert=roman_integers))))
info.append(("Hypervisor", instance["hypervisor"]))
console = _GetVncConsoleInfo(instance)
if console:
info.append(("console connection", console))
# deprecated "memory" value, kept for one version for compatibility
# TODO(ganeti 2.7) remove.
be_actual = copy.deepcopy(instance["be_actual"])
be_actual["memory"] = be_actual[constants.BE_MAXMEM]
info.extend([
("Hypervisor parameters",
FormatParamsDictInfo(instance["hv_instance"], instance["hv_actual"],
roman_integers)),
("Back-end parameters",
FormatParamsDictInfo(instance["be_instance"], be_actual,
roman_integers)),
("NICs", [
_FormatInstanceNicInfo(idx, nic, roman_integers)
for (idx, nic) in enumerate(instance["nics"])
]),
("Disk template", instance["disk_template"]),
("Disks", [
_FormatBlockDevInfo(idx, True, device, roman_integers)
for (idx, device) in enumerate(instance["disks"])
]),
])
return info
def ShowInstanceConfig(opts, args):
"""Compute instance run-time status.
@param opts: the command line options selected by the user
@type args: list
@param args: either an empty list, and then we query all
instances, or should contain a list of instance names
@rtype: int
@return: the desired exit code
"""
if not args and not opts.show_all:
ToStderr("No instance selected."
" Please pass in --all if you want to query all instances.\n"
"Note that this can take a long time on a big cluster.")
return 1
elif args and opts.show_all:
ToStderr("Cannot use --all if you specify instance names.")
return 1
retcode = 0
op = opcodes.OpInstanceQueryData(instances=args, static=opts.static,
use_locking=not opts.static)
result = SubmitOpCode(op, opts=opts)
if not result:
ToStdout("No instances.")
return 1
PrintGenericInfo([
_FormatInstanceInfo(instance, opts.roman_integers)
for instance in result.values()
])
return retcode
def _ConvertNicDiskModifications(mods):
"""Converts NIC/disk modifications from CLI to opcode.
When L{opcodes.OpInstanceSetParams} was changed to support adding/removing
disks at arbitrary indices, its parameter format changed. This function
converts legacy requests (e.g. "--net add" or "--disk add:size=4G") to the
newer format and adds support for new-style requests (e.g. "--new 4:add").
@type mods: list of tuples
@param mods: Modifications as given by command line parser
@rtype: list of tuples
@return: Modifications as understood by L{opcodes.OpInstanceSetParams}
"""
result = []
for (identifier, params) in mods:
if identifier == constants.DDM_ADD:
# Add item as last item (legacy interface)
action = constants.DDM_ADD
identifier = -1
elif identifier == constants.DDM_REMOVE:
# Remove last item (legacy interface)
action = constants.DDM_REMOVE
identifier = -1
else:
# Modifications and adding/removing at arbitrary indices
add = params.pop(constants.DDM_ADD, _MISSING)
remove = params.pop(constants.DDM_REMOVE, _MISSING)
modify = params.pop(constants.DDM_MODIFY, _MISSING)
if modify is _MISSING:
if not (add is _MISSING or remove is _MISSING):
raise errors.OpPrereqError("Cannot add and remove at the same time",
errors.ECODE_INVAL)
elif add is not _MISSING:
action = constants.DDM_ADD
elif remove is not _MISSING:
action = constants.DDM_REMOVE
else:
action = constants.DDM_MODIFY
elif add is _MISSING and remove is _MISSING:
action = constants.DDM_MODIFY
else:
raise errors.OpPrereqError("Cannot modify and add/remove at the"
" same time", errors.ECODE_INVAL)
assert not (constants.DDMS_VALUES_WITH_MODIFY & set(params.keys()))
if action == constants.DDM_REMOVE and params:
raise errors.OpPrereqError("Not accepting parameters on removal",
errors.ECODE_INVAL)
result.append((action, identifier, params))
return result
def _ParseExtStorageParams(params):
"""Parses the disk params for ExtStorage conversions.
"""
if params:
if constants.IDISK_PROVIDER not in params:
raise errors.OpPrereqError("Missing required parameter '%s' when"
" converting to an ExtStorage disk template" %
constants.IDISK_PROVIDER, errors.ECODE_INVAL)
else:
for param in params.keys():
if (param != constants.IDISK_PROVIDER and
param in constants.IDISK_PARAMS):
raise errors.OpPrereqError("Invalid parameter '%s' when converting"
" to an ExtStorage template (it is not"
" allowed modifying existing disk"
" parameters)" % param, errors.ECODE_INVAL)
return params
def _ParseDiskSizes(mods):
"""Parses disk sizes in parameters.
"""
for (action, _, params) in mods:
if params and constants.IDISK_SPINDLES in params:
params[constants.IDISK_SPINDLES] = \
int(params[constants.IDISK_SPINDLES])
if params and constants.IDISK_SIZE in params:
params[constants.IDISK_SIZE] = \
utils.ParseUnit(params[constants.IDISK_SIZE])
elif action == constants.DDM_ADD:
raise errors.OpPrereqError("Missing required parameter 'size'",
errors.ECODE_INVAL)
return mods
def SetInstanceParams(opts, args):
"""Modifies an instance.
All parameters take effect only at the next restart of the instance.
@param opts: the command line options selected by the user
@type args: list
@param args: should contain only one element, the instance name
@rtype: int
@return: the desired exit code
"""
if not (opts.nics or opts.disks or opts.disk_template or opts.hvparams or
opts.beparams or opts.os or opts.osparams or opts.osparams_private
or opts.offline_inst or opts.online_inst or opts.runtime_mem or
opts.new_primary_node or opts.instance_communication is not None):
ToStderr("Please give at least one of the parameters.")
return 1
for param in opts.beparams:
if isinstance(opts.beparams[param], basestring):
if opts.beparams[param].lower() == "default":
opts.beparams[param] = constants.VALUE_DEFAULT
utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT,
allowed_values=[constants.VALUE_DEFAULT])
for param in opts.hvparams:
if isinstance(opts.hvparams[param], basestring):
if opts.hvparams[param].lower() == "default":
opts.hvparams[param] = constants.VALUE_DEFAULT
utils.ForceDictType(opts.hvparams, constants.HVS_PARAMETER_TYPES,
allowed_values=[constants.VALUE_DEFAULT])
FixHvParams(opts.hvparams)
nics = _ConvertNicDiskModifications(opts.nics)
for action, _, __ in nics:
if action == constants.DDM_MODIFY and opts.hotplug and not opts.force:
usertext = ("You are about to hot-modify a NIC. This will be done"
" by removing the existing NIC and then adding a new one."
" Network connection might be lost. Continue?")
if not AskUser(usertext):
return 1
disks = _ParseDiskSizes(_ConvertNicDiskModifications(opts.disks))
# verify the user provided parameters for disk template conversions
if opts.disk_template:
if (not opts.node and
opts.disk_template in constants.DTS_INT_MIRROR):
ToStderr("Changing the disk template to a mirrored one requires"
" specifying a secondary node")
return 1
elif (opts.ext_params and
opts.disk_template != constants.DT_EXT):
ToStderr("Specifying ExtStorage parameters requires converting"
" to the '%s' disk template" % constants.DT_EXT)
return 1
elif (not opts.ext_params and
opts.disk_template == constants.DT_EXT):
ToStderr("Provider option is missing, use either the"
" '--ext-params' or '-e' option")
return 1
if ((opts.file_driver or
opts.file_storage_dir) and
not opts.disk_template in constants.DTS_FILEBASED):
ToStderr("Specifying file-based configuration arguments requires"
" converting to a file-based disk template")
return 1
ext_params = _ParseExtStorageParams(opts.ext_params)
if opts.offline_inst:
offline = True
elif opts.online_inst:
offline = False
else:
offline = None
instance_comm = opts.instance_communication
op = opcodes.OpInstanceSetParams(instance_name=args[0],
nics=nics,
disks=disks,
hotplug=opts.hotplug,
hotplug_if_possible=opts.hotplug_if_possible,
disk_template=opts.disk_template,
ext_params=ext_params,
file_driver=opts.file_driver,
file_storage_dir=opts.file_storage_dir,
remote_node=opts.node,
pnode=opts.new_primary_node,
hvparams=opts.hvparams,
beparams=opts.beparams,
runtime_mem=opts.runtime_mem,
os_name=opts.os,
osparams=opts.osparams,
osparams_private=opts.osparams_private,
force_variant=opts.force_variant,
force=opts.force,
wait_for_sync=opts.wait_for_sync,
offline=offline,
conflicts_check=opts.conflicts_check,
ignore_ipolicy=opts.ignore_ipolicy,
instance_communication=instance_comm)
# even if here we process the result, we allow submit only
result = SubmitOrSend(op, opts)
if result:
ToStdout("Modified instance %s", args[0])
for param, data in result:
ToStdout(" - %-5s -> %s", param, data)
ToStdout("Please don't forget that most parameters take effect"
" only at the next (re)start of the instance initiated by"
" ganeti; restarting from within the instance will"
" not be enough.")
if opts.hvparams:
ToStdout("Note that changing hypervisor parameters without performing a"
" restart might lead to a crash while performing a live"
" migration. This will be addressed in future Ganeti versions.")
return 0
def ChangeGroup(opts, args):
"""Moves an instance to another group.
"""
(instance_name, ) = args
cl = GetClient()
op = opcodes.OpInstanceChangeGroup(instance_name=instance_name,
iallocator=opts.iallocator,
target_groups=opts.to,
early_release=opts.early_release)
result = SubmitOrSend(op, opts, cl=cl)
# Keep track of submitted jobs
jex = JobExecutor(cl=cl, opts=opts)
for (status, job_id) in result[constants.JOB_IDS_KEY]:
jex.AddJobId(None, status, job_id)
results = jex.GetResults()
bad_cnt = len([row for row in results if not row[0]])
if bad_cnt == 0:
ToStdout("Instance '%s' changed group successfully.", instance_name)
rcode = constants.EXIT_SUCCESS
else:
ToStdout("There were %s errors while changing group of instance '%s'.",
bad_cnt, instance_name)
rcode = constants.EXIT_FAILURE
return rcode
# multi-instance selection options
m_force_multi = cli_option("--force-multiple", dest="force_multi",
help="Do not ask for confirmation when more than"
" one instance is affected",
action="store_true", default=False)
m_pri_node_opt = cli_option("--primary", dest="multi_mode",
help="Filter by nodes (primary only)",
const=_EXPAND_NODES_PRI, action="store_const")
m_sec_node_opt = cli_option("--secondary", dest="multi_mode",
help="Filter by nodes (secondary only)",
const=_EXPAND_NODES_SEC, action="store_const")
m_node_opt = cli_option("--node", dest="multi_mode",
help="Filter by nodes (primary and secondary)",
const=_EXPAND_NODES_BOTH, action="store_const")
m_clust_opt = cli_option("--all", dest="multi_mode",
help="Select all instances in the cluster",
const=_EXPAND_CLUSTER, action="store_const")
m_inst_opt = cli_option("--instance", dest="multi_mode",
help="Filter by instance name [default]",
const=_EXPAND_INSTANCES, action="store_const")
m_node_tags_opt = cli_option("--node-tags", dest="multi_mode",
help="Filter by node tag",
const=_EXPAND_NODES_BOTH_BY_TAGS,
action="store_const")
m_pri_node_tags_opt = cli_option("--pri-node-tags", dest="multi_mode",
help="Filter by primary node tag",
const=_EXPAND_NODES_PRI_BY_TAGS,
action="store_const")
m_sec_node_tags_opt = cli_option("--sec-node-tags", dest="multi_mode",
help="Filter by secondary node tag",
const=_EXPAND_NODES_SEC_BY_TAGS,
action="store_const")
m_inst_tags_opt = cli_option("--tags", dest="multi_mode",
help="Filter by instance tag",
const=_EXPAND_INSTANCES_BY_TAGS,
action="store_const")
# this is defined separately for readability only
add_opts = [
NOSTART_OPT,
OS_OPT,
FORCE_VARIANT_OPT,
NO_INSTALL_OPT,
IGNORE_IPOLICY_OPT,
INSTANCE_COMMUNICATION_OPT,
HELPER_STARTUP_TIMEOUT_OPT,
HELPER_SHUTDOWN_TIMEOUT_OPT,
]
commands = {
"add": (
AddInstance, [ArgHost(min=1, max=1)],
COMMON_CREATE_OPTS + add_opts,
"[...] -t disk-type -n node[:secondary-node] -o os-type <name>",
"Creates and adds a new instance to the cluster"),
"batch-create": (
BatchCreate, [ArgFile(min=1, max=1)],
[DRY_RUN_OPT, PRIORITY_OPT, IALLOCATOR_OPT] + SUBMIT_OPTS,
"<instances.json>",
"Create a bunch of instances based on specs in the file."),
"console": (
ConnectToInstanceConsole, ARGS_ONE_INSTANCE,
[SHOWCMD_OPT, PRIORITY_OPT],
"[--show-cmd] <instance>", "Opens a console on the specified instance"),
"failover": (
FailoverInstance, ARGS_ONE_INSTANCE,
[FORCE_OPT, IGNORE_CONSIST_OPT] + SUBMIT_OPTS +
[SHUTDOWN_TIMEOUT_OPT,
DRY_RUN_OPT, PRIORITY_OPT, DST_NODE_OPT, IALLOCATOR_OPT,
IGNORE_IPOLICY_OPT, CLEANUP_OPT],
"[-f] <instance>", "Stops the instance, changes its primary node and"
" (if it was originally running) starts it on the new node"
" (the secondary for mirrored instances or any node"
" for shared storage)."),
"migrate": (
MigrateInstance, ARGS_ONE_INSTANCE,
[FORCE_OPT, NONLIVE_OPT, MIGRATION_MODE_OPT, CLEANUP_OPT, DRY_RUN_OPT,
PRIORITY_OPT, DST_NODE_OPT, IALLOCATOR_OPT, ALLOW_FAILOVER_OPT,
IGNORE_IPOLICY_OPT, IGNORE_HVVERSIONS_OPT, NORUNTIME_CHGS_OPT]
+ SUBMIT_OPTS,
"[-f] <instance>", "Migrate instance to its secondary node"
" (only for mirrored instances)"),
"move": (
MoveInstance, ARGS_ONE_INSTANCE,
[FORCE_OPT] + SUBMIT_OPTS +
[SINGLE_NODE_OPT, COMPRESS_OPT,
SHUTDOWN_TIMEOUT_OPT, DRY_RUN_OPT, PRIORITY_OPT, IGNORE_CONSIST_OPT,
IGNORE_IPOLICY_OPT],
"[-f] <instance>", "Move instance to an arbitrary node"
" (only for instances of type file and lv)"),
"info": (
ShowInstanceConfig, ARGS_MANY_INSTANCES,
[STATIC_OPT, ALL_OPT, ROMAN_OPT, PRIORITY_OPT],
"[-s] {--all | <instance>...}",
"Show information on the specified instance(s)"),
"list": (
ListInstances, ARGS_MANY_INSTANCES,
[NOHDR_OPT, SEP_OPT, USEUNITS_OPT, FIELDS_OPT, VERBOSE_OPT,
FORCE_FILTER_OPT],
"[<instance>...]",
"Lists the instances and their status. The available fields can be shown"
" using the \"list-fields\" command (see the man page for details)."
" The default field list is (in order): %s." %
utils.CommaJoin(_LIST_DEF_FIELDS),
),
"list-fields": (
ListInstanceFields, [ArgUnknown()],
[NOHDR_OPT, SEP_OPT],
"[fields...]",
"Lists all available fields for instances"),
"reinstall": (
ReinstallInstance, [ArgInstance()],
[FORCE_OPT, OS_OPT, FORCE_VARIANT_OPT, m_force_multi, m_node_opt,
m_pri_node_opt, m_sec_node_opt, m_clust_opt, m_inst_opt, m_node_tags_opt,
m_pri_node_tags_opt, m_sec_node_tags_opt, m_inst_tags_opt, SELECT_OS_OPT]
+ SUBMIT_OPTS + [DRY_RUN_OPT, PRIORITY_OPT, OSPARAMS_OPT,
OSPARAMS_PRIVATE_OPT, OSPARAMS_SECRET_OPT],
"[-f] <instance>", "Reinstall a stopped instance"),
"remove": (
RemoveInstance, ARGS_ONE_INSTANCE,
[FORCE_OPT, SHUTDOWN_TIMEOUT_OPT, IGNORE_FAILURES_OPT] + SUBMIT_OPTS
+ [DRY_RUN_OPT, PRIORITY_OPT],
"[-f] <instance>", "Shuts down the instance and removes it"),
"rename": (
RenameInstance,
[ArgInstance(min=1, max=1), ArgHost(min=1, max=1)],
[NOIPCHECK_OPT, NONAMECHECK_OPT] + SUBMIT_OPTS
+ [DRY_RUN_OPT, PRIORITY_OPT],
"<instance> <new_name>", "Rename the instance"),
"replace-disks": (
ReplaceDisks, ARGS_ONE_INSTANCE,
[AUTO_REPLACE_OPT, DISKIDX_OPT, IALLOCATOR_OPT, EARLY_RELEASE_OPT,
NEW_SECONDARY_OPT, ON_PRIMARY_OPT, ON_SECONDARY_OPT] + SUBMIT_OPTS
+ [DRY_RUN_OPT, PRIORITY_OPT, IGNORE_IPOLICY_OPT],
"[-s|-p|-a|-n NODE|-I NAME] <instance>",
"Replaces disks for the instance"),
"modify": (
SetInstanceParams, ARGS_ONE_INSTANCE,
[BACKEND_OPT, DISK_OPT, FORCE_OPT, HVOPTS_OPT, NET_OPT] + SUBMIT_OPTS +
[DISK_TEMPLATE_OPT, SINGLE_NODE_OPT, OS_OPT, FORCE_VARIANT_OPT,
OSPARAMS_OPT, OSPARAMS_PRIVATE_OPT, DRY_RUN_OPT, PRIORITY_OPT, NWSYNC_OPT,
OFFLINE_INST_OPT, ONLINE_INST_OPT, IGNORE_IPOLICY_OPT, RUNTIME_MEM_OPT,
NOCONFLICTSCHECK_OPT, NEW_PRIMARY_OPT, HOTPLUG_OPT,
HOTPLUG_IF_POSSIBLE_OPT, INSTANCE_COMMUNICATION_OPT,
EXT_PARAMS_OPT, FILESTORE_DRIVER_OPT, FILESTORE_DIR_OPT],
"<instance>", "Alters the parameters of an instance"),
"shutdown": (
GenericManyOps("shutdown", _ShutdownInstance), [ArgInstance()],
[FORCE_OPT, m_node_opt, m_pri_node_opt, m_sec_node_opt, m_clust_opt,
m_node_tags_opt, m_pri_node_tags_opt, m_sec_node_tags_opt,
m_inst_tags_opt, m_inst_opt, m_force_multi, TIMEOUT_OPT] + SUBMIT_OPTS
+ [DRY_RUN_OPT, PRIORITY_OPT, IGNORE_OFFLINE_OPT, NO_REMEMBER_OPT],
"<instance>", "Stops an instance"),
"startup": (
GenericManyOps("startup", _StartupInstance), [ArgInstance()],
[FORCE_OPT, m_force_multi, m_node_opt, m_pri_node_opt, m_sec_node_opt,
m_node_tags_opt, m_pri_node_tags_opt, m_sec_node_tags_opt,
m_inst_tags_opt, m_clust_opt, m_inst_opt] + SUBMIT_OPTS +
[HVOPTS_OPT,
BACKEND_OPT, DRY_RUN_OPT, PRIORITY_OPT, IGNORE_OFFLINE_OPT,
NO_REMEMBER_OPT, STARTUP_PAUSED_OPT],
"<instance>", "Starts an instance"),
"reboot": (
GenericManyOps("reboot", _RebootInstance), [ArgInstance()],
[m_force_multi, REBOOT_TYPE_OPT, IGNORE_SECONDARIES_OPT, m_node_opt,
m_pri_node_opt, m_sec_node_opt, m_clust_opt, m_inst_opt] + SUBMIT_OPTS +
[m_node_tags_opt, m_pri_node_tags_opt, m_sec_node_tags_opt,
m_inst_tags_opt, SHUTDOWN_TIMEOUT_OPT, DRY_RUN_OPT, PRIORITY_OPT],
"<instance>", "Reboots an instance"),
"activate-disks": (
ActivateDisks, ARGS_ONE_INSTANCE,
SUBMIT_OPTS + [IGNORE_SIZE_OPT, PRIORITY_OPT, WFSYNC_OPT],
"<instance>", "Activate an instance's disks"),
"deactivate-disks": (
DeactivateDisks, ARGS_ONE_INSTANCE,
[FORCE_OPT] + SUBMIT_OPTS + [DRY_RUN_OPT, PRIORITY_OPT],
"[-f] <instance>", "Deactivate an instance's disks"),
"recreate-disks": (
RecreateDisks, ARGS_ONE_INSTANCE,
SUBMIT_OPTS +
[DISK_OPT, NODE_PLACEMENT_OPT, DRY_RUN_OPT, PRIORITY_OPT,
IALLOCATOR_OPT],
"<instance>", "Recreate an instance's disks"),
"grow-disk": (
GrowDisk,
[ArgInstance(min=1, max=1), ArgUnknown(min=1, max=1),
ArgUnknown(min=1, max=1)],
SUBMIT_OPTS + [NWSYNC_OPT, DRY_RUN_OPT, PRIORITY_OPT, ABSOLUTE_OPT],
"<instance> <disk> <size>", "Grow an instance's disk"),
"change-group": (
ChangeGroup, ARGS_ONE_INSTANCE,
[TO_GROUP_OPT, IALLOCATOR_OPT, EARLY_RELEASE_OPT, PRIORITY_OPT]
+ SUBMIT_OPTS,
"[-I <iallocator>] [--to <group>]", "Change group of instance"),
"list-tags": (
ListTags, ARGS_ONE_INSTANCE, [],
"<instance_name>", "List the tags of the given instance"),
"add-tags": (
AddTags, [ArgInstance(min=1, max=1), ArgUnknown()],
[TAG_SRC_OPT, PRIORITY_OPT] + SUBMIT_OPTS,
"<instance_name> tag...", "Add tags to the given instance"),
"remove-tags": (
RemoveTags, [ArgInstance(min=1, max=1), ArgUnknown()],
[TAG_SRC_OPT, PRIORITY_OPT] + SUBMIT_OPTS,
"<instance_name> tag...", "Remove tags from given instance"),
}
#: dictionary with aliases for commands
aliases = {
"start": "startup",
"stop": "shutdown",
"show": "info",
}
def Main():
return GenericMain(commands, aliases=aliases,
override={"tag_type": constants.TAG_INSTANCE},
env_override=_ENV_OVERRIDE)
| bsd-2-clause | 8,941,289,388,210,791,000 | 35.149826 | 80 | 0.618715 | false |
bmazin/ARCONS-pipeline | fluxcal/fluxCal.py | 1 | 29931 | #!/bin/python
'''
fluxCal.py
Created by Seth Meeker on 11-21-2012
Modified on 02-16-2015 to perform absolute fluxCal with point sources
Opens ARCONS observation of a spectrophotometric standard star and
associated wavelength cal file, reads in all photons and converts to energies.
Bins photons to generate a spectrum, then divides this into the known spectrum
of the object to create a Sensitivity curve. This curve is then written out to
h5 file.
Flags are associated with each pixel - see headers/pipelineFlags
for descriptions. Note some flags are set here, others are set
later on when creating photon lists.
'''
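# Typical invocation (illustrative; the parameter-file path is an example):
#   python fluxCal.py /path/to/params/fluxCal.dict
# or, from another script:
#   fc = FluxCal('/path/to/params/fluxCal.dict', plots=True, verbose=True)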
import sys,os
import tables
import numpy as np
from scipy import interpolate
from scipy.optimize.minpack import curve_fit
import matplotlib.pyplot as plt
from photometry import LightCurve
from util.FileName import FileName
from util.ObsFile import ObsFile
from util import MKIDStd
from util.readDict import readDict
from util.utils import rebin
from util.utils import gaussianConvolution
from util.utils import makeMovie
from util.utils import fitBlackbody
import hotpix.hotPixels as hp
from scipy.optimize.minpack import curve_fit
from scipy import interpolate
import matplotlib
from matplotlib.backends.backend_pdf import PdfPages
from headers import pipelineFlags
import figureHeader
class FluxCal:
def __init__(self,paramFile,plots=False,verbose=False):
"""
Opens flux file, prepares standard spectrum, and calculates flux factors for the file.
Method is provided in param file. If 'relative' is selected, an obs file with standard star defocused over
the entire array is expected, with accompanying sky file to do sky subtraction.
If any other method is provided, 'absolute' will be done by default, wherein a point source is assumed
to be present. The obs file is then broken into spectral frames with photometry (psf or aper) performed
on each frame to generate the ARCONS observed spectrum.
"""
self.verbose=verbose
self.plots = plots
self.params = readDict()
self.params.read_from_file(paramFile)
run = self.params['run']
sunsetDate = self.params['fluxSunsetLocalDate']
self.fluxTstamp = self.params['fluxTimestamp']
skyTstamp = self.params['skyTimestamp']
wvlSunsetDate = self.params['wvlCalSunsetLocalDate']
wvlTimestamp = self.params['wvlCalTimestamp']
flatCalFileName = self.params['flatCalFileName']
needTimeAdjust = self.params['needTimeAdjust']
self.deadtime = float(self.params['deadtime']) #from firmware pulse detection
self.timeSpacingCut = self.params['timeSpacingCut']
bLoadBeammap = self.params.get('bLoadBeammap',False)
self.method = self.params['method']
self.objectName = self.params['object']
self.r = float(self.params['energyResolution'])
self.photometry = self.params['photometry']
self.centroidRow = self.params['centroidRow']
self.centroidCol = self.params['centroidCol']
self.aperture = self.params['apertureRad']
self.annulusInner = self.params['annulusInner']
self.annulusOuter = self.params['annulusOuter']
self.collectingArea = self.params['collectingArea']
self.startTime = self.params['startTime']
self.intTime = self.params['integrationTime']
fluxFN = FileName(run=run,date=sunsetDate,tstamp=self.fluxTstamp)
self.fluxFileName = fluxFN.obs()
self.fluxFile = ObsFile(self.fluxFileName)
if self.plots:
self.plotSavePath = os.environ['MKID_PROC_PATH']+os.sep+'fluxCalSolnFiles'+os.sep+run+os.sep+sunsetDate+os.sep+'plots'+os.sep
if not os.path.exists(self.plotSavePath): os.mkdir(self.plotSavePath)
if self.verbose: print "Created directory %s"%self.plotSavePath
obsFNs = [fluxFN]
self.obsList = [self.fluxFile]
if self.startTime in ['',None]: self.startTime=0
if self.intTime in ['',None]: self.intTime=-1
if self.method=="relative":
try:
print "performing Relative Flux Calibration"
skyFN = FileName(run=run,date=sunsetDate,tstamp=skyTstamp)
self.skyFileName = skyFN.obs()
self.skyFile = ObsFile(self.skyFileName)
obsFNs.append(skyFN)
self.obsList.append(self.skyFile)
except:
print "For relative flux calibration a sky file must be provided in param file"
self.__del__()
else:
self.method='absolute'
print "performing Absolute Flux Calibration"
if self.photometry not in ['aperture','PSF']: self.photometry='PSF' #default to PSF fitting if no valid photometry selected
timeMaskFileNames = [fn.timeMask() for fn in obsFNs]
timeAdjustFileName = FileName(run=run).timeAdjustments()
#make filename for output fluxCalSoln file
self.fluxCalFileName = FileName(run=run,date=sunsetDate,tstamp=self.fluxTstamp).fluxSoln()
print "Creating flux cal: %s"%self.fluxCalFileName
if wvlSunsetDate != '':
wvlCalFileName = FileName(run=run,date=wvlSunsetDate,tstamp=wvlTimestamp).calSoln()
if flatCalFileName =='':
flatCalFileName=FileName(obsFile=self.fluxFile).flatSoln()
#load cal files for flux file and, if necessary, sky file
for iObs,obs in enumerate(self.obsList):
if bLoadBeammap:
print 'loading beammap',os.environ['MKID_BEAMMAP_PATH']
obs.loadBeammapFile(os.environ['MKID_BEAMMAP_PATH'])
if wvlSunsetDate != '':
obs.loadWvlCalFile(wvlCalFileName)
else:
obs.loadBestWvlCalFile()
obs.loadFlatCalFile(flatCalFileName)
obs.setWvlCutoffs(-1,-1)
if needTimeAdjust:
obs.loadTimeAdjustmentFile(timeAdjustFileName)
timeMaskFileName = timeMaskFileNames[iObs]
print timeMaskFileName
if not os.path.exists(timeMaskFileName):
print 'Running hotpix for ',obs
hp.findHotPixels(obsFile=obs,outputFileName=timeMaskFileName,fwhm=np.inf,useLocalStdDev=True)
print "Flux cal/sky file pixel mask saved to %s"%(timeMaskFileName)
obs.loadHotPixCalFile(timeMaskFileName)
if self.verbose: print "Loaded hot pixel file %s"%timeMaskFileName
#get flat cal binning information since flux cal will need to match it
self.wvlBinEdges = self.fluxFile.flatCalFile.root.flatcal.wavelengthBins.read()
self.nWvlBins = self.fluxFile.flatWeights.shape[2]
self.binWidths = np.empty((self.nWvlBins),dtype=float)
self.binCenters = np.empty((self.nWvlBins),dtype=float)
for i in xrange(self.nWvlBins):
self.binWidths[i] = self.wvlBinEdges[i+1]-self.wvlBinEdges[i]
self.binCenters[i] = (self.wvlBinEdges[i]+(self.binWidths[i]/2.0))
if self.method=='relative':
print "Extracting ARCONS flux and sky spectra"
self.loadRelativeSpectrum()
print "Flux Spectrum loaded"
self.loadSkySpectrum()
print "Sky Spectrum loaded"
elif self.method=='absolute':
print "Extracting ARCONS point source spectrum"
self.loadAbsoluteSpectrum()
print "Loading standard spectrum"
try:
self.loadStdSpectrum(self.objectName)
except KeyError:
print "Invalid spectrum object name"
self.__del__()
sys.exit()
print "Generating sensitivity curve"
self.calculateFactors()
print "Sensitivity Curve calculated"
print "Writing fluxCal to file %s"%self.fluxCalFileName
self.writeFactors(self.fluxCalFileName)
if self.plots: self.makePlots()
print "Done"
def __del__(self):
try:
self.fluxFile.close()
self.calFile.close()
except AttributeError:#fluxFile was never defined
pass
def getDeadTimeCorrection(self, obs): #WRONG RIGHT NOW. NEEDS TO HAVE RAW COUNTS SUMMED, NOT CUBE WHICH EXCLUDES NOISE TAIL
if self.verbose: print "Making raw cube to get dead time correction"
cubeDict = obs.getSpectralCube(firstSec=self.startTime, integrationTime=self.intTime, weighted=False, fluxWeighted=False)
cube= np.array(cubeDict['cube'], dtype=np.double)
wvlBinEdges= cubeDict['wvlBinEdges']
effIntTime= cubeDict['effIntTime']
if self.verbose: print "median effective integration time = ", np.median(effIntTime)
nWvlBins=len(wvlBinEdges)-1
if self.verbose: print "cube shape ", np.shape(cube)
if self.verbose: print "effIntTime shape ", np.shape(effIntTime)
#add third dimension to effIntTime for broadcasting
effIntTime = np.reshape(effIntTime,np.shape(effIntTime)+(1,))
#put cube into counts/s in each pixel
cube /= effIntTime
#CALCULATE DEADTIME CORRECTION
#NEED TOTAL COUNTS PER SECOND FOR EACH PIXEL TO DO PROPERLY
        #ASSUMES SAME CORRECTION FACTOR APPLIED FOR EACH WAVELENGTH, MEANING NO WL DEPENDENCE ON DEAD TIME EFFECT
DTCorr = np.zeros((np.shape(cube)[0],np.shape(cube)[1]),dtype=float)
for f in range(0,np.shape(cube)[2]):
#if self.verbose: print cube[:,:,f]
#if self.verbose: print '-----------------------'
DTCorr += cube[:,:,f]
#if self.verbose: print DTCorr
#if self.verbose: print '\n=====================\n'
#Correct for firmware dead time (100us in 2012 ARCONS firmware)
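        # Non-paralyzable dead-time model: true rate = measured rate / (1 - measured rate * deadtime),
        # applied per pixel with the same factor for every wavelength bin.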
DTCorrNew=DTCorr/(1-DTCorr*self.deadtime)
CorrFactors = DTCorrNew/DTCorr #This is what the frames need to be multiplied by to get their true values
if self.verbose: print "Dead time correction factors: ", CorrFactors
#add third dimension to CorrFactors for broadcasting
CorrFactors = np.reshape(CorrFactors,np.shape(CorrFactors)+(1,))
return CorrFactors
def loadAbsoluteSpectrum(self):
'''
extract the ARCONS measured spectrum of the spectrophotometric standard by breaking data into spectral cube
and performing photometry (aper or psf) on each spectral frame
'''
if self.verbose:print "Making spectral cube"
cubeDict = self.fluxFile.getSpectralCube(firstSec=self.startTime, integrationTime=self.intTime, weighted=True, fluxWeighted=False)
cube= np.array(cubeDict['cube'], dtype=np.double)
effIntTime= cubeDict['effIntTime']
if self.verbose: print "median effective integration time in flux file cube = ", np.median(effIntTime)
if self.verbose: print "cube shape ", np.shape(cube)
if self.verbose: print "effIntTime shape ", np.shape(effIntTime)
#add third dimension to effIntTime for broadcasting
effIntTime = np.reshape(effIntTime,np.shape(effIntTime)+(1,))
#put cube into counts/s in each pixel
cube /= effIntTime
#get dead time correction factors
DTCorr = self.getDeadTimeCorrection(self.fluxFile)
cube*=DTCorr #cube now in units of counts/s and corrected for dead time
if self.plots and not 'figureHeader' in sys.modules:
if self.verbose: print "Saving spectral frames as movie..."
movieCube = np.zeros((self.nWvlBins,np.shape(cube)[0],np.shape(cube)[1]),dtype=float)
for i in xrange(self.nWvlBins):
movieCube[i,:,:] = cube[:,:,i]
makeMovie(movieCube,frameTitles=self.binCenters,cbar=True,outName=self.plotSavePath+'FluxCal_Cube_%s.gif'%(self.objectName), normMin=0, normMax=50)
if self.verbose: print "Movie saved in %s"%self.plotSavePath
        LCplot=False #light curve pop-ups not compatible with FluxCal plotting 2/18/15
#if self.photometry=='PSF': LCplot = False
LC = LightCurve.LightCurve(verbose=self.verbose, showPlot=LCplot)
self.fluxSpectrum=np.empty((self.nWvlBins),dtype=float)
self.skySpectrum=np.zeros((self.nWvlBins),dtype=float)
for i in xrange(self.nWvlBins):
frame = cube[:,:,i]
if self.verbose: print "%s photometry on frame %i of cube, central wvl = %f Angstroms"%(self.photometry,i,self.binCenters[i])
if self.photometry == 'aperture':
fDict = LC.performPhotometry(self.photometry,frame,[[self.centroidCol,self.centroidRow]],expTime=None,aper_radius = self.aperture, annulus_inner = self.annulusInner, annulus_outer = self.annulusOuter, interpolation="linear")
self.fluxSpectrum[i] = fDict['flux']
self.skySpectrum[i] = fDict['skyFlux']
print "Sky estimate = ", fDict['skyFlux']
else:
fDict = LC.performPhotometry(self.photometry,frame,[[self.centroidCol,self.centroidRow]],expTime=None,aper_radius = self.aperture)
self.fluxSpectrum[i] = fDict['flux']
self.fluxSpectrum=self.fluxSpectrum/self.binWidths/self.collectingArea #spectrum now in counts/s/Angs/cm^2
self.skySpectrum=self.skySpectrum/self.binWidths/self.collectingArea
return self.fluxSpectrum, self.skySpectrum
def loadRelativeSpectrum(self):
self.fluxSpectra = [[[] for i in xrange(self.nCol)] for j in xrange(self.nRow)]
self.fluxEffTime = [[[] for i in xrange(self.nCol)] for j in xrange(self.nRow)]
for iRow in xrange(self.nRow):
for iCol in xrange(self.nCol):
count = self.fluxFile.getPixelCount(iRow,iCol)
fluxDict = self.fluxFile.getPixelSpectrum(iRow,iCol,weighted=True,firstSec=0,integrationTime=-1)
self.fluxSpectra[iRow][iCol],self.fluxEffTime[iRow][iCol] = fluxDict['spectrum'],fluxDict['effIntTime']
self.fluxSpectra = np.array(self.fluxSpectra)
self.fluxEffTime = np.array(self.fluxEffTime)
DTCorr = self.getDeadTimeCorrection(self.fluxFile)
#print "Bin widths = ",self.binWidths
self.fluxSpectra = self.fluxSpectra/self.binWidths/self.fluxEffTime*DTCorr
        self.fluxSpectrum = self.calculateMedian(self.fluxSpectra) #find median of flat cal'd flux spectra across whole array
return self.fluxSpectrum
def loadSkySpectrum(self):
self.skySpectra = [[[] for i in xrange(self.nCol)] for j in xrange(self.nRow)]
self.skyEffTime = [[[] for i in xrange(self.nCol)] for j in xrange(self.nRow)]
for iRow in xrange(self.nRow):
for iCol in xrange(self.nCol):
count = self.skyFile.getPixelCount(iRow,iCol)
skyDict = self.skyFile.getPixelSpectrum(iRow,iCol,weighted=True,firstSec=0,integrationTime=-1)
self.skySpectra[iRow][iCol],self.skyEffTime[iRow][iCol] = skyDict['spectrum'],skyDict['effIntTime']
self.skySpectra = np.array(self.skySpectra)
self.skyEffTime = np.array(self.skyEffTime)
DTCorr = self.getDeadTimeCorrection(self.skyFile)
self.skySpectra = self.skySpectra/self.binWidths/self.skyEffTime*DTCorr
self.skySpectrum = self.calculateMedian(self.skySpectra) #find median of subtracted spectra across whole array
return self.skySpectrum
def loadStdSpectrum(self, objectName="G158-100"):
#import the known spectrum of the calibrator and rebin to the histogram parameters given
#must be imported into array with dtype float so division later does not have error
std = MKIDStd.MKIDStd()
a = std.load(objectName)
a = std.countsToErgs(a) #convert std spectrum to ergs/s/Angs/cm^2 for BB fitting and cleaning
self.stdWvls = np.array(a[:,0])
self.stdFlux = np.array(a[:,1]) #std object spectrum in ergs/s/Angs/cm^2
if self.plots:
#create figure for plotting standard spectrum modifications
self.stdFig = plt.figure()
self.stdAx = self.stdFig.add_subplot(111)
plt.xlim(3500,12000)
plt.plot(self.stdWvls,self.stdFlux*1E15,linewidth=1,color='grey',alpha=0.75)
convX_rev,convY_rev = self.cleanSpectrum(self.stdWvls,self.stdFlux)
convX = convX_rev[::-1] #convolved spectrum comes back sorted backwards, from long wvls to low which screws up rebinning
convY = convY_rev[::-1]
#rebin cleaned spectrum to flat cal's wvlBinEdges
newa = rebin(convX,convY,self.wvlBinEdges)
rebinnedWvl = np.array(newa[:,0])
rebinnedFlux = np.array(newa[:,1])
if self.plots:
#plot final resampled spectrum
plt.plot(convX,convY*1E15,color='blue')
plt.step(rebinnedWvl,rebinnedFlux*1E15,color = 'black',where='mid')
plt.legend(['%s Spectrum'%self.objectName,'Blackbody Fit','Gaussian Convolved Spectrum','Rebinned Spectrum'],'upper right', numpoints=1)
plt.xlabel(ur"Wavelength (\r{A})")
plt.ylabel(ur"Flux (10$^{-15}$ ergs s$^{-1}$ cm$^{-2}$ \r{A}$^{-1}$)")
plt.ylim(0.9*min(rebinnedFlux)*1E15, 1.1*max(rebinnedFlux)*1E15)
plt.savefig(self.plotSavePath+'FluxCal_StdSpectrum_%s.eps'%self.objectName,format='eps')
#convert standard spectrum back into counts/s/angstrom/cm^2
newa = std.ergsToCounts(newa)
self.binnedSpectrum = np.array(newa[:,1])
def cleanSpectrum(self,x,y):
##=============== BB Fit to extend spectrum beyond 11000 Angstroms ==================
fraction = 1.0/3.0
nirX = np.arange(int(x[(1.0-fraction)*len(x)]),20000)
T, nirY = fitBlackbody(x,y,fraction=fraction,newWvls=nirX,tempGuess=5600)
if self.plots: plt.plot(nirX,nirY*1E15,linestyle='--',linewidth=2, color="black",alpha=0.5)
extendedWvl = np.concatenate((x,nirX[nirX>max(x)]))
extendedFlux = np.concatenate((y,nirY[nirX>max(x)]))
##======= Gaussian convolution to smooth std spectrum to MKIDs median resolution ========
newX, newY = gaussianConvolution(extendedWvl,extendedFlux,xEnMin=0.005,xEnMax=6.0,xdE=0.001,fluxUnits = "lambda",r=self.r,plots=False)
return newX, newY
def calculateFactors(self):
"""
Calculate the sensitivity spectrum: the weighting factors that correct the flat calibrated spectra to the real spectra
For relative calibration:
First subtract sky spectrum from ARCONS observed spectrum. Then take median of this spectrum as it should be identical
across the array, assuming the flat cal has done its job. Then divide this into the known spectrum of the object.
For absolute calibration:
self.fluxSpectra already has sky subtraction included. Simply divide this spectrum into the known standard spectrum.
"""
self.subtractedSpectrum = self.fluxSpectrum - self.skySpectrum
self.subtractedSpectrum = np.array(self.subtractedSpectrum,dtype=float) #cast as floats so division does not fail later
if self.method=='relative':
normWvl = 5500 #Angstroms. Choose an arbitrary wvl to normalize the relative correction at
ind = np.where(self.wvlBinEdges >= normWvl)[0][0]-1
self.subtractedSpectrum = self.subtractedSpectrum/(self.subtractedSpectrum[ind]) #normalize
self.binnedSpectrum = self.binnedSpectrum/(self.binnedSpectrum[ind]) #normalize treated Std spectrum while we are at it
#Calculate FluxCal factors
self.fluxFactors = self.binnedSpectrum/self.subtractedSpectrum
#self.fluxFlags = np.zeros(np.shape(self.fluxFactors),dtype='int')
self.fluxFlags = np.empty(np.shape(self.fluxFactors),dtype='int')
self.fluxFlags.fill(pipelineFlags.fluxCal['good']) #Initialise flag array filled with 'good' flags. JvE 5/1/2013.
#set factors that will cause trouble to 1
#self.fluxFlags[self.fluxFactors == np.inf] = 1
self.fluxFlags[self.fluxFactors == np.inf] = pipelineFlags.fluxCal['infWeight'] #Modified to use flag dictionary - JvE 5/1/2013
self.fluxFactors[self.fluxFactors == np.inf]=1.0
self.fluxFlags[np.isnan(self.fluxFactors)] = pipelineFlags.fluxCal['nanWeight'] #Modified to use flag dictionary - JvE 5/1/2013
self.fluxFactors[np.isnan(self.fluxFactors)]=1.0
self.fluxFlags[self.fluxFactors <= 0]=pipelineFlags.fluxCal['LEzeroWeight'] #Modified to use flag dictionary - JvE 5/1/2013
self.fluxFactors[self.fluxFactors <= 0]=1.0
def calculateMedian(self, spectra):
spectra2d = np.reshape(spectra,[self.nRow*self.nCol,self.nWvlBins])
wvlMedian = np.empty(self.nWvlBins,dtype=float)
for iWvl in xrange(self.nWvlBins):
spectrum = spectra2d[:,iWvl]
goodSpectrum = spectrum[spectrum != 0]#dead pixels need to be taken out before calculating medians
wvlMedian[iWvl] = np.median(goodSpectrum)
return wvlMedian
def makePlots(self):
"""
Output all debugging plots of ARCONS sky and object spectra, known calibrator spectrum, and sensitivity curve
"""
scratchDir = os.getenv('MKID_PROC_PATH')
fluxDir = self.plotSavePath
fluxCalBase = 'FluxCal_%s'%self.objectName
plotFileName = fluxCalBase+".pdf"
fullFluxPlotFileName = os.path.join(fluxDir,plotFileName)
        #uncomment to make some plots for the paper. Proper formatting will also require figureHeader to be imported and for movie making to be turned off
self.paperFig = plt.figure()
self.paperAx = self.paperFig.add_subplot(111)
plt.xlim(4000,11000)
plt.plot(self.binCenters,self.fluxFactors,linewidth=3,color='black')
plt.xlabel(ur"Wavelength (\r{A})")
plt.ylabel(ur"Spectral Calibration Curve")
plt.ylim(0,150)
plt.savefig(self.plotSavePath+'FluxCal_Sensitivity_%s.eps'%self.objectName,format='eps')
#save throughput as a .npz file that other code uses when making paper plots
np.savez(self.plotSavePath+'%s_%s_throughput.npz'%(self.objectName.strip(),self.fluxTstamp),throughput=1.0/self.fluxFactors,wvls=self.binCenters)
pp = PdfPages(fullFluxPlotFileName)
#plt.rcParams['font.size'] = 2
wvls = self.binCenters
plt.figure()
ax1 = plt.subplot(111)
ax1.set_title('ARCONS median flat cal\'d flux in counts')
plt.plot(wvls,self.fluxSpectrum)
pp.savefig()
plt.figure()
ax2 = plt.subplot(111)
ax2.set_title('ARCONS median flat cal\'d sky in counts')
plt.plot(wvls,self.skySpectrum)
pp.savefig()
plt.figure()
ax3 = plt.subplot(111)
ax3.set_title('Flux data minus sky in counts')
plt.plot(wvls,self.subtractedSpectrum)
pp.savefig()
plt.figure()
ax4 = plt.subplot(111)
ax4.set_title('Std Spectrum of %s'%(self.objectName))
plt.plot(self.stdWvls,self.stdFlux)
pp.savefig()
plt.figure()
ax5 = plt.subplot(111)
ax5.set_title('Binned Std Spectrum')
plt.plot(wvls,self.binnedSpectrum)
pp.savefig()
plt.figure()
ax6 = plt.subplot(111)
ax6.set_title('Median Sensitivity Spectrum')
ax6.set_xlim((3500,12000))
#ax6.set_ylim((0,5))
plt.plot(wvls,self.fluxFactors)
pp.savefig()
plt.figure()
ax7 = plt.subplot(111)
ax7.set_title('1/Sensitivity (Throughput)')
ax7.set_xlim((3500,12000))
ax7.set_ylim((0,.04))
plt.plot(wvls,1.0/self.fluxFactors)
pp.savefig()
plt.figure()
ax8 = plt.subplot(111)
ax8.set_title('Flux Cal\'d ARCONS Spectrum of Std')
plt.plot(wvls,self.fluxFactors*self.subtractedSpectrum)
pp.savefig()
pp.close()
print "Saved Flux Cal plots to %s"%(fullFluxPlotFileName)
def writeFactors(self,fluxCalFileName):
"""
Write flux cal weights to h5 file
"""
if os.path.isabs(fluxCalFileName) == True:
fullFluxCalFileName = fluxCalFileName
else:
scratchDir = os.getenv('MKID_PROC_PATH')
fluxDir = os.path.join(scratchDir,'fluxCalSolnFiles')
fullFluxCalFileName = os.path.join(fluxDir,fluxCalFileName)
try:
fluxCalFile = tables.openFile(fullFluxCalFileName,mode='w')
except:
print 'Error: Couldn\'t create flux cal file, ',fullFluxCalFileName
return
calgroup = fluxCalFile.createGroup(fluxCalFile.root,'fluxcal','Table of flux calibration weights by wavelength')
caltable = tables.Array(calgroup,'weights',object=self.fluxFactors,title='Flux calibration Weights indexed by wavelengthBin')
flagtable = tables.Array(calgroup,'flags',object=self.fluxFlags,title='Flux cal flags indexed by wavelengthBin. 0 is Good')
bintable = tables.Array(calgroup,'wavelengthBins',object=self.wvlBinEdges,title='Wavelength bin edges corresponding to third dimension of weights array')
fluxCalFile.flush()
fluxCalFile.close()
print "Finished Flux Cal, written to %s"%(fullFluxCalFileName)
def cleanSpectrum_old(self,x,y,objectName):
'''
function to take high resolution spectrum of standard star, extend IR coverage with
an exponential tail, then rebin down to ARCONS resolution. This function has since been
deprecated with the current cleanSpectrum which uses a BB fit to extend IR coverage,
and does the rebinning using a gaussian convolution. This is left in for reference.
'''
#locations and widths of absorption features in Angstroms
#features = [3890,3970,4099,4340,4860,6564,6883,7619]
#widths = [50,50,50,50,50,50,50,50]
#for i in xrange(len(features)):
# #check for absorption feature in std spectrum
# ind = np.where((x<(features[i]+15)) & (x>(features[i]-15)))[0]
# if len(ind)!=0:
# ind = ind[len(ind)/2]
# #if feature is found (flux is higher on both sides of the specified wavelength where the feature should be)
# if y[ind]<y[ind+1] and y[ind]<y[ind-1]:
# #cut out width[i] around feature[i]
# inds = np.where((x >= features[i]+widths[i]) | (x <= features[i]-widths[i]))
# x = x[inds]
# y = y[inds]
#fit a tail to the end of the spectrum to interpolate out to desired wavelength in angstroms
fraction = 3.0/4.0
newx = np.arange(int(x[fraction*len(x)]),20000)
slopeguess = (np.log(y[-1])-np.log(y[fraction*len(x)]))/(x[-1]-x[fraction*len(x)])
print "Guess at exponential slope is %f"%(slopeguess)
guess_a, guess_b, guess_c = float(y[fraction*len(x)]), x[fraction*len(x)], slopeguess
guess = [guess_a, guess_b, guess_c]
fitx = x[fraction*len(x):]
fity = y[fraction*len(x):]
exp_decay = lambda fx, A, x0, t: A * np.exp((fx-x0) * t)
params, cov = curve_fit(exp_decay, fitx, fity, p0=guess, maxfev=2000)
A, x0, t= params
print "A = %s\nx0 = %s\nt = %s\n"%(A, x0, t)
best_fit = lambda fx: A * np.exp((fx-x0)*t)
calcx = np.array(newx,dtype=float)
newy = best_fit(calcx)
        #func = interpolate.splrep(x[fraction*len(x):],y[fraction*len(x):],s=smooth)
#newx = np.arange(int(x[fraction*len(x)]),self.wvlBinEdges[-1])
#newy = interpolate.splev(newx,func)
wl = np.concatenate((x,newx[newx>max(x)]))
flux = np.concatenate((y,newy[newx>max(x)]))
#new method, rebin data to grid of wavelengths generated from a grid of evenly spaced energy bins
#R=7.0 at 4500
#R=E/dE -> dE = R/E
dE = 0.3936 #eV
start = 1000 #Angs
stop = 20000 #Angs
enBins = ObsFile.makeWvlBins(dE,start,stop)
rebinned = rebin(wl,flux,enBins)
re_wl = rebinned[:,0]
re_flux = rebinned[:,1]
#plt.plot(re_wl,re_flux,color='r')
re_wl = re_wl[np.isnan(re_flux)==False]
re_flux = re_flux[np.isnan(re_flux)==False]
start1 = self.wvlBinEdges[0]
stop1 = self.wvlBinEdges[-1]
#regrid downsampled data
new_wl = np.arange(start1,stop1)
#print re_wl
#print re_flux
#print new_wl
#weight=1.0/(re_flux)**(2/1.00)
print len(re_flux)
weight = np.ones(len(re_flux))
#decrease weights near peak
ind = np.where(re_flux == max(re_flux))[0]
weight[ind] = 0.3
for p in [1,2,3]:
if p==1:
wt = 0.3
elif p==2:
wt = 0.6
elif p==3:
wt = 0.7
try:
weight[ind+p] = wt
except IndexError:
pass
try:
if ind-p >= 0:
weight[ind-p] = wt
except IndexError:
pass
weight[-4:] = 1.0
#weight = [0.7,1,0.3,0.3,0.5,0.7,1,1,1]
#print len(weight)
#weight = re_flux/min(re_flux)
#weight = 1.0/weight
#weight = weight/max(weight)
#print weight
f = interpolate.splrep(re_wl,re_flux,w=weight,k=3,s=max(re_flux)**1.71)
new_flux = interpolate.splev(new_wl,f,der=0)
return new_wl, new_flux
if __name__ == '__main__':
try:
paramFile = sys.argv[1]
except:
paramFile = '/home/srmeeker/ARCONS-pipeline/params/fluxCal.dict'
fc = FluxCal(paramFile, plots=True, verbose=True)
| gpl-2.0 | 1,007,656,973,070,526,800 | 43.016176 | 240 | 0.640406 | false |
emc-openstack/storops | storops_test/lib/test_tasks.py | 1 | 3524 | # coding=utf-8
# Copyright (c) 2016 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
import shutil
from unittest import TestCase
import tempfile
from hamcrest import assert_that, equal_to, raises
import persistqueue
from storops.lib import tasks
from storops_test.vnx.cli_mock import patch_cli, t_vnx
import time
class TestPQueue(TestCase):
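    # Each test gets its own temporary directory for the persistent queue;
    # tearDown stops the queue and removes the directory again.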
def setUp(self):
self.path = tempfile.mkdtemp(suffix='storops')
self.q = tasks.PQueue(self.path)
def tearDown(self):
self.q.stop()
self.q = None
time.sleep(0.1)
shutil.rmtree(self.path, ignore_errors=True)
def test_queue_path(self):
assert_that(self.q.path, equal_to(self.path))
def test_put(self):
fake_vnx = t_vnx()
self.q.put(fake_vnx.delete_lun, name='l1')
def test_get(self):
fake_vnx = t_vnx()
self.q.put(fake_vnx.delete_lun, name='l1')
pickled_item = self.q.get()
assert_that(pickled_item['object']._ip, equal_to(fake_vnx._ip))
assert_that(pickled_item['method'], equal_to('delete_lun'))
assert_that(pickled_item['params']['name'], equal_to('l1'))
self.q.task_done()
self.q = None
self.q = tasks.PQueue(self.path)
assert_that(self.q.get, raises(persistqueue.Empty))
def test_run_empty_queue(self):
self.q.set_interval(0.01)
self.q.start()
# Make sure restart is fine
self.q.start()
@patch_cli
def test_run_tasks(self):
self.q.set_interval(0.01)
fake_vnx = t_vnx()
self.q.put(fake_vnx.delete_lun, name='l1')
self.q.start()
def test_re_enqueue(self):
fake_vnx = t_vnx()
item = {'object': fake_vnx, 'method': 'delete_lun',
'params': {'name': 'l1'}}
self.q.re_enqueue(item)
assert_that(item['retries'], equal_to(1))
def test_re_enqueue_max_retries(self):
fake_vnx = t_vnx()
item = {'object': fake_vnx, 'method': 'delete_lun', 'params': 'l1'}
for i in range(100):
self.q.re_enqueue(item)
self.q.get()
self.q.re_enqueue(item)
assert_that(item['retries'], equal_to(100))
@patch_cli
def test_enqueue_expected_error(self):
self.q.set_interval(0.1)
fake_vnx = t_vnx()
uid = '00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:01'
self.q.put(fake_vnx.delete_hba, hba_uid=uid)
self.q.start()
time.sleep(0.2)
assert_that(self.q.get, raises(persistqueue.Empty))
@patch_cli
def test_enqueue_storops_error(self):
self.q.set_interval(0.1)
fake_vnx = t_vnx()
self.q.put(fake_vnx.create_block_user,
name='b', password='b', role='operator')
self.q.start()
time.sleep(0.2)
reenqueued_item = self.q.get()
assert_that('b', equal_to(reenqueued_item['params']['name']))
| apache-2.0 | -1,893,215,132,781,401,000 | 31.036364 | 78 | 0.608116 | false |
induktio/LamaTrainer | falib/PdaSimulator.py | 1 | 2691 | import sys,os,json,pprint
class PDA():
"""
Represents a Push-Down Automaton
"""
EPSILON = '_e'
MAX_STEPS = 10000
def __init__(self):
pass
def testAutomata(self, pda, testcases):
"""
Tests whether the given PDA accepts or rejects the given testcases appropriately
Args:
* pda: PDA object to be tested
* testcases: Strings used for testing the PDA
Returns:
The result from simulating the PDA with the given inputs::
{'result': True} -- The PDA ran correctly with every input
{'result': False, 'minimal': 'aabb'} -- The PDA did not run correctly on input aabb
"""
if type(testcases) != dict or len(testcases) == 0:
return { 'result': False, 'minimal': 'No testcases defined' }
cases = sorted(sorted(testcases.keys()), key=len) # Order by length, alphabetic
for string in cases:
wanted = testcases[string]
simulate = self.simulate(pda, string)
if wanted != simulate:
return { 'result': False, 'minimal': string }
return { 'result': True }
def simulate(self, pda, string):
"""
Simulates the execution of PDA on a given input
Args:
* pda: PDA object to be executed
* string: Input to be simulated
Returns:
| True, if the PDA accepts the string
| False, if not
"""
visited = set()
queue = [(pda['start'], "", 0)] # Current state, stack, input string index
n = 0
while len(queue) != 0:
current = queue.pop(0)
visited.add(current)
curState,curStack,curIndex = current
if curState in pda['accepting'] and curIndex == len(string):
return True
if curIndex < len(string):
nextInput = string[curIndex]
else:
nextInput = None
n += 1
if n > PDA.MAX_STEPS:
break
for transition in pda['transitions'][curState]:
inputSymbol,popStack,pushStack = transition.split(',')
nextStates = pda['transitions'][curState][transition]
nextStack = curStack
nextIndex = curIndex
valid = True
if inputSymbol == PDA.EPSILON or inputSymbol == nextInput:
if popStack != PDA.EPSILON:
						if curStack and curStack[-1] == popStack:
nextStack = curStack[0 : len(curStack)-1]
else:
valid = False
if pushStack != PDA.EPSILON:
nextStack += pushStack
if inputSymbol != PDA.EPSILON:
nextIndex = curIndex+1
if valid:
for state in nextStates:
nextItem = (state, nextStack, nextIndex)
if nextItem not in visited:
queue.append(nextItem)
return False
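# Illustrative only (added sketch, not part of the original module): a PDA accepting
# the language a^n b^n, written in the dict format that PDA.simulate() consumes.
# The state names and the bottom-of-stack marker 'Z' are assumptions for this example;
# '_e' is PDA.EPSILON and each transition key reads "inputSymbol,popSymbol,pushSymbol".
_EXAMPLE_PDA = {
    'start': 'q0',
    'accepting': ['q3'],
    'transitions': {
        'q0': {'_e,_e,Z': ['q1']},              # push a bottom-of-stack marker
        'q1': {'a,_e,A': ['q1'],                # push one A per 'a'
               'b,A,_e': ['q2'],                # first 'b' starts popping
               '_e,Z,_e': ['q3']},              # accept the empty string too
        'q2': {'b,A,_e': ['q2'],                # pop one A per remaining 'b'
               '_e,Z,_e': ['q3']},              # all As popped -> accepting state
        'q3': {},
    },
}
# e.g. PDA().simulate(_EXAMPLE_PDA, 'aabb') -> True, PDA().simulate(_EXAMPLE_PDA, 'aab') -> False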
if __name__ == '__main__' and len(sys.argv) > 2:
pp = pprint.PrettyPrinter(indent=4)
comp = PDA()
automata = json.loads(open(sys.argv[1], 'r').read())
#pp.pprint( automata )
print comp.simulate(automata, sys.argv[2])
| gpl-3.0 | 3,181,897,919,882,742,000 | 26.181818 | 87 | 0.644742 | false |
rossumai/keras-multi-gpu | keras_tf_multigpu/examples/kuza55/cifar10_cnn_functional_multigpu.py | 1 | 4556 | '''Train a simple deep CNN on the CIFAR10 small images dataset.
GPU run command with Theano backend (with TensorFlow, the GPU is automatically used):
THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatx=float32 python cifar10_cnn.py
It gets down to 0.65 test logloss in 25 epochs, and down to 0.55 after 50 epochs.
(it's still underfitting at that point, though).
'''
from __future__ import print_function
import keras
from keras import backend as K
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model
from keras.layers import Input, Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.layers.merge import concatenate
from keras.layers.core import Lambda
import os
import tensorflow as tf
from keras_tf_multigpu.kuza55 import make_parallel
# sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
sess = tf.Session()
K.set_session(sess)
ps_device = '/gpu:0'
gpu_count = len([dev for dev in os.environ.get('CUDA_VISIBLE_DEVICES', '').split(',') if len(dev.strip()) > 0])
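# Assumed invocation for illustration (not part of the original script): running e.g.
#   CUDA_VISIBLE_DEVICES=0,1 python cifar10_cnn_functional_multigpu.py
# would give gpu_count == 2, and make_parallel below then replicates the model on both GPUs.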
batch_size = 128
num_classes = 10
epochs = 6
data_augmentation = True
# The data, shuffled and split between train and test sets:
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
def basic_model():
input = Input(shape=x_train.shape[1:])
x = Conv2D(32, (3, 3), padding='same')(input)
x = Activation('relu')(x)
x = Conv2D(32, (3, 3))(x)
x = Activation('relu')(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
x = Dropout(0.25)(x)
x = Conv2D(64, (3, 3), padding='same')(x)
x = Activation('relu')(x)
x = Conv2D(64, (3, 3))(x)
x = Activation('relu')(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
x = Dropout(0.25)(x)
x = Flatten()(x)
x = Dense(512)(x)
x = Activation('relu')(x)
x = Dropout(0.5)(x)
x = Dense(num_classes)(x)
output = Activation('softmax')(x)
model = Model(inputs=input, outputs=output)
print('Single tower model:')
model.summary()
return model
with tf.device(ps_device):
serial_model = basic_model()
print('Serial model:')
serial_model.summary()
model = make_parallel(serial_model, gpu_count, ps_device)
print('Multi-GPU model:')
model.summary()
# initiate RMSprop optimizer
opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)
# Let's train the model using RMSprop
model.compile(loss='categorical_crossentropy',
optimizer=opt,
metrics=['accuracy'])
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
if not data_augmentation:
print('Not using data augmentation.')
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test),
shuffle=True)
else:
print('Using real-time data augmentation.')
# This will do preprocessing and realtime data augmentation:
datagen = ImageDataGenerator(
featurewise_center=False, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
featurewise_std_normalization=False, # divide inputs by std of the dataset
samplewise_std_normalization=False, # divide each input by its std
zca_whitening=False, # apply ZCA whitening
rotation_range=0, # randomly rotate images in the range (degrees, 0 to 180)
width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)
height_shift_range=0.1, # randomly shift images vertically (fraction of total height)
horizontal_flip=True, # randomly flip images
vertical_flip=False) # randomly flip images
# Compute quantities required for feature-wise normalization
# (std, mean, and principal components if ZCA whitening is applied).
datagen.fit(x_train)
# Fit the model on the batches generated by datagen.flow().
model.fit_generator(datagen.flow(x_train, y_train,
batch_size=batch_size),
steps_per_epoch=x_train.shape[0] // batch_size,
epochs=epochs,
validation_data=(x_test, y_test))
| mit | -8,435,183,062,125,755,000 | 33.778626 | 111 | 0.663082 | false |
lumig242/Video-Share-System | video/views.py | 1 | 4497 | from django.shortcuts import render
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from video.form import *
from video.models import Video,Comment
from django.contrib.auth.decorators import login_required
import json
@login_required
def upload(request):
uploadFlag = True
if request.method == 'POST':
form = UploadFileForm(request.POST, request.FILES)
if form.is_valid():
video = Video()
video.owner = request.user
video.title = form.cleaned_data['title']
video.file = request.FILES['file']
video.description = form.cleaned_data["description"]
video.save()
return HttpResponseRedirect('success/')
else:
form = UploadFileForm()
return render_to_response('upload.html', locals(),context_instance=RequestContext(request))
def uploadSuccess(request):
return render_to_response('upload_Success.html',context_instance=RequestContext(request))
def homepage_video_list(request):
highscore = Video.objects.all()
    highscore = sorted(highscore, key=lambda x: 1. * x.rating_sum / (1 + x.rating_person), reverse=True)[0:5]
latest = Video.objects.all()[0:5]
return render_to_response('homepage.html', locals(), context_instance=RequestContext(request))
def video_play(request,video_id):
video_object = Video.objects.get(id=video_id)
video_path = video_object.file.url
own = True if request.user == video_object.owner else False
if video_object.rating_person:
points = round(1.0*video_object.rating_sum/video_object.rating_person,1)
else:
points = "Not rated"
# Comment display
commentList = Comment.objects.filter(video=video_object).order_by('-time')
return render_to_response('videoplay.html', locals(),context_instance=RequestContext(request))
def rate_video(request,video_id):
print request.method, video_id
if request.method == 'POST':
print 'hello2'
form = RatingForm(request.POST)
if form.is_valid():
print 'hello3'
video_object = Video.objects.get(id=video_id)
video_object.rating_person += 1
video_object.rating_sum += form.cleaned_data['rate']
video_object.save()
HasRated = True
points = round(1.0*video_object.rating_sum/video_object.rating_person,1)
return HttpResponse(points)
def comment_video(request, video_id):
print request.method, video_id
if request.method == 'POST':
print "hello2"
form = SendCommentForm(request.POST)
if form.is_valid():
print "hello3"
comment = Comment()
comment.author = request.user
comment.video = Video.objects.filter(id=video_id)[0]
comment.content = form.cleaned_data['content']
comment.save()
print str(comment.author.username), str(comment.time), str(comment.content)
            s = '<p>'+str(comment.author.username)+ comment.time.strftime(" %b. %d, %Y, %I:%M %p ")+ str(comment.content) + '</p>'
#return HttpResponse(json.dumps({"name":str(comment.author.username), "date":str(comment.time), "content": str(comment.content)}))
return HttpResponse(s)
def video_modify(request,video_id):
modifyFlag = True
video_object = Video.objects.get(id=video_id)
if request.method == 'POST':
uploadFlag = True
form = ModifyVideoForm(request.POST)
if form.is_valid():
video_object.title = form.cleaned_data['title']
video_object.description = form.cleaned_data["description"]
video_object.save()
return HttpResponseRedirect('/videoplay/{}'.format(video_id))
else:
form = ModifyVideoForm()
return render_to_response('upload.html', locals(),context_instance=RequestContext(request))
def video_delete(request,video_id):
video_object = Video.objects.get(id=video_id)
video_object.delete()
return HttpResponseRedirect('/timeline')
def video_share(request,video_id):
video_object = Video.objects.get(id=video_id)
video = Video()
video.owner = request.user
video.title = video_object.title
video.file = video_object.file
video.description = video_object.description
video.save()
return HttpResponseRedirect('/videoplay/{}'.format(video_id))
| mit | -4,826,796,093,921,239,000 | 35.266129 | 142 | 0.653769 | false |
scott48074/Restorative-Justice-App | app/facesheet.py | 1 | 4506 | '''
Takes in a list of values from the database and creates a facesheet.
'''
import os
from docx import Document
from docx.enum.text import WD_ALIGN_PARAGRAPH
def assemble_address(street, apartment, city, state, zip_code):
address = street.title()
if apartment:
address += f' APT: {apartment.title()}'
address += f' {city.title()}, '
address += state.upper()
address += ' ' + zip_code
return address
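# Added illustrative example (hypothetical values): with the formatting above,
#   assemble_address('123 main st', '4b', 'springfield', 'il', '62701')
#   -> '123 Main St APT: 4B Springfield, IL 62701'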
def parse_row(row_list):
info = {'case_number': row_list[1],
'occurred_date': row_list[2],
'incident_type': row_list[3].title(),
'age': row_list[5],
'name': row_list[7].title(),
'address': assemble_address(row_list[8], row_list[9],
row_list[10], row_list[11],
row_list[12],
),
'DOB': row_list[13],
'phone': row_list[14],
'race': row_list[15].title(),
'sex': row_list[16].title(),
'district': row_list[18].title()}
return info
def district_line(document, district):
p = document.add_paragraph()
p.alignment = WD_ALIGN_PARAGRAPH.RIGHT
p.add_run(f'District: {district}').bold = True
def approval_line(document):
p = document.add_paragraph()
p.alignment = WD_ALIGN_PARAGRAPH.RIGHT
p.add_run('Selection: ').bold = True
p.add_run('Pass').bold = True
p.add_run(' | ').bold = True
p.add_run('Fail').bold = True
p.add_run().add_break()
p.add_run('Background: ').bold = True
p.add_run('Pass').bold = True
p.add_run(' | ').bold = True
p.add_run('Fail').bold = True
p.add_run().add_break()
def case_number_line(document, case_number):
p = document.add_paragraph()
p.add_run(f'Case Number: {case_number}')
def name_line(document, name):
p = document.add_paragraph()
p.add_run(f'Name: {name}')
def bio_line(document, sex, race, dob, age):
lines = ['Sex:\t', 'Race:\t', 'DOB:\t', 'Age:\t']
bio_list = [sex, race, dob, age]
p = document.add_paragraph()
for line, bio in zip(lines, bio_list):
p.add_run(f'{line}{bio}')
p.add_run().add_break()
def charge_line(document):
lines = ['Charge Type: State | Municipal',
'Description:', 'Court Date:', 'Citation#:']
p = document.add_paragraph()
for line in lines:
p.add_run(line)
p.add_run().add_break()
def address_line(document, address):
p = document.add_paragraph()
p.add_run(f'Address: {address}')
def phone_line(document, phone):
p = document.add_paragraph()
p.add_run(f'Phone: {phone}')
p.add_run().add_break()
p.add_run('Email:')
def background_line(document):
lines = ['Court Records:', 'Out of State Records:',
'Local Records:', 'Notes:']
for line in lines:
p = document.add_paragraph()
p.add_run(line).bold = True
def last_name_first(name):
suffix = ['II', 'IV', 'JR', 'SR']
name_list = name.split()
name_list.insert(0, name_list.pop())
if name_list[0][:2].upper() in suffix:
name_list.insert(0, name_list.pop())
name = "_".join(name_list)
return name
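# Added illustrative examples (hypothetical names): given the suffix handling above,
#   last_name_first('John Smith')    -> 'Smith_John'
#   last_name_first('John Smith Jr') -> 'Smith_Jr_John'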
def save_facesheet(document, directory, name, district, district_folders):
name = last_name_first(name)
if district_folders:
path = f'{directory}/results/{district}/{name}/{name}.docx'
if not os.path.isdir(f'{directory}/results/{district}/{name}'):
os.makedirs(f'{directory}/results/{district}/{name}')
else:
path = f'{directory}/results/{name}/{name}.docx'
if not os.path.isdir(f'{directory}/results/{name}'):
os.makedirs(f'{directory}/results/{name}')
document.save(path)
def assemble_sheet(row_list, directory, district_folders):
info_dict = parse_row(row_list)
document = Document()
district_line(document, info_dict['district'])
approval_line(document)
case_number_line(document, info_dict['case_number'])
name_line(document, info_dict['name'])
bio_line(document, info_dict['sex'], info_dict['race'], info_dict['DOB'], info_dict['age'])
charge_line(document)
address_line(document, info_dict['address'])
phone_line(document, info_dict['phone'])
background_line(document)
save_facesheet(document, directory, info_dict['name'], info_dict['district'], district_folders)
def main():
pass
if __name__ == '__main__':
main()
| mit | 5,163,028,321,451,028,000 | 28.644737 | 99 | 0.583666 | false |
kri5/pghoard | test/test_restore.py | 1 | 4618 | """
pghoard
Copyright (c) 2015 Ohmu Ltd
See LICENSE for details
"""
from .base import PGHoardTestCase
from dateutil import tz
from pghoard.restore import create_recovery_conf, Restore, RestoreError
from unittest.mock import Mock
import datetime
import os
import pytest
class TestRecoveryConf(PGHoardTestCase):
def test_recovery_targets(self, tmpdir):
r = Restore()
r._load_config = Mock() # pylint: disable=protected-access
r._get_object_storage = Mock() # pylint: disable=protected-access
with pytest.raises(RestoreError) as excinfo:
r.run(args=[
"get-basebackup",
"--config=" + str(tmpdir),
"--target-dir=" + str(tmpdir),
"--site=test",
"--recovery-target-action=promote",
"--recovery-target-name=foobar",
"--recovery-target-xid=42",
])
assert "at most one" in str(excinfo.value)
with pytest.raises(RestoreError) as excinfo:
r.run(args=[
"get-basebackup",
"--config=" + str(tmpdir),
"--target-dir=" + str(tmpdir),
"--site=test",
"--recovery-target-action=promote",
"--recovery-target-time=foobar",
])
assert "recovery_target_time 'foobar'" in str(excinfo.value)
def test_find_nearest_backup(self):
r = Restore()
r.storage = Mock()
basebackups = [{"name": "2015-02-12_0", "metadata": {"start-time": "2015-02-12T14:07:19+00:00"}},
{"name": "2015-02-13_0", "metadata": {"start-time": "2015-02-13T14:07:19+00:00"}}]
r.storage.list_basebackups = Mock(return_value=basebackups)
assert r._find_nearest_basebackup() == "2015-02-13_0" # pylint: disable=protected-access
utc = tz.tzutc()
recovery_time = datetime.datetime(2015, 2, 1)
recovery_time = recovery_time.replace(tzinfo=utc)
with pytest.raises(RestoreError):
r._find_nearest_basebackup(recovery_time) # pylint: disable=protected-access
recovery_time = datetime.datetime(2015, 2, 12, 14, 20)
recovery_time = recovery_time.replace(tzinfo=utc)
assert r._find_nearest_basebackup(recovery_time) == "2015-02-12_0" # pylint: disable=protected-access
def test_create_recovery_conf(self):
td = self.temp_dir
fn = os.path.join(td, "recovery.conf")
def getdata():
with open(fn, "r") as fp:
return fp.read()
assert not os.path.exists(fn)
create_recovery_conf(td, "dummysite", None)
assert "primary_conninfo" not in getdata()
create_recovery_conf(td, "dummysite", "")
assert "primary_conninfo" not in getdata()
create_recovery_conf(td, "dummysite", "dbname='test'")
assert "primary_conninfo" in getdata() # make sure it's there
assert "''test''" in getdata() # make sure it's quoted
assert "standby_mode = 'on'" in getdata()
content = create_recovery_conf(td, "dummysite", "dbname='test'", restore_to_master=True)
assert "primary_conninfo" in content
assert "standby_mode = 'on'" not in content
content = create_recovery_conf(td, "dummysite",
recovery_end_command="echo 'done' > /tmp/done",
recovery_target_xid="42")
assert content == getdata()
assert "primary_conninfo" not in content
assert "recovery_end_command = 'echo ''done'' > /tmp/done'" in content
# NOTE: multiple recovery targets don't really make sense in
# recovery.conf: PostgreSQL just uses the last entry.
# create_recovery_conf doesn't check them as it's called late enough
# for that check to be useless. Let's just make sure we can write
# lines for all of them.
now = datetime.datetime.now()
content = create_recovery_conf(td, "dummysite",
recovery_end_command="/bin/false",
recovery_target_action="shutdown",
recovery_target_name="testpoint",
recovery_target_time=now,
recovery_target_xid="42")
assert "recovery_target_action" in content
assert "recovery_target_name" in content
assert "recovery_target_time" in content
assert "recovery_target_xid" in content
assert str(now) in content
| apache-2.0 | -1,430,156,587,909,244,200 | 43.403846 | 110 | 0.573408 | false |
wtgme/labeldoc2vec | gensim/models/labeldoc2vec.py | 1 | 45979 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Radim Rehurek <[email protected]>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Deep learning via the distributed memory and distributed bag of words models from
[1]_, using either hierarchical softmax or negative sampling [2]_ [3]_.
**Make sure you have a C compiler before installing gensim, to use optimized (compiled)
doc2vec training** (70x speedup [blog]_).
Initialize a model with e.g.::
>>> model = LabelDoc2Vec(documents, size=100, window=8, min_count=5, workers=4)
Persist a model to disk with::
>>> model.save(fname)
>>> model = LabelDoc2Vec.load(fname) # you can continue training with the loaded model!
The model can also be instantiated from an existing file on disk in the word2vec C format::
>>> model = LabelDoc2Vec.load_word2vec_format('/tmp/vectors.txt', binary=False) # C text format
>>> model = LabelDoc2Vec.load_word2vec_format('/tmp/vectors.bin', binary=True) # C binary format
.. [1] Quoc Le and Tomas Mikolov. Distributed Representations of Sentences and Documents. http://arxiv.org/pdf/1405.4053v2.pdf
.. [2] Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean. Efficient Estimation of Word Representations in Vector Space. In Proceedings of Workshop at ICLR, 2013.
.. [3] Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg Corrado, and Jeffrey Dean. Distributed Representations of Words and Phrases and their Compositionality.
In Proceedings of NIPS, 2013.
.. [blog] Optimizing word2vec in gensim, http://radimrehurek.com/2013/09/word2vec-in-python-part-two-optimizing/
"""
import logging
import os
import warnings
try:
from queue import Queue
except ImportError:
from Queue import Queue
from collections import namedtuple, defaultdict
from timeit import default_timer
from numpy import zeros, exp, random, sum as np_sum, outer, add as np_add, concatenate, \
repeat as np_repeat, array, float32 as REAL, empty, ones, memmap as np_memmap, \
sqrt, newaxis, ndarray, dot, vstack, dtype, divide as np_divide
from gensim import utils, matutils # utility fnc for pickling, common scipy operations etc
from word2vec import Word2Vec, Vocab, train_cbow_pair, train_sg_pair, train_batch_sg
from six.moves import xrange, zip
from six import string_types, integer_types, itervalues
import random
logger = logging.getLogger(__name__)
try:
from gensim.models.labeldoc2vec_inner import train_label_document_dbow, train_label_document_dm, train_label_document_dm_concat
from gensim.models.word2vec_inner import FAST_VERSION # blas-adaptation shared from word2vec
logger.info('Fast version of {0} is being used'.format(__name__))
except ImportError:
logger.info('Slow version of {0} is being used'.format(__name__))
# failed... fall back to plain numpy (20-80x slower training than the above)
FAST_VERSION = -1
# def train_dl_pair(negative, label_index, context_index, alpha, learn_vectors=True, learn_hidden=True,
# context_vectors=None, context_locks=None, label_vectors=None, label_locks=None):
# print '-----------------------------------'
# print '------------Lower version------------'
# print '-----------------------------------'
# l1 = context_vectors[context_index] # input word (NN input/projection layer)
# lock_factor = context_locks[context_index]
#
# neu1e = zeros(l1.shape)
#
# # use this word (label = 1) + `negative` other random words not from this sentence (label = 0)
# neg_size = min(negative+1, len(label_vectors))
# word_indices = random.sample(range(len(label_vectors)), neg_size)
# final_labels = zeros(neg_size)
# if label_index not in word_indices:
# word_indices[0] = label_index
# final_labels[0] = 1
# else:
# index_pos = word_indices.index(label_index)
# final_labels[index_pos] = 1
# l2b = label_vectors[word_indices] # 2d matrix, k+1 x layer1_size
# fb = 1. / (1. + exp(-dot(l1, l2b.T))) # propagate hidden -> output
# gb = (final_labels - fb) * alpha # vector of error gradients multiplied by the learning rate
# if learn_hidden:
# label_vectors[word_indices] += outer(gb, l1) # learn hidden -> output
# neu1e += dot(gb, l2b) # save error
#
# if learn_vectors:
# # l1 += neu1e * lock_factor # learn input -> hidden (mutates model.syn0[word2.index], if that is l1)
# context_vectors[context_index] += neu1e * lock_factor # learn input -> hidden (mutates model.syn0[word2.index], if that is l1)
# return neu1e
#
#
# def train_label_document_dbow(model, doc_words, doctag_indexes, doclabel_indexes, alpha, work=None,
# train_words=False, learn_doctags=True, learn_words=True, learn_hidden=True,
# word_vectors=None, word_locks=None, doctag_vectors=None, doctag_locks=None,
# doclabel_vectors=None, doclabel_locks=None):
# """
# Update distributed bag of words model ("PV-DBOW") by training on a single document.
#
# Called internally from `Doc2Vec.train()` and `Doc2Vec.infer_vector()`.
#
# The document is provided as `doc_words`, a list of word tokens which are looked up
# in the model's vocab dictionary, and `doctag_indexes`, which provide indexes
# into the doctag_vectors array.
#
# If `train_words` is True, simultaneously train word-to-word (not just doc-to-word)
# examples, exactly as per Word2Vec skip-gram training. (Without this option,
# word vectors are neither consulted nor updated during DBOW doc vector training.)
#
# Any of `learn_doctags', `learn_words`, and `learn_hidden` may be set False to
# prevent learning-updates to those respective model weights, as if using the
# (partially-)frozen model to infer other compatible vectors.
#
# This is the non-optimized, Python version. If you have cython installed, gensim
# will use the optimized version from doc2vec_inner instead.
#
# """
# if doctag_vectors is None:
# doctag_vectors = model.docvecs.doctag_syn0
# if doctag_locks is None:
# doctag_locks = model.docvecs.doctag_syn0_lockf
#
# if doclabel_vectors is None:
# doclabel_vectors = model.labelvecs.doctag_syn0
# if doclabel_locks is None:
# doclabel_locks = model.labelvecs.doctag_syn0_lockf
#
# if train_words and learn_words:
# train_batch_sg(model, [doc_words], alpha, work)
# for doctag_index in doctag_indexes:
# for word in doc_words:
# train_sg_pair(model, word, doctag_index, alpha, learn_vectors=learn_doctags,
# learn_hidden=learn_hidden, context_vectors=doctag_vectors,
# context_locks=doctag_locks)
# for doclabel_index in doclabel_indexes:
# train_dl_pair(model.negative, doclabel_index, doctag_index, alpha, learn_vectors=learn_doctags,
# learn_hidden=learn_hidden, context_vectors=doctag_vectors,
# context_locks=doctag_locks, label_vectors=doclabel_vectors, label_locks=doclabel_locks)
#
#
# return len(doc_words)
#
# def train_label_document_dm(model, doc_words, doctag_indexes, doclabel_indexes, alpha, work=None, neu1=None,
# learn_doctags=True, learn_words=True, learn_hidden=True,
# word_vectors=None, word_locks=None, doctag_vectors=None, doctag_locks=None,
# doclabel_vectors=None, doclabel_locks=None):
# """
# Update distributed memory model ("PV-DM") by training on a single document.
#
# Called internally from `Doc2Vec.train()` and `Doc2Vec.infer_vector()`. This
# method implements the DM model with a projection (input) layer that is
# either the sum or mean of the context vectors, depending on the model's
# `dm_mean` configuration field. See `train_label_document_dm_concat()` for the DM
# model with a concatenated input layer.
#
# The document is provided as `doc_words`, a list of word tokens which are looked up
# in the model's vocab dictionary, and `doctag_indexes`, which provide indexes
# into the doctag_vectors array.
#
# Any of `learn_doctags', `learn_words`, and `learn_hidden` may be set False to
# prevent learning-updates to those respective model weights, as if using the
# (partially-)frozen model to infer other compatible vectors.
#
# This is the non-optimized, Python version. If you have a C compiler, gensim
# will use the optimized version from doc2vec_inner instead.
#
# """
# if word_vectors is None:
# word_vectors = model.syn0
# if word_locks is None:
# word_locks = model.syn0_lockf
#
# if doctag_vectors is None:
# doctag_vectors = model.docvecs.doctag_syn0
# if doctag_locks is None:
# doctag_locks = model.docvecs.doctag_syn0_lockf
#
# if doclabel_vectors is None:
# doclabel_vectors = model.labelvecs.doctag_syn0
# if doclabel_locks is None:
# doclabel_locks = model.labelvecs.doctag_syn0_lockf
#
# word_vocabs = [model.vocab[w] for w in doc_words if w in model.vocab and
# model.vocab[w].sample_int > model.random.rand() * 2**32]
#
# for pos, word in enumerate(word_vocabs):
# reduced_window = model.random.randint(model.window) # `b` in the original doc2vec code
# start = max(0, pos - model.window + reduced_window)
# window_pos = enumerate(word_vocabs[start:(pos + model.window + 1 - reduced_window)], start)
# word2_indexes = [word2.index for pos2, word2 in window_pos if pos2 != pos]
# l1 = np_sum(word_vectors[word2_indexes], axis=0) + np_sum(doctag_vectors[doctag_indexes], axis=0)
# count = len(word2_indexes) + len(doctag_indexes)
# if model.cbow_mean and count > 1 :
# l1 /= count
# neu1e = train_cbow_pair(model, word, word2_indexes, l1, alpha,
# learn_vectors=False, learn_hidden=learn_hidden)
# if not model.cbow_mean and count > 1:
# neu1e /= count
# if learn_doctags:
# for i in doctag_indexes:
# doctag_vectors[i] += neu1e * doctag_locks[i]
# if learn_words:
# for i in word2_indexes:
# word_vectors[i] += neu1e * word_locks[i]
# for doctag_index in doctag_indexes:
# for doclabel_index in doclabel_indexes:
# train_dl_pair(model.negative, doclabel_index, doctag_index, alpha, learn_vectors=learn_doctags,
# learn_hidden=learn_hidden, context_vectors=doctag_vectors,
# context_locks=doctag_locks, label_vectors=doclabel_vectors, label_locks=doclabel_locks)
#
# return len(word_vocabs)
#
# def train_label_document_dm_concat(model, doc_words, doctag_indexes, doclabel_indexes, alpha, work=None, neu1=None,
# learn_doctags=True, learn_words=True, learn_hidden=True,
# word_vectors=None, word_locks=None, doctag_vectors=None, doctag_locks=None,
# doclabel_vectors=None, doclabel_locks=None):
# """
# Update distributed memory model ("PV-DM") by training on a single document, using a
# concatenation of the context window word vectors (rather than a sum or average).
#
# Called internally from `Doc2Vec.train()` and `Doc2Vec.infer_vector()`.
#
# The document is provided as `doc_words`, a list of word tokens which are looked up
# in the model's vocab dictionary, and `doctag_indexes`, which provide indexes
# into the doctag_vectors array.
#
# Any of `learn_doctags', `learn_words`, and `learn_hidden` may be set False to
# prevent learning-updates to those respective model weights, as if using the
# (partially-)frozen model to infer other compatible vectors.
#
# This is the non-optimized, Python version. If you have a C compiler, gensim
# will use the optimized version from doc2vec_inner instead.
#
# """
# if word_vectors is None:
# word_vectors = model.syn0
# if word_locks is None:
# word_locks = model.syn0_lockf
#
# if doctag_vectors is None:
# doctag_vectors = model.docvecs.doctag_syn0
# if doctag_locks is None:
# doctag_locks = model.docvecs.doctag_syn0_lockf
#
# if doclabel_vectors is None:
# doclabel_vectors = model.labelvecs.doctag_syn0
# if doclabel_locks is None:
# doclabel_locks = model.labelvecs.doctag_syn0_lockf
#
# word_vocabs = [model.vocab[w] for w in doc_words if w in model.vocab and
# model.vocab[w].sample_int > model.random.rand() * 2**32]
# doctag_len = len(doctag_indexes)
# if doctag_len != model.dm_tag_count:
# return 0 # skip doc without expected number of doctag(s) (TODO: warn/pad?)
#
# null_word = model.vocab['\0']
# pre_pad_count = model.window
# post_pad_count = model.window
# padded_document_indexes = (
# (pre_pad_count * [null_word.index]) # pre-padding
# + [word.index for word in word_vocabs if word is not None] # elide out-of-Vocabulary words
# + (post_pad_count * [null_word.index]) # post-padding
# )
#
# for pos in range(pre_pad_count, len(padded_document_indexes) - post_pad_count):
# word_context_indexes = (
# padded_document_indexes[(pos - pre_pad_count): pos] # preceding words
# + padded_document_indexes[(pos + 1):(pos + 1 + post_pad_count)] # following words
# )
# word_context_len = len(word_context_indexes)
# predict_word = model.vocab[model.index2word[padded_document_indexes[pos]]]
# # numpy advanced-indexing copies; concatenate, flatten to 1d
# l1 = concatenate((doctag_vectors[doctag_indexes], word_vectors[word_context_indexes])).ravel()
# neu1e = train_cbow_pair(model, predict_word, None, l1, alpha,
# learn_hidden=learn_hidden, learn_vectors=False)
#
# # filter by locks and shape for addition to source vectors
# e_locks = concatenate((doctag_locks[doctag_indexes], word_locks[word_context_indexes]))
# neu1e_r = (neu1e.reshape(-1, model.vector_size)
# * np_repeat(e_locks, model.vector_size).reshape(-1, model.vector_size))
#
# if learn_doctags:
# np_add.at(doctag_vectors, doctag_indexes, neu1e_r[:doctag_len])
# if learn_words:
# np_add.at(word_vectors, word_context_indexes, neu1e_r[doctag_len:])
# for doctag_index in doctag_indexes:
# for doclabel_index in doclabel_indexes:
# train_dl_pair(model.negative, doclabel_index, doctag_index, alpha, learn_vectors=learn_doctags,
# learn_hidden=learn_hidden, context_vectors=doctag_vectors,
# context_locks=doctag_locks, label_vectors=doclabel_vectors, label_locks=doclabel_locks)
#
# return len(padded_document_indexes) - pre_pad_count - post_pad_count
class LabeledTaggedDocument(namedtuple('LabeledTaggedDocument', 'words tags labels')):
"""
A single document, made up of `words` (a list of unicode string tokens)
and `tags` (a list of tokens). Tags may be one or more unicode string
tokens, but typical practice (which will also be most memory-efficient) is
for the tags list to include a unique integer id as the only tag.
Replaces "sentence as a list of words" from Word2Vec.
"""
def __str__(self):
        return '%s(%s, %s, %s)' % (self.__class__.__name__, self.words, self.tags, self.labels)
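# Illustrative usage sketch (added; not part of the original module). The document
# fields, label values and hyperparameters below are assumptions for the example:
#
#   doc = LabeledTaggedDocument(words=['good', 'energy', 'research'],
#                               tags=['DOC_0'], labels=['positive'])
#   model = LabelDoc2Vec([doc], size=50, negative=5, min_count=1, iter=5)
#   vec = model.infer_vector_label(['brand', 'new', 'document'])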
class DocvecsArray(utils.SaveLoad):
"""
Default storage of doc vectors during/after training, in a numpy array.
As the 'docvecs' property of a Doc2Vec model, allows access and
comparison of document vectors.
>>> docvec = d2v_model.docvecs[99]
>>> docvec = d2v_model.docvecs['SENT_99'] # if string tag used in training
>>> sims = d2v_model.docvecs.most_similar(99)
>>> sims = d2v_model.docvecs.most_similar('SENT_99')
>>> sims = d2v_model.docvecs.most_similar(docvec)
If only plain int tags are presented during training, the dict (of
string tag -> index) and list (of index -> string tag) stay empty,
saving memory.
Supplying a mapfile_path (as by initializing a Doc2Vec model with a
'docvecs_mapfile' value) will use a pair of memory-mapped
files as the array backing for doctag_syn0/doctag_syn0_lockf values.
The Doc2Vec model automatically uses this class, but a future alternative
implementation, based on another persistence mechanism like LMDB, LevelDB,
or SQLite, should also be possible.
"""
def __init__(self, mapfile_path=None):
self.doctags = {} # string -> Doctag (only filled if necessary)
self.max_rawint = -1 # highest rawint-indexed doctag
self.offset2doctag = [] # int offset-past-(max_rawint+1) -> String (only filled if necessary)
self.count = 0
self.mapfile_path = mapfile_path
def note_doctag(self, key, document_no, document_length):
"""Note a document tag during initial corpus scan, for structure sizing."""
if isinstance(key, int):
self.max_rawint = max(self.max_rawint, key)
else:
if key in self.doctags:
self.doctags[key] = self.doctags[key].repeat(document_length)
else:
self.doctags[key] = Doctag(len(self.offset2doctag), document_length, 1)
self.offset2doctag.append(key)
self.count = self.max_rawint + 1 + len(self.offset2doctag)
def indexed_doctags(self, doctag_tokens):
"""Return indexes and backing-arrays used in training examples."""
return ([self._int_index(index) for index in doctag_tokens if index in self],
self.doctag_syn0, self.doctag_syn0_lockf, doctag_tokens)
def trained_item(self, indexed_tuple):
"""Persist any changes made to the given indexes (matching tuple previously
returned by indexed_doctags()); a no-op for this implementation"""
pass
def _int_index(self, index):
"""Return int index for either string or int index"""
if isinstance(index, int):
return index
else:
return self.max_rawint + 1 + self.doctags[index].offset
def _key_index(self, i_index, missing=None):
"""Return string index for given int index, if available"""
warnings.warn("use DocvecsArray.index_to_doctag", DeprecationWarning)
return self.index_to_doctag(i_index)
def index_to_doctag(self, i_index):
"""Return string key for given i_index, if available. Otherwise return raw int doctag (same int)."""
candidate_offset = i_index - self.max_rawint - 1
if 0 <= candidate_offset < len(self.offset2doctag):
return self.offset2doctag[candidate_offset]
else:
return i_index
def __getitem__(self, index):
"""
Accept a single key (int or string tag) or list of keys as input.
If a single string or int, return designated tag's vector
representation, as a 1D numpy array.
If a list, return designated tags' vector representations as a
2D numpy array: #tags x #vector_size.
"""
if isinstance(index, string_types + (int,)):
return self.doctag_syn0[self._int_index(index)]
return vstack([self[i] for i in index])
def __len__(self):
return self.count
def __contains__(self, index):
if isinstance(index, int):
return index < self.count
else:
return index in self.doctags
def borrow_from(self, other_docvecs):
self.count = other_docvecs.count
self.doctags = other_docvecs.doctags
self.offset2doctag = other_docvecs.offset2doctag
def clear_sims(self):
self.doctag_syn0norm = None
def estimated_lookup_memory(self):
"""Estimated memory for tag lookup; 0 if using pure int tags."""
return 60 * len(self.offset2doctag) + 140 * len(self.doctags)
def reset_weights(self, model):
length = max(len(self.doctags), self.count)
if self.mapfile_path:
self.doctag_syn0 = np_memmap(self.mapfile_path+'.doctag_syn0', dtype=REAL,
mode='w+', shape=(length, model.vector_size))
self.doctag_syn0_lockf = np_memmap(self.mapfile_path+'.doctag_syn0_lockf', dtype=REAL,
mode='w+', shape=(length,))
self.doctag_syn0_lockf.fill(1.0)
else:
self.doctag_syn0 = empty((length, model.vector_size), dtype=REAL)
self.doctag_syn0_lockf = ones((length,), dtype=REAL) # zeros suppress learning
for i in xrange(length):
# construct deterministic seed from index AND model seed
seed = "%d %s" % (model.seed, self.index_to_doctag(i))
self.doctag_syn0[i] = model.seeded_vector(seed)
def init_sims(self, replace=False):
"""
Precompute L2-normalized vectors.
If `replace` is set, forget the original vectors and only keep the normalized
ones = saves lots of memory!
Note that you **cannot continue training or inference** after doing a replace.
The model becomes effectively read-only = you can call `most_similar`, `similarity`
etc., but not `train` or `infer_vector`.
"""
if getattr(self, 'doctag_syn0norm', None) is None or replace:
logger.info("precomputing L2-norms of doc weight vectors")
if replace:
for i in xrange(self.doctag_syn0.shape[0]):
self.doctag_syn0[i, :] /= sqrt((self.doctag_syn0[i, :] ** 2).sum(-1))
self.doctag_syn0norm = self.doctag_syn0
else:
if self.mapfile_path:
self.doctag_syn0norm = np_memmap(
self.mapfile_path+'.doctag_syn0norm', dtype=REAL,
mode='w+', shape=self.doctag_syn0.shape)
else:
self.doctag_syn0norm = empty(self.doctag_syn0.shape, dtype=REAL)
np_divide(self.doctag_syn0, sqrt((self.doctag_syn0 ** 2).sum(-1))[..., newaxis], self.doctag_syn0norm)
def most_similar(self, positive=[], negative=[], topn=10, clip_start=0, clip_end=None, indexer=None):
"""
Find the top-N most similar docvecs known from training. Positive docs contribute
positively towards the similarity, negative docs negatively.
This method computes cosine similarity between a simple mean of the projection
weight vectors of the given docs. Docs may be specified as vectors, integer indexes
of trained docvecs, or if the documents were originally presented with string tags,
by the corresponding tags.
The 'clip_start' and 'clip_end' allow limiting results to a particular contiguous
range of the underlying doctag_syn0norm vectors. (This may be useful if the ordering
there was chosen to be significant, such as more popular tag IDs in lower indexes.)
"""
self.init_sims()
clip_end = clip_end or len(self.doctag_syn0norm)
if isinstance(positive, string_types + integer_types) and not negative:
# allow calls like most_similar('dog'), as a shorthand for most_similar(['dog'])
positive = [positive]
# add weights for each doc, if not already present; default to 1.0 for positive and -1.0 for negative docs
positive = [
(doc, 1.0) if isinstance(doc, string_types + (ndarray,) + integer_types)
else doc for doc in positive
]
negative = [
(doc, -1.0) if isinstance(doc, string_types + (ndarray,) + integer_types)
else doc for doc in negative
]
# compute the weighted average of all docs
all_docs, mean = set(), []
for doc, weight in positive + negative:
if isinstance(doc, ndarray):
mean.append(weight * doc)
elif doc in self.doctags or doc < self.count:
mean.append(weight * self.doctag_syn0norm[self._int_index(doc)])
all_docs.add(self._int_index(doc))
else:
raise KeyError("doc '%s' not in trained set" % doc)
if not mean:
raise ValueError("cannot compute similarity with no input")
mean = matutils.unitvec(array(mean).mean(axis=0)).astype(REAL)
if indexer is not None:
return indexer.most_similar(mean, topn)
dists = dot(self.doctag_syn0norm[clip_start:clip_end], mean)
if not topn:
return dists
best = matutils.argsort(dists, topn=topn + len(all_docs), reverse=True)
# ignore (don't return) docs from the input
result = [(self.index_to_doctag(sim), float(dists[sim])) for sim in best if sim not in all_docs]
return result[:topn]
def doesnt_match(self, docs):
"""
Which doc from the given list doesn't go with the others?
(TODO: Accept vectors of out-of-training-set docs, as if from inference.)
"""
self.init_sims()
docs = [doc for doc in docs if doc in self.doctags or 0 <= doc < self.count] # filter out unknowns
logger.debug("using docs %s" % docs)
if not docs:
raise ValueError("cannot select a doc from an empty list")
vectors = vstack(self.doctag_syn0norm[self._int_index(doc)] for doc in docs).astype(REAL)
mean = matutils.unitvec(vectors.mean(axis=0)).astype(REAL)
dists = dot(vectors, mean)
return sorted(zip(dists, docs))[0][1]
def similarity(self, d1, d2):
"""
Compute cosine similarity between two docvecs in the trained set, specified by int index or
string tag. (TODO: Accept vectors of out-of-training-set docs, as if from inference.)
"""
return dot(matutils.unitvec(self[d1]), matutils.unitvec(self[d2]))
def n_similarity(self, ds1, ds2):
"""
Compute cosine similarity between two sets of docvecs from the trained set, specified by int
index or string tag. (TODO: Accept vectors of out-of-training-set docs, as if from inference.)
"""
v1 = [self[doc] for doc in ds1]
v2 = [self[doc] for doc in ds2]
return dot(matutils.unitvec(array(v1).mean(axis=0)), matutils.unitvec(array(v2).mean(axis=0)))
def similarity_unseen_docs(self, model, doc_words1, doc_words2, alpha=0.1, min_alpha=0.0001, steps=5):
"""
Compute cosine similarity between two post-bulk out of training documents.
Document should be a list of (word) tokens.
"""
d1 = model.infer_vector(doc_words=doc_words1, alpha=alpha, min_alpha=min_alpha, steps=steps)
d2 = model.infer_vector(doc_words=doc_words2, alpha=alpha, min_alpha=min_alpha, steps=steps)
return dot(matutils.unitvec(d1), matutils.unitvec(d2))
class Doctag(namedtuple('Doctag', 'offset, word_count, doc_count')):
"""A string document tag discovered during the initial vocabulary
scan. (The document-vector equivalent of a Vocab object.)
Will not be used if all presented document tags are ints.
The offset is only the true index into the doctags_syn0/doctags_syn0_lockf
if-and-only-if no raw-int tags were used. If any raw-int tags were used,
string Doctag vectors begin at index (max_rawint + 1), so the true index is
(rawint_index + 1 + offset). See also DocvecsArray.index_to_doctag().
"""
__slots__ = ()
def repeat(self, word_count):
return self._replace(word_count=self.word_count + word_count, doc_count=self.doc_count + 1)
class LabelDoc2Vec(Word2Vec):
"""Class for training, using and evaluating neural networks described in http://arxiv.org/pdf/1405.4053v2.pdf"""
def __init__(self, documents=None, size=300, alpha=0.025, window=8, min_count=5,
max_vocab_size=None, sample=0, seed=1, workers=1, min_alpha=0.0001,
dm=1, hs=1, negative=0, dbow_words=0, dm_mean=0, dm_concat=0, dm_tag_count=1,
docvecs=None, docvecs_mapfile=None, labelvecs=None, labelvecs_mapfile=None,
comment=None, trim_rule=None, **kwargs):
"""
Initialize the model from an iterable of `documents`. Each document is a
LabeledTaggedDocument object that will be used for training.
The `documents` iterable can be simply a list of LabeledTaggedDocument elements, but for larger corpora,
consider an iterable that streams the documents directly from disk/network.
If you don't supply `documents`, the model is left uninitialized -- use if
you plan to initialize it in some other way.
`dm` defines the training algorithm. By default (`dm=1`), 'distributed memory' (PV-DM) is used.
Otherwise, `distributed bag of words` (PV-DBOW) is employed.
`size` is the dimensionality of the feature vectors.
`window` is the maximum distance between the predicted word and context words used for prediction
within a document.
`alpha` is the initial learning rate (will linearly drop to zero as training progresses).
`seed` = for the random number generator.
Note that for a fully deterministically-reproducible run, you must also limit the model to
a single worker thread, to eliminate ordering jitter from OS thread scheduling. (In Python
3, reproducibility between interpreter launches also requires use of the PYTHONHASHSEED
environment variable to control hash randomization.)
`min_count` = ignore all words with total frequency lower than this.
`max_vocab_size` = limit RAM during vocabulary building; if there are more unique
words than this, then prune the infrequent ones. Every 10 million word types
need about 1GB of RAM. Set to `None` for no limit (default).
`sample` = threshold for configuring which higher-frequency words are randomly downsampled;
default is 0 (off), useful value is 1e-5.
`workers` = use this many worker threads to train the model (=faster training with multicore machines).
`iter` = number of iterations (epochs) over the corpus. The default inherited from Word2Vec is 5,
but values of 10 or 20 are common in published 'Paragraph Vector' experiments.
`hs` = if 1 (default), hierarchical sampling will be used for model training (else set to 0).
`negative` = if > 0, negative sampling will be used, the int for negative
specifies how many "noise words" should be drawn (usually between 5-20).
`dm_mean` = if 0 (default), use the sum of the context word vectors. If 1, use the mean.
Only applies when dm is used in non-concatenative mode.
`dm_concat` = if 1, use concatenation of context vectors rather than sum/average;
default is 0 (off). Note concatenation results in a much-larger model, as the input
        is no longer the size of one (sampled or arithmetically combined) word vector, but the
size of the tag(s) and all words in the context strung together.
`dm_tag_count` = expected constant number of document tags per document, when using
dm_concat mode; default is 1.
`dbow_words` if set to 1 trains word-vectors (in skip-gram fashion) simultaneous with DBOW
doc-vector training; default is 0 (faster training of doc-vectors only).
`trim_rule` = vocabulary trimming rule, specifies whether certain words should remain
in the vocabulary, be trimmed away, or handled using the default (discard if word count < min_count).
Can be None (min_count will be used), or a callable that accepts parameters (word, count, min_count) and
returns either util.RULE_DISCARD, util.RULE_KEEP or util.RULE_DEFAULT.
        Note: The rule, if given, is only used to prune vocabulary during build_vocab() and is not stored as part
of the model.
"""
super(LabelDoc2Vec, self).__init__(
size=size, alpha=alpha, window=window, min_count=min_count, max_vocab_size=max_vocab_size,
sample=sample, seed=seed, workers=workers, min_alpha=min_alpha,
sg=(1+dm) % 2, hs=hs, negative=negative, cbow_mean=dm_mean,
null_word=dm_concat, **kwargs)
self.dbow_words = dbow_words
self.dm_concat = dm_concat
self.dm_tag_count = dm_tag_count
if self.dm and self.dm_concat:
self.layer1_size = (self.dm_tag_count + (2 * self.window)) * self.vector_size
else:
self.layer1_size = size
self.docvecs = docvecs or DocvecsArray(docvecs_mapfile)
self.labelvecs = labelvecs or DocvecsArray(labelvecs_mapfile)
self.comment = comment
if documents is not None:
self.build_vocab(documents, trim_rule=trim_rule)
self.train(documents)
@property
def dm(self):
return not self.sg # opposite of SG
@property
def dbow(self):
return self.sg # same as SG
def clear_sims(self):
super(LabelDoc2Vec, self).clear_sims()
self.docvecs.clear_sims()
self.labelvecs.clear_sims()
def reset_weights(self):
if self.dm and self.dm_concat:
# expand l1 size to match concatenated tags+words length
self.layer1_size = (self.dm_tag_count + (2 * self.window)) * self.vector_size
logger.info("using concatenative %d-dimensional layer1" % (self.layer1_size))
super(LabelDoc2Vec, self).reset_weights()
self.docvecs.reset_weights(self)
self.labelvecs.reset_weights(self)
def reset_from(self, other_model):
"""Reuse shareable structures from other_model."""
self.docvecs.borrow_from(other_model.docvecs)
self.labelvecs.borrow_from(other_model.labelvecs)
super(LabelDoc2Vec, self).reset_from(other_model)
def scan_vocab(self, documents, progress_per=10000, trim_rule=None, update=False):
logger.info("collecting all words and their counts")
document_no = -1
total_words = 0
min_reduce = 1
interval_start = default_timer() - 0.00001 # guard against next sample being identical
interval_count = 0
checked_string_types = 0
vocab = defaultdict(int)
for document_no, document in enumerate(documents):
if not checked_string_types:
if isinstance(document.words, string_types):
logger.warn("Each 'words' should be a list of words (usually unicode strings)."
"First 'words' here is instead plain %s." % type(document.words))
checked_string_types += 1
if document_no % progress_per == 0:
interval_rate = (total_words - interval_count) / (default_timer() - interval_start)
logger.info("PROGRESS: at example #%i, processed %i words (%i/s), %i word types, %i tags, %i labels",
document_no, total_words, interval_rate, len(vocab), len(self.docvecs), len(self.labelvecs))
interval_start = default_timer()
interval_count = total_words
document_length = len(document.words)
for tag in document.tags:
self.docvecs.note_doctag(tag, document_no, document_length)
for label in document.labels:
self.labelvecs.note_doctag(label, document_no, document_length)
for word in document.words:
vocab[word] += 1
total_words += len(document.words)
if self.max_vocab_size and len(vocab) > self.max_vocab_size:
utils.prune_vocab(vocab, min_reduce, trim_rule=trim_rule)
min_reduce += 1
logger.info("collected %i word types and %i unique tags and %i unique labels from a corpus of %i examples and %i words",
len(vocab), len(self.docvecs), len(self.labelvecs), document_no + 1, total_words)
self.corpus_count = document_no + 1
self.raw_vocab = vocab
def _do_train_job(self, job, alpha, inits):
work, neu1 = inits
tally = 0
for doc in job:
indexed_doctags = self.docvecs.indexed_doctags(doc.tags)
indexed_doclabels = self.labelvecs.indexed_doctags(doc.labels)
doctag_indexes, doctag_vectors, doctag_locks, ignored = indexed_doctags
doclabel_indexes, doclabel_vectors, doclabel_locks, ignored = indexed_doclabels
if self.sg:
tally += train_label_document_dbow(self, doc.words, doctag_indexes, doclabel_indexes, alpha, work,
train_words=self.dbow_words, doctag_vectors=doctag_vectors,
doctag_locks=doctag_locks, doclabel_vectors=doclabel_vectors,
doclabel_locks=doclabel_locks)
elif self.dm_concat:
tally += train_label_document_dm_concat(self, doc.words, doctag_indexes, doclabel_indexes, alpha, work, neu1,
doctag_vectors=doctag_vectors, doctag_locks=doctag_locks,
doclabel_vectors=doclabel_vectors, doclabel_locks=doclabel_locks)
else:
tally += train_label_document_dm(self, doc.words, doctag_indexes, doclabel_indexes, alpha, work, neu1,
doctag_vectors=doctag_vectors, doctag_locks=doctag_locks,
doclabel_vectors=doclabel_vectors, doclabel_locks=doclabel_locks)
self.docvecs.trained_item(indexed_doctags)
self.labelvecs.trained_item(indexed_doclabels)
return tally, self._raw_word_count(job)
def _raw_word_count(self, job):
"""Return the number of words in a given job."""
return sum(len(sentence.words) for sentence in job)
def infer_vector_label(self, doc_words, alpha=0.1, min_alpha=0.0001, steps=5):
"""
Infer a vector for given post-bulk training document.
Document should be a list of (word) tokens.
"""
doctag_vectors = empty((1, self.vector_size), dtype=REAL)
doctag_vectors[0] = self.seeded_vector(' '.join(doc_words))
doctag_locks = ones(1, dtype=REAL)
doctag_indexes = [0]
doclabel_vectors = empty((1, self.vector_size), dtype=REAL)
doclabel_vectors[0] = self.seeded_vector(' '.join(doc_words))
doclabel_locks = ones(1, dtype=REAL)
doclabel_indexes = [0]
work = zeros(self.layer1_size, dtype=REAL)
if not self.sg:
neu1 = matutils.zeros_aligned(self.layer1_size, dtype=REAL)
for i in range(steps):
if self.sg:
train_label_document_dbow(self, doc_words, doctag_indexes, doclabel_indexes, alpha, work,
learn_words=False, learn_hidden=False,
doctag_vectors=doctag_vectors, doctag_locks=doctag_locks,
doclabel_vectors=doclabel_vectors, doclabel_locks=doclabel_locks)
elif self.dm_concat:
train_label_document_dm_concat(self, doc_words, doctag_indexes, doclabel_indexes, alpha, work, neu1,
learn_words=False, learn_hidden=False,
doctag_vectors=doctag_vectors, doctag_locks=doctag_locks,
doclabel_vectors=doclabel_vectors, doclabel_locks=doclabel_locks)
else:
train_label_document_dm(self, doc_words, doctag_indexes, doclabel_indexes, alpha, work, neu1,
learn_words=False, learn_hidden=False,
doctag_vectors=doctag_vectors, doctag_locks=doctag_locks,
doclabel_vectors=doclabel_vectors, doclabel_locks=doclabel_locks)
alpha = ((alpha - min_alpha) / (steps - i)) + min_alpha
return doctag_vectors[0]
def estimate_memory(self, vocab_size=None, report=None):
"""Estimate required memory for a model using current settings."""
report = report or {}
report['doctag_lookup'] = self.docvecs.estimated_lookup_memory()
report['doctag_syn0'] = self.docvecs.count * self.vector_size * dtype(REAL).itemsize
report['doclabel_lookup'] = self.labelvecs.estimated_lookup_memory()
report['doclabel_syn0'] = self.labelvecs.count * self.vector_size * dtype(REAL).itemsize
return super(LabelDoc2Vec, self).estimate_memory(vocab_size, report=report)
def __str__(self):
"""Abbreviated name reflecting major configuration paramaters."""
segments = []
if self.comment:
segments.append('"%s"' % self.comment)
if self.sg:
if self.dbow_words:
segments.append('dbow+w') # also training words
else:
segments.append('dbow') # PV-DBOW (skip-gram-style)
else: # PV-DM...
if self.dm_concat:
segments.append('dm/c') # ...with concatenative context layer
else:
if self.cbow_mean:
segments.append('dm/m')
else:
segments.append('dm/s')
segments.append('d%d' % self.vector_size) # dimensions
if self.negative:
segments.append('n%d' % self.negative) # negative samples
if self.hs:
segments.append('hs')
if not self.sg or (self.sg and self.dbow_words):
segments.append('w%d' % self.window) # window size, when relevant
if self.min_count > 1:
segments.append('mc%d' % self.min_count)
if self.sample > 0:
segments.append('s%g' % self.sample)
if self.workers > 1:
segments.append('t%d' % self.workers)
return '%s(%s)' % (self.__class__.__name__, ','.join(segments))
class TaggedBrownCorpus(object):
"""Iterate over documents from the Brown corpus (part of NLTK data), yielding
each document out as a LabeledTaggedDocument object."""
def __init__(self, dirname):
self.dirname = dirname
def __iter__(self):
for fname in os.listdir(self.dirname):
fname = os.path.join(self.dirname, fname)
if not os.path.isfile(fname):
continue
for item_no, line in enumerate(utils.smart_open(fname)):
line = utils.to_unicode(line)
# each file line is a single document in the Brown corpus
# each token is WORD/POS_TAG
token_tags = [t.split('/') for t in line.split() if len(t.split('/')) == 2]
# ignore words with non-alphabetic tags like ",", "!" etc (punctuation, weird stuff)
words = ["%s/%s" % (token.lower(), tag[:2]) for token, tag in token_tags if tag[:2].isalpha()]
if not words: # don't bother sending out empty documents
continue
yield LabeledTaggedDocument(words, ['%s_SENT_%s' % (fname, item_no)], [])
class TaggedLineDocument(object):
"""Simple format: one document = one line = one LabeledTaggedDocument object.
Words are expected to be already preprocessed and separated by whitespace,
tags are constructed automatically from the document line number."""
def __init__(self, source):
"""
`source` can be either a string (filename) or a file object.
Example::
documents = TaggedLineDocument('myfile.txt')
Or for compressed files::
documents = TaggedLineDocument('compressed_text.txt.bz2')
documents = TaggedLineDocument('compressed_text.txt.gz')
"""
self.source = source
def __iter__(self):
"""Iterate through the lines in the source."""
try:
# Assume it is a file-like object and try treating it as such
# Things that don't have seek will trigger an exception
self.source.seek(0)
for item_no, line in enumerate(self.source):
yield LabeledTaggedDocument(utils.to_unicode(line).split(), [item_no], [])
except AttributeError:
# If it didn't work like a file, use it as a string filename
with utils.smart_open(self.source) as fin:
for item_no, line in enumerate(fin):
yield LabeledTaggedDocument(utils.to_unicode(line).split(), [item_no], [])
| lgpl-2.1 | -8,098,908,495,692,754,000 | 48.760823 | 167 | 0.614911 | false |
grahamking/goodenergy | campaign/management/commands/ge_copy_campaign.py | 1 | 4417 | """Copies the contents (indicators and actions) of one campaign into another
"""
# Copyright 2010,2011 Good Energy Research Inc. <[email protected]>, <[email protected]>
#
# This file is part of Good Energy.
#
# Good Energy is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Good Energy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Good Energy. If not, see <http://www.gnu.org/licenses/>.
#
# Disable the pylint check for dynamically added attributes. This happens a lot
# with Django DB model usage.
# pylint: disable-msg=E1101
# pylint: disable-msg=E1103
from django.core.management.base import BaseCommand, CommandError
from profile.models import Profile
from campaign.models import Campaign
from indicator.models import IndicatorLikert, Option
from action.models import Action
def copy_indicators(from_campaign, to_campaign):
"""Copies indicators and options from from_campaign to to_campaign"""
for indicator in IndicatorLikert.objects.filter(campaign=from_campaign):
new_indicator, is_created = IndicatorLikert.objects.get_or_create(
campaign = to_campaign,
position = indicator.position,
name = indicator.name,
question = indicator.question,
is_synthetic = indicator.is_synthetic,
description = indicator.description)
for option in indicator.option_set.all():
Option.objects.get_or_create(
indicator = new_indicator,
value = option.value,
position = option.position)
if is_created:
print('Created indicator %s' % new_indicator)
def copy_actions(from_campaign, to_campaign, action_owner):
"""Copies Actions from from_campaign to to_campaign"""
for action in from_campaign.action_set.all():
new_action, is_created = Action.objects.get_or_create(
campaign = to_campaign,
title = action.title,
description = action.description,
learn_more = action.learn_more,
created_by = action_owner)
if is_created:
print('Created action %s' % new_action)
class Command(BaseCommand):
"""Copies the contents (indicators and actions) of one campaign into another"""
option_list = BaseCommand.option_list
help = 'Copies the contents (indicators and actions) from one campaign into another'
args = '<from_campaign_id> <to_campaign_id> <action_owner_username>'
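    # Illustrative invocation (added annotation, not part of the original file;
    # the campaign ids and username are made-up examples):
    #
    #     python manage.py ge_copy_campaign 3 7 admin
    #
    # copies the indicators and actions of campaign 3 into campaign 7 and marks
    # the copied actions as created_by the profile of user "admin".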
def handle(
self,
from_campaign_id=None,
to_campaign_id=None,
action_username=None,
*args,
**options):
"""Main entry point for command"""
if not from_campaign_id or not to_campaign_id or not action_username:
raise CommandError('Usage is ge_copy_campaign %s' % self.args)
try:
from_campaign = Campaign.objects.get(id=from_campaign_id)
except Campaign.DoesNotExist:
raise CommandError('FROM Campaign with id %s not found' % from_campaign_id)
try:
to_campaign = Campaign.objects.get(id=to_campaign_id)
except Campaign.DoesNotExist:
raise CommandError('TO Campaign with id %s not found' % to_campaign_id)
try:
action_user = Profile.objects.get(user__username=action_username)
except Profile.DoesNotExist:
raise CommandError("Profile for username %s not found" % action_username)
print('Copying contents of {from_c} into {to_c}.'.\
format(from_c=from_campaign, to_c = to_campaign))
confirm = raw_input('Continue? [y|n]')
if confirm != 'y':
raise CommandError('Abort')
copy_indicators(from_campaign, to_campaign)
copy_actions(from_campaign, to_campaign, action_user)
| agpl-3.0 | -979,541,411,536,322,800 | 37.745614 | 97 | 0.644329 | false |
dpnishant/appmon | tracer/android_tracer.py | 1 | 12107 | #!/usr/bin/python
###
# Copyright (c) 2016 Nishant Das Patnaik.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
import os, sys, frida, re, argparse, codecs, json
from termcolor import colored
print("""
___ .______ .______ .___ ___. ______ .__ __.
/ \ | _ \ | _ \ | \/ | / __ \ | \ | |
/ ^ \ | |_) | | |_) | | \ / | | | | | | \| |
/ /_\ \ | ___/ | ___/ | |\/| | | | | | | . ` |
/ _____ \ | | | | | | | | | `--" | | |\ |
/__/ \__\ | _| | _| |__| |__| \______/ |__| \__|
github.com/dpnishant
""")
parser = argparse.ArgumentParser()
parser.add_argument("-a", action="store", dest="app_name", default="",
help='''Process Name;
Accepts "com.twitter.android"''')
parser.add_argument("-c", action="store", dest="class_name", default="",
help='''Class Name;
Example: "OpenSSL*SHA*"''')
parser.add_argument("-m", action="store", dest="method_name", default="",
help='''Method Name;
Example: "*digest*";''')
parser.add_argument("-v", action="version", version="AppMon Android Method Tracer v0.2, Copyright 2016 Nishant Das Patnaik")
if len(sys.argv) < 2:
parser.print_help()
sys.exit(1)
results = parser.parse_args()
appName = results.app_name
className = results.class_name
classCandidates = []
method = results.method_name
if len(className) >= 1 and len(className) < 3:
print(colored("[ERROR] Class Name should be at least 3 characters", "red"))
sys.exit(1)
def on_message(message, data):
if message["type"] == "send":
payload = json.loads(message["payload"])
if payload["type"] == "classEnum":
if "overloads" in payload and "className" in payload and "methodName" in payload and "argCount" in payload:
classCandidates.append([ payload["className"], payload["overloads"], payload["methodName"], payload["argCount"] ])
print('[FOUND] "%s" in "%s"' % (colored(payload['methodName'], "yellow", attrs=["bold"]), colored(payload['className'], "magenta", attrs=["bold"])))
elif "className" in payload and not "overloads" in payload and not "methodName" in payload:
print('[FOUND] "%s"' % colored(payload['className'], "magenta", attrs=["bold"]))
elif payload['type'] == "methodTrace":
print("%(methodName)s \n\tCalled by: %(caller)s \n\tDefined at: %(className)s [%(overloadIndex)s]\n" % { "methodName": colored(payload['methodName'], "green", attrs=["bold"]), "caller": colored(payload['caller'].split("class ")[1], "blue", attrs=["bold"]), "className": colored(payload['className'], "magenta", attrs=["bold"]), "overloadIndex": colored(payload['overloadIndex'], "red", attrs=["bold"]) })
def build_search_script(className, method):
if className and className != "" and not method or method == "":
script = """Java.perform(function (){
function wildcard_search(string, search) {
var prevIndex = -1,
array = search.split('*'),
result = true;
for (var i = 0; i < array.length && result; i++) {
var index = string.indexOf(array[i]);
if (index == -1 || index < prevIndex) {
return false;
}
}
return result;
}
var classes = Java.enumerateLoadedClassesSync();
classes = classes.sort();
for(var i=0; i < classes.length; i++ ) {
if(wildcard_search(classes[i], '%(className)s')) {
var payload = {
"type": "classEnum",
"className": classes[i].replace(/\//gi, '.').replace(/\[/gi, '').replace(/^L/, '').replace(/;$/, '')
};
send(JSON.stringify(payload));
}
}
});
""" % { "className": className }
else:
script = """Java.perform(function() {
function wildcard_search(string, search) {
var prevIndex = -1,
array = search.split('*'),
result = true;
for (var i = 0; i < array.length && result; i++) {
var index = string.indexOf(array[i]);
if (index == -1 || index < prevIndex) {
return false;
}
}
return result;
}
Java.enumerateLoadedClasses({
onMatch: function(name) {
name = name.replace(/\//gi, '.').replace(/\[/gi, '').replace(/^L/, '').replace(/;$/, '');
if (wildcard_search(name, '%(className)s')) {
try {
var handle = Java.use(name);
var currentMethods = handle.class.getMethods();
for (var i = 0; i < currentMethods.length; i++) {
var argsCount = currentMethods[i].toString().split('(')[1].split(')')[0].split(',').length;
var items = currentMethods[i].toString().split('(')[0].split(' ');
var currentMethodName = items[items.length - 1];
currentMethodName = currentMethodName.replace(name.toString(), '');
if (currentMethodName.split('.').length-1 > 1) {
continue
} else {
currentMethodName = currentMethodName.replace('.', '');
}
if (wildcard_search(currentMethodName, '%(methodName)s')) {
if (currentMethodName in handle) {
var overload_count = handle[currentMethodName].overloads.length;
var payload = {
"type": "classEnum",
"className": name,
"overloads": overload_count,
"methodName": currentMethodName,
"argCount": argsCount
};
send(JSON.stringify(payload));
} else {
console.log(currentMethodName + ' not found in ' + name);
}
}
}
} catch (e) { console.log(e.stack); }
}
},
onComplete: function() {}
});
});
""" % { "className": className, "methodName": method }
return script
def begin_instrumentation(appName, script_source):
device = frida.get_usb_device()
try:
session = device.attach(appName)
except Exception as e:
print(colored('[ERROR]: ' + str(e), "red"))
sys.exit()
try:
script = session.create_script(script_source)
script.on('message', on_message)
script.load()
except Exception as e:
print(colored('[ERROR]: ' + str(e), "red"))
sys.exit()
def enumerate_overloads(overloadIndx, currentClassName, overload_count, methodName):
generated_overloads = []
template ="""
var class_%(overloadIndx)s = "%(currentClassName)s";
var c_%(overloadIndx)s = Java.use(class_%(overloadIndx)s);
c_%(overloadIndx)s.%(methodName)s.overloads[i].implementation = function(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15) {
var methodName = c_%(overloadIndx)s.%(methodName)s.overloads[i].toString().split("function")[1].split("{")[0].trim().split("(")[0];
var argTypes = getType(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15);
var args = "";
for (var i = 0; i < argTypes.length; i++) {
if (i != argTypes.length - 1) {
args += argTypes[i] + " arg" + i + ", ";
} else {
args += argTypes[i] + " arg" + i;
}
}
var methodName = methodName + "(" + args + ")";
var payload = {
"type": "methodTrace",
"methodName": methodName,
"className": class_%(overloadIndx)s,
"overloadIndex": ovrldindexplaceholder,
"caller": this.getClass().toString()
};
send(JSON.stringify(payload));
return this.%(methodName)s.overloads[i].apply(this, arguments);
};""" % { "overloadIndx": overloadIndx, "currentClassName": currentClassName, "methodName": methodName }
for index in range(0, overload_count):
current_template = template
current_template = current_template.replace("overloads[i]", "overloads[" + str(index) +"]")
current_template = current_template.replace("ovrldindexplaceholder", str(index))
generated_overloads.append(current_template)
return generated_overloads
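# Illustrative note (added annotation, not part of the original script): for a
# method with two overloads the loop above emits two copies of the template,
# one hooking `overloads[0]` and one hooking `overloads[1]`, each reporting its
# own overload index in the "methodTrace" payload consumed by on_message().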
def build_trace_script(candidates, methodName):
all_overloads = ""
generated_trace_scripts = []
for candidate in candidates:
overloadIndx = str(candidates.index(candidate))
for overload_variant in enumerate_overloads(overloadIndx, candidate[0], candidate[1], candidate[2]):
if overload_variant == "":
continue
all_overloads += overload_variant
tracer_template = """'use strict';
var checkType = function(arg) {
var type = "";
if (arg.getClass) {
type = arg.getClass().toString().split("class ")[1];
} else if (typeof arg === "string") {
type = "String";
} else if (typeof arg === "number") {
type = "Number";
} else if (typeof arg === "boolean") {
type = "Boolean";
} else if (arg.length) {
type = "Array";
} else if (typeof arg === "object") {
type = "Object";
} else {
type = typeof arg;
}
return type;
}
var getType = function(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15) {
var type = [];
if (a1) {
type.push(checkType(a1));
}
if (a2) {
type.push(checkType(a2));
}
if (a3) {
type.push(checkType(a3));
}
if (a4) {
type.push(checkType(a4));
}
if (a5) {
type.push(checkType(a5));
}
if (a6) {
type.push(checkType(a6));
}
if (a7) {
type.push(checkType(a7));
}
if (a8) {
type.push(checkType(a8));
}
if (a9) {
type.push(checkType(a9));
}
if (a10) {
type.push(checkType(a10));
}
if (a11) {
type.push(checkType(a11));
}
if (a12) {
type.push(checkType(a12));
}
if (a13) {
type.push(checkType(a13));
}
if (a14) {
type.push(checkType(a14));
}
if (a15) {
type.push(checkType(a15));
}
return type;
}
Java.perform(function () {
%s
});
""" % (all_overloads)
generated_trace_scripts.append(tracer_template)
return generated_trace_scripts
def generate_tracer_js(scriptName, txtScript):
script_dir = "__handlers__"
if not os.path.exists(script_dir):
os.makedirs(script_dir)
tracer_file_path = os.path.join(script_dir, scriptName + ".js")
with codecs.open(tracer_file_path, 'w', 'utf-8') as f:
f.write(txtScript)
return tracer_file_path
if not method or method == "":
print(colored('Enumerating loaded classes...', "green", attrs=["bold"]))
else:
print('Searching method "%s" in loaded classes...' % colored(method, "green", attrs=["bold"]))
begin_instrumentation(appName, build_search_script(className, method))
if len(classCandidates) > 0:
tracer_script_source = ""
for script in build_trace_script(classCandidates, method):
tracer_script_source += script
begin_instrumentation(appName, tracer_script_source)
print(colored("\nTracing methods...\n", "blue", attrs=["bold"]))
try:
sys.stdin.readlines()
except KeyboardInterrupt:
sys.exit()
else:
print(colored('Didn\'t find anything...quitting!', "red"))
sys.exit() | apache-2.0 | 7,836,066,327,096,006,000 | 36.255385 | 416 | 0.550508 | false |
oroszgy/spaCy.hu | spacy/tests/regression/test_issue910.py | 1 | 3547 | from __future__ import unicode_literals
import json
import os
import random
import contextlib
import shutil
import pytest
import tempfile
from pathlib import Path
import pathlib
from ...gold import GoldParse
from ...pipeline import EntityRecognizer
from ...en import English
try:
unicode
except NameError:
unicode = str
@pytest.fixture
def train_data():
return [
["hey",[]],
["howdy",[]],
["hey there",[]],
["hello",[]],
["hi",[]],
["i'm looking for a place to eat",[]],
["i'm looking for a place in the north of town",[[31,36,"location"]]],
["show me chinese restaurants",[[8,15,"cuisine"]]],
["show me chines restaurants",[[8,14,"cuisine"]]],
["yes",[]],
["yep",[]],
["yeah",[]],
["show me a mexican place in the centre",[[31,37,"location"], [10,17,"cuisine"]]],
["bye",[]],["goodbye",[]],
["good bye",[]],
["stop",[]],
["end",[]],
["i am looking for an indian spot",[[20,26,"cuisine"]]],
["search for restaurants",[]],
["anywhere in the west",[[16,20,"location"]]],
["central indian restaurant",[[0,7,"location"],[8,14,"cuisine"]]],
["indeed",[]],
["that's right",[]],
["ok",[]],
["great",[]]
]
@pytest.fixture
def additional_entity_types():
return ['cuisine', 'location']
@contextlib.contextmanager
def temp_save_model(model):
model_dir = Path(tempfile.mkdtemp())
# store the fine tuned model
with (model_dir / "config.json").open('w') as file_:
data = json.dumps(model.cfg)
if not isinstance(data, unicode):
data = data.decode('utf8')
file_.write(data)
model.model.dump((model_dir / 'model').as_posix())
yield model_dir
shutil.rmtree(model_dir.as_posix())
@pytest.mark.xfail
@pytest.mark.models
def test_issue910(train_data, additional_entity_types):
'''Test that adding entities and resuming training works passably OK.
There are two issues here:
    1) We have to re-add labels. This isn't very nice.
2) There's no way to set the learning rate for the weight update, so we
end up out-of-scale, causing it to learn too fast.
'''
nlp = English()
doc = nlp(u"I am looking for a restaurant in Berlin")
ents_before_train = [(ent.label_, ent.text) for ent in doc.ents]
# Fine tune the ner model
for entity_type in additional_entity_types:
if entity_type not in nlp.entity.cfg['actions']['1']:
nlp.entity.add_label(entity_type)
nlp.entity.learn_rate = 0.001
for itn in range(4):
random.shuffle(train_data)
for raw_text, entity_offsets in train_data:
doc = nlp.make_doc(raw_text)
nlp.tagger(doc)
gold = GoldParse(doc, entities=entity_offsets)
loss = nlp.entity.update(doc, gold)
with temp_save_model(nlp.entity) as model_dir:
# Load the fine tuned model
loaded_ner = EntityRecognizer.load(model_dir, nlp.vocab)
for entity_type in additional_entity_types:
if entity_type not in loaded_ner.cfg['actions']['1']:
loaded_ner.add_label(entity_type)
doc = nlp(u"I am looking for a restaurant in Berlin", entity=False)
nlp.tagger(doc)
loaded_ner(doc)
ents_after_train = [(ent.label_, ent.text) for ent in doc.ents]
assert ents_before_train == ents_after_train
| mit | 762,616,782,780,650,200 | 30.389381 | 94 | 0.581054 | false |
andrew-lundgren/gwpy | gwpy/cli/spectrum.py | 1 | 5127 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) Joseph Areeda (2015)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
#
""" Spectrum plots
"""
from cliproduct import CliProduct
class Spectrum(CliProduct):
def get_action(self):
"""Return the string used as "action" on command line."""
return 'spectrum'
def init_cli(self, parser):
"""Set up the argument list for this product"""
self.arg_chan1(parser)
self.arg_freq(parser)
self.arg_ax_xlf(parser)
self.arg_ax_logy(parser)
self.arg_plot(parser)
self.xaxis_is_freq = True
return
def get_ylabel(self, args):
"""Text for y-axis label"""
if args.nology:
ylabel = r'$\mathrm{log_{10} ASD}$ ' \
r'$\left( \frac{\mathrm{Counts}}' \
r'{\sqrt{\mathrm{Hz}}}\right)$'
else:
ylabel = r'$\mathrm{ASD}$ $\left( \frac{\mathrm{Counts}}' \
r'{\sqrt{\mathrm{Hz}}}\right)$'
return ylabel
def get_title(self):
"""Start of default super title, first channel is appended to it"""
return 'Spectrum: '
def get_xlabel(self):
xlabel = 'Frequency (Hz)'
return xlabel
def freq_is_y(self):
"""This plot puts frequency on the y-axis of the graph"""
return False
def gen_plot(self, arg_list):
"""Generate the plot from time series and arguments"""
self.is_freq_plot = True
fftlen = 1.0
if arg_list.secpfft:
fftlen = float(arg_list.secpfft)
self.secpfft = fftlen
ovlap = 0.5
if arg_list.overlap:
ovlap = float(arg_list.overlap)
self.overlap = ovlap
self.log(2, "Calculating spectrum secpfft: %.2f, overlap: %.2f" %
(fftlen, ovlap))
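        # Rough guide (added annotation, not part of the original module): with
        # secpfft=1.0 and overlap=0.5 each FFT spans 1 s of data and consecutive
        # FFTs overlap by 0.5 s, so e.g. a 64 s time series contributes roughly
        # (64 - 1) / 0.5 + 1 = 127 averaged segments to the ASD estimate.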
spectra = []
# calculate and plot the first spectrum
spectrum = self.timeseries[0].asd(fftlen, fftlen*ovlap)
spectra.append(spectrum)
fs = self.timeseries[0].sample_rate.value
self.fmin = 1/self.secpfft
self.fmax = fs/2
self.ymin = spectrum.value.min()
self.ymax = spectrum.value.max()
label = self.timeseries[0].channel.name
if len(self.start_list) > 1:
label += ", %s" % self.timeseries[0].epoch.gps
spectrum.name = label
self.plot = spectrum.plot()
# if we have more time series calculate and add to the first plot
if len(self.timeseries) > 1:
for idx in range(1, len(self.timeseries)):
specb = self.timeseries[idx].asd(fftlen, ovlap*fftlen)
spectra.append(specb)
fsb = self.timeseries[idx].sample_rate.value
self.fmax = max(self.fmax, fsb/2)
self.ymin = min(self.ymin, specb.value.min())
self.ymax = max(self.ymax, specb.value.max())
label = self.timeseries[idx].channel.name
if len(self.start_list) > 1:
label += ", %s" % self.timeseries[idx].epoch.gps
specb.name = label
self.plot.add_frequencyseries(specb)
self.log(2, ('Frequency range: [%f, %f]' % (self.fmin, self.fmax)))
# if the specified frequency limits adjust our ymin and ymax values
# at this point self.ymin and self.ymax represent the full spectra
if arg_list.fmin or arg_list.fmax:
import numpy
mymin = self.ymax # guaranteed to be >= anything we look at
mymax = self.ymin # guaranteed to be <= anything we look at
myfmin = self.fmin
myfmax = self.fmax
if arg_list.fmin:
myfmin = float(arg_list.fmin)
if arg_list.fmax:
myfmax = float(arg_list.fmax)
for idx in range(0, len(spectra)):
t = numpy.where(spectra[idx].frequencies.value >= myfmin)
if t[0].size:
strt = t[0][0]
t = numpy.where(spectra[idx].frequencies.value >= myfmax)
if t[0].size:
stop = t[0][0]
else:
stop = spectra[idx].frequencies.size - 1
mymin = min(mymin, numpy.min(spectra[idx].value[strt:stop]))
mymax = max(mymax, numpy.max(spectra[idx].value[strt:stop]))
self.ymin = mymin
self.ymax = mymax
return
| gpl-3.0 | 1,358,993,382,451,397,000 | 35.105634 | 80 | 0.561537 | false |
shpakoo/YAP | YAP_MiSeq.py | 1 | 29538 | ########################################################################################
## This file is a part of YAP package of scripts. https://github.com/shpakoo/YAP
## Distributed under the MIT license: http://www.opensource.org/licenses/mit-license.php
## Copyright (c) 2011-2013 Sebastian Szpakowski
########################################################################################
#################################################
## A pipeline for miseq data
## OTUs (certain regions of 16S and ITS supported)
## This is for demultiplexed MiSeq data
#################################################
import sys, os.path
from optparse import OptionParser, OptionGroup
from StepsLibrary import *
from StepsLibrary_EXP import *
from collections import defaultdict
from Queue import Queue
_author="Sebastian Szpakowski"
_date="2013/04/01"
_version="Version 5"
#################################################
## Classes
##
class InfoValidator:
def __init__(self,filename):
self.filename = filename
self.info = GeneralPurposeParser(filename, sep=",")
self.URI = "http://confluence/display/~sszpakow/YAP"
self.epilogue = "\n***\tPlease correct before continuing...\n***\t{0}\n".format(self.URI)
self.header = ""
self.tech = ""
self.files, self.barcodes ,self.primersF, self.primersR, self.sampleIDs = self.parse()
print ("***\tValidation complete, no obvious errors found.\n")
def parse(self):
counter=0;
print ("\n***\tValidating your template\n\t{0} ...\n".format(self.filename))
files = set()
barcodes = set()
primersF = set()
primersR = set()
sampleIDs = set()
for line in self.info:
if counter == 0:
self.header = line
has = ",".join (self.header)
needed454 = "path,file,barcode,forward,reverse,use,both,SampleID"
neededMiSeq = "path,file1,file2,forward,reverse,SampleID"
if has.lower().startswith( needed454.lower()) :
self.tech = "454"
elif has.lower().startswith( neededMiSeq.lower()) :
self.tech = "MiSeq"
else:
self.error( "Your template's header is incorrect or missing:\nhas :\t{0}\nneed (454):\t{1}\n\t(illumina)\t{2}".format(has, needed454, neededMiSeq), 101)
if not ("SampleID" in self.header):
self.error( "Your template has\n\t'{0}' instead of \n\t'SampleID' in the column's header.".format(self.header[7]), 102)
else:
files.add("{0}/{1}".format(line[0], line[1].strip()))
if self.tech == "454":
barcodes.add(line[2])
primersF.add(line[3])
primersR.add(line[4])
sampleIDs.add(line[7])
elif self.tech == "MiSeq":
if line[2].strip() != "":
files.add("{0}/{1}".format(line[0], line[2].strip()))
primersF.add(line[3])
primersR.add(line[4])
sampleIDs.add(line[5])
counter+=1
##### files
for f in files:
if not os.path.isfile(f):
self.error("file doesn't exist\n\t{0}".format(f), 103)
##### F primers
if len(primersF)>1:
self.error("Multiple forward primers specified:\n\t{0}\n\tnot supported in the current version of YAP".format("\n\t".join(primersF)), 104)
if list(primersF)[0].strip() =="" :
self.error("Forward primer should not be empty", 104)
        ##### R primers
        if len(primersR)>1:
self.error("Multiple reverse primers specified:\n\t{0}\n\tnot supported in the current version of YAP".format("\n\t".join(primersR)), 105)
if list(primersR)[0].strip() =="" :
self.error("Reverse primer should not be empty", 105)
##### sampleIDs
spaces = set()
ill = ("\\","/", "~", "-", "+", "#")
illegalchars = set()
digitstart = set()
for s in sampleIDs:
if s.count(" ")>0:
spaces.add(s)
for k in ill:
if s.count(k)>0:
illegalchars.add(s)
if s[0].isdigit():
digitstart.add(s)
hint = "*You could create two columns: \n\tSampleID, compliant with YAP (excel function: SUBSTITUTE()) and\n\tOriginalIDs, where any character is allowed."
if len(spaces)>0:
M = "The following samplesID(s) have spaces in them:\n\t"
for s in spaces:
M = "{0}'{1}',".format(M, s)
M = "{0}\n\n\t{1}".format(M, hint)
self.error(M, 106)
if len(illegalchars)>0:
M = "The following samplesID(s) have illegal chars in them {0}:\n\t".format(", ".join(ill))
for s in illegalchars:
M = "{0}'{1}',".format(M, s)
M = "{0}\n\n\t{1}".format(M, hint)
self.error(M, 107)
if len(digitstart)>0:
M = "The following samplesID(s) start with numbers:\n\t".format(", ".join(ill))
for s in digitstart:
M = "{0}'{1}',".format(M, s)
M = "{0}\n\n\t{1}".format(M, hint)
self.error(M, 108)
return (files, barcodes, primersF, primersR, sampleIDs)
def error(self, message, code):
print "!!!\t{0}\n{1}".format(message, self.epilogue)
sys.exit(code)
def getTrimpoints(self):
primers = self.primersF.union(self.primersR)
if "AGAGTTTGATYMTGGCTCAG" in primers and "ATTACCGCGGCTGCTGG" in primers:
return "1044", "13127", "1044-13127"
else:
return "0", "0", "unknown"
def getTech(self):
return self.tech
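## Illustrative MiSeq template rows (added annotation, not part of the original
## script); the header and column order are the ones validated above, while the
## paths, file names and sample names are made-up examples (the primer pair is
## the known 16S V1-V3 pair referenced in getTrimpoints):
##
##   path,file1,file2,forward,reverse,SampleID
##   /data/run1,sampleA_R1.fastq,sampleA_R2.fastq,AGAGTTTGATYMTGGCTCAG,ATTACCGCGGCTGCTGG,SampleA
##   /data/run1,sampleB_R1.fastq,sampleB_R2.fastq,AGAGTTTGATYMTGGCTCAG,ATTACCGCGGCTGCTGG,SampleB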
class InfoParserMiSeq:
def __init__(self, filename):
self.filename = filename
self.info = GeneralPurposeParser(filename, sep=",", skip=1)
self.store = list()
self.IDs = defaultdict(str)
self.primers = set()
self.forward = ""
self.reverse = ""
for line in self.info:
path = line[0]
file1 = line[1]
file2 = line[2]
forward = line[3]
reverse = line[4]
if path.endswith("/"):
path = path[:-1]
path1 = "%s/%s" % (path, file1)
path2 = "%s/%s" % (path, file2)
if file2=="":
self.store.append([path1])
self.IDs[path1] = line[5]
else:
self.store.append([path1, path2])
self.IDs[path1] = line[5]
self.IDs[path2] = line[5]
if reverse =="" or forward =="":
print "%s: please provide both primers for file(s):'%s' " % (x, ",".join(file1, file2))
sys.exit(11)
else:
self.primers.add(">_primer_F\n%s\n" % (forward))
self.primers.add(">_primer_F_rc\n%s\n" % (revComp(forward)))
self.primers.add(">_primer_R\n%s\n" % (reverse))
self.primers.add(">_primer_R_rc\n%s\n" % (revComp(reverse)))
self.forward = forward
self.reverse = reverse
def getFiles(self):
return (self.store)
def getSampleID(self, file):
return self.IDs[file]
def getPrimerFilename(self):
primerfilename = "primers.fasta"
if len(self.primers)>4:
print "The annotation file has more than 2 primers !"
for p in self.primers:
print "%s" % (p.strip())
sys.exit(15)
primerfile = open(primerfilename , "w")
for p in self.primers:
primerfile.write(p)
primerfile.close()
return (primerfilename)
#################################################
## Functions
##
def preprocess():
forprocessing = InfoParserMiSeq(options.fn_info)
PREPROCESS = list()
for files in forprocessing.getFiles():
INS = {}
if len(files) == 2:
M1 = files[0]
M2 = files[1]
sampleid = forprocessing.getSampleID(M1)
INS = {"mate1": ["%s~%s" % (M1, sampleid)], "mate2": ["%s~%s" % (M2, sampleid)]}
else:
M1 = files[0]
sampleid = forprocessing.getSampleID(M1)
INS = {"fastq": ["%s~%s" % (M1, sampleid)]}
#### import files
if options.head == 0:
x = FileImport(INS)
else:
x = FileMiniImport(INS, {"lines": options.head})
#### determine the encoding of fastQ
Q = getQ(M1)
if Q == "":
print (Q)
print "Q issues"
print files
sys.exit(1)
### generate quality information:
ARGS = {
"-h": options.minqual,
"-m": "",
"-v": ""
}
qc = SQA(ARGS, [x])
supplementary.append(qc)
### split into smaller files for parallelization
### 100,000 sequences (x4 since fastq)
ARGS = {
"types": "mate1,mate2,fastq",
"chunk": "400000"
}
P0 = FileSplit(ARGS, [x])
#### trim fastQ files
ARGS = {
"-h": options.minqual,
}
P1 = SQAtrim(ARGS, [P0])
#### overlap mates if available
if len(files)==2:
ARGS = {
"-M": "200",
"-p": Q,
"-r": "250"
}
P2 = Flash({}, ARGS, [P1])
else:
P2 = P1
#### convert fastq to fasta
ARGS = {
"-Q": Q
}
P3 = fastq2fasta(dict(), ARGS, [P2])
#### use fuzznuc to find cut primer sequences
ARGS = {
"-f": forprocessing.forward,
"-r": forprocessing.reverse,
"-m": "1"
}
P4 = PrimerClipper ( {}, ARGS, [P3])
### make fastA headers less problematic
P5 = FastaHeadHash({}, {}, [P4])
P6 = FileMerger("fasta", [P5])
P7 = MakeGroupsFile([P6], sampleid)
P8 = MakeNamesFile([P6])
PREPROCESS.extend([P6,P7,P8])
A1 = FileMerger("fasta,group,name", PREPROCESS)
args = {"mingroupmembers": options.mingroupmembers,
"report": "failing"}
A2 = GroupRetriever(args, [A1])
args = {
"force" : "fasta,name,group",
"find": "groups"
}
A3 = MothurStep("remove.groups", options.nodesize, dict(), args, [A2])
return (A3)
def finalize(input):
clean = CleanFasta(dict(), [input])
####### remove sequences that are too short, and with ambiguous bases
args = { "minlength" : "%s" % ( options.minlength ),
"maxambig" : "0",
"force": "fasta,name,group"}
clean2 = MothurStep("screen.seqs", options.nodesize, dict(), args, [clean])
args = {"mingroupmembers": 0,
"report": "passing"}
clean2a = GroupRetriever(args, [clean2])
OutputStep("2-NOISY", "groupstats,fasta,group,name,list,svg,pdf,tiff,taxsummary,globalsummary,localsummary", clean2a)
###################### CDHIT-454
#### unique and de-noise
args = {}
### strictly unique collapsing
if options.strictlevel==1:
args= {
"c" : "1.0",
"b" : "8",
"aS": "1.0",
"g" : "1",
"M" : "50000",
"T" : "%s" % (options.nodesize)
}
### aggressive de-noising:
elif options.strictlevel==2:
args= {
"c" : "0.98",
"b" : "10",
"aS": "0.0",
"g" : "1",
"M" : "0",
"T" : "%s" % (options.nodesize)
}
#### de-noise/unique collapse
CD_1 = CDHIT_454(options.nodesize, args, [clean2])
CD_2 = CDHIT_Mothurize(dict(), CD_1)
args = {"mingroupmembers": 0,
"report": "passing"}
CD_2a = GroupRetriever(args, [CD_2])
OutputStep("3-UNIQUE", "groupstats,tre,fasta,group,name,list,svg,pdf,tiff,taxsummary,globalsummary,localsummary", CD_2a)
#### add reference sequences to the merged experiments' file
CD_3 = FileMerger("fasta,name,group,qfile", [CD_2, REF_1, REF_2, REF_3])
#### align to reference database
inputs = {"reference": ["%s/%s" % (options.dir_anno, _alignment)] }
args = { "flip":"t",
"ksize": "8"
}
CD_4 = MothurStep("align.seqs", options.nodesize, inputs, args, [CD_3])
#### AlignmentSummary determining alignment trimming options
#### sets trimstart and trimend variables that can be used by in subsequent steps.
#### threshold means to keep the center part of the alignment with at least
#### the fraction of maximum coverage
args = {"ref": _referenceseqname, "thresh": options.dynthresh}
CD_5 = AlignmentSummary(args,[CD_4])
#### alignment plots
if _trimstart != _trimend:
args = {"ref": _referenceseqname,
"trimstart" : _trimstart,
"trimend" : _trimend
}
else:
args = {"ref": _referenceseqname,
"trimstart" : "find",
"trimend" : "find"
}
CD_6 = AlignmentPlot(args,[CD_5])
#supplementary.append(CD_5)
supplementary.append(CD_6)
###########################
args = {"mingroupmembers": 0,
"report": "passing"}
CD_4a = GroupRetriever(args, [CD_4])
OutputStep("4-ALIGNED", "groupstats,tre,fasta,group,name,list,svg,pdf,tiff,taxsummary,globalsummary,localsummary", CD_4a)
cleanCD = cleanup(CD_5)
args = {"mingroupmembers": 0,
"report": "passing"}
cleanCDa = GroupRetriever(args, [cleanCD])
OutputStep("5-CLEAN", "groupstats,fasta,group,name,list,svg,pdf,tiff,taxsummary,globalsummary,localsummary", cleanCDa)
clusterCD = CDHITCluster(cleanCD)
x = plotsAndStats(clusterCD)
INS = {"annotation" : [options.fn_info]}
ARGS = {"dist": "0.03"}
output1 = R_defaultplots(INS, ARGS, x)
output2 = AnnotateClusters(dict(), dict(), output1)
return (output2)
def cleanup(input):
### remove the "ref" group
args = {
"force" : "fasta,name,group",
"groups": "ref"
}
s15 = MothurStep("remove.groups", options.nodesize, dict(), args, [input])
####### remove sequences that are too short (bad alignment?)
args = {
"minlength" : "%s" % (options.minlength),
"maxambig" : "0",
"force" : "fasta,name,group" ,
}
s16 = MothurStep("screen.seqs", options.nodesize, dict(), args, [s15])
####### find chimeric sequences
toremove = list()
for ch in [ "uchime" ]:
### chimeras against reference
args = {"force" : "fasta,reference"}
inputs = {"reference": ["%s/%s" % (options.dir_anno, _alignment)] }
A = MothurStep("chimera.%s" % (ch),options.nodesize, inputs, args, [s16])
toremove.append(A)
if not options.quickmode:
### chimeras against self
args ={"force": "name,group,fasta"}
inputs = {}
A = MothurStep("chimera.%s" % (ch),options.nodesize, inputs, args, [s16])
toremove.append(A)
### merge all accnos files and remove ALL chimeras
allchimeras = FileMerger("accnos", toremove)
s17 = MothurStep("remove.seqs",options.nodesize, dict(), dict(), allchimeras)
#### if primer trimming points are not unknown
if _trimstart!=_trimend:
### primer cut
args = {
"s" : _trimstart,
"e": _trimend,
}
else:
args = {
"s" : "find:trimstart",
"e" : "find:trimend"
}
s18a = AlignmentTrim(dict(), args, [s17])
####### remove sequence fragments, bad alignments (?)
args = {}
if options.dynamic:
args = { "minlength" : "50" ,
"force": "fasta,name,group"}
else:
args = { "minlength" : "%s" % (options.minlength),
"force": "fasta,name,group"}
s18b = MothurStep("screen.seqs", options.nodesize, dict(), args, [s18a])
### build a tree
#s18b_tree = ClearcutTree({}, s18b)
####### remove empty columns
args = {"vertical" : "T"}
s19 = MothurStep("filter.seqs",options.nodesize, dict(), args, [s18b])
####### taxonomy
inputs = { "reference": ["%s/%s" % (options.dir_anno,_trainset)],
"taxonomy": ["%s/%s" % (options.dir_anno, _taxonomy )]
}
args = { "iters" : "100",
"cutoff": "60"
}
s20 = MothurStep("classify.seqs", options.nodesize, inputs, args, [s19])
### remove - and . for subsequent clustering efforts
s21 = CleanFasta(dict(), [s20])
return (s21)
def CDHITCluster(input):
cdhits = list()
for arg in ["0.99", "0.97", "0.95", "0.90"]:
args = {"c": arg,
"d" : "0",
"n": "8",
"g": "1",
"M": "10000",
"T": "%s" % (options.nodesize)
}
CD_1 = CDHIT_EST(options.nodesize, args, [input])
### make sth. analogous to mothur's labels
arg = 1.0 - float(arg)
if arg == 0:
arg = "unique"
else:
arg = "%s" % (arg)
args = {"mode": arg
}
CD_2 = CDHIT_Mothurize(args, CD_1)
CD_2a = CDHIT_Perls({}, CD_2)
cdhits.append(CD_2)
READY = FileMerger("list,rabund,sabund", cdhits)
SORTED = FileSort("list,rabund,sabund", READY)
return (SORTED)
def plotsAndStats(input):
### all groups!
args = {"mingroupmembers": 0,
"report": "passing"}
s23 = GroupRetriever(args, [input])
######## make a shared file
args = {"label" : "0.01-0.03-0.05-0.1", "find": "groups"}
s24 = MothurStep("make.shared", options.nodesize, dict(), args, [s23])
args = {
"label" : "0.01-0.03-0.05-0.1",
"basis" : "otu"
}
s25a= MothurStep("classify.otu", options.nodesize, dict(), args, [s24])
args = {
"taxonomy": "otu.taxonomy",
"taxsummary": "otu.taxsummary"
}
s25aa = FileType(args, [s25a])
args = {
"label" : "0.01-0.03-0.05-0.1",
"basis" : "sequence"
}
s25b = MothurStep("classify.otu", options.nodesize, dict(), args, [s24])
args = {
"taxonomy": "seq.taxonomy",
"taxsummary": "seq.taxsummary"
}
s25bb = FileType(args, [s25b])
args = {"force" : "list", "calc": "nseqs-sobs-simpson-invsimpson-chao-shannon-shannoneven-coverage"}
s26 = MothurStep("summary.single",options.nodesize, dict(), args, [s25bb])
args = {"summary": "globalsummary"}
s26a = FileType(args, [s26])
args = {"force" : "shared", "calc": "nseqs-sobs-simpson-invsimpson-chao-shannon-shannoneven-coverage"}
s27 = MothurStep("summary.single", options.nodesize, dict(), args, [s25bb])
args = {"summary": "localsummary"}
s27a = FileType(args, [s27])
args = {"force" : "shared", "calc": "thetayc-jclass-braycurtis"}
s28 = MothurStep("tree.shared", options.nodesize, dict(), args, [s24])
supplementary.append(s28)
args = {"force" : "list", "calc": "nseqs-sobs-simpson-invsimpson-chao-shannon-shannoneven-coverage", "freq": "0.01"}
s29 = MothurStep("rarefaction.single", options.nodesize, dict(), args, [s24])
#return ([s23, s24, s25aa, s25bb, s26a, s27a, s28, s29])
if options.quickmode:
return ([s23, s24, s25aa, s25bb, s26a, s27a, s28, s29])
else:
args = {"force" : "shared", "calc": "nseqs-sobs-simpson-invsimpson-chao-shannon-shannoneven-coverage", "freq": "0.05"}
s30 = MothurStep("rarefaction.single",options.nodesize, dict(), args, [s24])
return ([s23, s24, s25aa, s25bb, s26a, s27a, s28, s29, s30])
#################################################
## Arguments
##
parser = OptionParser()
group = OptionGroup(parser, "Required", description="Will not run without these !")
group.add_option("-P", "--PROJECT", dest="project", default="",
help="project code", metavar="#")
group.add_option("-E", "--EMAIL", dest="email", default="",
help="e-mail address", metavar="@")
group.add_option("-i", "--info", dest="fn_info", default="",
help="mapping: file, barcode, primer, sample information. File should be in CSV format", metavar="allinfo.csv")
parser.add_option_group(group)
group = OptionGroup(parser, "Optional Configuration", description="parameters to alter if necessary")
group.add_option("-Y", "--Yap", dest="mode", default="16S",
help="""Which Pipeline: 16S ITS [%default]""", metavar="#")
group.add_option("-D", "--dynamic", dest="dynamic", action = "store_true", default=False,
help="""If specified, alignment will be scanned for primer locations and trimmed accordingly. Otherwise a database of known primers and trimming points will be used. [%default]""", metavar="#")
group.add_option("-d", "--thresh", dest="dynthresh", default=0.75, type="float",
help="""in conjunction with -D, otherwise this is ignored. This allows to specify how much of the alignment to keep using the per-base coverage. The [%default] value indicates that ends of the alignment are trimmed until a base has a coverage of [%default] * peak coverage.""", metavar="#")
group.add_option("-a", "--annotations", dest="dir_anno", default="/usr/local/devel/ANNOTATION/sszpakow/ANNOTATION/",
help="directory that stores auxilliary files\n[%default]", metavar="annotations")
group.add_option("-S", "--SAMPLE", dest="sampletimes", default=0, type="int",
help="perform sub.sampling of all reads based on the number of reads in smallest group. if 0 - all reads are used. if 1 - the sampling will be performed once, if 2 or more, then 2 or more independent samplings are going to be performed.\n[%default]", metavar="#")
group.add_option("-m", "--minlen", dest="minlength", default=200, type="int",
help="what is the minimum length of reads to process\n[%default]", metavar="#")
group.add_option("-g", "--mingroupsize", dest="mingroupmembers", default=100, type="int",
help="after demultiplexing, discard groups with fewer reads than #\n[%default]", metavar="#")
group.add_option("-Q", "--minqual", dest="minqual", default=30, type="int",
help="Keep stretches of reads this good or better #\n[%default]", metavar="#")
group.add_option("-q", "--quick", dest="quickmode", action = "store_true", default=False,
help="""If specified, only single, reference DB based chimera checking will be used. [%default]""", metavar="#")
parser.add_option("-H", "--head", dest="head", default=0, type="int",
help="For dry runs, import only # of lines from the input files")
group.add_option("-x", "--strict", dest="strictlevel", default=2, type="int",
help="""how strict to be at pre-clustering:
1 very strict, conservative denoising (precluster identical sequences)
2 less strict, aggressive denoising (precluster using 98% similarity)
[%default]""", metavar="#")
parser.add_option_group(group)
group = OptionGroup(parser, "Technical", description="could be useful sometimes")
group.add_option("-C", "--NODESIZE", dest="nodesize", default=30,
help="maximum number of grid node's CPUs to use\n[%default]", metavar="#")
parser.add_option_group(group)
(options, args) = parser.parse_args()
#################################################
## Begin
##
if options.fn_info == "" or options.email == "" or options.project =="":
parser.print_help()
sys.exit(1)
if not options.mode in ("16S", "ITS"):
parser.print_help()
sys.exit(2)
### parameters specific to YAP incarnations
### 16S V1-V3
if options.mode=="16S":
### file in the annotations directory that has reference sequences
_referenceseq = "ecolis.fasta"
### which fasta ID use as the reference (if file has more than one)
_referenceseqname = "e_coli2_genbank"
### mothur's compendium of ALIGNED 16S sequences
_alignment = "silva.bacteria.fasta"
### mothur's curated version of RDP's curated train set and corresponding taxonomy
_trainset = "trainset9_032012.pds.fasta"
_taxonomy = "trainset9_032012.pds.tax"
### until automatic primer detection is implemented, these are coordinates of primers
### when aligned to the silva.bacteria.fasta (for in-silico PCR and subsequent primer trimming)
#_trimstart = "1044"
#_trimend = "13127"
### ITS NSI1 - NLB4 (barcoded)
elif options.mode=="ITS":
_referenceseq = "yeastITS.fasta"
_referenceseqname = "AF293_reference"
_alignment = "FungalITSseed.092012.1.aln.fasta"
_trainset = "FungalITSdb.092012.1.fasta"
_taxonomy = "FungalITSdb.092012.1.tax"
#_trimstart = "1716"
#_trimend = "2795"
else:
parser.print_help()
sys.exit(2)
validator = InfoValidator(options.fn_info)
_trimstart , _trimend, _region = validator.getTrimpoints()
_tech = validator.getTech()
BOH = init(options.project, options.email)
BOH.toPrint("-----", "GLOBAL", "We are in %s mode" % (options.mode))
BOH.toPrint("-----", "GLOBAL", "We will be processing %s data" % (_tech))
if options.dynamic or _region == "unknown":
BOH.toPrint("-----", "GLOBAL", "Dynamic alignment trimming enabled")
BOH.toPrint("-----", "GLOBAL", "Alignment will be trimmed using %s * peak coverage threshold" % (options.dynthresh))
_trimstart = "0"
_trimend = "0"
else:
BOH.toPrint("-----", "GLOBAL", "Alignment trimming predefined: %s - %s" % (_trimstart, _trimend))
#############################
#######################
##### reference:
inputs = {"fasta": ["%s/%s" % (options.dir_anno, _referenceseq)] }
REF = FileImport(inputs)
REF_1 = MakeNamesFile([REF])
REF_2 = MakeGroupsFile([REF], "ref")
REF_3 = MakeQualFile ([REF], "40" )
##############################
supplementary = list()
READY = preprocess()
OutputStep("1-PREPROCESS", "groupstats,fasta,group,name,list,pdf,svg,tiff,taxsummary,globalsummary,localsummary", READY)
if options.sampletimes==0:
tmp = finalize(READY)
y = R_rarefactions(dict(), dict(), tmp)
z = R_OTUplots(dict(), dict(), tmp)
supplementary.append(y)
supplementary.append(z)
OutputStep("6-ENTIRE", "groupstats,fasta,group,name,list,pdf,svg,tiff,taxsummary,globalsummary,localsummary,phylotax", [tmp])
OutputStep("8-TBC", "phylotax,group,list,fasta", [tmp])
#else:
# thefinalset = list()
# for k in xrange(0, options.sampletimes):
# args = {
# "force" : "fasta,name,group",
# "persample": "T",
# "iter": "%s" % (k)
# }
# sampled = MothurStep("sub.sample", options.nodesize, dict(), args, [READY])
# tmp = finalize(sampled)
# y = R_rarefactions(dict(), dict(), tmp)
# z = R_OTUplots(dict(), dict(), tmp)
# supplementary.append(y)
# supplementary.append(z)
# OutputStep("SAMPLED_%s" % (k), "groupstats,fasta,group,name,list,pdf,svg,tiff,taxsummary,globalsummary,localsummary", [tmp])
# thefinalset.append(tmp)
#
OutputStep("7-SUPP_PLOTS", "tre,pdf,png,svg,tiff,r_nseqs,rarefaction,r_simpson,r_invsimpson,r_chao,r_shannon,r_shannoneven,r_coverage", supplementary)
###########################################################################
##
##################################################
### Finish
##################################################
| mit | -7,766,473,033,472,721,000 | 36.201511 | 308 | 0.511138 | false |
varlog00/Sigil | src/Resource_Files/python3lib/xmlprocessor.py | 1 | 16367 | #!/usr/bin/env python3
import sys
import os
from sigil_bs4 import BeautifulSoup
from sigil_bs4.builder._lxml import LXMLTreeBuilderForXML
import re
from urllib.parse import unquote
from urllib.parse import urlsplit
from lxml import etree
from io import BytesIO
from opf_newparser import Opf_Parser
ASCII_CHARS = set(chr(x) for x in range(128))
URL_SAFE = set('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'abcdefghijklmnopqrstuvwxyz'
'0123456789' '#' '_.-/~')
IRI_UNSAFE = ASCII_CHARS - URL_SAFE
TEXT_FOLDER_NAME = "Text"
ebook_xml_empty_tags = ["meta", "item", "itemref", "reference", "content"]
def get_void_tags(mtype):
voidtags = []
if mtype == "application/oebps-package+xml":
voidtags = ["item", "itemref", "mediatype", "mediaType", "reference"]
elif mtype == "application/x-dtbncx+xml":
voidtags = ["meta", "reference", "content"]
elif mtype == "application/smil+xml":
voidtags = ["text", "audio"]
elif mtype == "application/oebps-page-map+xml":
voidtags = ["page"]
else:
voidtags = ebook_xml_empty_tags
return voidtags
# returns a quoted IRI (not a URI)
def quoteurl(href):
if isinstance(href,bytes):
href = href.decode('utf-8')
(scheme, netloc, path, query, fragment) = urlsplit(href, scheme="", allow_fragments=True)
if scheme != "":
scheme += "://"
href = href[len(scheme):]
result = []
for char in href:
if char in IRI_UNSAFE:
char = "%%%02x" % ord(char)
result.append(char)
return scheme + ''.join(result)
# unquotes url/iri
def unquoteurl(href):
if isinstance(href,bytes):
href = href.decode('utf-8')
href = unquote(href)
return href
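# Illustrative round trip (added annotation, not part of the original module):
#
#   quoteurl('Text/Chapter 1.xhtml')     -> 'Text/Chapter%201.xhtml'
#   unquoteurl('Text/Chapter%201.xhtml') -> 'Text/Chapter 1.xhtml'
#
# Characters in URL_SAFE (ASCII letters, digits and '#' '_.-/~') pass through
# untouched; everything else is percent-encoded by code point.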
def _remove_xml_header(data):
newdata = data
return re.sub(r'<\s*\?xml\s*[^\?>]*\?*>\s*','',newdata, count=1,flags=re.I)
def _well_formed(data):
result = True
newdata = data
if isinstance(newdata, str):
newdata = newdata.encode('utf-8')
try:
parser = etree.XMLParser(encoding='utf-8', recover=False)
tree = etree.parse(BytesIO(newdata), parser)
except Exception:
result = False
pass
return result
def _reformat(data):
newdata = data
if isinstance(newdata, str):
newdata = newdata.encode('utf-8')
parser = etree.XMLParser(encoding='utf-8', recover=True, ns_clean=True,
remove_comments=True, remove_pis=True, strip_cdata=True, resolve_entities=False)
tree = etree.parse(BytesIO(newdata), parser)
newdata = etree.tostring(tree.getroot(),encoding='UTF-8', xml_declaration=False)
return newdata
# does not support cdata sections yet
def _make_it_sane(data):
# first remove all comments as they may contain unescaped xml reserved characters
# that will confuse the remaining _make_it_sane regular expressions
comments = re.compile(r'''<!--.*?-->''', re.DOTALL)
data = comments.sub("",data)
# remove invalid tags that freak out lxml
emptytag = re.compile(r'''(<\s*[/]*\s*>)''')
data=emptytag.sub("", data);
# handle double tag start
badtagstart = re.compile(r'''(<[^>]*<)''')
extrastart = re.compile(r'''<\s*<''');
missingend = re.compile(r'''<\s*[a-zA-Z:]+[^<]*\s<''')
startinattrib = re.compile(r'''<\s*[a-z:A-Z]+[^<]*["'][^<"']*<''')
mo = badtagstart.search(data)
while mo is not None:
fixdata = data[mo.start(1):mo.end(1)]
mextra = extrastart.match(fixdata)
mmiss = missingend.match(fixdata)
mattr = startinattrib.match(fixdata)
if mextra is not None:
fixdata = fixdata[1:]
elif mattr is not None:
fixdata = fixdata[0:-1] + "<"
elif mmiss is not None:
fixdata = fixdata[0:-1].rstrip() + "> <"
else:
fixdata = "<" + fixdata[1:]
data = data[0:mo.start(1)] + fixdata + data[mo.end(1):]
mo = badtagstart.search(data)
# handle double tag end
badtagend = re.compile(r'''(>[^<]*>)''')
extraend = re.compile(r'''>\s*>''');
missingstart = re.compile(r'''>\s[^>]*[a-zA-Z:]+[^>]*>''')
endinattrib = re.compile(r'''>[^>]*["'][^>'"]*>''')
mo = badtagend.search(data)
while mo is not None:
fixdata = data[mo.start(1):mo.end(1)]
mextra = extraend.match(fixdata)
mmiss = missingstart.match(fixdata)
mattr = endinattrib.match(fixdata)
if mextra is not None:
fixdata = fixdata[0:-1]
elif mattr is not None:
fixdata = ">" + fixdata[1:]
elif mmiss is not None:
fixdata = "> <" + fixdata[1:].lstrip()
else:
fixdata = fixdata[0:-1] + ">"
data = data[0:mo.start(1)] + fixdata + data[mo.end(1):]
mo = badtagend.search(data)
return data
# ncx_text_pattern = re.compile(r'''(<text>)\s*(\S[^<]*\S)\s*(</text>)''',re.IGNORECASE)
# re.sub(ncx_text_pattern,r'\1\2\3',newdata)
# data is expected to be in unicode
def WellFormedXMLErrorCheck(data, mtype=""):
newdata = _remove_xml_header(data)
if isinstance(newdata, str):
newdata = newdata.encode('utf-8')
line = "-1"
column = "-1"
message = "well-formed"
try:
parser = etree.XMLParser(encoding='utf-8', recover=False)
tree = etree.parse(BytesIO(newdata), parser)
except Exception:
line = "0"
column = "0"
message = "exception"
if len(parser.error_log) > 0:
error = parser.error_log[0]
message = error.message
if isinstance(message, bytes):
message = message.decode('utf-8')
line = "%d" % error.line
column = "%d" % error.column
pass
result = [line, column, message]
return result
def IsWellFormedXML(data, mtype=""):
[line, column, message] = WellFormedXMLErrorCheck(data, mtype)
result = line == "-1"
return result
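# Illustrative results (added annotation, not part of the original module):
#
#   WellFormedXMLErrorCheck('<a><b/></a>')  -> ['-1', '-1', 'well-formed']
#   WellFormedXMLErrorCheck('<a><b></a>')   -> lxml's 1-based line/column plus
#                                              its tag-mismatch style message
#
# IsWellFormedXML() simply reports whether the returned line number is "-1".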
# data is expected to be in unicode
# note: bs4 with lxml for xml strips whitespace so always prettyprint xml
def repairXML(data, mtype="", indent_chars=" "):
newdata = _remove_xml_header(data)
# if well-formed - don't mess with it
if _well_formed(newdata):
return data
newdata = _make_it_sane(newdata)
if not _well_formed(newdata):
newdata = _reformat(newdata)
if mtype == "application/oebps-package+xml":
newdata = newdata.decode('utf-8')
newdata = Opf_Parser(newdata).rebuild_opfxml()
# lxml requires utf-8 on Mac, won't work with unicode
if isinstance(newdata, str):
newdata = newdata.encode('utf-8')
voidtags = get_void_tags(mtype)
xmlbuilder = LXMLTreeBuilderForXML(parser=None, empty_element_tags=voidtags)
soup = BeautifulSoup(newdata, features=None, from_encoding="utf-8", builder=xmlbuilder)
newdata = soup.decodexml(indent_level=0, formatter='minimal', indent_chars=indent_chars)
return newdata
def anchorNCXUpdates(data, originating_filename, keylist, valuelist):
data = _remove_xml_header(data)
# lxml on a Mac does not seem to handle full unicode properly, so encode as utf-8
data = data.encode('utf-8')
# rebuild serialized lookup dictionary
id_dict = {}
for i in range(0, len(keylist)):
id_dict[ keylist[i] ] = valuelist[i]
xmlbuilder = LXMLTreeBuilderForXML(parser=None, empty_element_tags=ebook_xml_empty_tags)
soup = BeautifulSoup(data, features=None, from_encoding="utf-8", builder=xmlbuilder)
original_filename_with_relative_path = TEXT_FOLDER_NAME + "/" + originating_filename
for tag in soup.find_all("content"):
if "src" in tag.attrs:
src = tag["src"]
if src.find(":") == -1:
parts = src.split('#')
if (parts is not None) and (len(parts) > 1) and (parts[0] == original_filename_with_relative_path) and (parts[1] != ""):
fragment_id = parts[1]
if fragment_id in id_dict:
attribute_value = TEXT_FOLDER_NAME + "/" + quoteurl(id_dict[fragment_id]) + "#" + fragment_id
tag["src"] = attribute_value
newdata = soup.decodexml(indent_level=0, formatter='minimal', indent_chars=" ")
return newdata
def performNCXSourceUpdates(data, currentdir, keylist, valuelist):
data = _remove_xml_header(data)
# lxml on a Mac does not seem to handle full unicode properly, so encode as utf-8
data = data.encode('utf-8')
# rebuild serialized lookup dictionary
updates = {}
for i in range(0, len(keylist)):
updates[ keylist[i] ] = valuelist[i]
xmlbuilder = LXMLTreeBuilderForXML(parser=None, empty_element_tags=ebook_xml_empty_tags)
soup = BeautifulSoup(data, features=None, from_encoding="utf-8", builder=xmlbuilder)
for tag in soup.find_all("content"):
if "src" in tag.attrs:
src = tag["src"]
if src.find(":") == -1:
parts = src.split('#')
url = parts[0]
fragment = ""
if len(parts) > 1:
fragment = parts[1]
bookrelpath = os.path.join(currentdir, unquoteurl(url))
bookrelpath = os.path.normpath(bookrelpath)
bookrelpath = bookrelpath.replace(os.sep, "/")
if bookrelpath in updates:
attribute_value = updates[bookrelpath]
if fragment != "":
attribute_value = attribute_value + "#" + fragment
attribute_value = quoteurl(attribute_value)
tag["src"] = attribute_value
newdata = soup.decodexml(indent_level=0, formatter='minimal', indent_chars=" ")
return newdata
def performOPFSourceUpdates(data, currentdir, keylist, valuelist):
data = _remove_xml_header(data)
# lxml on a Mac does not seem to handle full unicode properly, so encode as utf-8
data = data.encode('utf-8')
# rebuild serialized lookup dictionary
updates = {}
for i in range(0, len(keylist)):
updates[ keylist[i] ] = valuelist[i]
xmlbuilder = LXMLTreeBuilderForXML(parser=None, empty_element_tags=ebook_xml_empty_tags)
soup = BeautifulSoup(data, features=None, from_encoding="utf-8", builder=xmlbuilder)
for tag in soup.find_all(["item","reference","site"]):
if "href" in tag.attrs :
href = tag["href"]
if href.find(":") == -1 :
parts = href.split('#')
url = parts[0]
fragment = ""
if len(parts) > 1:
fragment = parts[1]
bookrelpath = os.path.join(currentdir, unquoteurl(url))
bookrelpath = os.path.normpath(bookrelpath)
bookrelpath = bookrelpath.replace(os.sep, "/")
if bookrelpath in updates:
attribute_value = updates[bookrelpath]
if fragment != "":
attribute_value = attribute_value + "#" + fragment
attribute_value = quoteurl(attribute_value)
tag["href"] = attribute_value
newdata = soup.decodexml(indent_level=0, formatter='minimal', indent_chars=" ")
return newdata
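# Illustrative call (added annotation, not part of the original module; the
# folder layout is an assumption): if the OPF lives in OEBPS/ and the book file
# OEBPS/Text/Old.xhtml was renamed to OEBPS/Text/New.xhtml, then
#
#   performOPFSourceUpdates(opf_text, "OEBPS", ["OEBPS/Text/Old.xhtml"], ["Text/New.xhtml"])
#
# rewrites every matching href to "Text/New.xhtml": keylist holds book-relative
# paths of the renamed resources and valuelist the replacement hrefs, in order.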
# Note xml_updates has paths relative to the OEBPS folder as base
# As if they were meant only for OEBPS/content.opf and OEBPS/toc.ncx
# So adjust them to be relative to the Misc directory where .smil files live in Sigil
def performSMILUpdates(data, currentdir, keylist, valuelist):
data = _remove_xml_header(data)
# lxml on a Mac does not seem to handle full unicode properly, so encode as utf-8
data = data.encode('utf-8')
# rebuild serialized lookup dictionary of xml_updates, properly adjusted
updates = {}
for i in range(0, len(keylist)):
updates[ keylist[i] ] = "../" + valuelist[i]
xml_empty_tags = ["text", "audio"]
xmlbuilder = LXMLTreeBuilderForXML(parser=None, empty_element_tags=xml_empty_tags)
soup = BeautifulSoup(data, features=None, from_encoding="utf-8", builder=xmlbuilder)
for tag in soup.find_all(["body","seq","text","audio"]):
for att in ["src", "epub:textref"]:
if att in tag.attrs :
ref = tag[att]
if ref.find(":") == -1 :
parts = ref.split('#')
url = parts[0]
fragment = ""
if len(parts) > 1:
fragment = parts[1]
bookrelpath = os.path.join(currentdir, unquoteurl(url))
bookrelpath = os.path.normpath(bookrelpath)
bookrelpath = bookrelpath.replace(os.sep, "/")
if bookrelpath in updates:
attribute_value = updates[bookrelpath]
if fragment != "":
attribute_value = attribute_value + "#" + fragment
attribute_value = quoteurl(attribute_value)
tag[att] = attribute_value
newdata = soup.decodexml(indent_level=0, formatter='minimal', indent_chars=" ")
return newdata
# Note xml_updates has urls/iris relative to the OEBPS folder as base
# As if they were meant only for OEBPS/content.opf and OEBPS/toc.ncx
# So adjust them to be relative to the Misc directory where page-map.xml lives
def performPageMapUpdates(data, currentdir, keylist, valuelist):
data = _remove_xml_header(data)
# lxml on a Mac does not seem to handle full unicode properly, so encode as utf-8
data = data.encode('utf-8')
# rebuild serialized lookup dictionary of xml_updates properly adjusted
updates = {}
for i in range(0, len(keylist)):
updates[ keylist[i] ] = "../" + valuelist[i]
xml_empty_tags = ["page"]
xmlbuilder = LXMLTreeBuilderForXML(parser=None, empty_element_tags=xml_empty_tags)
soup = BeautifulSoup(data, features=None, from_encoding="utf-8", builder=xmlbuilder)
for tag in soup.find_all(["page"]):
for att in ["href"]:
if att in tag.attrs :
ref = tag[att]
if ref.find(":") == -1 :
parts = ref.split('#')
url = parts[0]
fragment = ""
if len(parts) > 1:
fragment = parts[1]
bookrelpath = os.path.join(currentdir, unquoteurl(url))
bookrelpath = os.path.normpath(bookrelpath)
bookrelpath = bookrelpath.replace(os.sep, "/")
if bookrelpath in updates:
attribute_value = updates[bookrelpath]
if fragment != "":
attribute_value = attribute_value + "#" + fragment
attribute_value = quoteurl(attribute_value)
tag[att] = attribute_value
newdata = soup.decodexml(indent_level=0, formatter='minimal', indent_chars=" ")
return newdata
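# --- Illustrative usage sketch (editor's addition, not part of the original plugin) ---
# keylist/valuelist form a serialized dictionary mapping old book-relative paths to new
# ones; the file names below are hypothetical and only show how the helpers are driven.
def _example_opf_update(opf_data):
    keylist = ["Text/Section0001.xhtml"]
    valuelist = ["Text/Chapter0001.xhtml"]
    # currentdir is the directory of the file being rewritten, relative to the book root
    return performOPFSourceUpdates(opf_data, "", keylist, valuelist)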
def main():
argv = sys.argv
opfxml = '''
<?xml version="1.0" encoding="utf-8" standalone="yes"?>
<package xmlns="http://www.idpf.org/2007/opf" unique-identifier="BookId" version="2.0">
<metadata xmlns:mydc="http://purl.org/dc/elements/1.1/" xmlns:opf="http://www.idpf.org/2007/opf">
<mydc:identifier id="BookId" opf:scheme="UUID">urn:uuid:a418a8f1-dcbc-4c5d-a18f-533765e34ee8</mydc:identifier>
</metadata>
<manifest>
<!-- this has a lot of bad characters & < > \" \'-->
<item href="toc.ncx" id="ncx" media-type="application/x-dtbncx+xml" />
<item href="Text/Section0001.xhtml" id="Section0001.xhtml" media-type="application/xhtml+xml" />
</manifest>
<
<spine toc="ncx">
<itemref idref="Section0001.xhtml">
</spine>
<text>
this is a bunch of nonsense
</text>
<text>
this is a bunch of nonsense 1
</text>
<text>
this is a bunch of nonsense 2
</text>
<guide />
</package>
'''
print(argv)
if not argv[-1].endswith("xmlprocessor.py"):
with open(argv[-1],'rb') as f:
opfxml = f.read();
if isinstance(opfxml, bytes):
opfxml = opfxml.decode('utf-8')
print(repairXML(opfxml, "application/oebps-package+xml"))
return 0
if __name__ == '__main__':
sys.exit(main())
| gpl-3.0 | -1,157,109,961,533,576,400 | 39.412346 | 136 | 0.587646 | false |
rdcrt/pystarling | test/api_objects/test_Account.py | 1 | 1419 | import dateutil
import pytest
from pystarling.api_objects.Account import Account
class TestAccount(object):
test_data = {
'id': 'ee8152d7-6ff2-4f79-b9de-39861bdec427',
'number': '99999999',
'sortCode': '608371',
'iban': 'GB26SRLG60837199999999',
'bic': 'SRLGGB2L',
'currency': 'GBP',
'createdAt': '2017-05-16T12:00:00.000Z'
}
incomplete_data = {
'id': 'ee8152d7-6ff2-4f79-b9de-39861bdec427'
}
def test_incomplete_data_raises_error(self):
with pytest.raises(KeyError):
Account(self.incomplete_data)
def test_data_parsed_correctly(self):
account = Account(self.test_data)
assert account.id == 'ee8152d7-6ff2-4f79-b9de-39861bdec427'
assert account.sort_code == '608371'
assert account.number == '99999999'
assert account.iban == 'GB26SRLG60837199999999'
assert account.bic == 'SRLGGB2L'
assert account.currency == 'GBP'
assert account.created_at == dateutil.parser.parse('2017-05-16T12:00:00.000Z')
def test_get_readable_sort_code_formatted_correctly(self):
account = Account(self.test_data)
assert account.get_readable_sort_code() == '60-83-71'
def test_get_readable_iban_formatted_correctly(self):
account = Account(self.test_data)
assert account.get_readable_iban() == "GB26 SRLG 6083 7199 9999 99"
| mit | 2,434,672,469,967,569,000 | 32.785714 | 86 | 0.639183 | false |
APMonitor/arduino | 2_Regression/2nd_order_MIMO/GEKKO/tclab_2nd_order_linear.py | 1 | 3283 | import numpy as np
import time
import matplotlib.pyplot as plt
import random
# get gekko package with:
# pip install gekko
from gekko import GEKKO
import pandas as pd
# import data
data = pd.read_csv('data.txt')
tm = data['Time (sec)'].values
Q1s = data[' Heater 1'].values
Q2s = data[' Heater 2'].values
T1s = data[' Temperature 1'].values
T2s = data[' Temperature 2'].values
#########################################################
# Initialize Model as Estimator
#########################################################
m = GEKKO(name='tclab-mhe')
#m.server = 'http://127.0.0.1' # if local server is installed
# 120 second time horizon, 40 steps
m.time = tm
# Parameters to Estimate
K1 = m.FV(value=0.5)
K1.STATUS = 1
K1.FSTATUS = 0
K1.LOWER = 0.1
K1.UPPER = 1.0
K2 = m.FV(value=0.3)
K2.STATUS = 1
K2.FSTATUS = 0
K2.LOWER = 0.1
K2.UPPER = 1.0
K3 = m.FV(value=0.1)
K3.STATUS = 1
K3.FSTATUS = 0
K3.LOWER = 0.0001
K3.UPPER = 1.0
tau12 = m.FV(value=150)
tau12.STATUS = 1
tau12.FSTATUS = 0
tau12.LOWER = 50.0
tau12.UPPER = 250
tau3 = m.FV(value=15)
tau3.STATUS = 0
tau3.FSTATUS = 0
tau3.LOWER = 10
tau3.UPPER = 20
# Measured inputs
Q1 = m.MV(value=0)
Q1.FSTATUS = 1 # measured
Q1.value = Q1s
Q2 = m.MV(value=0)
Q2.FSTATUS = 1 # measured
Q2.value = Q2s
# Ambient temperature
Ta = m.Param(value=23.0) # degC
# State variables
TH1 = m.SV(value=T1s[0])
TH2 = m.SV(value=T2s[0])
# Measurements for model alignment
TC1 = m.CV(value=T1s)
TC1.STATUS = 1 # minimize error between simulation and measurement
TC1.FSTATUS = 1 # receive measurement
TC1.MEAS_GAP = 0.1 # measurement deadband gap
TC2 = m.CV(value=T1s[0])
TC2.STATUS = 1 # minimize error between simulation and measurement
TC2.FSTATUS = 1 # receive measurement
TC2.MEAS_GAP = 0.1 # measurement deadband gap
TC2.value = T2s
# Heat transfer between two heaters
DT = m.Intermediate(TH2-TH1)
# Empirical correlations
m.Equation(tau12 * TH1.dt() + (TH1-Ta) == K1*Q1 + K3*DT)
m.Equation(tau12 * TH2.dt() + (TH2-Ta) == K2*Q2 - K3*DT)
m.Equation(tau3 * TC1.dt() + TC1 == TH1)
m.Equation(tau3 * TC2.dt() + TC2 == TH2)
# Global Options
m.options.IMODE = 5 # MHE
m.options.EV_TYPE = 2 # Objective type
m.options.NODES = 3 # Collocation nodes
m.options.SOLVER = 3 # IPOPT
m.options.COLDSTART = 0 # COLDSTART on first cycle
# Predict Parameters and Temperatures
# use remote=False for local solve
m.solve()
# Create plot
plt.figure(figsize=(10,7))
ax=plt.subplot(2,1,1)
ax.grid()
plt.plot(tm,T1s,'ro',label=r'$T_1$ measured')
plt.plot(tm,TC1.value,'k-',label=r'$T_1$ predicted')
plt.plot(tm,T2s,'bx',label=r'$T_2$ measured')
plt.plot(tm,TC2.value,'k--',label=r'$T_2$ predicted')
plt.ylabel('Temperature (degC)')
plt.legend(loc=2)
ax=plt.subplot(2,1,2)
ax.grid()
plt.plot(tm,Q1s,'r-',label=r'$Q_1$')
plt.plot(tm,Q2s,'b:',label=r'$Q_2$')
plt.ylabel('Heaters')
plt.xlabel('Time (sec)')
plt.legend(loc='best')
# Print optimal values
print('K1: ' + str(K1.newval))
print('K2: ' + str(K2.newval))
print('K3: ' + str(K3.newval))
print('tau12: ' + str(tau12.newval))
print('tau3: ' + str(tau3.newval))
# Save figure
plt.savefig('tclab_estimation.png')
plt.show()
| apache-2.0 | -3,404,360,134,149,806,000 | 22.318519 | 70 | 0.624733 | false |
DistrictDataLabs/yellowbrick | yellowbrick/contrib/scatter.py | 1 | 11862 | # yellowbrick.contrib.scatter
# Implements a 2d scatter plot for feature analysis.
#
# Author: Nathan Danielsen
# Created: Fri Feb 26 19:40:00 2017 -0400
#
# Copyright (C) 2017 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: scatter.py [a89633e] [email protected] $
"""
Implements a 2D scatter plot for feature analysis.
"""
##########################################################################
# Imports
##########################################################################
import itertools
import numpy as np
from yellowbrick.features.base import DataVisualizer
from yellowbrick.utils import is_dataframe, is_structured_array
from yellowbrick.utils import has_ndarray_int_columns
from yellowbrick.exceptions import YellowbrickValueError
from yellowbrick.style.colors import resolve_colors
##########################################################################
# Quick Methods
##########################################################################
def scatterviz(
X,
y=None,
ax=None,
features=None,
classes=None,
color=None,
colormap=None,
markers=None,
alpha=1.0,
**kwargs
):
"""Displays a bivariate scatter plot.
This helper function is a quick wrapper to utilize the ScatterVisualizer
(Transformer) for one-off analysis.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n, default: None
An array or series of target or class values
ax : matplotlib axes, default: None
The axes to plot the figure on.
features : list of strings, default: None
The names of two features or columns.
More than that will raise an error.
classes : list of strings, default: None
The names of the classes in the target
color : list or tuple of colors, default: None
Specify the colors for each individual class
colormap : string or matplotlib cmap, default: None
Sequential colormap for continuous target
markers : iterable of strings, default: ,+o*vhd
Matplotlib style markers for points on the scatter plot points
alpha : float, default: 1.0
Specify a transparency where 1 is completely opaque and 0 is completely
transparent. This property makes densely clustered points more visible.
Returns
-------
viz : ScatterVisualizer
Returns the fitted, finalized visualizer
"""
# Instantiate the visualizer
visualizer = ScatterVisualizer(
ax=ax,
features=features,
classes=classes,
color=color,
colormap=colormap,
markers=markers,
alpha=alpha,
**kwargs
)
# Fit and transform the visualizer (calls draw)
visualizer.fit(X, y, **kwargs)
visualizer.transform(X)
# Return the visualizer object
return visualizer
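# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# The iris data and feature names below are assumptions used only to demonstrate the
# quick method; any two-column matrix plus a class vector works the same way.
def _scatterviz_example():
    from sklearn.datasets import load_iris
    data = load_iris()
    viz = scatterviz(data.data[:, :2], data.target,
                     features=["sepal length (cm)", "sepal width (cm)"])
    return viz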
##########################################################################
# Static ScatterVisualizer Visualizer
##########################################################################
class ScatterVisualizer(DataVisualizer):
"""
ScatterVisualizer is a bivariate feature data visualization algorithm that
plots using the Cartesian coordinates of each point.
Parameters
----------
ax : a matplotlib plot, default: None
The axis to plot the figure on.
x : string, default: None
        The feature name that corresponds to a column name or index position
in the matrix that will be plotted against the x-axis
y : string, default: None
        The feature name that corresponds to a column name or index position
in the matrix that will be plotted against the y-axis
features : a list of two feature names to use, default: None
List of two features that correspond to the columns in the array.
The order of the two features correspond to X and Y axes on the
graph. More than two feature names or columns will raise an error.
If a DataFrame is passed to fit and features is None, feature names
are selected that are the columns of the DataFrame.
classes : a list of class names for the legend, default: None
If classes is None and a y value is passed to fit then the classes
are selected from the target vector.
color : optional list or tuple of colors to colorize points, default: None
Use either color to colorize the points on a per class basis or
colormap to color them on a continuous scale.
colormap : optional string or matplotlib cmap to colorize points, default: None
Use either color to colorize the points on a per class basis or
colormap to color them on a continuous scale.
markers : iterable of strings, default: ,+o*vhd
Matplotlib style markers for points on the scatter plot points
alpha : float, default: 1.0
Specify a transparency where 1 is completely opaque and 0 is completely
transparent. This property makes densely clustered points more visible.
kwargs : keyword arguments passed to the super class.
These parameters can be influenced later on in the visualization
process, but can and should be set as early as possible.
"""
def __init__(
self,
ax=None,
x=None,
y=None,
features=None,
classes=None,
color=None,
colormap=None,
markers=None,
alpha=1.0,
**kwargs
):
"""
Initialize the base scatter with many of the options required in order
to make the visualization work.
"""
super(ScatterVisualizer, self).__init__(
ax=ax,
features=features,
classes=classes,
color=color,
colormap=colormap,
**kwargs
)
self.x = x
self.y = y
self.alpha = alpha
self.markers = itertools.cycle(
kwargs.pop("markers", (",", "+", "o", "*", "v", "h", "d"))
)
self.color = color
self.colormap = colormap
if self.x is not None and self.y is not None and self.features is not None:
raise YellowbrickValueError("Please specify x,y or features, not both.")
if self.x is not None and self.y is not None and self.features is None:
self.features = [self.x, self.y]
# Ensure with init that features doesn't have more than two features
if features is not None:
if len(features) != 2:
raise YellowbrickValueError(
"ScatterVisualizer only accepts two features."
)
def fit(self, X, y=None, **kwargs):
"""
The fit method is the primary drawing input for the parallel coords
visualization since it has both the X and y data required for the
viz and the transform method does not.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with 2 features
y : ndarray or Series of length n
An array or series of target or class values
kwargs : dict
Pass generic arguments to the drawing method
Returns
-------
self : instance
Returns the instance of the transformer/visualizer
"""
_, ncols = X.shape
# NOTE: Do not call super for this class, it conflicts with the fit.
# Setting these variables is similar to the old behavior of DataVisualizer.
# TODO: refactor to make use of the new DataVisualizer functionality
self.features_ = self.features
self.classes_ = self.classes
if ncols == 2:
X_two_cols = X
if self.features_ is None:
self.features_ = ["Feature One", "Feature Two"]
# Handle the feature names if they're None.
elif self.features_ is not None and is_dataframe(X):
X_two_cols = X[self.features_].values
# handle numpy named/ structured array
elif self.features_ is not None and is_structured_array(X):
X_selected = X[self.features_]
X_two_cols = X_selected.copy().view(
(np.float64, len(X_selected.dtype.names))
)
# handle features that are numeric columns in ndarray matrix
elif self.features_ is not None and has_ndarray_int_columns(self.features_, X):
f_one, f_two = self.features_
X_two_cols = X[:, [int(f_one), int(f_two)]]
else:
raise YellowbrickValueError(
"""
ScatterVisualizer only accepts two features, please
explicitly set these two features in the init kwargs or
pass a matrix/ dataframe in with only two columns."""
)
# Store the classes for the legend if they're None.
if self.classes_ is None:
# TODO: Is this the most efficient method?
self.classes_ = [str(label) for label in np.unique(y)]
# Draw the instances
self.draw(X_two_cols, y, **kwargs)
# Fit always returns self.
return self
def draw(self, X, y, **kwargs):
"""Called from the fit method, this method creates a scatter plot that
draws each instance as a class or target colored point, whose location
is determined by the feature data set.
"""
# Set the axes limits
self.ax.set_xlim([-1, 1])
self.ax.set_ylim([-1, 1])
# set the colors
color_values = resolve_colors(
n_colors=len(self.classes_), colormap=self.colormap, colors=self.color
)
colors = dict(zip(self.classes_, color_values))
# Create a data structure to hold the scatter plot representations
to_plot = {}
for kls in self.classes_:
to_plot[kls] = [[], []]
# Add each row of the data set to to_plot for plotting
# TODO: make this an independent function for override
for i, row in enumerate(X):
row_ = np.repeat(np.expand_dims(row, axis=1), 2, axis=1)
x_, y_ = row_[0], row_[1]
kls = self.classes_[y[i]]
to_plot[kls][0].append(x_)
to_plot[kls][1].append(y_)
# Add the scatter plots from the to_plot function
# TODO: store these plots to add more instances to later
# TODO: make this a separate function
for i, kls in enumerate(self.classes_):
self.ax.scatter(
to_plot[kls][0],
to_plot[kls][1],
marker=next(self.markers),
color=colors[kls],
label=str(kls),
alpha=self.alpha,
**kwargs
)
self.ax.axis("equal")
def finalize(self, **kwargs):
"""
Adds a title and a legend and ensures that the axis labels are set as
the feature names being visualized.
Parameters
----------
kwargs: generic keyword arguments.
Notes
-----
Generally this method is called from show and not directly by the user.
"""
# Divide out the two features
feature_one, feature_two = self.features_
# Set the title
self.set_title(
"Scatter Plot: {0} vs {1}".format(str(feature_one), str(feature_two))
)
# Add the legend
self.ax.legend(loc="best")
self.ax.set_xlabel(str(feature_one))
self.ax.set_ylabel(str(feature_two))
# Alias for ScatterViz
ScatterViz = ScatterVisualizer
| apache-2.0 | -159,934,294,901,768,700 | 32.041783 | 87 | 0.583966 | false |
dichen001/Go4Jobs | JackChen/hash/18. 4Sum.py | 1 | 1449 | """
Given an array S of n integers, are there elements a, b, c, and d in S such that a + b + c + d = target? Find all unique quadruplets in the array which gives the sum of target.
Note: The solution set must not contain duplicate quadruplets.
For example, given array S = [1, 0, -1, 0, -2, 2], and target = 0.
A solution set is:
[
[-1, 0, 0, 1],
[-2, -1, 1, 2],
[-2, 0, 0, 2]
]
"""
class Solution(object):
def fourSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[List[int]]
"""
nums.sort()
results = []
for i in range(len(nums)-3):
if i > 0 and nums[i] == nums[i-1]: continue
sum_3 = target - nums[i]
for j in range(i+1, len(nums) -2):
if j > i+1 and nums[j] == nums[j-1]: continue
l, h, sum_2 = j+1, len(nums) - 1, sum_3 - nums[j]
while l < h:
if nums[l] + nums[h] < sum_2:
l += 1
elif nums[l] + nums[h] > sum_2:
h -= 1
else:
results.append([nums[i], nums[j], nums[l], nums[h]])
while l < h and nums[l] == nums[l+1]: l += 1
while l < h and nums[h] == nums[h-1]: h -= 1
l, h = l+1, h-1
return results | gpl-3.0 | 125,500,663,320,398,770 | 34.275 | 176 | 0.429952 | false |
santiago-salas-v/walas | node_images.py | 1 | 1746 | import matplotlib
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
patch1 = matplotlib.patches.Circle(
[0.5,0.5],0.05
)
patch2 = matplotlib.patches.Rectangle(
[0.3,0.3],0.4, 0.4, alpha=0.5,
fill=False, edgecolor='black',
linestyle = '--'
)
arrow1 = matplotlib.patches.Arrow(
0, 0.5,0.45,0, width=0.05,
color='black'
)
arrow2 = matplotlib.patches.Arrow(
0.55, 0.5,0.45,0, width=0.05,
color='black'
)
line1 = matplotlib.lines.Line2D(
[0.5,0.5], [0,0.45],
linestyle='--', color='black'
)
text1 = matplotlib.text.Text(
0, 0.45, '$n_{A0}$\n$V_0$\n$U_A=0$'
)
text2 = matplotlib.text.Text(
0.8, 0.45, '$n_{A1}$\n$V_1$\n$U_{A1}$'
)
for artist in [
patch1,patch2,arrow1,arrow2,
line1,text1,text2
]:
ax.add_artist(artist)
ax.set_frame_on(False)
ax.set_axis_off()
ax.set_aspect(1.0)
# NOTE: this statement was truncated in the original ("fig."); assuming the intent
# was to display the first figure before building the second one.
fig.show()
fig = plt.figure()
ax = fig.add_subplot(111)
patch1 = matplotlib.patches.Circle(
[0.5,0.5],0.05
)
patch2 = matplotlib.patches.Rectangle(
[0.3,0.3],0.4, 0.4, alpha=0.5,
fill=False, edgecolor='black',
linestyle = '--'
)
arrow1 = matplotlib.patches.Arrow(
0, 0.5,0.45,0, width=0.05,
color='black'
)
arrow2 = matplotlib.patches.Arrow(
0.55, 0.5,0.45,0, width=0.05,
color='black'
)
arrow3 = matplotlib.patches.Arrow(
0.5, 0.0, 0,0.45, width=0.05,
color='black'
)
text1 = matplotlib.text.Text(
0, 0.45, '$n_{A0}$\n$V_0$\n$U_A=0$'
)
text2 = matplotlib.text.Text(
0.8, 0.45, '$n_{A1}$\n$V_1$\n$U_{A1}$'
)
text3 = matplotlib.text.Text(
0.55, 0.1, '$n_{Ar}$\n$V_r$'
)
for artist in [
patch1,patch2,arrow1,arrow2,
arrow3,text1,text2,text3
]:
ax.add_artist(artist)
ax.set_frame_on(False)
ax.set_axis_off()
ax.set_aspect(1.0) | mit | -8,638,347,603,755,213,000 | 20.567901 | 42 | 0.611111 | false |
audiohacked/pyBusPirate | src/buspirate/uart.py | 1 | 5375 | # Created by Sean Nelson on 2018-08-19.
# Copyright 2018 Sean Nelson <[email protected]>
#
# This file is part of pyBusPirate.
#
# pyBusPirate is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# pyBusPirate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyBusPirate. If not, see <https://www.gnu.org/licenses/>.
""" UART class """
from enum import IntEnum
from buspirate.base import BusPirate
class UartSpeed(IntEnum):
""" UART Speed Enum """
BAUD_300 = 0b0000
BAUD_1200 = 0b0001
BAUD_2400 = 0b0010
BAUD_4800 = 0b0011
BAUD_9600 = 0b0100
BAUD_19200 = 0b0101
BAUD_31250 = 0b0110
BAUD_MIDI = 0b0110
MIDI = 0b0110
BAUD_38400 = 0b0111
BAUD_57600 = 0b1000
BAUD_115200 = 0b1010
class UartConfiguration(object):
""" UART Configuration Enum Base """
class PinOutput(IntEnum):
""" Enum for Pin Output """
HIZ = 0b00000
V3P3 = 0b10000
PIN_HIZ = 0b00000
PIN_3P3V = 0b10000
class DataBitsAndParity(IntEnum):
""" Enum for Data bits and Parity """
EIGHT_NONE = 0b0000
EIGHT_EVEN = 0b0100
EIGHT_ODD = 0b1000
NINE_NONE = 0b1100
class StopBits(IntEnum):
""" Enum for Stop bits """
ONE = 0b00
TWO = 0b10
class RxPolarity(IntEnum):
""" Enum for Rx Polarity """
IDLE_1 = 0b0
IDLE_0 = 0b1
class UART(BusPirate):
""" UART BitBanging on the BusPirate """
@property
def enter(self) -> bool:
"""
Enter UART Mode on the BusPirate
:returns: returns Success or Failure
"""
self.write(0x03)
return self.read(4) == "ART1"
def echo_rx(self, start_stop: int = 0) -> bool:
"""
Enable disable RX Echoing
:param start_stop: Give 0 for Start Echo, Give 1 to Stop Echo
:type start_stop: int
:returns: Success or Failure
:rtype: bool
"""
self.write(0x02|start_stop)
return self.read(1) == 0x01
def manual_baudrate(self, brg_register: int = 0x0000) -> bool:
"""
Set Baudrate Manually
:param brg_register: BRG Register value based on 32mhz osc, divider = 2, and BRGH = 1
:type brg_register: int
:returns: Success or Failure
:rtype: bool
"""
data = [0x07, brg_register]
self.write(data)
return self.read(3) == [0x01, 0x01, 0x01]
@property
def bridge_mode(self) -> bool:
"""
Enable Bridge mode. Hard Reset BP to exit.
:returns: Success or Failure
:rtype: bool
"""
self.write(0x0F)
return self.read(1) == 0x01
@property
def speed(self):
""" Speed Property Getter """
return self._speed
@speed.setter
def speed(self, value):
""" Speed Property Setter """
self._speed = value
return self.uart_speed(value)
def uart_speed(self, baudrate: int = UartSpeed.BAUD_115200) -> bool:
"""
Set UART Speed
:param baudrate: Uart Baud Rates
:type baudrate: int
:returns: Success or Failure
:rtype: bool
"""
self.write(0x60|baudrate)
return self.read(1) == 0x01
@property
def config(self):
""" Configuration Property Getter """
return self._config
@config.setter
def config(self, value):
""" Configuration Property Setter """
self._config = value
pin_outputs = value & 0b1000
data_parity = value & 0b0100
uastop_bits = value & 0b0010
rx_polarity = value & 0b0001
return self.uart_configuration(pin_outputs, data_parity, uastop_bits, rx_polarity)
def uart_configuration(self,
pin_output: int = UartConfiguration.PinOutput.HIZ,
databits_parity: int = UartConfiguration.DataBitsAndParity.EIGHT_NONE,
stop_bits: int = UartConfiguration.StopBits.ONE,
rx_polarity: int = UartConfiguration.RxPolarity.IDLE_1) -> bool:
"""
UART Configuration
:param pin_output: The Pin Configuration for Power Pins
:type pin_output: int.
:param clock_phase: The Pin Configuration for Pull Up Pins
:type clock_phase: int.
:param clock_edge: The Pin Configuration for AUX pin
:type clock_edge: int.
:param sample_time: The Pin Configuration for Chip Select Pin
:type sample_time: int.
:returns: returns Success or Failure
:rtype: bool.
"""
uart_configuration = 0
uart_configuration += pin_output
uart_configuration += databits_parity
uart_configuration += stop_bits
uart_configuration += rx_polarity
self.write(0x80|uart_configuration)
return self.read(1) == 0x01
if __name__ == '__main__':
pass
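# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# Shows the intended call order; the serial port name is hypothetical and the exact
# constructor signature is assumed to come from buspirate.base.BusPirate.
def _example_uart_session():
    uart = UART('/dev/ttyUSB0', 115200)
    if not uart.enter:
        raise RuntimeError('could not enter UART mode')
    uart.uart_speed(UartSpeed.BAUD_115200)
    uart.uart_configuration(
        pin_output=UartConfiguration.PinOutput.V3P3,
        databits_parity=UartConfiguration.DataBitsAndParity.EIGHT_NONE,
        stop_bits=UartConfiguration.StopBits.ONE,
        rx_polarity=UartConfiguration.RxPolarity.IDLE_1)
    return uart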
| gpl-2.0 | 5,007,357,886,985,508,000 | 27.439153 | 97 | 0.599256 | false |
Re4son/Kali-Pi | Menus/menu-9p.py | 1 | 2924 | #!/usr/bin/env python
import kalipi
from kalipi import *
#############################
## Local Functions ##
## Local Functions ##
#############################
#############################
## Buttons ##
# define all of the buttons
label1 = Button(labelPadding * " " + " ", originX, originX, buttonHeight, buttonWidth * 3 + spacing * 2, tron_ora, tron_yel, labelFont)
label2 = Button(labelPadding * " " + " ", originX, originY, buttonHeight, buttonWidth * 3 + spacing * 2, tron_ora, tron_yel, labelFont)
label3 = Button(labelPadding * " " + " ", originX, originY + buttonHeight + spacing, buttonHeight, buttonWidth * 3 + spacing * 2, tron_ora, tron_yel, labelFont)
button7 = Button(labelPadding * " " + " <<<", originX, originY + (buttonHeight * 2) + (spacing * 2), buttonHeight, buttonWidth, tron_ora, tron_yel, labelFont)
button9 = Button(labelPadding * " " + " Refresh", originX + (buttonWidth * 2) + (spacing * 2), originY + (buttonHeight * 2) + (spacing * 2), buttonHeight, buttonWidth, tron_ora, tron_yel, labelFont)
# Define each button press action
def button(number):
if number == 7:
if button7.disable == 1:
return
# Previous page
pygame.quit()
page=os.environ["MENUDIR"] + "menu-pin.py"
retPage=kalipi.get_retPage()
args = [page, retPage]
os.execvp("python", ["python"] + args)
sys.exit()
if number == 9:
if button9.disable == 1:
return
# Refresh
pygame.quit()
menu9p()
## Buttons ##
#############################
def menu9p():
# Init screen
kalipi.screen()
# Outer Border
kalipi.border(tron_ora)
#############################
## Buttons ##
# Buttons and labels
# See variables at the top of the document to adjust the menu
# First Row
# label 1
label1.text=labelPadding * " " + kalipi.get_clock()
label1.draw()
# Second Row
# Button 2
label2.text=labelPadding * " " + kalipi.get_temp()
label2.draw()
# Third Row
# Label 3
label3.text=labelPadding * " " + kalipi.get_volts()
label3.draw()
# Fourth Row
# Button 7
button7.disable = 0 # "1" disables button
if button7.disable == 1:
button7.draw()
else:
# Add button launch code here
button7.draw()
# Button 9
button9.disable = 0 # "1" disables button
if button9.disable == 1:
button9.draw()
else:
# Add button launch code here
button9.draw()
## Buttons ##
#############################
#############################
## Input loop ##
while 1:
butNo=kalipi.inputLoop("menu-9p.py")
button(butNo)
## Input loop ##
#############################
if __name__ == "__main__":
menu9p()
| gpl-3.0 | 1,339,044,619,860,689,200 | 25.107143 | 202 | 0.515048 | false |
JKarathiya/Lean | Algorithm.Python/InceptionDateSelectionRegressionAlgorithm.py | 1 | 2432 | # QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Common")
from System import *
from QuantConnect import *
from QuantConnect.Algorithm import *
from QuantConnect.Algorithm.Framework.Selection import *
from QuantConnect.Data import *
from QuantConnect.Data.UniverseSelection import *
from datetime import timedelta
### <summary>
### Regression algorithm to test universe additions and removals with open positions
### </summary>
### <meta name="tag" content="regression test" />
class InceptionDateSelectionRegressionAlgorithm(QCAlgorithm):
def Initialize(self):
self.SetStartDate(2013,10,1)
self.SetEndDate(2013,10,31)
self.SetCash(100000)
self.changes = None
self.UniverseSettings.Resolution = Resolution.Hour
# select IBM once a week, empty universe the other days
self.AddUniverseSelection(CustomUniverseSelectionModel("my-custom-universe", lambda dt: ["IBM"] if dt.day % 7 == 0 else []))
# Adds SPY 5 days after StartDate and keep it in Universe
self.AddUniverseSelection(InceptionDateUniverseSelectionModel("spy-inception", {"SPY": self.StartDate + timedelta(5)}));
def OnData(self, slice):
if self.changes is None:
return
# we'll simply go long each security we added to the universe
for security in self.changes.AddedSecurities:
self.SetHoldings(security.Symbol, .5)
self.changes = None
def OnSecuritiesChanged(self, changes):
# liquidate removed securities
for security in changes.RemovedSecurities:
self.Liquidate(security.Symbol, "Removed from Universe")
self.changes = changes | apache-2.0 | -5,988,067,062,033,275,000 | 38.209677 | 132 | 0.733333 | false |
kret0s/gnuhealth-live | tryton/server/trytond-3.8.3/trytond/ir/property.py | 1 | 5773 | # This file is part of Tryton. The COPYRIGHT file at the top level of
# this repository contains the full copyright notices and license terms.
from decimal import Decimal
from ..model import ModelView, ModelSQL, fields
from ..transaction import Transaction
from ..cache import Cache
from ..pool import Pool
__all__ = [
'Property',
]
_CAST = {
'numeric': Decimal,
'integer': int,
'float': float,
}
class Property(ModelSQL, ModelView):
"Property"
__name__ = 'ir.property'
_rec_name = 'field'
value = fields.Reference('Value', selection='models_get')
res = fields.Reference('Resource', selection='models_get', select=True)
field = fields.Many2One('ir.model.field', 'Field',
ondelete='CASCADE', required=True, select=True)
_models_get_cache = Cache('ir_property.models_get', context=False)
@classmethod
def models_get(cls):
pool = Pool()
Model = pool.get('ir.model')
models = cls._models_get_cache.get(None)
if models:
return models
cursor = Transaction().cursor
model = Model.__table__()
cursor.execute(*model.select(model.model, model.name,
order_by=model.name.asc))
models = cursor.fetchall() + [('', '')]
cls._models_get_cache.set(None, models)
return models
@classmethod
def get(cls, names, model, res_ids=None):
"""
Return named property values for each res_ids of model
"""
pool = Pool()
ModelAccess = pool.get('ir.model.access')
res = {}
ModelAccess.check(model, 'read')
names_list = True
if not isinstance(names, list):
names_list = False
names = [names]
if res_ids is None:
res_ids = []
properties = cls.search([
('field.name', 'in', names),
['OR',
('res', '=', None),
('res', 'in', ['%s,%s' % (model, x) for x in res_ids]),
],
], order=[])
default_vals = dict((x, None) for x in names)
for property_ in (x for x in properties if not x.res):
value = property_.value
val = None
if value is not None:
if not isinstance(value, basestring):
val = int(value)
else:
if property_.field.ttype in _CAST:
cast = _CAST[property_.field.ttype]
val = cast(value.split(',')[1])
elif property_.field.ttype in ('char', 'selection'):
val = value.split(',')[1]
else:
raise Exception('Not implemented')
default_vals[property_.field.name] = val
if not res_ids:
if not names_list:
return default_vals[names[0]]
return default_vals
for name in names:
res[name] = dict((x, default_vals[name]) for x in res_ids)
for property_ in (x for x in properties if x.res):
val = None
if property_.value is not None:
if not isinstance(property_.value, basestring):
val = int(property_.value)
else:
if property_.field.ttype in _CAST:
cast = _CAST[property_.field.ttype]
val = cast(property_.value.split(',')[1])
elif property_.field.ttype in ('char', 'selection'):
val = property_.value.split(',')[1]
else:
raise Exception('Not implemented')
res[property_.field.name][int(property_.res)] = val
if not names_list:
return res[names[0]]
return res
@staticmethod
def _set_values(model, res_id, val, field_id):
return {
'value': val,
'res': model + ',' + str(res_id),
'field': field_id,
}
@classmethod
def set(cls, name, model, ids, val):
"""
Set named property value for ids of model
Return the id of the record created
"""
pool = Pool()
ModelField = pool.get('ir.model.field')
ModelAccess = pool.get('ir.model.access')
ModelAccess.check(model, 'write')
model_field, = ModelField.search([
('name', '=', name),
('model.model', '=', model),
], order=[], limit=1)
Model = pool.get(model)
field = Model._fields[name]
properties = cls.search([
('field', '=', model_field.id),
('res', 'in', [model + ',' + str(res_id) for res_id in ids]),
], order=[])
cls.delete(properties)
defaults = cls.search([
('field', '=', model_field.id),
('res', '=', None),
], order=[], limit=1)
default_val = None
if defaults:
value = cls(defaults[0].id).value
default_val = None
if value is not None:
if not isinstance(value, basestring):
default_val = int(value)
else:
if field._type in _CAST:
cast = _CAST[field._type]
default_val = cast(value.split(',')[1])
elif field._type in ('char', 'selection'):
default_val = value.split(',')[1]
else:
raise Exception('Not implemented')
if (val != default_val):
for res_id in ids:
vals = cls._set_values(model, res_id, val, model_field.id)
cls.create([vals])
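# --- Illustrative usage sketch (editor's addition, not part of the Tryton module) ---
# Property.get/set read and write default or per-record values of Property fields;
# the model name, field name and value below are hypothetical.
def _example_property_usage(pool, party_ids):
    ir_property = pool.get('ir.property')
    # read one named property for a list of party records
    values = ir_property.get('payment_term', 'party.party', party_ids)
    # write the same value back to every record
    ir_property.set('payment_term', 'party.party', party_ids,
                    'account.invoice.payment_term,1')
    return values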
| gpl-3.0 | -8,129,677,329,808,463,000 | 32.760234 | 75 | 0.493677 | false |
larsks/cloud-init | cloudinit/sources/DataSourceBigstep.py | 2 | 1917 | # Copyright (C) 2015-2016 Bigstep Cloud Ltd.
#
# Author: Alexandru Sirbu <[email protected]>
#
# This file is part of cloud-init. See LICENSE file for license information.
import errno
import json
from cloudinit import log as logging
from cloudinit import sources
from cloudinit import url_helper
from cloudinit import util
LOG = logging.getLogger(__name__)
class DataSourceBigstep(sources.DataSource):
dsname = 'Bigstep'
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.metadata = {}
self.vendordata_raw = ""
self.userdata_raw = ""
def _get_data(self, apply_filter=False):
url = get_url_from_file()
if url is None:
return False
response = url_helper.readurl(url)
decoded = json.loads(response.contents.decode())
self.metadata = decoded["metadata"]
self.vendordata_raw = decoded["vendordata_raw"]
self.userdata_raw = decoded["userdata_raw"]
return True
def _get_subplatform(self):
"""Return the subplatform metadata source details."""
return 'metadata (%s)' % get_url_from_file()
def get_url_from_file():
try:
content = util.load_file("/var/lib/cloud/data/seed/bigstep/url")
except IOError as e:
# If the file doesn't exist, then the server probably isn't a Bigstep
# instance; otherwise, another problem exists which needs investigation
if e.errno == errno.ENOENT:
return None
else:
raise
return content
# Used to match classes to dependencies
datasources = [
(DataSourceBigstep, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
]
# Return a list of data sources that match this set of dependencies
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
# vi: ts=4 expandtab
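# --- Editor's note (not part of the original module) ---
# _get_data() expects the URL read from /var/lib/cloud/data/seed/bigstep/url to return a
# JSON document shaped roughly like the sketch below (keys taken from the code above,
# values hypothetical):
#
#   {
#       "metadata": {"instance-id": "i-abc123", "local-hostname": "bigstep-vm"},
#       "vendordata_raw": "#cloud-config\n",
#       "userdata_raw": "#cloud-config\npackages:\n - htop\n"
#   }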
| gpl-3.0 | 2,950,398,994,128,199,700 | 27.61194 | 79 | 0.664058 | false |
derks/cement | tests/core/exc_tests.py | 1 | 1924 | """Tests for cement.core.exc."""
import unittest
from nose.tools import eq_, raises
from nose import SkipTest
from cement.core import exc
from cement.utils import test_helper as _t
class ExceptionTestCase(unittest.TestCase):
def setUp(self):
self.app = _t.prep()
@raises(exc.CementConfigError)
def test_cement_config_error(self):
try:
raise exc.CementConfigError("CementConfigError Test")
except exc.CementConfigError as e:
eq_(e.msg, "CementConfigError Test")
eq_(e.__str__(), "CementConfigError Test")
raise
@raises(exc.CementRuntimeError)
def test_cement_runtime_error(self):
try:
raise exc.CementRuntimeError("CementRuntimeError Test")
except exc.CementRuntimeError as e:
eq_(e.msg, "CementRuntimeError Test")
eq_(e.__str__(), "CementRuntimeError Test")
raise
@raises(exc.CementArgumentError)
def test_cement_argument_error(self):
try:
raise exc.CementArgumentError("CementArgumentError Test")
except exc.CementArgumentError as e:
eq_(e.msg, "CementArgumentError Test")
eq_(e.__str__(), "CementArgumentError Test")
raise
@raises(exc.CementInterfaceError)
def test_cement_interface_error(self):
try:
raise exc.CementInterfaceError("CementInterfaceError Test")
except exc.CementInterfaceError as e:
eq_(e.msg, "CementInterfaceError Test")
eq_(e.__str__(), "CementInterfaceError Test")
raise
@raises(exc.CementSignalError)
def test_cement_signal_error(self):
try:
import signal
raise exc.CementSignalError(signal.SIGTERM, 5)
except exc.CementSignalError as e:
eq_(e.signum, signal.SIGTERM)
eq_(e.frame, 5)
raise
| bsd-3-clause | -5,528,875,962,445,368,000 | 32.754386 | 71 | 0.614345 | false |
praphull27/diskBasedLdaBenchmarkingTools | readXmlAndOutputToTxt.py | 1 | 1444 | from bs4 import BeautifulSoup
import re
import os
import multiprocessing
def read_and_tokenize (file_name):
xml_file_handle = open(file_name, 'rb')
xml_file_contents = xml_file_handle.read()
xml_file_handle.close()
xml_file_text = ''
full_text_all = BeautifulSoup(xml_file_contents).find_all(class_="full_text")
for full_text in full_text_all:
xml_file_text += full_text.get_text(" ")
xml_file_text = re.sub(r'[^a-zA-Z]', ' ', xml_file_text)
xml_file_text = (xml_file_text.strip()).lower()
xml_file_text_tokenized = xml_file_text.split()
xml_file_filtered_words = [word for word in xml_file_text_tokenized if len(word) >=3]
xml_file_filtered_text = " ".join(xml_file_filtered_words)
return xml_file_filtered_text
root_path = "/Users/praphull/Desktop/msProject/nyt_corpus/"
paths = [os.path.join(root, name) for root, dirs, files in os.walk(root_path) for name in files]
paths_list = []
num = 10000
no_of_parts = len(paths) / num
if len(paths) % num != 0:
no_of_parts += 1
paths_list = [paths[a*num:(a+1)*num] for a in range(no_of_parts)]
out_handle = open("nyt_corpus_original.txt", 'wb')
file_count = 0
for paths in paths_list:
p = multiprocessing.Pool(processes=(multiprocessing.cpu_count() - 1))
results = p.map(read_and_tokenize, paths)
p.close()
p.join()
out_handle.write("\n".join(results) + "\n")
file_count += 1
if file_count % 10 == 0:
print file_count*num
else:
print '.'
out_handle.close()
#1855658 | mit | 5,477,891,135,625,652,000 | 28.489796 | 96 | 0.687673 | false |
dc3-plaso/dfvfs | dfvfs/credentials/keychain.py | 1 | 2743 | # -*- coding: utf-8 -*-
"""The path specification key chain.
The key chain is used to manage credentials for path specifications.
E.g. BitLocker Drive Encryption (BDE) encrypted volumes can require a
credential (e.g. password) to access the unencrypted data (unlock).
"""
from dfvfs.credentials import manager
class KeyChain(object):
"""Class that implements the key chain."""
def __init__(self):
"""Initializes the key chain."""
super(KeyChain, self).__init__()
self._credentials_per_path_spec = {}
def Empty(self):
"""Empties the key chain."""
self._credentials_per_path_spec = {}
def ExtractCredentialsFromPathSpec(self, path_spec):
"""Extracts credentials from a path specification.
Args:
path_spec (PathSpec): path specification to extract credentials from.
"""
credentials = manager.CredentialsManager.GetCredentials(path_spec)
for identifier in credentials.CREDENTIALS:
value = getattr(path_spec, identifier, None)
if value is None:
continue
self.SetCredential(path_spec, identifier, value)
def GetCredential(self, path_spec, identifier):
"""Retrieves a specific credential from the key chain.
Args:
path_spec (PathSpec): path specification.
identifier (str): credential identifier.
Returns:
object: credential or None if the credential for the path specification
is not set.
"""
credentials = self._credentials_per_path_spec.get(path_spec.comparable, {})
return credentials.get(identifier, None)
def GetCredentials(self, path_spec):
"""Retrieves all credentials for the path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
dict[str,object]: credentials for the path specification.
"""
return self._credentials_per_path_spec.get(path_spec.comparable, {})
def SetCredential(self, path_spec, identifier, data):
"""Sets a specific credential for the path specification.
Args:
path_spec (PathSpec): path specification.
identifier (str): credential identifier.
data (object): credential data.
Raises:
KeyError: if the credential is not supported by the path specification
type.
"""
supported_credentials = manager.CredentialsManager.GetCredentials(path_spec)
if identifier not in supported_credentials.CREDENTIALS:
raise KeyError((
          u'Unsupported credential: {0:s} for path specification type: '
u'{1:s}').format(identifier, path_spec.type_indicator))
credentials = self._credentials_per_path_spec.get(path_spec.comparable, {})
credentials[identifier] = data
self._credentials_per_path_spec[path_spec.comparable] = credentials
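# --- Illustrative usage sketch (editor's addition, not part of the dfvfs module) ---
# A key chain stores credentials keyed by path specification; the BDE path spec and
# password below are hypothetical.
def _example_keychain_usage(bde_path_spec):
  key_chain = KeyChain()
  key_chain.SetCredential(bde_path_spec, u'password', u'hunter2')
  return key_chain.GetCredential(bde_path_spec, u'password')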
| apache-2.0 | -8,760,681,104,130,690,000 | 31.654762 | 80 | 0.692308 | false |
hbldh/skboost | skboost/stumps/decision_stump.py | 1 | 17561 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:mod:`decision_stump`
==================
.. module:: decision_stump
:platform: Unix, Windows
:synopsis:
.. moduleauthor:: hbldh <[email protected]>
Created on 2014-08-31, 01:52
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from warnings import warn
from operator import itemgetter
import concurrent.futures as cfut
import psutil
import numpy as np
from scipy.sparse import issparse
import six
from sklearn.base import ClassifierMixin
from sklearn.utils import check_random_state, check_array
from numpy.lib.arraysetops import unique
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import _tree
try:
import skboost.stumps.ext.classifiers as c_classifiers
except ImportError as e:
c_classifiers = None
_all__ = ["NMMDecisionStump", ]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
class DecisionStump(DecisionTreeClassifier):
"""A decision tree classifier.
Parameters
----------
criterion : string, optional (default="gini")
Not used in Stratos Decision Stump.
max_features : int, float, string or None, optional (default=None)
Not used in Stratos Decision Stump.
max_depth : integer or None, optional (default=None)
Not used in Stratos Decision Stump. Always a depth 1 tree.
min_samples_split : integer, optional (default=2)
Not used in Stratos Decision Stump.
min_samples_leaf : integer, optional (default=1)
Not used in Stratos Decision Stump.
random_state : int, RandomState instance or None, optional (default=None)
Not used in Stratos Decision Stump. Nothing random in learning.
Attributes
----------
`tree_` : Tree object
The underlying Tree object.
`classes_` : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
`n_classes_` : int or list
        Always 2 for this class.
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
max_features=None,
random_state=None,
min_density=None,
compute_importances=None,
distributed_learning=True,
calculate_probabilites=False,
method='bp'):
super(DecisionStump, self).__init__(criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
max_features=max_features,
random_state=random_state)
if min_density is not None:
warn("The min_density parameter is deprecated as of version 0.14 "
"and will be removed in 0.16.", DeprecationWarning)
if compute_importances is not None:
warn("Setting compute_importances is no longer required as "
"version 0.14. Variable importances are now computed on the "
"fly when accessing the feature_importances_ attribute. "
"This parameter will be removed in 0.16.",
DeprecationWarning)
self.distributed_learning = distributed_learning
self.calculate_probabilites = calculate_probabilites
self.method = method
def fit(self, X, y, sample_mask=None,
X_argsorted=None, check_input=True, sample_weight=None):
# Deprecations
if sample_mask is not None:
warn("The sample_mask parameter is deprecated as of version 0.14 "
"and will be removed in 0.16.", DeprecationWarning)
# Convert data
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
if y.ndim == 1:
# reshape is necessary to preserve the data contiguity against vs
# [:, np.newaxis] that does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
for k in six.moves.range(self.n_outputs_):
classes_k, y[:, k] = unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
max_depth = 1
max_features = 10
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if self.min_samples_split <= 0:
raise ValueError("min_samples_split must be greater than zero.")
if self.min_samples_leaf <= 0:
raise ValueError("min_samples_leaf must be greater than zero.")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if self.method == 'bp':
self.tree_ = _fit_binary_decision_stump_breakpoint(
X, y, sample_weight, X_argsorted, self.calculate_probabilites)
elif self.method == 'bp_threaded':
self.tree_ = _fit_binary_decision_stump_breakpoint_threaded(
X, y, sample_weight, X_argsorted, self.calculate_probabilites)
else:
self.tree_ = _fit_binary_decision_stump_breakpoint(
X, y, sample_weight, X_argsorted, self.calculate_probabilites)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
if getattr(X, "dtype", None) != DTYPE or X.ndim != 2:
X = check_array(X, dtype=DTYPE)
n_samples, n_features = X.shape
if self.tree_ is None:
raise Exception("Tree not initialized. Perform a fit first")
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
if self.tree_.get('direction') > 0:
return ((X[:, self.tree_.get('best_dim')] > self.tree_.get('threshold')) * 2) - 1
else:
return ((X[:, self.tree_.get('best_dim')] <= self.tree_.get('threshold')) * 2) - 1
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are ordered
by arithmetical order.
"""
if getattr(X, "dtype", None) != DTYPE or X.ndim != 2:
X = check_array(X, dtype=DTYPE)
n_samples, n_features = X.shape
if self.tree_ is None:
raise Exception("Tree not initialized. Perform a fit first.")
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
proba = np.array(self.tree_['probabilities']).take(self.predict(X) > 0, axis=0)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in six.moves.range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def _fit_binary_decision_stump_breakpoint(X, y, sample_weight,
argsorted_X=None,
calculate_probabilities=False):
Y = (y.flatten() * 2) - 1
results = {
'min_value': None,
'best_dim': 0,
'threshold': 0,
'direction': 0,
'probabilities': []
}
if sample_weight is None:
sample_weight = np.ones(shape=(X.shape[0],), dtype='float') / (X.shape[0],)
else:
sample_weight /= np.sum(sample_weight)
classifier_result = []
for dim in six.moves.range(X.shape[1]):
if argsorted_X is not None:
sorted_x = X[argsorted_X[:, dim], dim]
w = sample_weight[argsorted_X[:, dim]]
sorted_y = Y[argsorted_X[:, dim]]
else:
data_order = np.argsort(X[:, dim])
sorted_x = X[data_order, dim]
w = sample_weight[data_order]
sorted_y = Y[data_order]
breakpoint_indices = np.where(np.diff(sorted_x))[0] + 1
w_pos_c = (w * (sorted_y > 0)).cumsum()
w_neg_c = (w * (sorted_y < 0)).cumsum()
left_errors = w_pos_c[breakpoint_indices] - w_neg_c[breakpoint_indices] + w_neg_c[-1]
right_errors = w_neg_c[breakpoint_indices] - w_pos_c[breakpoint_indices] + w_pos_c[-1]
best_left_point = np.argmin(left_errors)
best_right_point = np.argmin(right_errors)
if best_left_point < best_right_point:
output = [dim,
left_errors[best_left_point],
(sorted_x[breakpoint_indices[best_left_point] + 1] +
sorted_x[breakpoint_indices[best_left_point]]) / 2,
1]
else:
output = [dim,
right_errors[best_right_point],
(sorted_x[breakpoint_indices[best_right_point] + 1] +
sorted_x[breakpoint_indices[best_right_point]]) / 2,
-1]
classifier_result.append(output)
del sorted_x, sorted_y, left_errors, right_errors, w, w_pos_c, w_neg_c
# Sort the returned data after lowest error.
classifier_result = sorted(classifier_result, key=itemgetter(1))
best_result = classifier_result[0]
results['best_dim'] = int(best_result[0])
results['min_value'] = float(best_result[1])
# If the data is in integers, then set the threshold in integer as well.
if X.dtype.kind in ('u', 'i'):
results['threshold'] = int(best_result[2])
else:
results['threshold'] = float(best_result[2])
# Direction is defined as 1 if the positives labels are at
# higher values and -1 otherwise.
results['direction'] = int(best_result[3])
if calculate_probabilities:
results['probabilities'] = _calculate_probabilities(
X[:, results['best_dim']], Y, results)
return results
def _fit_binary_decision_stump_breakpoint_threaded(X, y, sample_weight,
argsorted_X=None,
calculate_probabilities=False):
Y = y.flatten() * 2 - 1
results = {
'min_value': None,
'best_dim': 0,
'threshold': 0,
'direction': 0,
'probabilities': []
}
if sample_weight is None:
sample_weight = np.ones(shape=(X.shape[0],), dtype='float') / (X.shape[0],)
else:
sample_weight /= np.sum(sample_weight)
classifier_result = []
tpe = cfut.ThreadPoolExecutor(max_workers=psutil.cpu_count())
futures = []
if argsorted_X is not None:
for dim in six.moves.range(X.shape[1]):
futures.append(
tpe.submit(_breakpoint_learn_one_dimension, dim, X[:, dim], Y, sample_weight, argsorted_X[:, dim]))
else:
for dim in six.moves.range(X.shape[1]):
futures.append(tpe.submit(_breakpoint_learn_one_dimension, dim, X[:, dim], Y, sample_weight))
for future in cfut.as_completed(futures):
classifier_result.append(future.result())
# Sort the returned data after lowest error.
classifier_result = sorted(classifier_result, key=itemgetter(1))
best_result = classifier_result[0]
results['best_dim'] = int(best_result[0])
results['min_value'] = float(best_result[1])
# If the data is in integers, then set the threshold in integer as well.
if X.dtype.kind in ('u', 'i'):
results['threshold'] = int(best_result[2])
else:
results['threshold'] = float(best_result[2])
# Direction is defined as 1 if the positives labels are at
# higher values and -1 otherwise.
results['direction'] = int(best_result[3])
if calculate_probabilities:
results['probabilities'] = _calculate_probabilities(X[:, results['best_dim']], Y, results)
return results
def _calculate_probabilities(X, Y, results):
if results['direction'] > 0:
labels = X > results['threshold']
else:
labels = X <= results['threshold']
    n_correct_negs = sum(Y[~labels] < 0)
    n_false_negs = sum(Y[~labels] > 0)
n_false_pos = sum(Y[labels] < 0)
n_correct_pos = sum(Y[labels] > 0)
return [[n_correct_negs / len(Y), n_false_negs / len(Y)],
[n_false_pos / len(Y), n_correct_pos / len(Y)]]
def _breakpoint_learn_one_dimension(dim_nbr, x, y, sample_weights, sorting_argument=None):
if sorting_argument is None:
sorting_argument = np.argsort(x)
sorted_x = x[sorting_argument]
w = sample_weights[sorting_argument]
sorted_y = y[sorting_argument]
breakpoint_indices = np.where(np.diff(sorted_x))[0] + 1
w_pos_c = (w * (sorted_y > 0)).cumsum()
w_neg_c = (w * (sorted_y < 0)).cumsum()
left_errors = w_pos_c[breakpoint_indices] - w_neg_c[breakpoint_indices] + w_neg_c[-1]
right_errors = w_neg_c[breakpoint_indices] - w_pos_c[breakpoint_indices] + w_pos_c[-1]
best_left_point = np.argmin(left_errors)
best_right_point = np.argmin(right_errors)
if best_left_point < best_right_point:
output = [dim_nbr,
left_errors[best_left_point],
(sorted_x[breakpoint_indices[best_left_point] - 1] +
sorted_x[breakpoint_indices[best_left_point]]) / 2,
1]
else:
output = [dim_nbr,
right_errors[best_right_point],
(sorted_x[breakpoint_indices[best_right_point] + 1] +
sorted_x[breakpoint_indices[best_right_point]]) / 2,
-1]
return output
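# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# Fits a single stump on a tiny, made-up two-class problem; labels follow the 0/1
# convention expected by fit (internally remapped to -1/+1).
def _example_decision_stump():
    X = np.array([[0.1], [0.35], [0.4], [0.75], [0.8], [0.9]])
    y = np.array([0, 0, 0, 1, 1, 1])
    stump = DecisionStump().fit(X, y)
    return stump.predict(X), stump.tree_['threshold']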
| mit | -5,267,852,490,259,074,000 | 35.509356 | 115 | 0.553442 | false |
mozilla/addons-server | src/olympia/amo/tests/test_helpers.py | 1 | 15332 | # -*- coding: utf-8 -*-
import mimetypes
import os
from datetime import datetime, timedelta
from unittest.mock import Mock, patch
from django.conf import settings
from django.core.files.uploadedfile import SimpleUploadedFile
from django.urls import NoReverseMatch
from django.test.client import RequestFactory
from django.test.utils import override_settings
from django.utils.encoding import force_bytes
import pytest
from pyquery import PyQuery
import olympia
from olympia import amo
from olympia.amo import urlresolvers, utils
from olympia.amo.reverse import set_url_prefix
from olympia.amo.templatetags import jinja_helpers
from olympia.amo.tests import SQUOTE_ESCAPED, TestCase, reverse_ns
from olympia.amo.utils import ImageCheck
ADDONS_TEST_FILES = os.path.join(
os.path.dirname(olympia.__file__), 'devhub', 'tests', 'addons'
)
pytestmark = pytest.mark.django_db
def render(s, context=None):
if context is None:
context = {}
t = utils.from_string(s)
return t.render(context)
def test_strip_controls():
# We want control codes like \x0c to disappear.
assert 'I ove you' == jinja_helpers.strip_controls('I \x0cove you')
def test_finalize():
"""We want None to show up as ''. We do this in JINJA_CONFIG."""
assert '' == render('{{ x }}', {'x': None})
def test_slugify_spaces():
"""We want slugify to preserve spaces, but not at either end."""
assert utils.slugify(' b ar ') == 'b-ar'
assert utils.slugify(' b ar ', spaces=True) == 'b ar'
    assert utils.slugify(' b  ar ', spaces=True) == 'b  ar'
def test_page_title():
request = Mock()
title = 'Oh hai!'
s = render('{{ page_title("%s") }}' % title, {'request': request})
assert s == '%s :: Add-ons for Firefox' % title
# Check the dirty unicodes.
s = render(
'{{ page_title(x) }}',
{'request': request, 'x': force_bytes('\u05d0\u05d5\u05e1\u05e3')},
)
def test_page_title_markup():
"""If the title passed to page_title is a jinja2 Markup object, don't cast
it back to a string or it'll get double escaped. See issue #1062."""
request = Mock()
# Markup isn't double escaped.
res = render(
'{{ page_title("{0}"|format_html("It\'s all text")) }}', {'request': request}
)
assert res == f'It{SQUOTE_ESCAPED}s all text :: Add-ons for Firefox'
def test_template_escaping():
"""Test that tests various formatting scenarios we're using in our
templates and makes sure they're working as expected.
"""
# Simple HTML in a translatable string
expected = '<a href="...">This is a test</a>'
assert render('{{ _(\'<a href="...">This is a test</a>\') }}') == expected
# Simple HTML in a translatable string, with |format_html works
# as expected
expected = '<a href="...">This is a test</a>'
original = '{{ _(\'<a href="...">{0}</a>\')|format_html("This is a test") }}'
assert render(original) == expected
# The html provided in the translatable string won't be escaped
# but all arguments are.
expected = '<a href="...">This is a <h1>test</h1></a>'
original = (
'{{ _(\'<a href="...">{0}</a>\')|format_html("This is a <h1>test</h1>") }}'
)
assert render(original) == expected
# Unless marked explicitly as safe
expected = '<a href="...">This is a <h1>test</h1></a>'
original = (
'{{ _(\'<a href="...">{0}</a>\')'
'|format_html("This is a <h1>test</h1>"|safe) }}'
)
assert render(original) == expected
    # Document how newstyle gettext behaves, everything that gets passed in
# like that needs to be escaped!
expected = '<script></script>'
assert render('{{ _(foo) }}', {'foo': '<script></script>'}) != expected
assert render('{{ _(foo|escape) }}', {'foo': '<script></script>'}) == expected
# Various tests for gettext related helpers and make sure they work
# properly just as `_()` does.
expected = '<b>5 users</b>'
assert (
render(
"{{ ngettext('<b>{0} user</b>', '<b>{0} users</b>', 2)" '|format_html(5) }}'
)
== expected
)
# You could also mark the whole output as |safe but note that this
# still escapes the arguments of |format_html unless explicitly
# marked as safe
expected = '<b><script> users</b>'
assert (
render(
"{{ ngettext('<b>{0} user</b>', '<b>{0} users</b>', 2)"
'|format_html("<script>")|safe }}'
)
== expected
)
@patch('olympia.amo.templatetags.jinja_helpers.reverse')
def test_url(mock_reverse):
render('{{ url("viewname", 1, z=2) }}')
mock_reverse.assert_called_with(
'viewname', args=(1,), kwargs={'z': 2}, add_prefix=True
)
render('{{ url("viewname", 1, z=2, host="myhost") }}')
mock_reverse.assert_called_with(
'viewname', args=(1,), kwargs={'z': 2}, add_prefix=True
)
def test_drf_url():
fragment = '{{ drf_url("addon-detail", pk="a3615") }}'
rf = RequestFactory()
request = rf.get('/hello/')
rendered = render(fragment, context={'request': request})
# As no /vX/ in the request, RESTFRAMEWORK['DEFAULT_VERSION'] is used.
assert rendered == jinja_helpers.absolutify(
reverse_ns('addon-detail', args=['a3615'])
)
with pytest.raises(NoReverseMatch):
# Without a request it can't resolve the name correctly.
render(fragment, context={})
def test_urlparams():
url = '/en-US/firefox/themes/category'
c = {
'base': url,
'base_frag': url + '#hash',
'base_query': url + '?x=y',
'sort': 'name',
'frag': 'frag',
}
# Adding a query.
s = render('{{ base_frag|urlparams(sort=sort) }}', c)
assert s == '%s?sort=name#hash' % url
# Adding a fragment.
s = render('{{ base|urlparams(frag) }}', c)
assert s == '%s#frag' % url
# Replacing a fragment.
s = render('{{ base_frag|urlparams(frag) }}', c)
assert s == '%s#frag' % url
# Adding query and fragment.
s = render('{{ base_frag|urlparams(frag, sort=sort) }}', c)
assert s == '%s?sort=name#frag' % url
# Adding query with existing params.
s = render('{{ base_query|urlparams(frag, sort=sort) }}', c)
amo.tests.assert_url_equal(s, '%s?sort=name&x=y#frag' % url)
# Replacing a query param.
s = render('{{ base_query|urlparams(frag, x="z") }}', c)
assert s == '%s?x=z#frag' % url
# Params with value of None get dropped.
s = render('{{ base|urlparams(sort=None) }}', c)
assert s == url
# Removing a query
s = render('{{ base_query|urlparams(x=None) }}', c)
assert s == url
def test_urlparams_unicode():
url = '/xx?evil=reco\ufffd\ufffd\ufffd\u02f5'
utils.urlparams(url)
def test_urlparams_returns_safe_string():
s = render('{{ "https://foo.com/"|urlparams(param="help+me") }}', {})
assert s == 'https://foo.com/?param=help%2Bme'
s = render('{{ "https://foo.com/"|urlparams(param="obiwankénobi") }}', {})
assert s == 'https://foo.com/?param=obiwank%C3%A9nobi'
s = render('{{ "https://foo.com/"|urlparams(param=42) }}', {})
assert s == 'https://foo.com/?param=42'
s = render('{{ "https://foo.com/"|urlparams(param="") }}', {})
assert s == 'https://foo.com/?param='
s = render('{{ "https://foo.com/"|urlparams(param="help%2Bme") }}', {})
assert s == 'https://foo.com/?param=help%2Bme'
s = render('{{ "https://foo.com/"|urlparams(param="a%20b") }}', {})
assert s == 'https://foo.com/?param=a+b'
s = render('{{ "https://foo.com/"|urlparams(param="%AAA") }}', {})
assert s == 'https://foo.com/?param=%AAA'
string = render(
'{{ unsafe_url|urlparams }}',
{
'unsafe_url': "http://url.with?foo=<script>alert('awesome')</script>"
'&baa=that'
},
)
assert string == (
'http://url.with?foo=%3Cscript%3Ealert%28%27awesome%27%29%3C%2Fscript%3E'
'&baa=that'
)
string = render(
'{{ "http://safe.url?baa=that"|urlparams(foo=unsafe_param) }}',
{'unsafe_param': "<script>alert('awesome')</script>"},
)
assert string == (
'http://safe.url?baa=that'
'&foo=%3Cscript%3Ealert%28%27awesome%27%29%3C%2Fscript%3E'
)
def test_isotime():
time = datetime(2009, 12, 25, 10, 11, 12)
s = render('{{ d|isotime }}', {'d': time})
assert s == '2009-12-25T10:11:12Z'
s = render('{{ d|isotime }}', {'d': None})
assert s == ''
def test_epoch():
time = datetime(2009, 12, 25, 10, 11, 12)
s = render('{{ d|epoch }}', {'d': time})
assert s == '1261735872'
s = render('{{ d|epoch }}', {'d': None})
assert s == ''
def test_locale_url():
rf = RequestFactory()
request = rf.get('/de', SCRIPT_NAME='/z')
prefixer = urlresolvers.Prefixer(request)
set_url_prefix(prefixer)
s = render('{{ locale_url("mobile") }}')
assert s == '/z/de/mobile'
def test_external_url():
redirect_url = settings.REDIRECT_URL
secretkey = settings.REDIRECT_SECRET_KEY
settings.REDIRECT_URL = 'http://example.net'
settings.REDIRECT_SECRET_KEY = 'sekrit'
try:
myurl = 'http://example.com'
s = render('{{ "%s"|external_url }}' % myurl)
assert s == urlresolvers.get_outgoing_url(myurl)
finally:
settings.REDIRECT_URL = redirect_url
settings.REDIRECT_SECRET_KEY = secretkey
@patch('olympia.amo.templatetags.jinja_helpers.urlresolvers.get_outgoing_url')
def test_linkify_bounce_url_callback(mock_get_outgoing_url):
mock_get_outgoing_url.return_value = 'bar'
res = urlresolvers.linkify_bounce_url_callback({(None, 'href'): 'foo'})
# Make sure get_outgoing_url was called.
assert res == {(None, 'href'): 'bar'}
mock_get_outgoing_url.assert_called_with('foo')
@patch(
'olympia.amo.templatetags.jinja_helpers.urlresolvers.linkify_bounce_url_callback'
)
def test_linkify_with_outgoing_text_links(mock_linkify_bounce_url_callback):
def side_effect(attrs, new=False):
attrs[(None, 'href')] = 'bar'
return attrs
mock_linkify_bounce_url_callback.side_effect = side_effect
res = urlresolvers.linkify_with_outgoing('a text http://example.com link')
# Use PyQuery because the attributes could be rendered in any order.
doc = PyQuery(res)
assert doc('a[href="bar"][rel="nofollow"]')[0].text == 'http://example.com'
@patch(
'olympia.amo.templatetags.jinja_helpers.urlresolvers.linkify_bounce_url_callback'
)
def test_linkify_with_outgoing_markup_links(mock_linkify_bounce_url_callback):
def side_effect(attrs, new=False):
attrs[(None, 'href')] = 'bar'
return attrs
mock_linkify_bounce_url_callback.side_effect = side_effect
res = urlresolvers.linkify_with_outgoing(
'a markup <a href="http://example.com">link</a> with text'
)
# Use PyQuery because the attributes could be rendered in any order.
doc = PyQuery(res)
assert doc('a[href="bar"][rel="nofollow"]')[0].text == 'link'
def get_image_path(name):
return os.path.join(settings.ROOT, 'src', 'olympia', 'amo', 'tests', 'images', name)
def get_uploaded_file(name):
data = open(get_image_path(name), mode='rb').read()
return SimpleUploadedFile(name, data, content_type=mimetypes.guess_type(name)[0])
def get_addon_file(name):
return os.path.join(ADDONS_TEST_FILES, name)
class TestAnimatedImages(TestCase):
def test_animated_images(self):
img = ImageCheck(open(get_image_path('animated.png'), mode='rb'))
assert img.is_animated()
img = ImageCheck(open(get_image_path('non-animated.png'), mode='rb'))
assert not img.is_animated()
img = ImageCheck(open(get_image_path('animated.gif'), mode='rb'))
assert img.is_animated()
img = ImageCheck(open(get_image_path('non-animated.gif'), mode='rb'))
assert not img.is_animated()
def test_junk(self):
img = ImageCheck(open(__file__, 'rb'))
assert not img.is_image()
img = ImageCheck(open(get_image_path('non-animated.gif'), mode='rb'))
assert img.is_image()
def test_jinja_trans_monkeypatch():
# This tests the monkeypatch in manage.py that prevents localizers from
# taking us down.
render('{% trans come_on=1 %}% (come_on)s{% endtrans %}')
render('{% trans come_on=1 %}%(come_on){% endtrans %}')
render('{% trans come_on=1 %}%(come_on)z{% endtrans %}')
@pytest.mark.parametrize(
'url,site,expected',
[
('', None, settings.EXTERNAL_SITE_URL),
('', '', settings.EXTERNAL_SITE_URL),
(None, None, settings.EXTERNAL_SITE_URL),
('foo', None, f'{settings.EXTERNAL_SITE_URL}/foo'),
('foobar', 'http://amo.com', 'http://amo.com/foobar'),
('abc', 'https://localhost', 'https://localhost/abc'),
('http://addons.mozilla.org', None, 'http://addons.mozilla.org'),
('https://addons.mozilla.org', None, 'https://addons.mozilla.org'),
('https://amo.com', 'https://addons.mozilla.org', 'https://amo.com'),
('woo', 'www', 'woo'),
],
)
def test_absolutify(url, site, expected):
"""Make sure we correct join a base URL and a possibly relative URL."""
assert jinja_helpers.absolutify(url, site) == expected
def test_timesince():
month_ago = datetime.now() - timedelta(days=30)
assert jinja_helpers.timesince(month_ago) == '1 month ago'
assert jinja_helpers.timesince(None) == ''
def test_timeuntil():
a_month_in_the_future = datetime.now() + timedelta(days=31)
assert jinja_helpers.timeuntil(a_month_in_the_future) == '1 month'
a_week_in_the_future = datetime.now() + timedelta(days=14, hours=1)
assert jinja_helpers.timeuntil(a_week_in_the_future) == '2 weeks'
def test_format_unicode():
# This makes sure there's no UnicodeEncodeError when doing the string
# interpolation.
assert render('{{ "foo {0}"|format_html("baré") }}') == 'foo baré'
class TestStoragePath(TestCase):
@override_settings(ADDONS_PATH=None, MEDIA_ROOT='/path/')
def test_without_settings(self):
del settings.ADDONS_PATH
path = jinja_helpers.user_media_path('addons')
assert path == '/path/addons'
@override_settings(ADDONS_PATH='/another/path/')
def test_with_settings(self):
path = jinja_helpers.user_media_path('addons')
assert path == '/another/path/'
class TestMediaUrl(TestCase):
@override_settings(USERPICS_URL=None)
def test_without_settings(self):
del settings.USERPICS_URL
settings.MEDIA_URL = '/mediapath/'
url = jinja_helpers.user_media_url('userpics')
assert url == '/mediapath/userpics/'
SPACELESS_TEMPLATE = """
<div> <div>outside</div>
<b>tag</b> <em>is fine</em>
{% spaceless %}
<div prop=" inside props is left alone ">not</div>
<i>space </i> <span>between
</span>
{% endspaceless %}
<div>outside again </div>
</div>
"""
SPACELESS_RESULT = """
<div> <div>outside</div>
<b>tag</b> <em>is fine</em>
<div prop=" inside props is left alone ">not</div><i>space </i><span>between
</span><div>outside again </div>
</div>"""
def test_spaceless_extension():
assert render(SPACELESS_TEMPLATE) == SPACELESS_RESULT
| bsd-3-clause | -319,905,060,783,356,350 | 30.929167 | 88 | 0.609161 | false |
GoogleCloudPlatform/bigquery-utils | tools/cloud_functions/gcs_event_based_ingest/tests/conftest.py | 1 | 20146 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration tests for gcs_ocn_bq_ingest"""
import json
import os
import time
import uuid
from typing import List
import pytest
from google.cloud import bigquery
from google.cloud import error_reporting
from google.cloud import storage
import gcs_ocn_bq_ingest.common.ordering
import gcs_ocn_bq_ingest.common.utils
TEST_DIR = os.path.realpath(os.path.dirname(__file__))
LOAD_JOB_POLLING_TIMEOUT = 10 # seconds
@pytest.fixture(scope="module")
def bq() -> bigquery.Client:
"""BigQuery Client"""
return bigquery.Client(location="US")
@pytest.fixture(scope="module")
def gcs() -> storage.Client:
"""GCS Client"""
return storage.Client()
@pytest.fixture(scope="module")
def error() -> error_reporting.Client:
"""GCS Client"""
return error_reporting.Client()
@pytest.fixture
def gcs_bucket(request, gcs) -> storage.bucket.Bucket:
"""GCS bucket for test artifacts"""
bucket = gcs.create_bucket(str(uuid.uuid4()))
bucket.versioning_enabled = True
bucket.patch()
    # override default field delimiter at bucket level
load_config_json = {
"fieldDelimiter": "|",
}
load_json_blob: storage.Blob = bucket.blob("_config/load.json")
load_json_blob.upload_from_string(json.dumps(load_config_json))
def teardown():
load_json_blob.delete()
bucket.versioning_enabled = False
bucket.patch()
for obj in gcs.list_blobs(bucket_or_name=bucket, versions=True):
obj.delete()
bucket.delete(force=True)
request.addfinalizer(teardown)
return bucket
@pytest.fixture
def mock_env(gcs, monkeypatch):
"""environment variable mocks"""
# Infer project from ADC of gcs client.
monkeypatch.setenv("GCP_PROJECT", gcs.project)
monkeypatch.setenv("FUNCTION_NAME", "integration-test")
monkeypatch.setenv("FUNCTION_TIMEOUT_SEC", "540")
monkeypatch.setenv("BQ_PROJECT", gcs.project)
@pytest.fixture
def ordered_mock_env(mock_env, monkeypatch):
"""environment variable mocks"""
monkeypatch.setenv("ORDER_PER_TABLE", "TRUE")
@pytest.fixture
def dest_dataset(request, bq, mock_env, monkeypatch):
random_dataset = (f"test_bq_ingest_gcf_"
f"{str(uuid.uuid4())[:8].replace('-','_')}")
dataset = bigquery.Dataset(f"{os.getenv('GCP_PROJECT')}"
f".{random_dataset}")
dataset.location = "US"
bq.create_dataset(dataset)
monkeypatch.setenv("BQ_LOAD_STATE_TABLE",
f"{dataset.dataset_id}.serverless_bq_loads")
print(f"created dataset {dataset.dataset_id}")
def teardown():
bq.delete_dataset(dataset, delete_contents=True, not_found_ok=True)
request.addfinalizer(teardown)
return dataset
@pytest.fixture
def dest_table(request, bq, mock_env, dest_dataset) -> bigquery.Table:
with open(os.path.join(TEST_DIR, "resources",
"nation_schema.json")) as schema_file:
schema = gcs_ocn_bq_ingest.common.utils.dict_to_bq_schema(
json.load(schema_file))
table = bigquery.Table(
f"{os.environ.get('GCP_PROJECT')}"
f".{dest_dataset.dataset_id}.cf_test_nation_"
f"{str(uuid.uuid4()).replace('-','_')}",
schema=schema,
)
table = bq.create_table(table)
def teardown():
bq.delete_table(table, not_found_ok=True)
request.addfinalizer(teardown)
return table
@pytest.fixture(scope="function")
def gcs_data(request, gcs_bucket, dest_dataset,
dest_table) -> storage.blob.Blob:
data_objs = []
for test_file in ["part-m-00000", "part-m-00001", "_SUCCESS"]:
data_obj: storage.blob.Blob = gcs_bucket.blob("/".join([
f"{dest_dataset.project}.{dest_dataset.dataset_id}",
dest_table.table_id, test_file
]))
data_obj.upload_from_filename(
os.path.join(TEST_DIR, "resources", "test-data", "nation",
test_file))
data_objs.append(data_obj)
def teardown():
for do in data_objs:
            if do.exists():
do.delete()
request.addfinalizer(teardown)
return data_objs[-1]
@pytest.fixture(scope="function")
def gcs_data_under_sub_dirs(request, gcs_bucket, dest_dataset,
dest_table) -> storage.blob.Blob:
data_objs = []
for test_file in ["part-m-00000", "part-m-00001", "_SUCCESS"]:
data_obj: storage.blob.Blob = gcs_bucket.blob("/".join([
f"{dest_dataset.project}.{dest_dataset.dataset_id}",
dest_table.table_id, "foo", "bar", "baz", test_file
]))
data_obj.upload_from_filename(
os.path.join(TEST_DIR, "resources", "test-data", "nation",
test_file))
data_objs.append(data_obj)
def teardown():
for do in data_objs:
if do.exists():
do.delete()
request.addfinalizer(teardown)
return data_objs[-1]
@pytest.fixture(scope="function")
def gcs_truncating_load_config(request, gcs_bucket, dest_dataset,
dest_table) -> storage.blob.Blob:
config_obj: storage.blob.Blob = gcs_bucket.blob("/".join([
dest_dataset.dataset_id,
dest_table.table_id,
"_config",
"load.json",
]))
config_obj.upload_from_string(
json.dumps({"writeDisposition": "WRITE_TRUNCATE"}))
def teardown():
if config_obj.exists():
config_obj.delete()
request.addfinalizer(teardown)
return config_obj
@pytest.fixture(scope="function")
def gcs_batched_data(request, gcs_bucket, dest_dataset,
dest_table) -> List[storage.blob.Blob]:
"""
upload two batches of data
"""
data_objs = []
for batch in ["batch0", "batch1"]:
for test_file in ["part-m-00000", "part-m-00001", "_SUCCESS"]:
data_obj: storage.blob.Blob = gcs_bucket.blob("/".join([
dest_dataset.dataset_id, dest_table.table_id, batch, test_file
]))
data_obj.upload_from_filename(
os.path.join(TEST_DIR, "resources", "test-data", "nation",
test_file))
data_objs.append(data_obj)
def teardown():
for do in data_objs:
if do.exists():
do.delete()
request.addfinalizer(teardown)
return [data_objs[-1], data_objs[-4]]
@pytest.fixture
def gcs_external_config(request, gcs_bucket, dest_dataset,
dest_table) -> List[storage.blob.Blob]:
config_objs = []
sql_obj = gcs_bucket.blob("/".join([
f"{dest_dataset.project}.{dest_dataset.dataset_id}",
dest_table.table_id,
"_config",
"bq_transform.sql",
]))
sql = "INSERT {dest_dataset}.{dest_table} SELECT * FROM temp_ext"
sql_obj.upload_from_string(sql)
config_obj = gcs_bucket.blob("/".join([
f"{dest_dataset.project}.{dest_dataset.dataset_id}",
dest_table.table_id, "_config", "external.json"
]))
with open(os.path.join(TEST_DIR, "resources",
"nation_schema.json")) as schema:
fields = json.load(schema)
config = {
"schema": {
"fields": fields
},
"csvOptions": {
"allowJaggedRows": False,
"allowQuotedNewlines": False,
"encoding": "UTF-8",
"fieldDelimiter": "|",
"skipLeadingRows": 0,
},
"sourceFormat": "CSV",
"sourceUris": ["REPLACEME"],
}
config_obj.upload_from_string(json.dumps(config))
config_objs.append(sql_obj)
config_objs.append(config_obj)
def teardown():
for do in config_objs:
if do.exists():
do.delete()
request.addfinalizer(teardown)
return config_objs
@pytest.fixture(scope="function")
def gcs_partitioned_data(request, gcs_bucket, dest_dataset,
dest_partitioned_table) -> List[storage.blob.Blob]:
data_objs = []
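    # The "$YYYYMMDDHH" directory names follow BigQuery's partition-decorator
    # syntax for addressing individual hourly partitions of the destination table.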
for partition in ["$2017041101", "$2017041102"]:
for test_file in ["nyc_311.csv", "_SUCCESS"]:
data_obj: storage.blob.Blob = gcs_bucket.blob("/".join([
dest_dataset.dataset_id, dest_partitioned_table.table_id,
partition, test_file
]))
data_obj.upload_from_filename(
os.path.join(TEST_DIR, "resources", "test-data", "nyc_311",
partition, test_file))
data_objs.append(data_obj)
def teardown():
for dobj in data_objs:
# we expect some backfill files to be removed by the cloud function.
if dobj.exists():
dobj.delete()
request.addfinalizer(teardown)
return [data_objs[-1], data_objs[-3]]
@pytest.fixture(scope="function")
def dest_partitioned_table(request, bq: bigquery.Client, mock_env,
dest_dataset) -> bigquery.Table:
public_table: bigquery.Table = bq.get_table(
bigquery.TableReference.from_string(
"bigquery-public-data.new_york_311.311_service_requests"))
schema = public_table.schema
table: bigquery.Table = bigquery.Table(
f"{os.environ.get('GCP_PROJECT')}"
f".{dest_dataset.dataset_id}.cf_test_nyc_311_"
f"{str(uuid.uuid4()).replace('-','_')}",
schema=schema,
)
table.time_partitioning = bigquery.TimePartitioning()
table.time_partitioning.type_ = bigquery.TimePartitioningType.HOUR
table.time_partitioning.field = "created_date"
table = bq.create_table(table)
def teardown():
bq.delete_table(table, not_found_ok=True)
request.addfinalizer(teardown)
return table
def bq_wait_for_rows(bq_client: bigquery.Client, table: bigquery.Table,
expected_num_rows: int):
"""
    Polls the tables.get API for the number of rows until it reaches the
    expected value or times out.
This is mostly an optimization to speed up the test suite without making it
flaky.
"""
start_poll = time.monotonic()
actual_num_rows = 0
while time.monotonic() - start_poll < LOAD_JOB_POLLING_TIMEOUT:
bq_table: bigquery.Table = bq_client.get_table(table)
actual_num_rows = bq_table.num_rows
if actual_num_rows == expected_num_rows:
return
if actual_num_rows > expected_num_rows:
raise AssertionError(
f"{table.project}.{table.dataset_id}.{table.table_id} has"
f"{actual_num_rows} rows. expected {expected_num_rows} rows.")
raise AssertionError(
f"Timed out after {LOAD_JOB_POLLING_TIMEOUT} seconds waiting for "
f"{table.project}.{table.dataset_id}.{table.table_id} to "
f"reach {expected_num_rows} rows."
f"last poll returned {actual_num_rows} rows.")
@pytest.fixture
def dest_ordered_update_table(request, gcs, gcs_bucket, bq, mock_env,
dest_dataset) -> bigquery.Table:
with open(os.path.join(TEST_DIR, "resources",
"ordering_schema.json")) as schema_file:
schema = gcs_ocn_bq_ingest.common.utils.dict_to_bq_schema(
json.load(schema_file))
table = bigquery.Table(
f"{os.environ.get('GCP_PROJECT')}.{dest_dataset.dataset_id}"
f".cf_test_ordering_{str(uuid.uuid4()).replace('-','_')}",
schema=schema,
)
table = bq.create_table(table)
    # Our test query only updates a single row, so we need to populate the
    # original row.
# This can be used to simulate an existing _bqlock from a prior run of the
# subscriber loop with a job that has succeeded.
job: bigquery.LoadJob = bq.load_table_from_json(
[{
"id": 1,
"alpha_update": ""
}],
table,
job_id_prefix=gcs_ocn_bq_ingest.common.constants.DEFAULT_JOB_PREFIX)
# The subscriber will be responsible for cleaning up this file.
bqlock_obj: storage.blob.Blob = gcs_bucket.blob("/".join([
f"{dest_dataset.project}.{dest_dataset.dataset_id}", table.table_id,
"_bqlock"
]))
bqlock_obj.upload_from_string(job.job_id)
def teardown():
bq.delete_table(table, not_found_ok=True)
if bqlock_obj.exists():
bqlock_obj.delete()
request.addfinalizer(teardown)
return table
@pytest.fixture(scope="function")
def gcs_ordered_update_data(
request, gcs_bucket, dest_dataset,
dest_ordered_update_table) -> List[storage.blob.Blob]:
data_objs = []
older_success_blob: storage.blob.Blob = gcs_bucket.blob("/".join([
f"{dest_dataset.project}.{dest_dataset.dataset_id}",
dest_ordered_update_table.table_id, "00", "_SUCCESS"
]))
older_success_blob.upload_from_string("")
data_objs.append(older_success_blob)
chunks = {
"01",
"02",
"03",
}
for chunk in chunks:
for test_file in ["data.csv", "_SUCCESS"]:
data_obj: storage.blob.Blob = gcs_bucket.blob("/".join([
f"{dest_dataset.project}.{dest_dataset.dataset_id}",
dest_ordered_update_table.table_id, chunk, test_file
]))
data_obj.upload_from_filename(
os.path.join(TEST_DIR, "resources", "test-data", "ordering",
chunk, test_file))
data_objs.append(data_obj)
def teardown():
for dobj in data_objs:
if dobj.exists():
dobj.delete()
request.addfinalizer(teardown)
return list(filter(lambda do: do.name.endswith("_SUCCESS"), data_objs))
@pytest.fixture(scope="function")
def gcs_backlog(request, gcs, gcs_bucket,
gcs_ordered_update_data) -> List[storage.blob.Blob]:
data_objs = []
# We will deal with the last incremental in the test itself to test the
# behavior of a new backlog subscriber.
for success_blob in gcs_ordered_update_data:
gcs_ocn_bq_ingest.common.ordering.backlog_publisher(gcs, success_blob)
backlog_blob = \
gcs_ocn_bq_ingest.common.ordering.success_blob_to_backlog_blob(
success_blob
)
backlog_blob.upload_from_string("")
data_objs.append(backlog_blob)
def teardown():
for dobj in data_objs:
if dobj.exists():
dobj.delete()
request.addfinalizer(teardown)
return list(filter(lambda do: do.name.endswith("_SUCCESS"), data_objs))
@pytest.fixture
def gcs_external_update_config(request, gcs_bucket, dest_dataset,
dest_ordered_update_table) -> storage.Blob:
config_objs = []
sql_obj = gcs_bucket.blob("/".join([
f"{dest_dataset.project}.{dest_dataset.dataset_id}",
dest_ordered_update_table.table_id,
"_config",
"bq_transform.sql",
]))
sql = """
UPDATE {dest_dataset}.{dest_table} dest
SET alpha_update = CONCAT(dest.alpha_update, src.alpha_update)
FROM temp_ext src
WHERE dest.id = src.id
"""
sql_obj.upload_from_string(sql)
config_obj = gcs_bucket.blob("/".join([
f"{dest_dataset.project}.{dest_dataset.dataset_id}",
dest_ordered_update_table.table_id, "_config", "external.json"
]))
with open(os.path.join(TEST_DIR, "resources",
"ordering_schema.json")) as schema:
fields = json.load(schema)
config = {
"schema": {
"fields": fields
},
"csvOptions": {
"allowJaggedRows": False,
"allowQuotedNewlines": False,
"encoding": "UTF-8",
"fieldDelimiter": "|",
"skipLeadingRows": 0,
},
"sourceFormat": "CSV",
"sourceUris": ["REPLACEME"],
}
config_obj.upload_from_string(json.dumps(config))
backfill_blob = gcs_bucket.blob("/".join([
f"{dest_dataset.project}.{dest_dataset.dataset_id}",
dest_ordered_update_table.table_id,
gcs_ocn_bq_ingest.common.constants.BACKFILL_FILENAME
]))
backfill_blob.upload_from_string("")
config_objs.append(sql_obj)
config_objs.append(config_obj)
config_objs.append(backfill_blob)
def teardown():
for do in config_objs:
if do.exists():
do.delete()
request.addfinalizer(teardown)
return backfill_blob
@pytest.mark.usefixtures("bq", "gcs_bucket", "dest_dataset",
"dest_partitioned_table")
@pytest.fixture
def gcs_external_partitioned_config(
request, bq, gcs_bucket, dest_dataset,
dest_partitioned_table) -> List[storage.blob.Blob]:
config_objs = []
sql_obj = gcs_bucket.blob("/".join([
dest_dataset.dataset_id,
dest_partitioned_table.table_id,
"_config",
"bq_transform.sql",
]))
sql = "INSERT {dest_dataset}.{dest_table} SELECT * FROM temp_ext;"
sql_obj.upload_from_string(sql)
config_obj = gcs_bucket.blob("/".join([
dest_dataset.dataset_id, dest_partitioned_table.table_id, "_config",
"external.json"
]))
public_table: bigquery.Table = bq.get_table(
bigquery.TableReference.from_string(
"bigquery-public-data.new_york_311.311_service_requests"))
config = {
"schema": public_table.to_api_repr()['schema'],
"csvOptions": {
"allowJaggedRows": False,
"allowQuotedNewlines": False,
"encoding": "UTF-8",
"fieldDelimiter": "|",
"skipLeadingRows": 0,
},
"sourceFormat": "CSV",
"sourceUris": ["REPLACEME"],
}
config_obj.upload_from_string(json.dumps(config))
config_objs.append(sql_obj)
config_objs.append(config_obj)
def teardown():
for do in config_objs:
            if do.exists():
do.delete()
request.addfinalizer(teardown)
return config_objs
@pytest.fixture
def no_use_error_reporting(monkeypatch):
monkeypatch.setenv("USE_ERROR_REPORTING_API", "False")
@pytest.fixture
def gcs_external_config_bad_statement(
request, gcs_bucket, dest_dataset, dest_table,
no_use_error_reporting) -> List[storage.blob.Blob]:
config_objs = []
sql_obj = gcs_bucket.blob("/".join([
f"{dest_dataset.project}.{dest_dataset.dataset_id}",
dest_table.table_id,
"_config",
"bq_transform.sql",
]))
sql = ("INSERT {dest_dataset}.{dest_table} SELECT * FROM temp_ext;\n"
"INSERT {dest_dataset}.{dest_table} SELECT 1/0;")
sql_obj.upload_from_string(sql)
config_obj = gcs_bucket.blob("/".join([
f"{dest_dataset.project}.{dest_dataset.dataset_id}",
dest_table.table_id, "_config", "external.json"
]))
with open(os.path.join(TEST_DIR, "resources",
"nation_schema.json")) as schema:
fields = json.load(schema)
config = {
"schema": {
"fields": fields
},
"csvOptions": {
"allowJaggedRows": False,
"allowQuotedNewlines": False,
"encoding": "UTF-8",
"fieldDelimiter": "|",
"skipLeadingRows": 0,
},
"sourceFormat": "CSV",
"sourceUris": ["REPLACEME"],
}
config_obj.upload_from_string(json.dumps(config))
config_objs.append(sql_obj)
config_objs.append(config_obj)
def teardown():
for do in config_objs:
if do.exists():
do.delete()
request.addfinalizer(teardown)
return config_objs
| apache-2.0 | 5,131,152,216,655,499,000 | 31.079618 | 80 | 0.59729 | false |
535521469/crawler_sth | scrapyd/app.py | 1 | 1586 | from twisted.application.service import Application
from twisted.application.internet import TimerService, TCPServer
from twisted.web import server
from twisted.python import log
from scrapy.utils.misc import load_object
from .interfaces import IEggStorage, IPoller, ISpiderScheduler, IEnvironment
from .launcher import Launcher
from .eggstorage import FilesystemEggStorage
from .scheduler import SpiderScheduler
from .poller import QueuePoller
from .environ import Environment
from .website import Root
from .config import Config
def application(config):
app = Application("Scrapyd")
http_port = config.getint('http_port', 6800)
bind_address = config.get('bind_address', '0.0.0.0')
poller = QueuePoller(config)
eggstorage = FilesystemEggStorage(config)
scheduler = SpiderScheduler(config)
environment = Environment(config)
app.setComponent(IPoller, poller)
app.setComponent(IEggStorage, eggstorage)
app.setComponent(ISpiderScheduler, scheduler)
app.setComponent(IEnvironment, environment)
laupath = config.get('launcher', 'scrapyd.launcher.Launcher')
laucls = load_object(laupath)
launcher = laucls(config, app)
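    # Poll the spider queues every 5 seconds.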
timer = TimerService(5, poller.poll)
webservice = TCPServer(http_port, server.Site(Root(config, app)), interface=bind_address)
log.msg(format="Scrapyd web console available at http://%(bind_address)s:%(http_port)s/",
bind_address=bind_address, http_port=http_port)
launcher.setServiceParent(app)
timer.setServiceParent(app)
webservice.setServiceParent(app)
return app
| bsd-3-clause | -6,586,102,020,023,485,000 | 34.244444 | 93 | 0.759142 | false |
deepmind/lab2d | dmlab2d/lib/game_scripts/levels/clean_up/play.py | 1 | 3449 | # Copyright 2020 The DMLab2D Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple human player for testing the `clean_up` level.
Use `WASD` keys to move the character around.
Use `Q and E` to turn the character.
Use `SPACE` to fire clean.
Use `LEFT_CTRL` to fire fine.
Use `TAB` to switch between players.
Use `[]` to switch between levels.
Use `R` to restart a level.
Use `ESCAPE` to quit.
"""
import argparse
import collections
import json
from typing import Mapping
from dmlab2d import ui_renderer
_ACTION_MAP = {
'move': ui_renderer.get_direction_pressed,
'turn': ui_renderer.get_turn_pressed,
'fireClean': ui_renderer.get_space_key_pressed,
'fireFine': ui_renderer.get_left_control_pressed
}
_FRAMES_PER_SECOND = 8
def _run(rgb_observation: str, config: Mapping[str, str]):
"""Run multiplayer environment, with per player rendering and actions."""
player_count = int(config.get('numPlayers', '1'))
score = collections.defaultdict(float)
total_contrib = collections.defaultdict(float)
prefixes = [str(i + 1) + '.' for i in range(player_count)]
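  # Observations are namespaced per player, e.g. '1.REWARD', '2.CONTRIB'.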
ui = ui_renderer.Renderer(
config=config,
action_map=_ACTION_MAP,
rgb_observation=rgb_observation,
player_prefixes=[str(i + 1) + '.' for i in range(player_count)],
frames_per_second=_FRAMES_PER_SECOND)
def player_printer(idx: int):
print(f'Player({idx}) contrib({total_contrib[idx]}) score({score[idx]})')
for step in ui.run():
if step.type == ui_renderer.StepType.FIRST:
print(f'=== Start episode {step.episode} ===')
print_player = False
for idx, prefix in enumerate(prefixes):
reward = step.env.observation(prefix + 'REWARD')
score[idx] += reward
contrib = step.env.observation(prefix + 'CONTRIB')
total_contrib[idx] += contrib
if step.player == idx and (reward != 0 or contrib != 0):
print_player = True
if print_player:
player_printer(step.player)
if step.type == ui_renderer.StepType.LAST:
print(f'=== End episode {step.episode} ===')
for idx in range(player_count):
player_printer(idx)
print('======')
print('=== Exiting ===')
for idx in range(player_count):
player_printer(idx)
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--observation', type=str, default='RGB', help='Observation to render')
parser.add_argument(
'--settings', type=json.loads, default={}, help='Settings as JSON string')
parser.add_argument(
'--players', type=int, default=4, help='Number of players.')
args = parser.parse_args()
if 'levelName' not in args.settings:
args.settings['levelName'] = 'clean_up'
if 'numPlayers' not in args.settings:
args.settings['numPlayers'] = args.players
for k in args.settings:
args.settings[k] = str(args.settings[k])
_run(args.observation, args.settings)
if __name__ == '__main__':
main()
| apache-2.0 | 7,325,948,526,025,038,000 | 30.642202 | 80 | 0.677298 | false |
ray-project/ray | release/tune_tests/scalability_tests/workloads/test_network_overhead.py | 1 | 1297 | """Networking overhead (200 trials on 200 nodes)
In this run, we will start 100 trials and run them on 100 different nodes.
This test will thus measure the overhead that comes with network communication
and specifically log synchronization.
Cluster: cluster_100x2.yaml
Test owner: krfricke
Acceptance criteria: Should run faster than 500 seconds.
Theoretical minimum time: 300 seconds
"""
import argparse
import ray
from ray import tune
from ray.tune.utils.release_test_util import timed_tune_run
def main(smoke_test: bool = False):
ray.init(address="auto")
num_samples = 100 if not smoke_test else 20
results_per_second = 0.01
trial_length_s = 300
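    # One 2-CPU trial per 2-CPU node means all 100 trials run concurrently, so
    # the theoretical minimum runtime equals trial_length_s (300 s).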
max_runtime = 1000
timed_tune_run(
name="result network overhead",
num_samples=num_samples,
results_per_second=results_per_second,
trial_length_s=trial_length_s,
max_runtime=max_runtime,
resources_per_trial={"cpu": 2}, # One per node
sync_config=tune.SyncConfig(sync_to_driver=True))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--smoke-test",
action="store_true",
default=False,
help="Finish quickly for training.")
args = parser.parse_args()
main(args.smoke_test)
| apache-2.0 | 4,828,814,425,852,465,000 | 24.94 | 78 | 0.683115 | false |
cdiener/pyart | asciinator.py | 1 | 1723 | #!/usr/bin/env python
# asciinator.py
#
# Copyright 2014 Christian Diener <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
from __future__ import print_function # for python2 compat
import sys
from PIL import Image
import numpy as np
# ascii chars sorted by "density"
chars = np.asarray(list(' .,:;irsXA253hMHGS#9B&@'))
# check command line arguments
if len(sys.argv) != 4:
print( 'Usage: asciinator.py image scale factor' )
sys.exit()
# set basic program parameters
# f = filename, SC = scale, GCF = gamma correction factor, WCF = width correction factor
f, SC, GCF, WCF = sys.argv[1], float(sys.argv[2]), float(sys.argv[3]), 7.0/4.0
# open, scale and normalize image by pixel intensities
img = Image.open(f)
S = (int(img.size[0]*SC*WCF), int(img.size[1]*SC))
img = np.sum( np.asarray(img.resize(S), dtype="float"), axis=2)
img -= img.min()
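# invert the normalized brightness and apply gamma so darker pixels map to
# denser characters in the ramp above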
img = (1.0 - img/img.max())**GCF*(chars.size-1)
# Assemble and print ascii art
print( "\n".join(("".join(r) for r in chars[img.astype(int)])))
print()
| gpl-3.0 | -7,175,192,178,625,269,000 | 32.784314 | 89 | 0.702263 | false |
bmazin/ARCONS-pipeline | astrometry/guide-centroid/manage.py | 1 | 5233 | from FitsAnalysis import convert,StarCalibration
from catalog import queryVizier,queryFitsImage
import os
import warnings
from radec import radec
from functions import *
#ignore the warning caused by astropy
warnings.filterwarnings("ignore")
#This specifies the center of fits images retrieved from the database
#Though it is possible to specify by name, it is a good idea to use 'RA DEC' in degrees to avoid errors
#pos = '104.9566125,14.2341555'
#pos = 'corot18b'
#RA,DEC = convert([['6:59:49.587','+14:14:02.96']])[0]
#print RA,DEC
#pos = '%s,%s' %(RA,DEC)
#PSR 0656+14
pos = '104.95,14.24'
#source of data:'USNO-B1.0' or '2MASS' are usually enough. For full list: http://cdsarc.u-strasbg.fr/viz-bin/vizHelp?cats/U.htx
source = 'USNO-B1.0'
#source = '2MASS'
#name of the saved files
tfitsTable = 'test.fits'
tfitsImage = 'test_image.fits'
#if manCat=True, manual catalog will be used instead of vizier
#if semiManCat=True, stars will be added on top of the vizier catalog stars
#stars appended in both cases are specified in manCatFile
#notice that manCat and semiManCat can't both be true at the same time
manCat = False
semiManCat = True
manCatFile = 'manCat.cat'
calHeight = 3
#saving directory of all the calibrated files in relative path
caldir = './cal/'
#directory of fits images to be calibrated, put all the files here
fdir = './origin/'
sedir = './config/'
#the distortion parameter file
paramFile = None
#if manual is False, the program will use sextractor to find sources and match the corresponding stars in the images
#also make sure the ./origin/ folder has appropriate sextractor parameters files and parameters
manual = False
#if calibrate is True, all the files that are calibrated will be used as data points to calculate distortion parameters
calibrate = False
#next, if automatic calibration is chosen, it is best to first manually correct the reference pixel coordinate on the header. This greatly increases the chances of calibrating.
refFix = True
#specify the RA,DEC of the object in CRVAL1 and CRVAL2 and the approximate pixel coordinate in the guider pixel coordinate.
CRVAL1 = 104.950558
CRVAL2 = 14.239306
CRPIX1 = 629
CRPIX2 = 318
'''
-----------------------------------------------------------------------------------------------------------
-----------------------------------------------------------------------------------------------------------
Input Ends Here
-----------------------------------------------------------------------------------------------------------
-----------------------------------------------------------------------------------------------------------
'''
#it will overwrite any existing files with the same names, the file 'test_vo.xml' is not important and can be ignored
queryVizier(tfitsTable,source=source,pos=pos)
queryFitsImage(tfitsImage,'test_vo.xml',pos=pos)
if manCat and semiManCat:
raise ValueError, 'Manual catalog and semi-manual catalog cannot be True all at once!'
elif manCat:
catOption = 'full'
elif semiManCat:
catOption = 'semi'
else:
catOption = None
#perform linear and polynomial calibration to each file in dir specified
for fitsImage in os.listdir(fdir):
#I am separation lines
print '--------------------------------------------------------------------------'
print '--------------------------------------------------------------------------'
print '> Calibrating %s...' %(fitsImage)
#fix reference value if refFix is True
if refFix:
updateHeader(fdir+fitsImage,'CRVAL1',CRVAL1)
updateHeader(fdir+fitsImage,'CRVAL2',CRVAL2)
updateHeader(fdir+fitsImage,'CRPIX1',CRPIX1)
updateHeader(fdir+fitsImage,'CRPIX2',CRPIX2)
try:
cal = StarCalibration(fitsImage,tfitsTable,tfitsImage,manual,paramFile=paramFile,caldir=caldir,fdir=fdir,sedir=sedir,height=3,manCat=catOption,manCatFile=manCatFile)
cal.linCal()
if paramFile != None:
distHeaderUpdate(caldir+fitsImage[:-5]+'_offCal_rotCal.fits',caldir+fitsImage[:-5]+'_allCal.fits',paramFile)
#cal.distCal()
except ValueError as err:
print '> WARNING: %s is NOT calibrated: %s ' %(fitsImage,err)
#try to remove the intermediate files after calibration
try:
os.remove(caldir + fitsImage[:-5] + '_offCal.fits')
os.remove(caldir + fitsImage[:-5] + '.check')
print 'clean up completed'
except:
pass
if calibrate:
#just choose a random file in the original folder in order to call the function
dummyList = os.listdir(fdir)
print dummyList
firstDummy = dummyList[0]
cal= StarCalibration(firstDummy,tfitsTable,tfitsImage,manual,paramFile=None,caldir=caldir,fdir=fdir,sedir=sedir,manCat=catOption,manCatFile=manCatFile)
cal.distCal(addFiles=dummyList[1:])
'''
#testing scripts
#convert world coordinate(in degrees) to ARCONS coordinate
worldCoor = [98.172398,-0.0315900]
#worldCoor = [98.169492,-0.03306112]
#guide stars 20121207/112636.fits
worldCoor = [104.95365,14.241674]
worldCoor = [104.9578,14.241021]
photon = [35.9084,32.5359]
test = radec(tolError=1000)
nlist = test.centroid(worldCoor=worldCoor)
mapp = test.photonMapping('090001',15.72,14.65)
''' | gpl-2.0 | 9,176,613,468,110,063,000 | 35.859155 | 176 | 0.648003 | false |
katchengli/tech-interview-prep | interview_cake/ic3.py | 1 | 1451 | #constraint: list_of_ints will always have at least 3 integers
#can have negative numbers
def highest_product_three_ints(list_of_ints):
biggest_int = max(list_of_ints)
list_of_ints.remove(biggest_int)
max_int1 = max(list_of_ints)
list_of_ints.remove(max_int1)
max_int2 = max(list_of_ints)
list_of_ints.remove(max_int2)
if list_of_ints:
min_int1 = min(list_of_ints)
list_of_ints.remove(min_int1)
else:
return biggest_int * max_int1 * max_int2
if list_of_ints:
min_int2 = min(list_of_ints)
#list_of_ints.remove(min_int2)
else:
min_int2 = max_int2
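    # Candidate products: the largest value times the two smallest remaining
    # values (which wins when those are large negatives), vs. the three largest.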
potent_highest_product1 = biggest_int * min_int1 * min_int2
potent_highest_product2 = biggest_int * max_int1 * max_int2
if potent_highest_product1 > potent_highest_product2:
return potent_highest_product1
else:
return potent_highest_product2
print(highest_product_three_ints([3, 4, 5, 6]))
#should return 120
print(highest_product_three_ints([-10, -10, 5, 6]))
#should return 600
print(highest_product_three_ints([-60, -100, -1, -2]))
#should return -120
print(highest_product_three_ints([600, 200, -1, -2]))
#should return 1200
print(highest_product_three_ints([1000, -1000, -1, 1]))
#should return 1000000
print(highest_product_three_ints([1000, -1000, -1, 1, 800]))
#should return 1000000
print(highest_product_three_ints([1000, -1000, -1, 1, -800]))
#should return 800000000
| apache-2.0 | 2,525,693,542,367,842,300 | 30.543478 | 63 | 0.671261 | false |
mitocw/latex2edx | latex2edx/test/test_custom_html.py | 1 | 2044 | import os
import unittest
from lxml import etree
from io import StringIO
from latex2edx.main import latex2edx
from latex2edx.test.util import make_temp_directory
class MakeTeX(object):
def __init__(self, tex):
buf = """\\documentclass[12pt]{article}\n\\usepackage{edXpsl}\n\n\\begin{document}"""
buf += tex
buf += "\\end{document}"
self.buf = buf
@property
def fp(self):
return StringIO(self.buf)
class TestCustomHtml(unittest.TestCase):
def test_custom_html1(self):
tex = ('\\begin{edXcourse}{1.00x}{1.00x Fall 2013}[url_name=2013_Fall]\n'
'\n'
'\\begin{edXchapter}{Unit 1}[start="2013-11-22"]\n'
'\n'
'\\begin{edXsection}{Introduction}\n'
'\n'
'\\begin{edXtext}{My Name}[url_name=text_url_name]\n'
'Hello world!\n\n'
'\n'
'\\begin{html}{span}[style="display:none;color:red;border-style:solid" data-x=3]\n'
'this is red text with a border\n'
'\\end{html}\n\n'
'\n'
'\\end{edXtext}\n'
'\\end{edXsection}\n'
'\\end{edXchapter}\n'
'\\end{edXcourse}\n'
)
with make_temp_directory() as tmdir:
os.chdir(tmdir)
fp = MakeTeX(tex).fp
l2e = latex2edx(tmdir + '/test.tex', fp=fp, do_images=False, output_dir=tmdir)
l2e.xhtml2xbundle()
print("xbundle = ")
print(str(l2e.xb))
print()
# self.assertIn(r'<html display_name="My Name" url_name="text_url_name">', str(l2e.xb))
xml = etree.fromstring(str(l2e.xb))
html = xml.find('.//html')
self.assertTrue(html.get('display_name') == 'My Name')
self.assertIn('<span style="display:none;color:red;border-style:solid" data-x="3">this is red text with a border </span>', str(l2e.xb))
if __name__ == '__main__':
unittest.main()
| agpl-3.0 | -7,746,251,309,045,981,000 | 31.967742 | 147 | 0.527397 | false |
hemidactylus/flaskbiblio | config.py | 1 | 1074 | import os
# directories and so on
basedir = os.path.abspath(os.path.dirname(__file__))
DB_DIRECTORY=os.path.join(basedir,'app/database')
DB_NAME='biblio.db'
# stuff for Flask
WTF_CSRF_ENABLED = True
from sensible_config import SECRET_KEY
# formats, etc
DATETIME_STR_FORMAT = '%Y-%m-%d %H:%M:%S'
SHORT_DATETIME_STR_FORMAT = '%d/%m/%y'
FILENAME_DATETIME_STR_FORMAT = '%Y_%m_%d'
USERS_TIMEZONE='Europe/Rome'
# similarity thresholds for author (last- and complete-) names
SIMILAR_USE_DIGRAMS=True # otherwise: use single-letter grams
# Different thresholds are required depending on the type of vectoring
if SIMILAR_USE_DIGRAMS:
SIMILAR_AUTHOR_THRESHOLD=0.7
SIMILAR_BOOK_THRESHOLD=0.7
else:
SIMILAR_AUTHOR_THRESHOLD=0.90
SIMILAR_BOOK_THRESHOLD=0.93
# what are the smallest tokens to employ in similar-search in book titles?
MINIMUM_SIMILAR_BOOK_TOKEN_SIZE=4
# Are multiple books with the same title allowed? (suggested: yes)
ALLOW_DUPLICATE_BOOKS=True
# temporary directory for storing import-related files
TEMP_DIRECTORY=os.path.join(basedir,'app/temp')
| gpl-3.0 | -1,613,392,918,894,984,700 | 29.685714 | 74 | 0.752328 | false |
valmynd/MediaFetcher | src/plugins/youtube_dl/youtube_dl/extractor/francetv.py | 1 | 15999 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urlparse,
)
from ..utils import (
clean_html,
determine_ext,
ExtractorError,
int_or_none,
parse_duration,
try_get,
url_or_none,
)
from .dailymotion import DailymotionIE
class FranceTVBaseInfoExtractor(InfoExtractor):
def _make_url_result(self, video_or_full_id, catalog=None):
full_id = 'francetv:%s' % video_or_full_id
if '@' not in video_or_full_id and catalog:
full_id += '@%s' % catalog
return self.url_result(
full_id, ie=FranceTVIE.ie_key(),
video_id=video_or_full_id.split('@')[0])
class FranceTVIE(InfoExtractor):
_VALID_URL = r'''(?x)
(?:
https?://
sivideo\.webservices\.francetelevisions\.fr/tools/getInfosOeuvre/v2/\?
.*?\bidDiffusion=[^&]+|
(?:
https?://videos\.francetv\.fr/video/|
francetv:
)
(?P<id>[^@]+)(?:@(?P<catalog>.+))?
)
'''
_TESTS = [{
# without catalog
'url': 'https://sivideo.webservices.francetelevisions.fr/tools/getInfosOeuvre/v2/?idDiffusion=162311093&callback=_jsonp_loader_callback_request_0',
'md5': 'c2248a8de38c4e65ea8fae7b5df2d84f',
'info_dict': {
'id': '162311093',
'ext': 'mp4',
'title': '13h15, le dimanche... - Les mystères de Jésus',
'description': 'md5:75efe8d4c0a8205e5904498ffe1e1a42',
'timestamp': 1502623500,
'upload_date': '20170813',
},
}, {
# with catalog
'url': 'https://sivideo.webservices.francetelevisions.fr/tools/getInfosOeuvre/v2/?idDiffusion=NI_1004933&catalogue=Zouzous&callback=_jsonp_loader_callback_request_4',
'only_matching': True,
}, {
'url': 'http://videos.francetv.fr/video/NI_657393@Regions',
'only_matching': True,
}, {
'url': 'francetv:162311093',
'only_matching': True,
}, {
'url': 'francetv:NI_1004933@Zouzous',
'only_matching': True,
}, {
'url': 'francetv:NI_983319@Info-web',
'only_matching': True,
}, {
'url': 'francetv:NI_983319',
'only_matching': True,
}, {
'url': 'francetv:NI_657393@Regions',
'only_matching': True,
}, {
# france-3 live
'url': 'francetv:SIM_France3',
'only_matching': True,
}]
def _extract_video(self, video_id, catalogue=None):
# Videos are identified by idDiffusion so catalogue part is optional.
# However when provided, some extra formats may be returned so we pass
# it if available.
info = self._download_json(
'https://sivideo.webservices.francetelevisions.fr/tools/getInfosOeuvre/v2/',
video_id, 'Downloading video JSON', query={
'idDiffusion': video_id,
'catalogue': catalogue or '',
})
if info.get('status') == 'NOK':
raise ExtractorError(
'%s returned error: %s' % (self.IE_NAME, info['message']),
expected=True)
allowed_countries = info['videos'][0].get('geoblocage')
if allowed_countries:
georestricted = True
geo_info = self._download_json(
'http://geo.francetv.fr/ws/edgescape.json', video_id,
'Downloading geo restriction info')
country = geo_info['reponse']['geo_info']['country_code']
if country not in allowed_countries:
raise ExtractorError(
'The video is not available from your location',
expected=True)
else:
georestricted = False
def sign(manifest_url, manifest_id):
for host in ('hdfauthftv-a.akamaihd.net', 'hdfauth.francetv.fr'):
signed_url = url_or_none(self._download_webpage(
'https://%s/esi/TA' % host, video_id,
'Downloading signed %s manifest URL' % manifest_id,
fatal=False, query={
'url': manifest_url,
}))
if signed_url:
return signed_url
return manifest_url
is_live = None
formats = []
for video in info['videos']:
if video['statut'] != 'ONLINE':
continue
video_url = video['url']
if not video_url:
continue
if is_live is None:
is_live = (try_get(
video, lambda x: x['plages_ouverture'][0]['direct'],
bool) is True) or '/live.francetv.fr/' in video_url
format_id = video['format']
ext = determine_ext(video_url)
if ext == 'f4m':
if georestricted:
# See https://github.com/rg3/youtube-dl/issues/3963
# m3u8 urls work fine
continue
formats.extend(self._extract_f4m_formats(
sign(video_url, format_id) + '&hdcore=3.7.0&plugin=aasp-3.7.0.39.44',
video_id, f4m_id=format_id, fatal=False))
elif ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
sign(video_url, format_id), video_id, 'mp4',
entry_protocol='m3u8_native', m3u8_id=format_id,
fatal=False))
elif video_url.startswith('rtmp'):
formats.append({
'url': video_url,
'format_id': 'rtmp-%s' % format_id,
'ext': 'flv',
})
else:
if self._is_valid_url(video_url, video_id, format_id):
formats.append({
'url': video_url,
'format_id': format_id,
})
self._sort_formats(formats)
title = info['titre']
subtitle = info.get('sous_titre')
if subtitle:
title += ' - %s' % subtitle
title = title.strip()
subtitles = {}
subtitles_list = [{
'url': subformat['url'],
'ext': subformat.get('format'),
} for subformat in info.get('subtitles', []) if subformat.get('url')]
if subtitles_list:
subtitles['fr'] = subtitles_list
return {
'id': video_id,
'title': self._live_title(title) if is_live else title,
'description': clean_html(info['synopsis']),
'thumbnail': compat_urlparse.urljoin('http://pluzz.francetv.fr', info['image']),
'duration': int_or_none(info.get('real_duration')) or parse_duration(info['duree']),
'timestamp': int_or_none(info['diffusion']['timestamp']),
'is_live': is_live,
'formats': formats,
'subtitles': subtitles,
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
catalog = mobj.group('catalog')
if not video_id:
qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
video_id = qs.get('idDiffusion', [None])[0]
catalog = qs.get('catalogue', [None])[0]
if not video_id:
raise ExtractorError('Invalid URL', expected=True)
return self._extract_video(video_id, catalog)
class FranceTVSiteIE(FranceTVBaseInfoExtractor):
_VALID_URL = r'https?://(?:(?:www\.)?france\.tv|mobile\.france\.tv)/(?:[^/]+/)*(?P<id>[^/]+)\.html'
_TESTS = [{
'url': 'https://www.france.tv/france-2/13h15-le-dimanche/140921-les-mysteres-de-jesus.html',
'info_dict': {
'id': '162311093',
'ext': 'mp4',
'title': '13h15, le dimanche... - Les mystères de Jésus',
'description': 'md5:75efe8d4c0a8205e5904498ffe1e1a42',
'timestamp': 1502623500,
'upload_date': '20170813',
},
'params': {
'skip_download': True,
},
'add_ie': [FranceTVIE.ie_key()],
}, {
# france3
'url': 'https://www.france.tv/france-3/des-chiffres-et-des-lettres/139063-emission-du-mardi-9-mai-2017.html',
'only_matching': True,
}, {
# france4
'url': 'https://www.france.tv/france-4/hero-corp/saison-1/134151-apres-le-calme.html',
'only_matching': True,
}, {
# france5
'url': 'https://www.france.tv/france-5/c-a-dire/saison-10/137013-c-a-dire.html',
'only_matching': True,
}, {
# franceo
'url': 'https://www.france.tv/france-o/archipels/132249-mon-ancetre-l-esclave.html',
'only_matching': True,
}, {
# france2 live
'url': 'https://www.france.tv/france-2/direct.html',
'only_matching': True,
}, {
'url': 'https://www.france.tv/documentaires/histoire/136517-argentine-les-500-bebes-voles-de-la-dictature.html',
'only_matching': True,
}, {
'url': 'https://www.france.tv/jeux-et-divertissements/divertissements/133965-le-web-contre-attaque.html',
'only_matching': True,
}, {
'url': 'https://mobile.france.tv/france-5/c-dans-l-air/137347-emission-du-vendredi-12-mai-2017.html',
'only_matching': True,
}, {
'url': 'https://www.france.tv/142749-rouge-sang.html',
'only_matching': True,
}, {
# france-3 live
'url': 'https://www.france.tv/france-3/direct.html',
'only_matching': True,
}]
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
catalogue = None
video_id = self._search_regex(
r'data-main-video=(["\'])(?P<id>(?:(?!\1).)+)\1',
webpage, 'video id', default=None, group='id')
if not video_id:
video_id, catalogue = self._html_search_regex(
r'(?:href=|player\.setVideo\(\s*)"http://videos?\.francetv\.fr/video/([^@]+@[^"]+)"',
webpage, 'video ID').split('@')
return self._make_url_result(video_id, catalogue)
class FranceTVEmbedIE(FranceTVBaseInfoExtractor):
_VALID_URL = r'https?://embed\.francetv\.fr/*\?.*?\bue=(?P<id>[^&]+)'
_TESTS = [{
'url': 'http://embed.francetv.fr/?ue=7fd581a2ccf59d2fc5719c5c13cf6961',
'info_dict': {
'id': 'NI_983319',
'ext': 'mp4',
'title': 'Le Pen Reims',
'upload_date': '20170505',
'timestamp': 1493981780,
'duration': 16,
},
'params': {
'skip_download': True,
},
'add_ie': [FranceTVIE.ie_key()],
}]
def _real_extract(self, url):
video_id = self._match_id(url)
video = self._download_json(
'http://api-embed.webservices.francetelevisions.fr/key/%s' % video_id,
video_id)
return self._make_url_result(video['video_id'], video.get('catalog'))
class FranceTVInfoIE(FranceTVBaseInfoExtractor):
IE_NAME = 'francetvinfo.fr'
_VALID_URL = r'https?://(?:www|mobile|france3-regions)\.francetvinfo\.fr/(?:[^/]+/)*(?P<id>[^/?#&.]+)'
_TESTS = [{
'url': 'http://www.francetvinfo.fr/replay-jt/france-3/soir-3/jt-grand-soir-3-lundi-26-aout-2013_393427.html',
'info_dict': {
'id': '84981923',
'ext': 'mp4',
'title': 'Soir 3',
'upload_date': '20130826',
'timestamp': 1377548400,
'subtitles': {
'fr': 'mincount:2',
},
},
'params': {
'skip_download': True,
},
'add_ie': [FranceTVIE.ie_key()],
}, {
'url': 'http://www.francetvinfo.fr/elections/europeennes/direct-europeennes-regardez-le-debat-entre-les-candidats-a-la-presidence-de-la-commission_600639.html',
'only_matching': True,
}, {
'url': 'http://www.francetvinfo.fr/economie/entreprises/les-entreprises-familiales-le-secret-de-la-reussite_933271.html',
'only_matching': True,
}, {
'url': 'http://france3-regions.francetvinfo.fr/bretagne/cotes-d-armor/thalassa-echappee-breizh-ce-venredi-dans-les-cotes-d-armor-954961.html',
'only_matching': True,
}, {
# Dailymotion embed
'url': 'http://www.francetvinfo.fr/politique/notre-dame-des-landes/video-sur-france-inter-cecile-duflot-denonce-le-regard-meprisant-de-patrick-cohen_1520091.html',
'md5': 'ee7f1828f25a648addc90cb2687b1f12',
'info_dict': {
'id': 'x4iiko0',
'ext': 'mp4',
'title': 'NDDL, référendum, Brexit : Cécile Duflot répond à Patrick Cohen',
'description': 'Au lendemain de la victoire du "oui" au référendum sur l\'aéroport de Notre-Dame-des-Landes, l\'ancienne ministre écologiste est l\'invitée de Patrick Cohen. Plus d\'info : https://www.franceinter.fr/emissions/le-7-9/le-7-9-27-juin-2016',
'timestamp': 1467011958,
'upload_date': '20160627',
'uploader': 'France Inter',
'uploader_id': 'x2q2ez',
},
'add_ie': ['Dailymotion'],
}, {
'url': 'http://france3-regions.francetvinfo.fr/limousin/emissions/jt-1213-limousin',
'only_matching': True,
}]
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
dailymotion_urls = DailymotionIE._extract_urls(webpage)
if dailymotion_urls:
return self.playlist_result([
self.url_result(dailymotion_url, DailymotionIE.ie_key())
for dailymotion_url in dailymotion_urls])
video_id, catalogue = self._search_regex(
(r'id-video=([^@]+@[^"]+)',
r'<a[^>]+href="(?:https?:)?//videos\.francetv\.fr/video/([^@]+@[^"]+)"'),
webpage, 'video id').split('@')
return self._make_url_result(video_id, catalogue)
class FranceTVInfoSportIE(FranceTVBaseInfoExtractor):
IE_NAME = 'sport.francetvinfo.fr'
_VALID_URL = r'https?://sport\.francetvinfo\.fr/(?:[^/]+/)*(?P<id>[^/?#&]+)'
_TESTS = [{
'url': 'https://sport.francetvinfo.fr/les-jeux-olympiques/retour-sur-les-meilleurs-moments-de-pyeongchang-2018',
'info_dict': {
'id': '6e49080e-3f45-11e8-b459-000d3a2439ea',
'ext': 'mp4',
'title': 'Retour sur les meilleurs moments de Pyeongchang 2018',
'timestamp': 1523639962,
'upload_date': '20180413',
},
'params': {
'skip_download': True,
},
'add_ie': [FranceTVIE.ie_key()],
}]
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
video_id = self._search_regex(r'data-video="([^"]+)"', webpage, 'video_id')
return self._make_url_result(video_id, 'Sport-web')
class GenerationWhatIE(InfoExtractor):
IE_NAME = 'france2.fr:generation-what'
_VALID_URL = r'https?://generation-what\.francetv\.fr/[^/]+/video/(?P<id>[^/?#&]+)'
_TESTS = [{
'url': 'http://generation-what.francetv.fr/portrait/video/present-arms',
'info_dict': {
'id': 'wtvKYUG45iw',
'ext': 'mp4',
'title': 'Generation What - Garde à vous - FRA',
'uploader': 'Generation What',
'uploader_id': 'UCHH9p1eetWCgt4kXBYCb3_w',
'upload_date': '20160411',
},
'params': {
'skip_download': True,
},
'add_ie': ['Youtube'],
}, {
'url': 'http://generation-what.francetv.fr/europe/video/present-arms',
'only_matching': True,
}]
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
youtube_id = self._search_regex(
r"window\.videoURL\s*=\s*'([0-9A-Za-z_-]{11})';",
webpage, 'youtube id')
return self.url_result(youtube_id, ie='Youtube', video_id=youtube_id)
class CultureboxIE(FranceTVBaseInfoExtractor):
_VALID_URL = r'https?://(?:m\.)?culturebox\.francetvinfo\.fr/(?:[^/]+/)*(?P<id>[^/?#&]+)'
_TESTS = [{
'url': 'https://culturebox.francetvinfo.fr/opera-classique/musique-classique/c-est-baroque/concerts/cantates-bwv-4-106-et-131-de-bach-par-raphael-pichon-57-268689',
'info_dict': {
'id': 'EV_134885',
'ext': 'mp4',
'title': 'Cantates BWV 4, 106 et 131 de Bach par Raphaël Pichon 5/7',
'description': 'md5:19c44af004b88219f4daa50fa9a351d4',
'upload_date': '20180206',
'timestamp': 1517945220,
'duration': 5981,
},
'params': {
'skip_download': True,
},
'add_ie': [FranceTVIE.ie_key()],
}]
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
if ">Ce live n'est plus disponible en replay<" in webpage:
raise ExtractorError(
'Video %s is not available' % display_id, expected=True)
video_id, catalogue = self._search_regex(
r'["\'>]https?://videos\.francetv\.fr/video/([^@]+@.+?)["\'<]',
webpage, 'video id').split('@')
return self._make_url_result(video_id, catalogue)
class FranceTVJeunesseIE(FranceTVBaseInfoExtractor):
_VALID_URL = r'(?P<url>https?://(?:www\.)?(?:zouzous|ludo)\.fr/heros/(?P<id>[^/?#&]+))'
_TESTS = [{
'url': 'https://www.zouzous.fr/heros/simon',
'info_dict': {
'id': 'simon',
},
'playlist_count': 9,
}, {
'url': 'https://www.ludo.fr/heros/ninjago',
'info_dict': {
'id': 'ninjago',
},
'playlist_count': 10,
}, {
'url': 'https://www.zouzous.fr/heros/simon?abc',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
playlist_id = mobj.group('id')
playlist = self._download_json(
'%s/%s' % (mobj.group('url'), 'playlist'), playlist_id)
if not playlist.get('count'):
raise ExtractorError(
'%s is not available' % playlist_id, expected=True)
entries = []
for item in playlist['items']:
identity = item.get('identity')
if identity and isinstance(identity, compat_str):
entries.append(self._make_url_result(identity))
return self.playlist_result(entries, playlist_id)
| gpl-3.0 | 156,838,244,433,017,060 | 30.034951 | 257 | 0.633173 | false |
mharrys/sudoku | sudoku.py | 1 | 7848 | import fileinput
from dlx import DLX
from numpy import array, unique
from optparse import OptionParser
class SudokuError(Exception):
"""Raised when any error related to Sudoku is found during construction
and validation such as unexpected values or contradictions.
"""
def __init__(self, value):
self.value = value
def __str__(self):
return self.value.encode('string_escape')
class Sudoku(object):
"""Complete all necessary steps to solve a Sudoku challenge using
Dancing Links (DLX) including validating the challenge and building and
validating the possible solution found by DLX.
The expected input is one line of 81 characters where each unknown digit
is represented as a '.' (dot).
"""
def __init__(self, validate, pretty):
self.validate = validate
self.pretty = pretty
def solve(self, line):
"""Return list of solutions from specified line.
Return empty list if no solutions are found and return at most
one solution if validation is enabled or all solutions if validation
is disabled. It is possible for a Sudoku challenge to have more than
        one solution, but such a challenge is considered invalid.
"""
grid = self.build_challenge(line)
self.validate_challenge(grid)
self.grids = []
dlx = DLX.from_sudoku(grid, self.result)
dlx.run(self.validate)
return self.grids
def build_challenge(self, line):
"""Returns 9x9 numpy array from specified line.
SudokuError is raised if unexpected value is found.
"""
grid = []
for c in line:
if c != '.':
if c < '1' or c > '9':
msg = 'Unexpected value "%s" when building challenge.' % c
raise SudokuError(msg)
grid.append(int(c))
else:
grid.append(0)
return array(grid).reshape(9, 9)
def validate_challenge(self, grid):
"""Search specified grid (9x9 numpy array) for contradictions.
SudokuError is raised if a contradiction is found.
"""
# validate rows
for row in grid:
cells = []
for cell in row:
if cell != 0:
if cell in cells:
msg = 'Row digits are not unique in challenge.'
raise SudokuError(msg)
else:
cells.append(cell)
# validate columns
for column in grid.transpose():
cells = []
for cell in column:
if cell != 0:
if cell in cells:
msg = 'Column digits are not unique in challenge.'
raise SudokuError(msg)
else:
cells.append(cell)
# validate boxes
for i in range(3):
# row slice
rs = i * 3
re = i * 3 + 3
for j in range(3):
# column slice
cs = j * 3
ce = j * 3 + 3
# box slice
box = grid[rs:re, cs:ce]
cells = []
for cell in box.flatten():
if cell != 0:
if cell in cells:
                            msg = 'Box digits are not unique in challenge.'
raise SudokuError(msg)
else:
cells.append(cell)
def build_solution(self, s):
"""Return 9x9 grid from a solution found by DLX.
"""
rows = []
for k in s:
rows.append(k.ID)
rows.sort()
grid = []
for row in rows:
grid.append(row % 9 + 1)
return array(grid).reshape(9, 9)
def validate_solution(self, grid):
"""Search specified grid (9x9 numpy array) for contradictions.
SudokuError is raised if a contradiction is found.
"""
# validate cells
for cell in grid.flatten():
if cell not in range(1, 10):
msg = 'Cell digit is not between 1 and 9 in solution.'
raise SudokuError(msg)
# validate rows
for row in grid:
if unique(row).size != 9:
msg = 'Row digits are not unique in solution.'
raise SudokuError(msg)
# validate columns
for col in grid.transpose():
if unique(col).size != 9:
msg = 'Column digits are not unique in solution.'
raise SudokuError(msg)
# validate boxes
for i in range(3):
# row slice
rs = i * 3
re = i * 3 + 3
for j in range(3):
# column slice
cs = j * 3
ce = j * 3 + 3
# box slice
box = grid[rs:re, cs:ce]
if unique(box.flatten()).size != 9:
msg = 'Box digits are not unique in solution.'
raise SudokuError(msg)
def result(self, solutions, s):
"""Build, validate and save recieved solution.
SudokuError is raised if validation is enabled and more than one
solution exist or contradiction is found in solution.
"""
grid = self.build_solution(s)
if self.validate:
if solutions > 1:
msg = 'More than one solution exist.'
raise SudokuError(msg)
self.validate_solution(grid)
if self.pretty:
self.grids.append(self.format_pretty(grid))
else:
self.grids.append(self.format_simple(grid))
def format_simple(self, grid):
"""Return solution in the same format as expected input line.
"""
f = ''
for s in grid.ravel():
f += str(s)
return f
def format_pretty(self, grid):
"""Return solution in a more human readable format.
"""
f = '+-------+-------+-------+\n'
for i, s in enumerate(grid):
num = str(s)[1:-1].replace(',', '')
f += '| %s | %s | %s |\n' % (num[0:5], num[6:11], num[12:17])
if (i + 1) % 3 == 0:
f += '+-------+-------+-------+'
if (i + 1) < len(grid):
f += '\n'
return f
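# Usage sketch for the Sudoku class above (the puzzle string is only a sample;
# any 81-character line with '.' for unknown cells follows the expected input
# format described in the class docstring):
#
#   solver = Sudoku(validate=True, pretty=True)
#   challenge = ('53..7....6..195....98....6.8...6...3'
#                '4..8.3..17...2...6.6....28....419..5....8..79')
#   for grid in solver.solve(challenge):
#       print(grid)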
def print_error(n, msg):
print('sudoku: Error on line %s: %s' % (n, msg))
def print_solutions(grids):
for grid in grids:
print(grid)
def solve_line(sudoku, line, line_num):
if len(line) < 82 or line[81] != '\n':
print_error(line_num, 'Input line must be exactly 81 chars long.')
else:
grids = []
try:
grids = sudoku.solve(line[:81]) # slice off '\n'
except SudokuError as e:
print_error(line_num, e)
else:
print_solutions(grids)
def solve_line_by_line(options, args):
sudoku = Sudoku(options.validate, options.pretty)
for line in fileinput.input(args):
solve_line(sudoku, line, fileinput.lineno())
if __name__ == '__main__':
parser = OptionParser()
parser.add_option(
'-v',
'--validate',
dest='validate',
help='validate solution (longer search time)',
action='store_true'
)
parser.add_option(
'-p',
'--pretty',
dest='pretty',
help='pretty print solution',
action='store_true'
)
options, args = parser.parse_args()
try:
solve_line_by_line(options, args)
except IOError as e:
print('sudoku: %s' % e)
except (KeyboardInterrupt, SystemExit) as e:
print('')
print('sudoku: Interrupt caught ... exiting')
| gpl-3.0 | 3,664,689,965,295,890,400 | 28.727273 | 78 | 0.507645 | false |
Bekt/tweetement | src/service.py | 1 | 3578 | import logging
import string
import tweepy
from credentials import (consumer_key, consumer_secret)
from models import Stopword
from collections import Counter
class Service(object):
    # Maps uppercase to lowercase and deletes any punctuation.
trans = {ord(string.ascii_uppercase[i]): ord(string.ascii_lowercase[i])
for i in range(26)}
trans.update({ord(c): None for c in string.punctuation})
def __init__(self, access_token='', access_token_secret=''):
self._tw_api = None
self._access_token = access_token
self._access_token_secret = access_token_secret
@property
def tw_api(self):
"""Tweepy API client."""
if self._tw_api is None:
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(self._access_token, self._access_token_secret)
self._tw_api = tweepy.API(auth)
return self._tw_api
def fetch(self, query, limit=100):
"""Fetches search results for the given query."""
# Cursor doesn't work with dev_appserver.py :(
# return list(tweepy.Cursor(self.tw_api.search, q=query, lang='en',
# result_type='popular').items(limit))
query += ' -filter:retweets'
# Try to get as many 'popular' posts as possible.
# Twitter limits this really hard.
res_type = 'popular'
last_id = -1
tweets = []
while len(tweets) < limit:
count = limit - len(tweets)
try:
t = self.tw_api.search(q=query, count=count, result_type=res_type,
lang='en', max_id=str(last_id - 1))
if len(t) < 3 and res_type == 'popular':
tweets.extend(t)
res_type = 'mixed'
last_id = -1
continue
if len(t) < 3 and res_type == 'mixed':
tweets.extend(t)
break
tweets.extend(t)
last_id = t[-1].id
except tweepy.TweepError as e:
logging.exception(e)
break
return tweets
@staticmethod
def top_hashtags(tweets, limit=5):
"""Extracts most frequent hashtags from given tweets."""
hashtags = Counter()
for t in tweets:
for h in t.entities['hashtags']:
if 'text' in h:
hashtags[h['text'].lower()] += 1
top = hashtags.most_common(limit)
return ['#' + t[0] for t in top]
@staticmethod
def top_keywords(tweets, limit=5, exclude=set()):
"""Extracts most frequent keywords from given tweets."""
exc = set()
for w in exclude:
ok, text = _token_okay(w)
if ok:
exc.add(text)
words = Counter()
for t in tweets:
for token in set(t.text.split()):
ok, text = _token_okay(token)
if ok and text not in exc:
words[text] += 1
top = words.most_common(limit)
return [t[0] for t in top]
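# Usage sketch for the Service class above (the OAuth token values are
# placeholders): fetch() pages through Twitter search results, preferring
# 'popular' tweets before falling back to 'mixed', and the top_* helpers
# summarize the result set.
#
#   svc = Service(access_token='<token>', access_token_secret='<secret>')
#   tweets = svc.fetch('#python', limit=50)
#   tags = Service.top_hashtags(tweets)
#   words = Service.top_keywords(tweets, exclude=['python'])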
def _token_okay(text):
"""Decides whether the given token is a valid expandable query."""
text = ''.join(c for c in text if 127 > ord(c) > 31)
try:
text = text.translate(Service.trans)
except TypeError:
return False, text
if (len(text) < 2 or text.isdigit()
or Stopword.gql('WHERE token = :1', text).get() is not None):
return False, text
return True, text
| mit | -3,306,388,078,736,274,000 | 34.425743 | 82 | 0.536333 | false |
nbessi/pyhiccup | pyhiccup/page.py | 1 | 3037 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Nicolas Bessi
# Copyright 2014
# Original concept by James Reeves
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License 3
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from __future__ import unicode_literals
DOC_TYPES = {
'html4': "<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML 4.01//EN\" "
"\"http://www.w3.org/TR/html4/strict.dtd\">\n",
'xhtml-strict': "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 ""Strict//EN\" "
"\"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd\">\n",
'xhtml-transitional': "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" "
"\"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n",
'html5': "<!DOCTYPE html>\n",
}
DEFAULT_XMLNS = 'http://www.w3.org/1999/xhtml'
XMl_DECLARATION = '<?xml version="1.0" encoding="UTF-8"?>'
def get_doc_type(doc_type):
"""Return a DOCTYPE declaration
:param doc_type: doc type string must be in ``page.DOC_TYPES``
:type doc_type: str
:return: DOCTYPE declaration
:rtype: str
"""
if doc_type not in DOC_TYPES:
raise ValueError(
'Invalid DOCTYPE %s available values are %s' %
(doc_type, DOC_TYPES.keys())
)
return DOC_TYPES[doc_type]
def build_html_enclosing_tag(etype, **kwargs):
"""Generate html tag list representation
:param etype: html doc type `html5, html4, xhtml-strict,
xhtml-transitional`
:type etype: str
:param kwargs: dict of attribute for HTML tag will override defaults
:type kwargs: dict
:return: html tag list representation ['html', {'xmlns': ...}]
    :rtype: list
"""
attrs = {}
if etype in DOC_TYPES:
attrs['lang'] = 'en'
attrs['dir'] = 'rtl'
attrs['xml:lang'] = 'en'
if 'xhtml' in etype:
attrs[u'xmlns'] = DEFAULT_XMLNS
attrs.update(kwargs)
return ['html', attrs]
def build_xml_enclosing_tag(etype, **kwargs):
"""Generate XML root tag list representation
:param etype: root tag name
:type etype: str
:param kwargs: dict of attribute for root tag
:type kwargs: dict
:return: root xml tag list representation ['atag', {'attr': ...}]
    :rtype: list
"""
return [etype, kwargs]
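# Illustrative calls (return values follow from the definitions above; the
# keyword arguments are arbitrary examples):
#
#   get_doc_type('html5')  # -> '<!DOCTYPE html>\n'
#   build_html_enclosing_tag('html5', id='root')
#   # -> ['html', {'lang': 'en', 'dir': 'rtl', 'xml:lang': 'en', 'id': 'root'}]
#   build_xml_enclosing_tag('feed', version='1.0')
#   # -> ['feed', {'version': '1.0'}]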
| agpl-3.0 | 384,448,600,303,087,940 | 32.01087 | 93 | 0.591373 | false |
docusign/docusign-python-client | docusign_esign/models/external_file.py | 1 | 7550 | # coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501
OpenAPI spec version: v2.1
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ExternalFile(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'_date': 'str',
'id': 'str',
'img': 'str',
'name': 'str',
'size': 'str',
'supported': 'str',
'type': 'str',
'uri': 'str'
}
attribute_map = {
'_date': 'date',
'id': 'id',
'img': 'img',
'name': 'name',
'size': 'size',
'supported': 'supported',
'type': 'type',
'uri': 'uri'
}
def __init__(self, _date=None, id=None, img=None, name=None, size=None, supported=None, type=None, uri=None): # noqa: E501
"""ExternalFile - a model defined in Swagger""" # noqa: E501
self.__date = None
self._id = None
self._img = None
self._name = None
self._size = None
self._supported = None
self._type = None
self._uri = None
self.discriminator = None
if _date is not None:
self._date = _date
if id is not None:
self.id = id
if img is not None:
self.img = img
if name is not None:
self.name = name
if size is not None:
self.size = size
if supported is not None:
self.supported = supported
if type is not None:
self.type = type
if uri is not None:
self.uri = uri
@property
def _date(self):
"""Gets the _date of this ExternalFile. # noqa: E501
# noqa: E501
:return: The _date of this ExternalFile. # noqa: E501
:rtype: str
"""
return self.__date
@_date.setter
def _date(self, _date):
"""Sets the _date of this ExternalFile.
# noqa: E501
:param _date: The _date of this ExternalFile. # noqa: E501
:type: str
"""
self.__date = _date
@property
def id(self):
"""Gets the id of this ExternalFile. # noqa: E501
# noqa: E501
:return: The id of this ExternalFile. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this ExternalFile.
# noqa: E501
:param id: The id of this ExternalFile. # noqa: E501
:type: str
"""
self._id = id
@property
def img(self):
"""Gets the img of this ExternalFile. # noqa: E501
# noqa: E501
:return: The img of this ExternalFile. # noqa: E501
:rtype: str
"""
return self._img
@img.setter
def img(self, img):
"""Sets the img of this ExternalFile.
# noqa: E501
:param img: The img of this ExternalFile. # noqa: E501
:type: str
"""
self._img = img
@property
def name(self):
"""Gets the name of this ExternalFile. # noqa: E501
# noqa: E501
:return: The name of this ExternalFile. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ExternalFile.
# noqa: E501
:param name: The name of this ExternalFile. # noqa: E501
:type: str
"""
self._name = name
@property
def size(self):
"""Gets the size of this ExternalFile. # noqa: E501
Reserved: TBD # noqa: E501
:return: The size of this ExternalFile. # noqa: E501
:rtype: str
"""
return self._size
@size.setter
def size(self, size):
"""Sets the size of this ExternalFile.
Reserved: TBD # noqa: E501
:param size: The size of this ExternalFile. # noqa: E501
:type: str
"""
self._size = size
@property
def supported(self):
"""Gets the supported of this ExternalFile. # noqa: E501
# noqa: E501
:return: The supported of this ExternalFile. # noqa: E501
:rtype: str
"""
return self._supported
@supported.setter
def supported(self, supported):
"""Sets the supported of this ExternalFile.
# noqa: E501
:param supported: The supported of this ExternalFile. # noqa: E501
:type: str
"""
self._supported = supported
@property
def type(self):
"""Gets the type of this ExternalFile. # noqa: E501
# noqa: E501
:return: The type of this ExternalFile. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this ExternalFile.
# noqa: E501
:param type: The type of this ExternalFile. # noqa: E501
:type: str
"""
self._type = type
@property
def uri(self):
"""Gets the uri of this ExternalFile. # noqa: E501
# noqa: E501
:return: The uri of this ExternalFile. # noqa: E501
:rtype: str
"""
return self._uri
@uri.setter
def uri(self, uri):
"""Sets the uri of this ExternalFile.
# noqa: E501
:param uri: The uri of this ExternalFile. # noqa: E501
:type: str
"""
self._uri = uri
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ExternalFile, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ExternalFile):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
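# Usage sketch (field values are arbitrary examples): the generated model is
# populated through its constructor and serialized with to_dict()/to_str().
#
#   f = ExternalFile(name='report.pdf', type='pdf', size='2048')
#   f.to_dict()['name']  # -> 'report.pdf'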
| mit | 7,504,362,721,954,708,000 | 23.121406 | 140 | 0.513907 | false |
edx/ecommerce | ecommerce/extensions/voucher/migrations/0001_initial.py | 1 | 3161 | # -*- coding: utf-8 -*-
from decimal import Decimal
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('order', '0001_initial'),
('offer', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Voucher',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(verbose_name='Name', max_length=128, help_text='This will be shown in the checkout and basket once the voucher is entered')),
('code', models.CharField(max_length=128, verbose_name='Code', unique=True, db_index=True, help_text='Case insensitive / No spaces allowed')),
('usage', models.CharField(default='Multi-use', max_length=128, verbose_name='Usage', choices=[('Single use', 'Can be used once by one customer'), ('Multi-use', 'Can be used multiple times by multiple customers'), ('Once per customer', 'Can only be used once per customer')])),
('start_datetime', models.DateTimeField(verbose_name='Start datetime')),
('end_datetime', models.DateTimeField(verbose_name='End datetime')),
('num_basket_additions', models.PositiveIntegerField(default=0, verbose_name='Times added to basket')),
('num_orders', models.PositiveIntegerField(default=0, verbose_name='Times on orders')),
('total_discount', models.DecimalField(default=Decimal('0.00'), max_digits=12, decimal_places=2, verbose_name='Total discount')),
('date_created', models.DateField(auto_now_add=True)),
('offers', models.ManyToManyField(related_name='vouchers', verbose_name='Offers', to='offer.ConditionalOffer')),
],
options={
'verbose_name_plural': 'Vouchers',
'get_latest_by': 'date_created',
'verbose_name': 'Voucher',
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='VoucherApplication',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_created', models.DateField(auto_now_add=True, verbose_name='Date Created')),
('order', models.ForeignKey(verbose_name='Order', to='order.Order', on_delete=models.CASCADE)),
('user', models.ForeignKey(null=True, verbose_name='User', to=settings.AUTH_USER_MODEL, blank=True, on_delete=models.CASCADE)),
('voucher', models.ForeignKey(verbose_name='Voucher', related_name='applications', to='voucher.Voucher', on_delete=models.CASCADE)),
],
options={
'verbose_name_plural': 'Voucher Applications',
'verbose_name': 'Voucher Application',
'abstract': False,
},
bases=(models.Model,),
),
]
| agpl-3.0 | 9,208,904,800,063,817,000 | 53.5 | 293 | 0.598545 | false |
tensorflow/tfx | tfx/examples/mnist/mnist_pipeline_native_keras_e2e_test.py | 1 | 5568 | # Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""E2E Tests for tfx.examples.mnist.mnist_pipeline_native_keras."""
import os
from typing import Text
import unittest
import tensorflow as tf
from tfx.dsl.io import fileio
from tfx.examples.mnist import mnist_pipeline_native_keras
from tfx.orchestration import metadata
from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner
@unittest.skipIf(tf.__version__ < '2',
'Uses keras Model only compatible with TF 2.x')
class MNISTPipelineNativeKerasEndToEndTest(tf.test.TestCase):
def setUp(self):
super(MNISTPipelineNativeKerasEndToEndTest, self).setUp()
self._test_dir = os.path.join(
os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
self._testMethodName)
self._pipeline_name = 'keras_test'
self._data_root = os.path.join(os.path.dirname(__file__), 'data')
self._module_file = os.path.join(
os.path.dirname(__file__), 'mnist_utils_native_keras.py')
self._module_file_lite = os.path.join(
os.path.dirname(__file__), 'mnist_utils_native_keras_lite.py')
self._serving_model_dir = os.path.join(self._test_dir, 'serving_model')
self._serving_model_dir_lite = os.path.join(
self._test_dir, 'serving_model_lite')
self._pipeline_root = os.path.join(self._test_dir, 'tfx', 'pipelines',
self._pipeline_name)
self._metadata_path = os.path.join(self._test_dir, 'tfx', 'metadata',
self._pipeline_name, 'metadata.db')
def assertExecutedOnce(self, component: Text) -> None:
"""Check the component is executed exactly once."""
component_path = os.path.join(self._pipeline_root, component)
self.assertTrue(fileio.exists(component_path))
outputs = fileio.listdir(component_path)
self.assertIn('.system', outputs)
outputs.remove('.system')
system_paths = [
os.path.join('.system', path)
for path in fileio.listdir(os.path.join(component_path, '.system'))
]
self.assertNotEmpty(system_paths)
self.assertIn('.system/executor_execution', system_paths)
outputs.extend(system_paths)
self.assertNotEmpty(outputs)
for output in outputs:
execution = fileio.listdir(os.path.join(component_path, output))
self.assertLen(execution, 1)
def assertPipelineExecution(self) -> None:
self.assertExecutedOnce('ImportExampleGen')
self.assertExecutedOnce('Evaluator.mnist')
self.assertExecutedOnce('Evaluator.mnist_lite')
self.assertExecutedOnce('ExampleValidator')
self.assertExecutedOnce('Pusher.mnist')
self.assertExecutedOnce('Pusher.mnist_lite')
self.assertExecutedOnce('SchemaGen')
self.assertExecutedOnce('StatisticsGen')
self.assertExecutedOnce('Trainer.mnist')
self.assertExecutedOnce('Trainer.mnist_lite')
self.assertExecutedOnce('Transform')
def testMNISTPipelineNativeKeras(self):
if not tf.executing_eagerly():
self.skipTest('The test requires TF2.')
BeamDagRunner().run(
mnist_pipeline_native_keras._create_pipeline(
pipeline_name=self._pipeline_name,
data_root=self._data_root,
module_file=self._module_file,
module_file_lite=self._module_file_lite,
serving_model_dir=self._serving_model_dir,
serving_model_dir_lite=self._serving_model_dir_lite,
pipeline_root=self._pipeline_root,
metadata_path=self._metadata_path,
beam_pipeline_args=[]))
self.assertTrue(fileio.exists(self._serving_model_dir))
self.assertTrue(fileio.exists(self._serving_model_dir_lite))
self.assertTrue(fileio.exists(self._metadata_path))
metadata_config = metadata.sqlite_metadata_connection_config(
self._metadata_path)
expected_execution_count = 11
with metadata.Metadata(metadata_config) as m:
artifact_count = len(m.store.get_artifacts())
execution_count = len(m.store.get_executions())
self.assertGreaterEqual(artifact_count, execution_count)
self.assertEqual(execution_count, expected_execution_count)
self.assertPipelineExecution()
# Runs pipeline the second time.
BeamDagRunner().run(
mnist_pipeline_native_keras._create_pipeline(
pipeline_name=self._pipeline_name,
data_root=self._data_root,
module_file=self._module_file,
module_file_lite=self._module_file_lite,
serving_model_dir=self._serving_model_dir,
serving_model_dir_lite=self._serving_model_dir_lite,
pipeline_root=self._pipeline_root,
metadata_path=self._metadata_path,
beam_pipeline_args=[]))
# Asserts cache execution.
with metadata.Metadata(metadata_config) as m:
# Artifact count is unchanged.
self.assertLen(m.store.get_artifacts(), artifact_count)
self.assertLen(m.store.get_executions(), expected_execution_count * 2)
if __name__ == '__main__':
tf.test.main()
| apache-2.0 | -3,441,124,305,496,440,000 | 40.552239 | 76 | 0.683908 | false |
AlfioEmanueleFresta/practical-ecb-lib | cp_ecb/image.py | 1 | 4402 | from PIL import Image
class InMemoryImage:
"""
A very simple class to represent an image.
"""
def __init__(self, w, h, c=3,
b=b'', encrypted=False):
"""
Instantiate a new image.
:param w: The width of the image (px).
:param h: The height of the image (px).
:param c: The number of colour channels of the image. Default is 3.
:param b: A byte literal for the body of the image.
:param encrypted: A flag to say whether the image is encrypted or not.
"""
self.w = w
self.h = h
self.c = c
self.b = b
self.encrypted = encrypted
def __repr__(self):
return "<InMemoryImage(%s): channels=%d, width=%d, height=%d>" % (
"encrypted" if self.encrypted else "unencrypted",
self.c, self.w, self.h
)
def load_image(input_file, encrypted=False):
"""
Load an image file into memory as a InMemoryImage object.
:param input_file: The file to load.
:param encrypted: Whether to flag the file as an encrypted image or not.
:return: An instantiated InMemoryImage object.
"""
image_file = Image.open(input_file)
image = image_file.convert('RGB')
image_size = image.size
image_b = b''
for y in range(image_size[1]):
for x in range(image_size[0]):
r, g, b = image.getpixel((x, y))
image_b += bytes([r, g, b])
image_file.close()
return InMemoryImage(w=image_size[0], h=image_size[1],
c=3, b=image_b, encrypted=encrypted)
def save_image(image, output_file):
    """
    Save an InMemoryImage object to an image file.
    :param image: The InMemoryImage object to write out.
    :param output_file: The file name for the output image.
    """
output = Image.new("RGB", (image.w, image.h))
maxlen = len(image.b) - (len(image.b) % image.c)
data = tuple(tuple(image.b[i:i + image.c]) for i in range(0, maxlen, image.c))
data = data[:(image.w * image.h)]
output.putdata(data)
output.save(output_file)
def _crypt_image(encrypt, image, function):
if type(image) is not InMemoryImage:
raise ValueError("You need to pass this function a valid InMemoryImage object.")
if encrypt and image.encrypted:
raise ValueError("The input image is already encrypted.")
elif (not encrypt) and (not image.encrypted):
raise ValueError("The input image is not flagged as encrypted and can't be decrypted.")
image.b = function(image.b)
# Allow return list of ordinals
if type(image.b) is list:
image.b = bytes(image.b)
image.encrypted = encrypt
return image
def encrypt_image(image, function):
"""
Encrypt the content of an InMemoryImage using a given function.
:param image: The unencrypted InMemoryImage object.
:param function: An encryption function which takes a single bytes literal and returns a single bytes literal.
:return: An encrypted InMemoryImage object.
"""
return _crypt_image(encrypt=True, image=image, function=function)
def decrypt_image(image, function):
"""
Decrypt the content of an InMemoryImage using a given function.
:param image: The encrypted InMemoryImage object.
:param function: A decryption function which takes a single bytes literal and returns a single bytes literal.
:return: An unencrypted InMemoryImage object.
"""
return _crypt_image(encrypt=False, image=image, function=function)
def encrypt_image_file(input_file, function, output_file):
"""
Loads an image file, encrypts its contents and saves it as another image file.
:param input_file: The original unencrytped image file.
:param function: The encryption function to use. This must take a single bytes literal and return a single bytes literal.
:param output_file: The file name for the encrypted image.
"""
image = load_image(input_file)
image = encrypt_image(image, function)
save_image(image, output_file)
def decrypt_image_file(input_file, function, output_file):
"""
Loads an encrypted image file, decrypts its contents and saves it as another image file.
:param input_file: The encrypted image file.
:param function: The decryption function to use. This must take a single bytes literal and return a single bytes literal.
:param output_file: The file name for the decrypted image.
"""
image = load_image(input_file, encrypted=True)
image = decrypt_image(image, function)
save_image(image, output_file)
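# Usage sketch for the helpers above. The xor_bytes callable is a stand-in
# showing the expected signature (bytes in -> bytes out, or a list of
# ordinals); it is a toy transform, not a real block cipher.
#
#   def xor_bytes(data):
#       return bytes(b ^ 0x5A for b in data)
#
#   encrypt_image_file('plain.png', xor_bytes, 'encrypted.png')
#   decrypt_image_file('encrypted.png', xor_bytes, 'decrypted.png')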
| gpl-3.0 | 2,049,984,958,980,118,500 | 33.124031 | 125 | 0.655838 | false |
pvagner/orca | test/keystrokes/firefox/line_nav_lists.py | 1 | 9203 | #!/usr/bin/python
"""Test of HTML list presentation."""
from macaroon.playback import *
import utils
sequence = MacroSequence()
# Work around some new quirk in Gecko that causes this test to fail if
# run via the test harness rather than manually.
sequence.append(KeyComboAction("<Control>r"))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("<Control>Home"))
sequence.append(utils.AssertPresentationAction(
"1. Top of file",
["BRAILLE LINE: 'Welcome to a List of Lists h1'",
" VISIBLE: 'Welcome to a List of Lists h1', cursor=1",
"SPEECH OUTPUT: 'Welcome to a List of Lists heading level 1'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"2. Line Down",
["BRAILLE LINE: 'Lists are not only fun to make, they are fun to use. They help us:'",
" VISIBLE: 'Lists are not only fun to make, ', cursor=1",
"SPEECH OUTPUT: 'Lists are not only fun to make, they are fun to use. They help us:'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"3. Line Down",
["BRAILLE LINE: '1. remember what the heck we are doing each day'",
" VISIBLE: '1. remember what the heck we are', cursor=1",
"SPEECH OUTPUT: '1. remember what the heck we are doing each day.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"4. Line Down",
["BRAILLE LINE: '2. arrange long and arbitrary lines of text into ordered lists that are pleasing to the eye and suggest some'",
" VISIBLE: '2. arrange long and arbitrary li', cursor=1",
"SPEECH OUTPUT: '2. arrange long and arbitrary lines of text into ordered lists that are pleasing to the eye and suggest some.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"5. Line Down",
["BRAILLE LINE: 'sense of priority, even if it is artificial'",
" VISIBLE: 'sense of priority, even if it is', cursor=1",
"SPEECH OUTPUT: 'sense of priority, even if it is artificial.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"6. Line Down",
["BRAILLE LINE: '3. look really cool when we carry them around on yellow Post-Itstm.'",
" VISIBLE: '3. look really cool when we carr', cursor=1",
"SPEECH OUTPUT: '3. look really cool when we carry them around on yellow Post-Itstm.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"7. Line Down",
["BRAILLE LINE: '4. and that other thing I keep forgetting.'",
" VISIBLE: '4. and that other thing I keep f', cursor=1",
"SPEECH OUTPUT: '4. and that other thing I keep forgetting.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"8. Line Down",
["BRAILLE LINE: 'Your ordered lists can start at a strange number, like:'",
" VISIBLE: 'Your ordered lists can start at ', cursor=1",
"SPEECH OUTPUT: 'Your ordered lists can start at a strange number, like:'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"9. Line Down",
["KNOWN ISSUE: Gecko is not exposing this as a roman numeral.",
"BRAILLE LINE: '6. And use roman numerals,'",
" VISIBLE: '6. And use roman numerals,', cursor=1",
"SPEECH OUTPUT: '6. And use roman numerals,.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"10. Line Down",
["BRAILLE LINE: 'g. You might try using letters as well,'",
" VISIBLE: 'g. You might try using letters a', cursor=1",
"SPEECH OUTPUT: 'g. You might try using letters as well,.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"11. Line Down",
["BRAILLE LINE: 'H. Maybe you prefer Big Letters,'",
" VISIBLE: 'H. Maybe you prefer Big Letters,', cursor=1",
"SPEECH OUTPUT: 'H. Maybe you prefer Big Letters,.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"12. Line Down",
["KNOWN ISSUE: Gecko is not exposing this as a roman numeral.",
"BRAILLE LINE: '9. or small roman numerals'",
" VISIBLE: '9. or small roman numerals', cursor=1",
"SPEECH OUTPUT: '9. or small roman numerals.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"13. Line Up",
["BRAILLE LINE: 'H. Maybe you prefer Big Letters,'",
" VISIBLE: 'H. Maybe you prefer Big Letters,', cursor=1",
"SPEECH OUTPUT: 'H. Maybe you prefer Big Letters,.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"14. Line Up",
["BRAILLE LINE: 'g. You might try using letters as well,'",
" VISIBLE: 'g. You might try using letters a', cursor=1",
"SPEECH OUTPUT: 'g. You might try using letters as well,.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"15. Line Up",
["KNOWN ISSUE: Gecko is not exposing this as a roman numeral.",
"BRAILLE LINE: '6. And use roman numerals,'",
" VISIBLE: '6. And use roman numerals,', cursor=1",
"SPEECH OUTPUT: '6. And use roman numerals,.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"16. Line Up",
["BRAILLE LINE: 'Your ordered lists can start at a strange number, like:'",
" VISIBLE: 'Your ordered lists can start at ', cursor=1",
"SPEECH OUTPUT: 'Your ordered lists can start at a strange number, like:'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"17. Line Up",
["BRAILLE LINE: '4. and that other thing I keep forgetting.'",
" VISIBLE: '4. and that other thing I keep f', cursor=1",
"SPEECH OUTPUT: '4. and that other thing I keep forgetting.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"18. Line Up",
["BRAILLE LINE: '3. look really cool when we carry them around on yellow Post-Itstm.'",
" VISIBLE: '3. look really cool when we carr', cursor=1",
"SPEECH OUTPUT: '3. look really cool when we carry them around on yellow Post-Itstm.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"19. Line Up",
["BRAILLE LINE: 'sense of priority, even if it is artificial'",
" VISIBLE: 'sense of priority, even if it is', cursor=1",
"SPEECH OUTPUT: 'sense of priority, even if it is artificial.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"20. Line Up",
["BRAILLE LINE: '2. arrange long and arbitrary lines of text into ordered lists that are pleasing to the eye and suggest some'",
" VISIBLE: '2. arrange long and arbitrary li', cursor=1",
"SPEECH OUTPUT: '2. arrange long and arbitrary lines of text into ordered lists that are pleasing to the eye and suggest some.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"21. Line Up",
["BRAILLE LINE: '1. remember what the heck we are doing each day'",
" VISIBLE: '1. remember what the heck we are', cursor=1",
"SPEECH OUTPUT: '1. remember what the heck we are doing each day.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"22. Line Up",
["BRAILLE LINE: 'Lists are not only fun to make, they are fun to use. They help us:'",
" VISIBLE: 'Lists are not only fun to make, ', cursor=1",
"SPEECH OUTPUT: 'Lists are not only fun to make, they are fun to use. They help us:'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"23. Line Up",
["BRAILLE LINE: 'Welcome to a List of Lists h1'",
" VISIBLE: 'Welcome to a List of Lists h1', cursor=1",
"SPEECH OUTPUT: 'Welcome to a List of Lists heading level 1'"]))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
| lgpl-2.1 | 6,838,572,798,517,745,000 | 44.559406 | 136 | 0.700532 | false |
pythonchelle/opencomparison | apps/accounts/urls.py | 1 | 1313 | from django.conf import settings
from django.conf.urls.defaults import patterns, include, url
from django.views.generic.simple import direct_to_template
from registration.views import activate
from registration.views import register
from accounts.forms import RegistrationForm
urlpatterns = patterns('',
url(r'^register/$',
register,
{'backend': "accounts.backends.DjangoPackagesRegistrationBackend",
'form_class': RegistrationForm},
name='registration_register'),
url(r'^activate/complete/$',
direct_to_template,
{'template': 'registration/activation_complete.html'},
name='registration_activation_complete'),
url(r'^activate/(?P<activation_key>\w+)/$',
activate,
{'backend': "accounts.backends.DjangoPackagesRegistrationBackend"},
name='registration_activate'),
url(r'^register/complete/$',
direct_to_template,
{'template': 'registration/registration_complete.html'},
name='registration_complete'),
url(r'^register/closed/$',
direct_to_template,
{'template': 'registration/registration_closed.html'},
name='registration_disallowed'),
(r'', include('registration.auth_urls')),
) | mit | -5,129,256,107,149,540,000 | 36.542857 | 78 | 0.64128 | false |
ismail-s/warehouse | tests/unit/test_config.py | 1 | 15964 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
import pretend
import pytest
import zope.interface
from pyramid import renderers
from warehouse import config
from warehouse.utils.wsgi import ProxyFixer, VhmRootRemover
class TestCSPTween:
def test_csp_policy(self):
response = pretend.stub(headers={})
handler = pretend.call_recorder(lambda request: response)
registry = pretend.stub(
settings={
"csp": {
"default-src": ["*"],
"style-src": ["'self'", "example.net"],
},
},
)
tween = config.content_security_policy_tween_factory(handler, registry)
request = pretend.stub(path="/project/foobar/")
assert tween(request) is response
assert response.headers == {
"Content-Security-Policy":
"default-src *; style-src 'self' example.net",
}
def test_csp_policy_debug_disables(self):
response = pretend.stub(headers={})
handler = pretend.call_recorder(lambda request: response)
registry = pretend.stub(
settings={
"csp": {
"default-src": ["*"],
"style-src": ["'self'", "example.net"],
},
},
)
tween = config.content_security_policy_tween_factory(handler, registry)
request = pretend.stub(path="/_debug_toolbar/foo/")
assert tween(request) is response
assert response.headers == {}
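# Worked example of the mapping exercised above: the CSP tween serializes the
# ``csp`` settings dict into one header value, e.g.
#     {"default-src": ["*"], "style-src": ["'self'", "example.net"]}
#     -> "default-src *; style-src 'self' example.net"
# and skips the header entirely for debug-toolbar paths.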
class TestRequireHTTPSTween:
def test_noops_when_disabled(self):
handler = pretend.stub()
registry = pretend.stub(
settings=pretend.stub(
get=pretend.call_recorder(lambda k, v: False),
),
)
assert config.require_https_tween_factory(handler, registry) is handler
assert registry.settings.get.calls == [
pretend.call("enforce_https", True),
]
@pytest.mark.parametrize(
("params", "scheme"),
[
({}, "https"),
({":action": "thing"}, "https"),
({}, "http"),
],
)
def test_allows_through(self, params, scheme):
request = pretend.stub(params=params, scheme=scheme)
response = pretend.stub()
handler = pretend.call_recorder(lambda req: response)
registry = pretend.stub(
settings=pretend.stub(
get=lambda k, v: True,
),
)
tween = config.require_https_tween_factory(handler, registry)
assert tween(request) is response
assert handler.calls == [pretend.call(request)]
@pytest.mark.parametrize(
("params", "scheme"),
[
({":action": "thing"}, "http"),
],
)
def test_rejects(self, params, scheme):
request = pretend.stub(params=params, scheme=scheme)
handler = pretend.stub()
registry = pretend.stub(
settings=pretend.stub(
get=lambda k, v: True,
),
)
tween = config.require_https_tween_factory(handler, registry)
resp = tween(request)
assert resp.status == "403 SSL is required"
assert resp.headers["X-Fastly-Error"] == "803"
assert resp.content_type == "text/plain"
assert resp.body == b"SSL is required."
@pytest.mark.parametrize(
("path", "expected"),
[
("/foo/bar/", True),
("/static/wat/", False),
("/_debug_toolbar/thing/", False),
],
)
def test_activate_hook(path, expected):
request = pretend.stub(path=path)
assert config.activate_hook(request) == expected
@pytest.mark.parametrize(
("environ", "name", "envvar", "coercer", "default", "expected"),
[
({}, "test.foo", "TEST_FOO", None, None, {}),
(
{"TEST_FOO": "bar"}, "test.foo", "TEST_FOO", None, None,
{"test.foo": "bar"},
),
(
{"TEST_INT": "1"}, "test.int", "TEST_INT", int, None,
{"test.int": 1},
),
({}, "test.foo", "TEST_FOO", None, "lol", {"test.foo": "lol"}),
(
{"TEST_FOO": "bar"}, "test.foo", "TEST_FOO", None, "lol",
{"test.foo": "bar"},
),
],
)
def test_maybe_set(monkeypatch, environ, name, envvar, coercer, default,
expected):
for key, value in environ.items():
monkeypatch.setenv(key, value)
settings = {}
config.maybe_set(settings, name, envvar, coercer=coercer, default=default)
assert settings == expected
@pytest.mark.parametrize(
("environ", "base", "name", "envvar", "expected"),
[
({}, "test", "foo", "TEST_FOO", {}),
({"TEST_FOO": "bar"}, "test", "foo", "TEST_FOO", {"test.foo": "bar"}),
(
{"TEST_FOO": "bar thing=other"}, "test", "foo", "TEST_FOO",
{"test.foo": "bar", "test.thing": "other"},
),
(
{"TEST_FOO": "bar thing=other wat=\"one two\""},
"test", "foo", "TEST_FOO",
{"test.foo": "bar", "test.thing": "other", "test.wat": "one two"},
),
],
)
def test_maybe_set_compound(monkeypatch, environ, base, name, envvar,
expected):
for key, value in environ.items():
monkeypatch.setenv(key, value)
settings = {}
config.maybe_set_compound(settings, base, name, envvar)
assert settings == expected
@pytest.mark.parametrize("factory", [None, pretend.stub()])
def test_find_service_factory(monkeypatch, factory):
context_iface = pretend.stub()
provided_by = pretend.call_recorder(lambda context: context_iface)
monkeypatch.setattr(zope.interface, "providedBy", provided_by)
config_or_request = pretend.stub(
registry=pretend.stub(
adapters=pretend.stub(
lookup=pretend.call_recorder(lambda *a, **kw: factory),
),
),
)
if factory is None:
with pytest.raises(ValueError):
config.find_service_factory(config_or_request)
else:
assert config.find_service_factory(config_or_request) is factory
@pytest.mark.parametrize(
("settings", "environment", "other_settings"),
[
(None, config.Environment.production, {}),
({}, config.Environment.production, {}),
(
{"my settings": "the settings value"},
config.Environment.production,
{},
),
(None, config.Environment.development, {}),
({}, config.Environment.development, {}),
(
{"my settings": "the settings value"},
config.Environment.development,
{},
),
(None, config.Environment.production, {"warehouse.theme": "my_theme"}),
],
)
def test_configure(monkeypatch, settings, environment, other_settings):
json_renderer_obj = pretend.stub()
json_renderer_cls = pretend.call_recorder(lambda **kw: json_renderer_obj)
monkeypatch.setattr(renderers, "JSON", json_renderer_cls)
xmlrpc_renderer_obj = pretend.stub()
xmlrpc_renderer_cls = pretend.call_recorder(
lambda **kw: xmlrpc_renderer_obj
)
monkeypatch.setattr(config, "XMLRPCRenderer", xmlrpc_renderer_cls)
if environment == config.Environment.development:
monkeypatch.setenv("WAREHOUSE_ENV", "development")
class FakeRegistry(dict):
def __init__(self):
self.settings = {
"warehouse.token": "insecure token",
"warehouse.env": environment,
"camo.url": "http://camo.example.com/",
"pyramid.reload_assets": False,
"dirs.packages": "/srv/data/pypi/packages/",
}
configurator_settings = other_settings.copy()
configurator_obj = pretend.stub(
registry=FakeRegistry(),
include=pretend.call_recorder(lambda include: None),
add_directive=pretend.call_recorder(lambda name, fn: None),
add_wsgi_middleware=pretend.call_recorder(lambda m, *a, **kw: None),
add_renderer=pretend.call_recorder(lambda name, renderer: None),
add_request_method=pretend.call_recorder(lambda fn: None),
add_jinja2_renderer=pretend.call_recorder(lambda renderer: None),
add_jinja2_search_path=pretend.call_recorder(lambda path, name: None),
get_settings=lambda: configurator_settings,
add_settings=pretend.call_recorder(
lambda d: configurator_settings.update(d)
),
add_tween=pretend.call_recorder(lambda tween_factory: None),
add_static_view=pretend.call_recorder(lambda name, path, **kw: None),
scan=pretend.call_recorder(lambda ignore: None),
)
configurator_cls = pretend.call_recorder(lambda settings: configurator_obj)
monkeypatch.setattr(config, "Configurator", configurator_cls)
cachebuster_obj = pretend.stub()
cachebuster_cls = pretend.call_recorder(lambda p, reload: cachebuster_obj)
monkeypatch.setattr(config, "ManifestCacheBuster", cachebuster_cls)
transaction_manager = pretend.stub()
transaction = pretend.stub(
TransactionManager=pretend.call_recorder(lambda: transaction_manager),
)
monkeypatch.setattr(config, "transaction", transaction)
result = config.configure(settings=settings)
expected_settings = {
"warehouse.env": environment,
"warehouse.commit": None,
"site.name": "Warehouse",
}
if environment == config.Environment.development:
expected_settings.update({
"enforce_https": False,
"pyramid.reload_templates": True,
"pyramid.reload_assets": True,
"pyramid.prevent_http_cache": True,
"debugtoolbar.hosts": ["0.0.0.0/0"],
"debugtoolbar.panels": [
"pyramid_debugtoolbar.panels.versions.VersionDebugPanel",
"pyramid_debugtoolbar.panels.settings.SettingsDebugPanel",
"pyramid_debugtoolbar.panels.headers.HeaderDebugPanel",
(
"pyramid_debugtoolbar.panels.request_vars."
"RequestVarsDebugPanel"
),
"pyramid_debugtoolbar.panels.renderings.RenderingsDebugPanel",
"pyramid_debugtoolbar.panels.logger.LoggingPanel",
(
"pyramid_debugtoolbar.panels.performance."
"PerformanceDebugPanel"
),
"pyramid_debugtoolbar.panels.routes.RoutesDebugPanel",
"pyramid_debugtoolbar.panels.sqla.SQLADebugPanel",
"pyramid_debugtoolbar.panels.tweens.TweensDebugPanel",
(
"pyramid_debugtoolbar.panels.introspection."
"IntrospectionDebugPanel"
),
],
})
if settings is not None:
expected_settings.update(settings)
assert configurator_cls.calls == [pretend.call(settings=expected_settings)]
assert result is configurator_obj
assert configurator_obj.add_wsgi_middleware.calls == [
pretend.call(ProxyFixer, token="insecure token"),
pretend.call(VhmRootRemover),
]
assert configurator_obj.include.calls == (
[
pretend.call(x) for x in [
(
"pyramid_debugtoolbar"
if environment == config.Environment.development else None
),
]
if x is not None
]
+
[
pretend.call(".logging"),
pretend.call("pyramid_jinja2"),
pretend.call("pyramid_tm"),
pretend.call("pyramid_services"),
pretend.call("pyramid_rpc.xmlrpc"),
pretend.call(".legacy.action_routing"),
pretend.call(".i18n"),
pretend.call(".db"),
pretend.call(".search"),
pretend.call(".aws"),
pretend.call(".celery"),
pretend.call(".sessions"),
pretend.call(".cache.http"),
pretend.call(".cache.origin"),
pretend.call(".csrf"),
pretend.call(".accounts"),
pretend.call(".packaging"),
pretend.call(".redirects"),
pretend.call(".routes"),
pretend.call(".raven"),
]
+
[
pretend.call(x) for x in [
configurator_settings.get("warehouse.theme"),
]
if x
]
)
assert configurator_obj.add_jinja2_renderer.calls == [
pretend.call(".html"),
pretend.call(".txt"),
pretend.call(".xml"),
]
assert configurator_obj.add_jinja2_search_path.calls == [
pretend.call("warehouse:templates", name=".html"),
pretend.call("warehouse:templates", name=".txt"),
pretend.call("warehouse:templates", name=".xml"),
]
assert configurator_obj.add_settings.calls == [
pretend.call({"jinja2.newstyle": True}),
pretend.call({
"tm.attempts": 3,
"tm.manager_hook": mock.ANY,
"tm.activate_hook": config.activate_hook,
"tm.annotate_user": False,
}),
pretend.call({
"csp": {
"connect-src": ["'self'"],
"default-src": ["'none'"],
"font-src": ["'self'", "fonts.gstatic.com"],
"frame-ancestors": ["'none'"],
"img-src": [
"'self'",
"http://camo.example.com/",
"https://secure.gravatar.com",
],
"referrer": ["origin-when-cross-origin"],
"reflected-xss": ["block"],
"report-uri": [None],
"script-src": ["'self'"],
"style-src": ["'self'", "fonts.googleapis.com"],
},
}),
]
add_settings_dict = configurator_obj.add_settings.calls[1].args[0]
assert add_settings_dict["tm.manager_hook"](pretend.stub()) is \
transaction_manager
assert configurator_obj.add_directive.calls == [
pretend.call("find_service_factory", config.find_service_factory),
]
assert configurator_obj.add_request_method.calls == [
pretend.call(config.find_service_factory),
]
assert configurator_obj.add_tween.calls == [
pretend.call("warehouse.config.content_security_policy_tween_factory"),
pretend.call("warehouse.config.require_https_tween_factory"),
]
assert configurator_obj.add_static_view.calls == [
pretend.call(
name="static",
path="warehouse:static/dist/",
cache_max_age=0,
cachebust=cachebuster_obj,
),
pretend.call(name="locales", path="warehouse:locales/"),
]
assert cachebuster_cls.calls == [
pretend.call("warehouse:static/dist/manifest.json", reload=False),
]
assert configurator_obj.scan.calls == [
pretend.call(ignore=["warehouse.migrations.env", "warehouse.wsgi"]),
]
assert configurator_obj.add_renderer.calls == [
pretend.call("json", json_renderer_obj),
pretend.call("xmlrpc", xmlrpc_renderer_obj),
]
assert json_renderer_cls.calls == [
pretend.call(sort_keys=True, separators=(",", ":")),
]
assert xmlrpc_renderer_cls.calls == [pretend.call(allow_none=True)]
| apache-2.0 | -714,326,451,956,778,200 | 34.008772 | 79 | 0.567339 | false |
jamesiter/JimV-N | models/event_process.py | 1 | 9567 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import libvirt
from models.initialize import guest_event_emit
from models import Guest
__author__ = 'James Iter'
__date__ = '2017/6/15'
__contact__ = '[email protected]'
__copyright__ = '(c) 2017 by James Iter.'
class EventProcess(object):
conn = None
guest_callbacks = list()
VIR_DOMAIN_EVENT_SHUTDOWN_GUEST = 1
VIR_DOMAIN_EVENT_SHUTDOWN_HOST = 2
def __init__(self):
pass
@classmethod
def guest_event_callback(cls, conn, dom, event, detail, opaque):
if not isinstance(dom, libvirt.virDomain):
            # Skip guests that are no longer on this host
return
if event == libvirt.VIR_DOMAIN_EVENT_STOPPED and detail == libvirt.VIR_DOMAIN_EVENT_STOPPED_MIGRATED:
            # Do not send a state notification once the guest has finished migrating off this host
return
Guest.guest_state_report(dom=dom)
if event == libvirt.VIR_DOMAIN_EVENT_DEFINED:
if detail == libvirt.VIR_DOMAIN_EVENT_DEFINED_ADDED:
                # Triggered after a guest has been created
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_DEFINED_UPDATED:
                # Triggered after the guest configuration has been updated
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_DEFINED_RENAMED:
                # Guest renamed; untested. Presumably triggered when the guest takes its new name
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_DEFINED_FROM_SNAPSHOT:
                # Config was restored from a snapshot; untested. Presumably a new guest is created from the current configuration of a guest snapshot
pass
else:
pass
elif event == libvirt.VIR_DOMAIN_EVENT_UNDEFINED:
if detail == libvirt.VIR_DOMAIN_EVENT_UNDEFINED_REMOVED:
                # A guest definition has been removed
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_UNDEFINED_RENAMED:
                # Guest renamed; untested. Presumably triggered when the guest's old name disappears
pass
else:
pass
elif event == libvirt.VIR_DOMAIN_EVENT_STARTED:
if detail == libvirt.VIR_DOMAIN_EVENT_STARTED_BOOTED:
                # Normal boot
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_STARTED_MIGRATED:
                # Triggered when a guest is migrated in from another host
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_STARTED_RESTORED:
                # Guest restored from a state file
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_STARTED_FROM_SNAPSHOT:
                # Triggered when a guest is restored from a snapshot
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_STARTED_WAKEUP:
                # Fired on wakeup; untested.
pass
else:
pass
elif event == libvirt.VIR_DOMAIN_EVENT_SUSPENDED:
if detail == libvirt.VIR_DOMAIN_EVENT_SUSPENDED_PAUSED:
                # Fired when an administrator pauses the guest
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_SUSPENDED_MIGRATED:
                # Fired when the guest that is about to migrate out is paused temporarily for live migration
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_SUSPENDED_IOERROR:
                # Fired when the guest is paused because of a disk I/O error; untested
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_SUSPENDED_WATCHDOG:
                # Fired when the watchdog is triggered; untested
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_SUSPENDED_RESTORED:
                # Fired when restored from the state file of a paused guest
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_SUSPENDED_FROM_SNAPSHOT:
                # Fired when restored from a snapshot of a paused guest
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_SUSPENDED_API_ERROR:
                # Fired after a libvirt API call fails
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_SUSPENDED_POSTCOPY:
                # Fired when the guest is paused while being migrated in post-copy mode
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_SUSPENDED_POSTCOPY_FAILED:
                # Fired when a post-copy migration fails
pass
else:
pass
elif event == libvirt.VIR_DOMAIN_EVENT_RESUMED:
if detail == libvirt.VIR_DOMAIN_EVENT_RESUMED_UNPAUSED:
                # Fired on a normal resume after the pause is lifted
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_RESUMED_MIGRATED:
                # Fired on the migration target host when the migration completes
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_RESUMED_FROM_SNAPSHOT:
                # Fired when resumed from a snapshot
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_RESUMED_POSTCOPY:
                # Resumed, but the migration is still running in post-copy mode; untested
pass
else:
pass
elif event == libvirt.VIR_DOMAIN_EVENT_STOPPED:
if detail == libvirt.VIR_DOMAIN_EVENT_STOPPED_SHUTDOWN:
                # Fired on a normal shutdown
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_STOPPED_DESTROYED:
                # Fired when the guest's power is forcibly cut from the host
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_STOPPED_CRASHED:
                # Fired when the guest crashes
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_STOPPED_MIGRATED:
                # Fired after the guest finishes migrating off this host
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_STOPPED_SAVED:
                # Fired after the guest is saved to a state file
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_STOPPED_FAILED:
                # Fired when the emulator or manager on the host fails
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_STOPPED_FROM_SNAPSHOT:
                # Fired after an offline snapshot has been loaded; untested
pass
elif event == libvirt.VIR_DOMAIN_EVENT_SHUTDOWN:
if detail == libvirt.VIR_DOMAIN_EVENT_SHUTDOWN_FINISHED:
                # Fired after the guest shuts down normally
pass
elif detail == cls.VIR_DOMAIN_EVENT_SHUTDOWN_GUEST:
                # Fired after the guest itself raises the shutdown signal (the virtual hardware is still running but the OS has shut down, unlike poweroff); untested
pass
elif detail == cls.VIR_DOMAIN_EVENT_SHUTDOWN_HOST:
                # Fired after the guest is shut down from the host via a signal
pass
else:
pass
elif event == libvirt.VIR_DOMAIN_EVENT_PMSUSPENDED:
if detail == libvirt.VIR_DOMAIN_EVENT_PMSUSPENDED_MEMORY:
                # The guest's memory was suspended by power management
pass
elif detail == libvirt.VIR_DOMAIN_EVENT_PMSUSPENDED_DISK:
                # The guest's disk was suspended by power management
pass
else:
pass
elif event == libvirt.VIR_DOMAIN_EVENT_CRASHED:
if detail == libvirt.VIR_DOMAIN_EVENT_CRASHED_PANICKED:
                # Fired when the guest crashes (panicked)
pass
else:
pass
else:
pass
@staticmethod
def guest_event_migration_iteration_callback(conn, dom, iteration, opaque):
try:
migrate_info = dict()
migrate_info['type'], migrate_info['time_elapsed'], migrate_info['time_remaining'], \
migrate_info['data_total'], migrate_info['data_processed'], migrate_info['data_remaining'], \
migrate_info['mem_total'], migrate_info['mem_processed'], migrate_info['mem_remaining'], \
migrate_info['file_total'], migrate_info['file_processed'], migrate_info['file_remaining'] = \
dom.jobInfo()
guest_event_emit.migrating(uuid=dom.UUIDString(), migrating_info=migrate_info)
except libvirt.libvirtError as e:
pass
@staticmethod
def guest_event_device_added_callback(conn, dom, dev, opaque):
Guest.update_xml(dom=dom)
@staticmethod
def guest_event_device_removed_callback(conn, dom, dev, opaque):
Guest.update_xml(dom=dom)
@classmethod
def guest_event_register(cls):
cls.conn = libvirt.open()
cls.conn.domainEventRegister(cls.guest_event_callback, None)
        # Reference: https://libvirt.org/html/libvirt-libvirt-domain.html#virDomainEventID
cls.guest_callbacks.append(cls.conn.domainEventRegisterAny(
None, libvirt.VIR_DOMAIN_EVENT_ID_MIGRATION_ITERATION,
cls.guest_event_migration_iteration_callback, None))
cls.guest_callbacks.append(cls.conn.domainEventRegisterAny(
None, libvirt.VIR_DOMAIN_EVENT_ID_DEVICE_ADDED,
cls.guest_event_device_added_callback, None))
cls.guest_callbacks.append(cls.conn.domainEventRegisterAny(
None, libvirt.VIR_DOMAIN_EVENT_ID_DEVICE_REMOVED,
cls.guest_event_device_removed_callback, None))
@classmethod
def guest_event_deregister(cls):
cls.conn.domainEventDeregister(cls.guest_event_callback)
for eid in cls.guest_callbacks:
cls.conn.domainEventDeregisterAny(eid)
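# A minimal wiring sketch (illustrative only, not used by this module): the
# callbacks above only fire while a libvirt event loop implementation has been
# registered before the connection is opened and is being pumped continuously.
def _example_event_loop_wiring():
    import threading
    def pump():
        while True:
            libvirt.virEventRunDefaultImpl()
    libvirt.virEventRegisterDefaultImpl()
    t = threading.Thread(target=pump)
    t.daemon = True
    t.start()
    EventProcess.guest_event_register()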
| gpl-3.0 | 7,831,306,273,110,688,000 | 35.497872 | 110 | 0.55847 | false |
petrvanblokland/Xierpa3 | xierpa3/sites/examples/helloworldblueprint/make.py | 1 | 9548 | # -*- coding: UTF-8 -*-
# -----------------------------------------------------------------------------
# xierpa server
# Copyright (c) 2014+ [email protected], www.petr.com, www.xierpa.com
#
# X I E R P A 3
# Distribution by the MIT License.
#
# -----------------------------------------------------------------------------
#
# make.py
#
import webbrowser
from xierpa3.toolbox.transformer import TX
from xierpa3.components import Theme, Page, Column
from xierpa3.builders.cssbuilder import CssBuilder
from xierpa3.builders.htmlbuilder import HtmlBuilder
from xierpa3.attributes import Em, Margin, Perc, Color
from xierpa3.descriptors.media import Media
from xierpa3.descriptors.blueprint import BluePrint
BODYFAMILY = 'Impact, Verdana, sans'
CAPTIONFAMILY = 'Georgia, serif'
class HelloWorldBluePrintText(Column):
# Get Constants->Config as class variable, so inheriting classes can redefine values.
C = Theme.C
    # The BluePrint defines the parameters for the component. They can be adjusted by parent
    # components that implement this component on a page, or by inheriting classes that
    # only want to redefine part of the parameters. The actual self.style is created during
    # compilation (not during run-time) as the cascading result of all parent BLUEPRINT
    # dictionaries.
    # Furthermore, the documentation builder uses the BluePrint instance to visualize
    # the interface of each available component.
#
BLUEPRINT = BluePrint(
# Attribute, documentation about the attribute.
# Main div block
bodyFamily=BODYFAMILY, doc_bodyFamily=u'Body font family of this example. For now, in this example we only use system fonts.',
fontSize=Em(4), doc_fontSize=u'Font size of the body text, relative to the body font size.',
lineHeight=Em(1.2), doc_lineHeight=u'Line height (leading) of body text.',
textAlign=C.CENTER, doc_textAlign=u'Horizontal alignment of text.',
color=Color('yellow'), doc_color=u'Color of the main column.',
colorTablet=Color('orange'), doc_colorTablet=u'Text color of the main column for tablet.',
colorMobile=Color('red'), doc_colorMobile=u'Text color of the main column for mobile.',
backgroundColor=Color('red'), doc_backgroundColor=u'Background color of the main column',
backgroundColorTablet=Color('green'), doc_backgroundColorTablet=u'Background color of the main column for tablet.',
backgroundColorMobile=Color('#BBB'), doc_backgroundColorMobile=u'Background color of the main column for mobile.',
paddingTop=Em(0.5), doc_paddingTop=u'Padding on top of the page',
paddingBottom=Em(0.5), doc_paddingBottom=u'Padding at bottom of the page.',
margin=Margin(0, C.AUTO), doc_margin=u'Page margin of the column. In this case, horizontally centered on the page.',
width=Perc(80), doc_width=u'Width of the main column. Default is 80% os the page with.',
maxWidth=700, doc_maxWidth=u'Maximal width of the column.',
minWidth=300, doc_minWidth=u'Minimal width of the column.',
# Caption
captionFont=CAPTIONFAMILY, doc_captionFont=u'Caption font family for this example. For now, in this example we only use system fonts.',
captionColor=Color('#888'), doc_captionColor=u'Color of the caption.',
captionPaddingTop=Em(0.2), doc_captionPaddingTop=u'Padding top of the caption.',
)
def buildBlock(self, b):
u"""Build the column, using the parameters from the class BluePrint instance.
        This dictionary builds **self.style()** by cascading all BluePrint instances
        of the parent classes. The result is a complete specification of all the parameters
        that direct the style and behavior of this component."""
s = self.style
b.div(class_=self.getClassName(), color=s.color, margin=s.margin,
width=s.width, maxwidth=s.maxWidth, minwidth=s.minWidth, backgroundcolor=s.backgroundColor,
paddingtop=s.paddingTop, paddingbottom=s.paddingBottom, fontfamily=s.bodyFamily,
fontsize=s.fontSize, textalign=s.textAlign, lineheight=s.lineHeight,
# Now define the @media parameters, where they belong: inside the definition of the element.
# The media parameters are collected and sorted for output at the end of the CSS document.
media=(
# Example for table, show lighter background, change color of text and smaller size.
Media(min=self.C.M_TABLET_MIN, max=self.C.M_TABLET_MAX, backgroundcolor=s.backgroundColorTablet,
color=s.colorTablet, fontsize=Em(3), width=self.C.AUTO, float=self.C.NONE),
# For mobile, even more lighter background, change color of text and smaller size.
Media(max=self.C.M_MOBILE_MAX, backgroundcolor=s.backgroundColorMobile,
color=s.colorMobile, fontsize=Em(2), width=self.C.AUTO, float=self.C.NONE)
))
b.text('Hello parametric world.')
# One of the advantages of using a real programming language to generate
# HTML/CSS code, is that repetitions can be written as a loop. Not necessary
# fewer lines, but more expandable and less redundant distribution of
# knowledge in the code.
data = (
# class, minWidth, maxWidth, text
('c1', self.C.M_DESKTOP_MIN, None, 'Responsive desktop mode.' ),
('c2', self.C.M_TABLET_MIN, self.C.M_TABLET_MAX, 'Responsive tablet mode.' ),
            ('c3', None, self.C.M_MOBILE_MAX, 'Responsive mobile mode.' ),
)
for class_, minWidth, maxWidth, text in data:
b.div(class_=class_, display=self.C.NONE, fontsize=Em(0.7), color=Color(self.C.WHITE),
media=Media(min=minWidth, max=maxWidth, display=self.C.BLOCK))
b.text(text)
b._div()
b._div()
b.div(class_=self.C.CLASS_CAPTION, color=s.captionColor, margin=Margin(0, self.C.AUTO),
width=Perc(100), maxwidth=700, minwidth=300,
paddingtop=s.captionPaddingTop, fontfamily=s.captionFont, fontsize=Em(0.9),
textalign=s.textAlign, fontstyle=self.C.ITALIC,
# Change background color of the line to indicate the illustrate the difference for mobile size.
#media=Media(max=self.M_MOBILE_MAX, backgroundcolor='yellow', color='#222', fontsize=Em(1),
# margin=0, width=Perc(100),
)
b.text('Responsive page, generated by Xierpa3. Using BluePrint parameters.')
b._div()
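# A minimal sketch of the cascading described above (hypothetical subclass, not
# used by this example): an inheriting component only lists the parameters it
# wants to change and relies on the parent class BLUEPRINT for everything else.
class GreenHelloWorldBluePrintText(HelloWorldBluePrintText):
    u"""Same component as above, but with an overridden background color."""
    BLUEPRINT = BluePrint(
        backgroundColor=Color('green'), doc_backgroundColor=u'Overridden background color of the main column.',
    )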
class HelloWorldBluePrint(Theme):
    u"""The **HelloWorldBluePrint** class implements a basic "Hello, world!" page, running as
    a batch process and saving the result as an HTML file. Double click the generated file or
    drag it to a browser to see the result."""
TITLE = u'The responsive "Hello, world!" page using BluePrint styling.' # Use as title of window.
def baseComponents(self):
u"""Create a theme site with just one single template home page. Answer a list
of page instances that are used as templates for this site."""
# Create an instance (=object) of the text component to be placed on the page.
hw = HelloWorldBluePrintText()
# Create an instance (=object) of the page, containing the "hw" component.
# The class is also the page name in the url.
# Components can be a single component or a list of components.
homePage = Page(class_=self.C.TEMPLATE_INDEX, components=hw, title=self.TITLE)
# Answer a list of types of pages for this site.
return [homePage]
def make(self, root):
u"""The instance of this class builds CSS and HTML files at the optional path **root**.
If not defined, then the default ~/Desktop/Xierpa3Examples/[component.name] is used as export path,
as set by Builder.DEFAULT_ROOTPATH"""
        # Create an "instance" (=object) of type "HelloWorldBluePrint". The type (=class) defines
# the behavior of the object that is made by calling the class.
if root is None:
root = TX.asDir(self.C.PATH_EXAMPLES) # Expand user path to full directory path.
# C S S
# Create the main CSS builder instance to build the SASS/CSS part of the site.
cssBuilder = CssBuilder()
# Compile (=build) the SCSS to CSS and save the file in "css/style.css".
self.build(cssBuilder) # Build from entire site theme, not just from template. Result is stream in builder.
cssBuilder.save(self, root)
# H T M L
# Create the main HTML builder instance to build the HTML part of the site.
htmlBuilder = HtmlBuilder()
# Compile the HTML and save the resulting HTML file in "helloWorld.html".
self.build(htmlBuilder) # Build from entire site theme, not just from template. Result is stream in builder.
# Answer the path, so we can directly open the file with a browser.
return htmlBuilder.save(self, root)
if __name__ == '__main__':
# This construction "__name__ == '__main__'" makes this Python file only
# be executed when called in direct mode, such as "python make.py" in the terminal.
# Since no rootPath is added to make(), the file export is in ~/Desktop/Xierpa3Examples/HelloWorldBluePrint/
site = HelloWorldBluePrint()
path = site.make()
webbrowser.open(path) # Open file path with browser
| mit | -8,731,468,573,358,479,000 | 59.43038 | 143 | 0.662233 | false |
ericmoritz/requests_debug | test.py | 1 | 6351 | import threading
import Queue
from wsgiref.simple_server import make_server
from functools import partial
from pprint import pprint
from requests_debug import debug as requests_debug
import requests
import time
from testfixtures import compare
from contextlib import contextmanager
import logging
logging.basicConfig(level=logging.DEBUG)
def client_thread_target(results_q, thread_id, url):
for n in xrange(2):
requests.get(
url,
params={"thread_id": thread_id, "n": n}
)
results_q.put(
(thread_id, requests_debug.checkpoint_id(), requests_debug.items())
)
def client_thread(results_q, thread_id, url):
return threading.Thread(
target=partial(
client_thread_target,
results_q,
thread_id,
url,
)
)
def server_timeout_thread(timeout, http_server):
time.sleep(timeout)
stop_server(http_server)
@contextmanager
def start_server():
def app(environ, start_response):
if "error" in environ.get('PATH_INFO', ''):
start_response("302 Moved Temporarily", [
("Location", environ['PATH_INFO'])])
return []
elif "404" in environ.get('PATH_INFO', ''):
start_response("404 Not Found", [])
return []
else:
start_response("200 OK", [])
return ["ok."]
http_server = make_server('127.0.0.1', 0, app)
timeout_thread = threading.Thread(
target=partial(
server_timeout_thread,
3,
http_server))
timeout_thread.start()
server_thread = threading.Thread(target=http_server.serve_forever)
server_thread.start()
yield http_server
stop_server(http_server)
def stop_server(http_server):
http_server.shutdown()
def server_port(http_server):
return http_server.server_address[1]
def test_exception():
requests_debug.install_hook()
with start_server() as http_server:
url = make_url(
server_port(http_server),
"error/")
try:
requests.get(url)
except requests.TooManyRedirects, e:
stop_server(http_server)
compare(
normalize_items(requests_debug.items()),
[{'checkpoint_id': requests_debug.checkpoint_id(),
'method': 'get',
'status': None,
'url': url}])
def test_uninstall_hook():
def assert_items(items_cb):
with start_server() as http_server:
url = make_url(server_port(http_server),
"test.py")
requests.get(url)
compare(
normalize_items(requests_debug.items()),
items_cb(url)
)
# install the hook
requests_debug.install_hook()
# assert that the hook is working
assert_items(lambda url: [
{'method': 'get',
'checkpoint_id': requests_debug.checkpoint_id(),
'status': 200,
'url': url}
])
# uninstall the hook
requests_debug.uninstall_hook()
# assert that nothing is recorded when we uninstall the hook
assert_items(lambda url: [])
def make_url(port, path):
return "http://localhost:{0}/".format(port) + path
# make the results look like the values we care about
def normalize_items(items):
return [
{'method': item['method'],
'checkpoint_id': item['checkpoint_id'],
'status': item['status'],
'url': item['url']}
for item in items
]
def test_threading():
"""
Assert that the thread locals actually work correctly by making requests
"""
with start_server() as http_server:
requests_debug.install_hook()
make_url_ = partial(make_url, server_port(http_server))
results_q = Queue.Queue()
client_threads = [
client_thread(results_q, 0, make_url_("test.py")),
client_thread(results_q, 1, make_url_("test.py")),
client_thread(results_q, 2, make_url_("404")),
]
# use an ordered dict to keep things sorted
# as we collect the results
results = []
for client in client_threads:
client.start()
for client in client_threads:
# we may not get the result for the client
# we're on but we need to collect that many
# values, so this is a quick way to do that.
            # this may raise Queue.Empty if a request
            # takes longer than 2 seconds (it shouldn't)
results.append(results_q.get(True, 2))
results.sort(key=lambda x: x[0])
def normalize(results):
return [
(thread_id, checkpoint_id, normalize_items(items))
for thread_id, checkpoint_id, items in results
]
compare(normalize(results), [
(0, results[0][1], [
{'method': 'get',
'checkpoint_id': results[0][1],
'status': 200,
'url': make_url_("test.py?thread_id=0&n=0")},
{'method': 'get',
'checkpoint_id': results[0][1],
'status': 200,
'url': make_url_("test.py?thread_id=0&n=1")},
]),
(1, results[1][1], [
{'method': 'get',
'checkpoint_id': results[1][1],
'status': 200,
'url': make_url_("test.py?thread_id=1&n=0")},
{'method': 'get',
'checkpoint_id': results[1][1],
'status': 200,
'url': make_url_("test.py?thread_id=1&n=1")},
]),
(2, results[2][1], [
{'method': 'get',
'checkpoint_id': results[2][1],
'status': 404,
'url': make_url_("404?thread_id=2&n=0")},
{'method': 'get',
'checkpoint_id': results[2][1],
'status': 404,
'url': make_url_("404?thread_id=2&n=1")},
])])
if __name__ == '__main__':
test_threading()
| mit | -395,756,526,548,463,300 | 28.133028 | 79 | 0.516139 | false |
bd-j/magellanic | magellanic/sfhs/prediction_scripts/predicted_total.py | 1 | 5894 | import sys, pickle, copy
import numpy as np
import matplotlib.pyplot as pl
import astropy.io.fits as pyfits
import magellanic.regionsed as rsed
import magellanic.mcutils as utils
from magellanic.lfutils import *
try:
import fsps
from sedpy import observate
except ImportError:
    # you won't be able to predict the integrated spectrum or magnitudes
# filterlist must be set to None in calls to total_cloud_data
sps = None
wlengths = {'2': '{4.5\mu m}',
'4': '{8\mu m}'}
dmod = {'smc':18.9,
'lmc':18.5}
cloud_info = {}
cloud_info['smc'] = [utils.smc_regions(), 20, 23, [7, 13, 16], [3,5,6]]
cloud_info['lmc'] = [utils.lmc_regions(), 48, 38, [7, 11, 13, 16], [3,4,5,6]]
def total_cloud_data(cloud, filternames = None, basti=False,
lfstring=None, agb_dust=1.0,
one_metal=None):
#########
# SPS
#########
#
if filternames is not None:
sps = fsps.StellarPopulation(add_agb_dust_model=True)
sps.params['sfh'] = 0
sps.params['agb_dust'] = agb_dust
dust = ['nodust', 'agbdust']
sps.params['imf_type'] = 0.0 #salpeter
filterlist = observate.load_filters(filternames)
else:
filterlist = None
##########
# SFHs
##########
regions, nx, ny, zlist, zlist_basti = cloud_info[cloud.lower()]
if basti:
        zlist = zlist_basti
if 'header' in regions.keys():
rheader = regions.pop('header') #dump the header info from the reg. dict
total_sfhs = None
for n, dat in regions.iteritems():
total_sfhs = sum_sfhs(total_sfhs, dat['sfhs'])
total_zmet = dat['zmet']
#collapse SFHs to one metallicity
if one_metal is not None:
ts = None
for sfh in total_sfhs:
            ts = sum_sfhs(ts, [sfh])
        total_sfhs = ts
zlist = [zlist[one_metal]]
total_zmet = [total_zmet[one_metal]]
#############
# LFs
############
bins = rsed.lfbins
if lfstring is not None:
# these are stored as a list of different metallicities
lffiles = [lfstring.format(z) for z in zlist]
lf_base = [read_villaume_lfs(f) for f in lffiles]
#get LFs broken out by age and metallicity as well as the total
lfs_zt, lf, logages = rsed.one_region_lfs(copy.deepcopy(total_sfhs), lf_base)
else:
lfs_zt, lf, logages = None, None, None
###########
# SED
############
if filterlist is not None:
spec, wave, mass = rsed.one_region_sed(copy.deepcopy(total_sfhs), total_zmet, sps)
mags = observate.getSED(wave, spec*rsed.to_cgs, filterlist=filterlist)
maggies = 10**(-0.4 * np.atleast_1d(mags))
else:
maggies, mass = None, None
#############
# Write output
############
total_values = {}
total_values['agb_clf'] = lf
total_values['agb_clfs_zt'] = lfs_zt
total_values['clf_mags'] = bins
total_values['logages'] = logages
total_values['sed_ab_maggies'] = maggies
total_values['sed_filters'] = filternames
total_values['lffile'] = lfstring
total_values['mstar'] = mass
total_values['zlist'] = zlist
return total_values, total_sfhs
def sum_sfhs(sfhs1, sfhs2):
"""
Accumulate individual sets of SFHs into a total set of SFHs. This
assumes that the individual SFH sets all have the same number and
order of metallicities, and the same time binning.
"""
if sfhs1 is None:
return copy.deepcopy(sfhs2)
elif sfhs2 is None:
return copy.deepcopy(sfhs1)
else:
out = copy.deepcopy(sfhs1)
for s1, s2 in zip(out, sfhs2):
s1['sfr'] += s2['sfr']
return out
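# Worked sketch of the accumulation above (hypothetical two-bin SFHs; keys other
# than 'sfr', e.g. the time bins, are carried over from the first argument):
def _example_sum_sfhs():
    a = [{'t1': np.array([0.0, 1.0]), 'sfr': np.array([1.0, 2.0])}]
    b = [{'t1': np.array([0.0, 1.0]), 'sfr': np.array([3.0, 4.0])}]
    total = sum_sfhs(a, b)
    assert np.allclose(total[0]['sfr'], [4.0, 6.0])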
if __name__ == '__main__':
filters = ['galex_NUV', 'spitzer_irac_ch2',
'spitzer_irac_ch4', 'spitzer_mips_24']
#filters = None
ldir, cdir = 'lf_data/', 'composite_lfs/'
outst = '{0}_n2teffcut.p'
# total_cloud_data will loop over the appropriate (for the
# isochrone) metallicities for a given lfst filename template
lfst = '{0}z{{0:02.0f}}_tau{1:2.1f}_vega_irac{2}_n2_teffcut_lf.txt'
basti = False
agb_dust=1.0
agebins = np.arange(9)*0.3 + 7.4
#loop over clouds (and bands and agb_dust) to produce clfs
for cloud in ['smc']:
rdir = '{0}cclf_{1}_'.format(cdir, cloud)
for band in ['2','4']:
lfstring = lfst.format(ldir, agb_dust, band)
dat, sfhs = total_cloud_data(cloud, filternames=filters, agb_dust=agb_dust,
lfstring=lfstring, basti=basti)
agebins = sfhs[0]['t1'][3:-1]
outfile = lfstring.replace(ldir, rdir).replace('z{0:02.0f}_','').replace('.txt','.dat')
write_clf_many([dat['clf_mags'], dat['agb_clf']], outfile, lfstring)
#fig, ax = plot_weighted_lfs(dat, agebins = agebins, dm=dmod[cloud])
#fig.suptitle('{0} @ IRAC{1}'.format(cloud.upper(), band))
#fig.savefig('byage_clfs/{0}_clfs_by_age_and_Z_irac{1}'.format(cloud, band))
#pl.close(fig)
colheads = (len(agebins)-1) * ' N<m(t={})'
colheads = colheads.format(*(agebins[:-1]+agebins[1:])/2.)
tbin_lfs = np.array([rebin_lfs(lf, ages, agebins) for lf, ages
in zip(dat['agb_clfs_zt'], dat['logages'])])
write_clf_many([dat['clf_mags'], tbin_lfs.sum(axis=0)],
outfile.replace(cdir,'byage_clfs/'), lfstring,
colheads=colheads)
pl.figure()
for s, z in zip(sfhs, dat['zlist']):
pl.step(s['t1'], s['sfr'], where='post', label='zind={0}'.format(z), linewidth=3)
pl.legend(loc=0)
pl.title(cloud.upper())
print(cloud, dat['mstar'])
| gpl-2.0 | 5,455,226,854,893,435,000 | 34.721212 | 99 | 0.557686 | false |
bccp/abopt | abopt/legacy/tests/test_vmad.py | 1 | 2670 | from __future__ import print_function
from abopt.legacy.vmad import VM, microcode, programme, Zero, Tape
from numpy.testing import assert_raises, assert_array_equal, assert_allclose
import numpy
class TestVM(VM):
@microcode(ain=['x'], aout=['y'])
def unitary(self, x, y, factor):
y[...] = x * factor
@unitary.defvjp
def _(self, _x, _y, factor):
_x[...] = _y * factor
@microcode(ain=['x1', 'x2'], aout=['y'])
def binary(self, x1, x2, y):
y[...] = x1 + x2
@binary.defvjp
def _(self, _x1, _x2, _y):
_x1[...] = _y
_x2[...] = _y
def test_single_compute():
vm = TestVM()
code = vm.code()
code.unitary(x='a', y='b', factor=3.0)
b = code.compute('b', {'a' : 1.0})
assert_array_equal(b, 3.0)
def test_single_gradient():
vm = TestVM()
code = vm.code()
code.unitary(x='a', y='b', factor=3.0)
b, _a = code.compute_with_gradient(['b', '_a'], {'a' : 1.0}, {'_b': 1.0})
assert_array_equal(b, 3.0)
assert_array_equal(_a, 3.0)
def test_nested_gradient():
vm = TestVM()
code = vm.code()
code.unitary(x='a', y='b', factor=3.0)
code.unitary(x='b', y='c', factor=3.0)
c = code.compute('c', {'a' : 1.0})
assert_array_equal(c, 9.0)
_a = code.compute_with_gradient('_a', {'a' : 1.0}, {'_c': 1.0})
c, _a = code.compute_with_gradient(['c', '_a'], {'a' : 1.0}, {'_c': 1.0})
assert_array_equal(c, 9.0)
assert_array_equal(_a, 9.0)
def test_partial_gradient():
vm = TestVM()
code = vm.code()
code.unitary(x='a', y='b1', factor=3.0)
code.unitary(x='a', y='b2', factor=3.0)
code.unitary(x='a', y='b3', factor=3.0)
code.unitary(x='a', y='b4', factor=3.0)
code.binary(x1='b1', x2='b2', y='c1')
code.binary(x1='b3', x2='b4', y='c2')
code.binary(x1='c1', x2='c2', y='d')
d, tape = code.compute('d', {'a' : 1.0}, return_tape=True)
assert_array_equal(d, 12.0)
gradient = vm.gradient(tape)
d, _a = code.compute_with_gradient(['d', '_a'], {'a' : 1.0}, {'_d': 1.0})
assert_array_equal(d, 12.0)
assert_array_equal(_a, 12.0)
def test_inplace_gradient():
vm = TestVM()
code = vm.code()
code.unitary(x='a', y='a', factor=3.0)
code.unitary(x='a', y='b1', factor=3.0)
code.unitary(x='a', y='b2', factor=3.0)
code.binary(x1='b1', x2='b2', y='b1')
code.unitary(x='b1', y='d', factor=3.0)
d, tape = code.compute('d', {'a' : 1.0}, return_tape=True)
assert_array_equal(d, 54.0)
gradient = vm.gradient(tape)
d, _a = code.compute_with_gradient(['d', '_a'], {'a' : 1.0}, {'_d': 1.0})
assert_array_equal(d, 54.0)
assert_array_equal(_a, 54.0)
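    # Why 54: with a = 1 the forward pass is a <- 3*a = 3, b1 = 3*a = 9,
    # b2 = 3*a = 9, b1 <- b1 + b2 = 18, d = 3*b1 = 54; the reverse pass walks
    # the same chain, so d(d)/d(a) = 3 * (3 + 3) * 3 = 54 even though 'a' and
    # 'b1' were overwritten in place.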
| gpl-3.0 | 6,831,476,231,680,616,000 | 29.340909 | 77 | 0.535955 | false |
reverse-CSE-591/reverse | driver.py | 1 | 19133 | #!/usr/bin/python -tt
#####################################################################################################################
# CSE 591: Security and Vulnerability Analysis
#
# Team 5:
#
# Kartheek Nallepalli
# Bhargavi Rajagopalan
# Priya Pipada
# Ayush Maheshwari
# Nikhil Aourpally
#
#
# This is the driver program. Run the main function here to find potential vulnerabilities in the website
#####################################################################################################################
# Python Imports
from __future__ import division
from bs4 import BeautifulSoup
from lxml import html
from os import system, path
from random import randint
from urlparse import urlparse
import ast
import json
import math
import nltk
import re
import requests
import sys
import time
import urllib
import urllib2
# This is a global set that contains all the URL's crawled from the website.
urls = set()
stopWords = []
#####################################################################################################################
# This method takes in a form to be filled and the url and tries to guess valid inputs that would result in a
# successful response from the server
# Inputs:
# params[] (List[String]): list of parameters along with the types in the following format.
# ex: ["username::text", "password::password"]
# action (String): The action the form should take when submitted
# url (String): The page URL for getting the HTML data and figuring out what to fill
# Output:
# validResponse (String): returns the HTML string of the valid response
#####################################################################################################################
def getValidResponse(params, action, url, cookies):
formInput={}
for key in params:
value = params[key]
formInput[key] = generateValue(value['label'],value['type'])
#print cookies, type(cookies)
(header,validResponse) = constructPostRequest(formInput, cookies, action)
return validResponse
#####################################################################################################################
# This method constructs an HTTP Post Request to submit the form to it
# Inputs:
#Output:
#####################################################################################################################
def constructPostRequest(formInput, input_cookies, action):
r = requests.post(action, data=formInput, verify=False, cookies=input_cookies)
return (r.headers,r.text)
#####################################################################################################################
# This method takes in a form to be filled and the url and inserts <scripts> into the fields.
# Inputs:
# params{} (Dictionary): list of parameters along with the types in the following format.
# ex: ["username::text", "password::password"]
# action (String): The action the form should take when submitted
# Output:
# xssResponse (String): returns the HTML response
#####################################################################################################################
def getXssResponse(params, action, url, cookies):
formInput={}
for key in params:
value = params[key]
formInput[key]="<sCript>xssAttack</sCript>"
(header,xssInjResponse) = constructPostRequest(formInput,cookies,action)
return xssInjResponse
#####################################################################################################################
# This method computes the XSS injection score for the given response
# Inputs:
#Output:
#####################################################################################################################
def getXssScore(xssResponse, input_cookies):
urls = open("crawledURLs.txt")
for url in urls:
response = requests.get(re.sub("\n","",url), verify=False, cookies=input_cookies).text
if bool(re.search('<sCript>xssAttack</sCript>', response)):
return 1
return 0
#####################################################################################################################
# This method takes in a form to be filled and the url and tries SQL injection in the fields
# Inputs:
# params[] (List[String]): list of parameters along with the types in the following format.
# ex: ["username::text", "password::password"]
# action (String): The action the form should take when submitted
# Output:
# xssResponse (String): returns the HTML response
#####################################################################################################################
def getSqlInjResponse(params, action, url, cookies):
formInput={}
for key in params:
value = params[key]
formInput[key] ="' or 1=1 --'"
(header,sqlInjResponse) = constructPostRequest(formInput,cookies,action)
return sqlInjResponse
#####################################################################################################################
# This method takes in two HTML strings, compares them and assigns a similarity score. The idea is to use this
# score to see how similar pages with valid and invalid outputs are.
# Inputs:
# html_1 (String): The first HTML page
# html_2 (String): The second HTML page
# Output:
# score (double): similarity between pages
#####################################################################################################################
def getSimilarityScore(html_1, html_2):
cleanResponse1 = BeautifulSoup(html_1).get_text()
cleanResponse2 = BeautifulSoup(html_2).get_text()
return calculateCosineSimilarity(formatVector(cleanResponse1), formatVector(cleanResponse2))
#####################################################################################################################
# The method calculates the cosine similarity between two groups
# Inputs:
#Output:
#####################################################################################################################
def calculateCosineSimilarity(group1, group2):
doc1sq = doc2sq = frequency = 0
for i in group1:
if i in group2:
frequency += group1[i] * group2[i]
for j in group1:
doc1sq += math.pow(group1[j], 2)
for k in group2:
doc2sq += math.pow(group2[k], 2)
score = float(frequency) / (math.sqrt(doc1sq) * math.sqrt(doc2sq))
return score
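# Worked example for the score above (sketch only, not used elsewhere): two
# vectors sharing a single term of weight 1 give 1 / (sqrt(2) * sqrt(2)) = 0.5.
def exampleCosineSimilarity():
    left = {'sql': 1, 'error': 1}
    right = {'sql': 1, 'welcome': 1}
    assert abs(calculateCosineSimilarity(left, right) - 0.5) < 1e-9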
#####################################################################################################################
# This method builds a term-frequency (bag-of-words) vector from an HTML response
# Inputs:
#Output:
#####################################################################################################################
def formatVector(response):
global stopWords
cleanResponse = map(lambda x:re.split(" ", x), re.split("\n", response))
vectorList = []
vectorDict = {}
for i in cleanResponse:
vectorList.extend(i)
vector = []
for i in vectorList:
        if str(i) != '' and str(i) not in stopWords:
vector.append(i.lower())
for j in vector:
if j in vectorDict:
vectorDict[j] += 1
else:
vectorDict[j] = 1
return vectorDict
#####################################################################################################################
# This method takes in the original label extracted, gets the similarity score and predicts the valid form entries
# by understanding meaning of the labes and mapping them to known labels using dictionary similarity and edit-
# distance score.
#
# TODO : Faced problems with wu-palmer similarity over wordNet (flase positives and not all terms present)
# Currently using just the edit distance
#
# Inputs:
# label (String): Label generated from the scarppy code extended
# Output:
# generated value (String): Valid generated form input value
#####################################################################################################################
def getLabel(orglabel):
userset = ['user','username','user_name']
maxscore =0
newlabel =''
for field in userset:
score = getEdidDistanceScore(orglabel, field)
if(score > maxscore):
maxscore = score
newlabel = 'username'
#print 'Max score' + str(maxscore), 'Label' + newlabel
if(maxscore<0.5):
newlabel = orglabel
return newlabel
#####################################################################################################################
# This method generates random values based on the form field type and implements intelligent form filling
# Inputs:
#Output:
#####################################################################################################################
def generateValue(label, labeltype):
if labeltype == 'text':
newlabel = getLabel(label)
if newlabel == 'username':
return 'reverse'+ str(time.time())
else:
return 'reverserandom'+ str(time.time())
elif labeltype == 'password':
return 'reversePass'+ str(time.time())
elif labeltype == 'email':
return 'reverse'+str(time.time())+'@reverse.com'
elif labeltype == 'number':
return randint(0,10000)
#####################################################################################################################
# Helper methods
#####################################################################################################################
# Get the specific form parameters
def getFormParams(link):
params = {}
labels = []
source = link['source'].replace("\n","")
for i in range(0, len(source)):
label = ''
if source[i] == '>':
while source[i] != '<':
label += source[i]
i = i + 1
if i >= len(source) - 1:
break;
if label[1:] and not label[1:].isspace():
labels.append(label[1:])
i = 0
for j in link['form']:
params[j['name']] = {}
params[j['name']]['type'] = j['type']
params[j['name']]['label'] = labels[0]
i = i + 1
return (link['target'], params)
# This method gets the list of stopwords
def getStopWords():
global stopWords
f = open("stopwords.en")
for i in f:
stopWords.append(re.sub("\n","",i))
# Get the edit-distance score between two words
def getEdidDistanceScore(word1, word2):
distance = nltk.metrics.distance.edit_distance(word1, word2, transpositions=False)
avgLength = (len(word1) + len(word2))/2
score = distance/avgLength
return score
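# Worked example for the normalized distance above (sketch only): turning 'user'
# into 'username' needs 4 insertions and the average length is 6, so the score
# is 4/6 ~ 0.67; lower values mean more similar strings here.
def exampleEditDistanceScore():
    assert abs(getEdidDistanceScore('user', 'username') - 4.0 / 6.0) < 1e-9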
#Get cookies from user
def getCookies():
flag = 0
cookies = {}
print "Enter cookies(Press X to exit): "
while True:
if not flag:
key = raw_input("Enter Key: ")
flag = 1
if key == 'X':
break;
else:
value = raw_input("Enter value: ")
flag = 0
if value == 'X':
break;
cookies[key] = value
return cookies
#####################################################################################################################
# Method to inject malicious input values into the application to check if nth order SQL injection is possible
#####################################################################################################################
def nthOrderSQLInjection(params, action, url, cookies, index, urlForms):
UserName = "reverse_12345"
Password = "aXb675hjWF@"
SQLKeyWord = "' union select "
TableInfo = 'from dual;--'
responseString = None
for i in range(0,5):
formInput = {}
ParameterPadding = 'Null,' * i
        Parameter = '"EvilMax"' + str(index) + ' '
MaliciousInputValue = UserName + SQLKeyWord + ParameterPadding + Parameter + TableInfo
for key in params:
value = params[key]
if value['type'] != 'password':
formInput[key] = MaliciousInputValue
else:
formInput[key] = Password
constructPostRequest(formInput, cookies, action)
for urlForm in urlForms:
(newAction, newParams) = getFormParams(urlForm)
newFormInput = {}
for newParam in newParams:
value = newParams[newParam]
if value['type'] != 'password':
newFormInput[newParam] = UserName
else:
newFormInput[newParam] = Password
(header, response) = constructPostRequest(formInput, cookies, newAction)
if 'EvilMax' in response:
SplitString = response.split("EvilMax")
Index = SplitString[1].split(' ')
if index != Index:
                    responseString = (responseString or "") + "nth Order SQL injection present in " + newAction + "\n"
return responseString
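# Shape of the payload built above (sketch, shown for i == 2 and index == 0):
#   reverse_12345' union select Null,Null,"EvilMax"0 from dual;--
# The Null columns pad the UNION until its column count matches the original
# query, and the quoted "EvilMax"<index> marker is what the substring check on
# the follow-up responses looks for.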
#####################################################################################################################
# The method takes the URLs extracted from the crawler scrapy and performs a "deeper" crawling by seeing if the
# server is setting any cookies after login and adds that to the list of cookies.
#Output: Updates cookies (Dictionary)
#####################################################################################################################
def deepCrawling(urlForms,cookies):
storedFormInputs=[]
formInput={}
login=False
for urlForm in urlForms:
(action, params) = getFormParams(urlForm)
credentials = {'username': None, 'password' : None}
for key in params:
value = params[key]
if value['type'] != 'submit':
formInput[key] = generateValue(value['label'],value['type'])
newLabel = getLabel(value['label'])
if newLabel == 'username':
credentials['username'] = formInput[key]
if value['type'] == 'password':
credentials['password'] = formInput[key]
if credentials:
storedFormInputs.append(credentials)
(header,response) = constructPostRequest(formInput,cookies,action)
if "registered" in response.lower() or "created" in response.lower() or "authenticated" in response.lower():
login=True
if login == True:
for urlForm in urlForms:
(action, params) = getFormParams(urlForm)
for storedFormInput in storedFormInputs:
formInput = {}
for key in params:
value = params[key]
newLabel = getLabel(value['label'])
if newLabel == 'username':
formInput[key] = storedFormInput['username']
if value['type'] == 'password' and storedFormInput['password']:
formInput[key] = storedFormInput['password']
(header, response) = constructPostRequest(formInput,cookies,action)
if 'set-cookie' in header.keys():
newCookie = str(header['set-cookie']).split(';')[0]
CookieSplit = str(newCookie).split('=')
cookies[CookieSplit[0]] = CookieSplit[1]
return cookies
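# Sketch of the cookie capture above (hypothetical header value): a response
# header such as "session=abc123; Path=/; HttpOnly" is reduced to
# {'session': 'abc123'} by keeping only the first ';'-separated pair and
# splitting it on '='.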
#####################################################################################################################
# This is the main method that gets called and submits the report on possible vulnerabilities
#####################################################################################################################
def main():
# Init Global variables
getStopWords()
# Add the required headers, most likely its just the login cookie for the page.
#opener = urllib2.build_opener()
#opener.addheaders.append(('Cookie', 'cse591=kP047iYtubEZ6ZnMKmxO'))
# domain = "129.219.253.30:80"
url = raw_input("Enter the web address: ")
cookies = getCookies()
domain = urlparse(url).netloc
# Remove any residual files
system("rm items.json")
system("rm crawledURLs.txt")
system("rm reverse_report")
system("rm reverse_response")
# Use Scrapy to get recursively get all URLs, Stores the
system("scrapy crawl ReverseCrawler -a domain="+domain+" -a start_urls="+url+" -a cookies=\""+str(cookies)+"\" -o items.json")
#cookies = ast.literal_eval(cookies)
# Iterate over all the URL's and their forms
UrlForms = json.load(open("items.json"))
print "\n\n\n"
# Open report, response file
reportFile = open('reverse_report','w')
responseFile = open('reverse_response','w')
# Perform a deeper crawling and re-crawl using scrapy to fetch more URLs
cookies = deepCrawling(UrlForms,cookies)
system("rm -f items.json")
system("scrapy crawl ReverseCrawler -a domain="+domain+" -a start_urls="+url+" -a cookies=\""+str(cookies)+"\" -o items.json")
UrlForms = json.load(open("items.json"))
# Iterate through all possible forms
index = 0
for urlForm in UrlForms:
(action, params) = getFormParams(urlForm)
print "[INFO] action: ", action
# Get the valid response
validResponse = getValidResponse(params, action, url, cookies)
# Append the resposes to response file
responseFile.write("%%%%%%%%%%%%%%%%%%%%%%%%%% Start Valid Response %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n")
responseFile.write(action + "\n")
responseFile.write(str(params) + "\n")
responseFile.write(BeautifulSoup(validResponse).get_text() + "\n")
responseFile.write("############################ Start SQL Injection response ###########################\n")
# Attempt SQL Injection and Get the score
sqlInjResponse = getSqlInjResponse(params, action, url, cookies)
responseFile.write(BeautifulSoup(sqlInjResponse).get_text() + "\n")
responseFile.write("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ Start XSS response @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n")
sqlInjectionScore = float(1) - getSimilarityScore(validResponse, sqlInjResponse)
print "[INFO] SQL_INJ_Score = ", sqlInjectionScore
# Attempt nth Order SQL injection
responseString = nthOrderSQLInjection(params, action, url, cookies, index, UrlForms)
# Attempt XSS and get the score
xssResponse = getXssResponse(params, action, url, cookies)
responseFile.write("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n")
xssScore = getXssScore(xssResponse, cookies)
print "[INFO] XSS_Score = ", xssScore
# Add scores to the report
reportFile.write("[Params]:: " + str(params) + "\n")
reportFile.write("[Action]:: " + action + "\n")
reportFile.write("[SQL_Inj_Score]:: " + str(sqlInjectionScore) + "\n")
reportFile.write("[XSS_Inj_Score]:: " + str(xssScore) + "\n\n")
if responseString is not None:
reportFile.write("[nth Order SQL Injection]::" + responseString + "\n")
print "\n\n"
index = index + 1
# Close the report
reportFile.close()
responseFile.close()
if __name__ == '__main__':
main()
| mit | 6,833,721,684,920,172,000 | 39.111111 | 130 | 0.516176 | false |
woelfware/BluMote | test/button_tx.py | 1 | 1963 | #!/usr/bin/env python
# Copyright (C) 2011 Woelfware
from bluetooth import *
import blumote
import cPickle
from glob import glob
import os
import struct
import sys
import time
class Blumote_Client(blumote.Services):
def __init__(self):
blumote.Services.__init__(self)
self.addr = None
def find_blumote_pods(self, pod_name = None):
if pod_name is None:
pod_name = self.service["name"]
print "Searching for \"%s\" service..." % (pod_name)
return find_service(name = pod_name)
def connect_to_blumote_pod(self, addr):
self.client_sock = BluetoothSocket(RFCOMM)
self.client_sock.connect((addr, 1))
def transport_tx(self, cmd, msg):
full_msg = struct.pack("B", cmd)
full_msg += msg
self.client_sock.send(full_msg)
def ir_transmit(self, msg):
self.transport_tx(self.cmd_codes.ir_transmit, msg)
return self.client_sock.recv(128)
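# Framing sketch for Blumote_Client above: transport_tx prepends the command as
# a single unsigned byte, so transport_tx(cmd, payload) writes
# struct.pack("B", cmd) + payload to the RFCOMM socket, and ir_transmit then
# blocks for up to 128 bytes of reply from the pod.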
if __name__ == "__main__":
bm_remote = Blumote_Client()
found = False
while not found:
try:
nearby_devices = discover_devices(lookup_names = True)
except:
print 'failed to find a blumote... retrying'
nearby_devices = ()
print 'found %d device(s)' % len(nearby_devices)
for addr, name in nearby_devices:
if name[:len('BluMote')] == 'BluMote':
print 'connecting to', addr, name
bm_remote.connect_to_blumote_pod(addr)
found = True
break
buttons = glob('*.pkl')
print 'Available buttons:'
for i, button in enumerate(buttons):
print '\t%i: %s' % (i, os.path.splitext(button)[0])
print
while True:
selection = raw_input('Select a button to transmit (-1 to quit): ')
try:
selection = int(selection)
except ValueError:
print 'Invalid selection'
continue
if selection == -1:
break
if ((selection < 0) or (selection >= len(buttons))):
			print 'Invalid selection'
continue
button = open(buttons[selection], 'rb')
key_code = cPickle.load(button)
button.close()
bm_remote.ir_transmit(''.join(['\x03', key_code]))
bm_remote.client_sock.close()
| gpl-3.0 | -4,726,947,572,065,195,000 | 23.5375 | 69 | 0.671931 | false |
VcamX/grpc | src/python/grpcio/grpc/framework/alpha/_face_utilities.py | 1 | 7822 | # Copyright 2015-2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import abc
import collections
import six
# face_interfaces is referenced from specification in this module.
from grpc.framework.common import cardinality
from grpc.framework.face import interfaces as face_interfaces # pylint: disable=unused-import
from grpc.framework.face import utilities as face_utilities
from grpc.framework.alpha import _reexport
from grpc.framework.alpha import interfaces
def _qualified_name(service_name, method_name):
return '/%s/%s' % (service_name, method_name)
# TODO(nathaniel): This structure is getting bloated; it could be shrunk if
# implementations._Stub used a generic rather than a dynamic underlying
# face-layer stub.
class InvocationBreakdown(six.with_metaclass(abc.ABCMeta)):
"""An intermediate representation of invocation-side views of RPC methods.
Attributes:
cardinalities: A dictionary from RPC method name to interfaces.Cardinality
value.
qualified_names: A dictionary from unqualified RPC method name to
service-qualified RPC method name.
face_cardinalities: A dictionary from service-qualified RPC method name to
to cardinality.Cardinality value.
request_serializers: A dictionary from service-qualified RPC method name to
callable behavior to be used serializing request values for the RPC.
response_deserializers: A dictionary from service-qualified RPC method name
to callable behavior to be used deserializing response values for the
RPC.
"""
class _EasyInvocationBreakdown(
InvocationBreakdown,
collections.namedtuple(
'_EasyInvocationBreakdown',
('cardinalities', 'qualified_names', 'face_cardinalities',
'request_serializers', 'response_deserializers'))):
pass
class ServiceBreakdown(six.with_metaclass(abc.ABCMeta)):
"""An intermediate representation of service-side views of RPC methods.
Attributes:
implementations: A dictionary from service-qualified RPC method name to
face_interfaces.MethodImplementation implementing the RPC method.
request_deserializers: A dictionary from service-qualified RPC method name
to callable behavior to be used deserializing request values for the RPC.
response_serializers: A dictionary from service-qualified RPC method name
to callable behavior to be used serializing response values for the RPC.
"""
class _EasyServiceBreakdown(
ServiceBreakdown,
collections.namedtuple(
'_EasyServiceBreakdown',
('implementations', 'request_deserializers', 'response_serializers'))):
pass
def break_down_invocation(service_name, method_descriptions):
"""Derives an InvocationBreakdown from several RPC method descriptions.
Args:
service_name: The package-qualified full name of the service.
method_descriptions: A dictionary from RPC method name to
interfaces.RpcMethodInvocationDescription describing the RPCs.
Returns:
An InvocationBreakdown corresponding to the given method descriptions.
"""
cardinalities = {}
qualified_names = {}
face_cardinalities = {}
request_serializers = {}
response_deserializers = {}
for name, method_description in six.iteritems(method_descriptions):
qualified_name = _qualified_name(service_name, name)
method_cardinality = method_description.cardinality()
cardinalities[name] = method_description.cardinality()
qualified_names[name] = qualified_name
face_cardinalities[qualified_name] = _reexport.common_cardinality(
method_cardinality)
request_serializers[qualified_name] = method_description.serialize_request
response_deserializers[qualified_name] = (
method_description.deserialize_response)
return _EasyInvocationBreakdown(
cardinalities, qualified_names, face_cardinalities, request_serializers,
response_deserializers)
def break_down_service(service_name, method_descriptions):
"""Derives a ServiceBreakdown from several RPC method descriptions.
Args:
method_descriptions: A dictionary from RPC method name to
interfaces.RpcMethodServiceDescription describing the RPCs.
Returns:
A ServiceBreakdown corresponding to the given method descriptions.
"""
implementations = {}
request_deserializers = {}
response_serializers = {}
for name, method_description in six.iteritems(method_descriptions):
qualified_name = _qualified_name(service_name, name)
method_cardinality = method_description.cardinality()
if method_cardinality is interfaces.Cardinality.UNARY_UNARY:
def service(
request, face_rpc_context,
service_behavior=method_description.service_unary_unary):
return service_behavior(
request, _reexport.rpc_context(face_rpc_context))
implementations[qualified_name] = face_utilities.unary_unary_inline(
service)
elif method_cardinality is interfaces.Cardinality.UNARY_STREAM:
def service(
request, face_rpc_context,
service_behavior=method_description.service_unary_stream):
return service_behavior(
request, _reexport.rpc_context(face_rpc_context))
implementations[qualified_name] = face_utilities.unary_stream_inline(
service)
elif method_cardinality is interfaces.Cardinality.STREAM_UNARY:
def service(
request_iterator, face_rpc_context,
service_behavior=method_description.service_stream_unary):
return service_behavior(
request_iterator, _reexport.rpc_context(face_rpc_context))
implementations[qualified_name] = face_utilities.stream_unary_inline(
service)
elif method_cardinality is interfaces.Cardinality.STREAM_STREAM:
def service(
request_iterator, face_rpc_context,
service_behavior=method_description.service_stream_stream):
return service_behavior(
request_iterator, _reexport.rpc_context(face_rpc_context))
implementations[qualified_name] = face_utilities.stream_stream_inline(
service)
request_deserializers[qualified_name] = (
method_description.deserialize_request)
response_serializers[qualified_name] = (
method_description.serialize_response)
return _EasyServiceBreakdown(
implementations, request_deserializers, response_serializers)
| bsd-3-clause | 5,831,767,619,180,549,000 | 41.743169 | 94 | 0.749297 | false |
ovnicraft/openerp-server | openerp/addons/base/module/wizard/base_update_translations.py | 1 | 2901 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import osv, fields
import tools
import cStringIO
from tools.translate import _
class base_update_translations(osv.osv_memory):
def _get_languages(self, cr, uid, context):
lang_obj = self.pool.get('res.lang')
ids = lang_obj.search(cr, uid, ['&', ('active', '=', True), ('translatable', '=', True),])
langs = lang_obj.browse(cr, uid, ids)
return [(lang.code, lang.name) for lang in langs]
def _get_lang_name(self, cr, uid, lang_code):
lang_obj = self.pool.get('res.lang')
ids = lang_obj.search(cr, uid, [('code', '=', lang_code)])
if not ids:
raise osv.except_osv(_('Error!'), _('No language with code "%s" exists') % lang_code)
lang = lang_obj.browse(cr, uid, ids[0])
return lang.name
def act_update(self, cr, uid, ids, context=None):
this = self.browse(cr, uid, ids)[0]
lang_name = self._get_lang_name(cr, uid, this.lang)
buf = cStringIO.StringIO()
tools.trans_export(this.lang, ['all'], buf, 'csv', cr)
tools.trans_load_data(cr, buf, 'csv', this.lang, lang_name=lang_name)
buf.close()
return {'type': 'ir.actions.act_window_close'}
def default_get(self, cr, uid, fields, context=None):
if context is None:
context = {}
res = super(base_update_translations, self).default_get(cr, uid, fields, context=context)
if context.get('active_model') != "res.lang":
return res
record_id = context.get('active_id', False) or False
if record_id:
lang = self.pool.get('res.lang').browse(cr, uid, record_id).code
res.update(lang=lang)
return res
_name = 'base.update.translations'
_columns = {
'lang': fields.selection(_get_languages, 'Language', required=True),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -5,133,423,672,400,212,000 | 40.442857 | 98 | 0.592554 | false |
citrix-openstack-build/python-openstackclient | openstackclient/tests/common/test_clientmanager.py | 1 | 1180 | # Copyright 2012-2013 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from openstackclient.common import clientmanager
from openstackclient.tests import utils
class Container(object):
attr = clientmanager.ClientCache(lambda x: object())
def __init__(self):
pass
class TestClientManager(utils.TestCase):
def setUp(self):
super(TestClientManager, self).setUp()
def test_singleton(self):
# NOTE(dtroyer): Verify that the ClientCache descriptor only invokes
# the factory one time and always returns the same value after that.
c = Container()
self.assertEqual(c.attr, c.attr)
| apache-2.0 | 6,764,901,367,657,484,000 | 32.714286 | 77 | 0.714407 | false |
PageArkanis/StEVE | steve/constellation.py | 1 | 1217 | from steve.backend.sqlitedb import SDB
from steve.system import System
class Constellation(object):
def __init__(self, universe, data):
self.universe = universe
self.regionID = data[0]
self.uid = data[1]
self.name = data[2]
self.x = data[3]
self.y = data[4]
self.z = data[5]
self.xMin = data[6]
self.xMax = data[7]
self.yMin = data[8]
self.yMax = data[9]
self.zMin = data[10]
self.zMax = data[11]
self.factionID = data[12]
self.radius = data[13]
self._systems = {}
@property
def system(self):
        if len(self._systems) == 0:
            query = 'SELECT * from mapSolarSystems WHERE constellationID = %s' % self.uid
for entry in SDB.queryAll(query):
system = System(self.universe, entry)
self._systems[system.name] = system
self._systems[system.uid] = system
return self._systems
@property
def region(self):
return self.universe.regions[self.regionID]
| agpl-3.0 | 3,538,158,798,730,244,000 | 26.659091 | 88 | 0.501233 | false |
blueyed/coveragepy | tests/test_templite.py | 1 | 10970 | # coding: utf-8
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Tests for coverage.templite."""
import re
from coverage.templite import Templite, TempliteSyntaxError, TempliteValueError
from tests.coveragetest import CoverageTest
# pylint: disable=unused-variable
class AnyOldObject(object):
"""Simple testing object.
Use keyword arguments in the constructor to set attributes on the object.
"""
def __init__(self, **attrs):
for n, v in attrs.items():
setattr(self, n, v)
class TempliteTest(CoverageTest):
"""Tests for Templite."""
run_in_temp_dir = False
def try_render(self, text, ctx=None, result=None):
"""Render `text` through `ctx`, and it had better be `result`.
Result defaults to None so we can shorten the calls where we expect
an exception and never get to the result comparison.
"""
actual = Templite(text).render(ctx or {})
# If result is None, then an exception should have prevented us getting
# to here.
assert result is not None
self.assertEqual(actual, result)
def assertSynErr(self, msg):
"""Assert that a `TempliteSyntaxError` will happen.
A context manager, and the message should be `msg`.
"""
pat = "^" + re.escape(msg) + "$"
return self.assertRaisesRegex(TempliteSyntaxError, pat)
def test_passthrough(self):
# Strings without variables are passed through unchanged.
self.assertEqual(Templite("Hello").render(), "Hello")
self.assertEqual(
Templite("Hello, 20% fun time!").render(),
"Hello, 20% fun time!"
)
def test_variables(self):
# Variables use {{var}} syntax.
self.try_render("Hello, {{name}}!", {'name':'Ned'}, "Hello, Ned!")
def test_undefined_variables(self):
# Using undefined names is an error.
with self.assertRaises(Exception):
self.try_render("Hi, {{name}}!")
def test_pipes(self):
# Variables can be filtered with pipes.
data = {
'name': 'Ned',
'upper': lambda x: x.upper(),
'second': lambda x: x[1],
}
self.try_render("Hello, {{name|upper}}!", data, "Hello, NED!")
# Pipes can be concatenated.
self.try_render("Hello, {{name|upper|second}}!", data, "Hello, E!")
def test_reusability(self):
# A single Templite can be used more than once with different data.
globs = {
'upper': lambda x: x.upper(),
'punct': '!',
}
template = Templite("This is {{name|upper}}{{punct}}", globs)
self.assertEqual(template.render({'name':'Ned'}), "This is NED!")
self.assertEqual(template.render({'name':'Ben'}), "This is BEN!")
def test_attribute(self):
# Variables' attributes can be accessed with dots.
obj = AnyOldObject(a="Ay")
self.try_render("{{obj.a}}", locals(), "Ay")
obj2 = AnyOldObject(obj=obj, b="Bee")
self.try_render("{{obj2.obj.a}} {{obj2.b}}", locals(), "Ay Bee")
def test_member_function(self):
# Variables' member functions can be used, as long as they are nullary.
class WithMemberFns(AnyOldObject):
"""A class to try out member function access."""
def ditto(self):
"""Return twice the .txt attribute."""
return self.txt + self.txt
obj = WithMemberFns(txt="Once")
self.try_render("{{obj.ditto}}", locals(), "OnceOnce")
def test_item_access(self):
# Variables' items can be used.
d = {'a':17, 'b':23}
self.try_render("{{d.a}} < {{d.b}}", locals(), "17 < 23")
def test_loops(self):
# Loops work like in Django.
nums = [1,2,3,4]
self.try_render(
"Look: {% for n in nums %}{{n}}, {% endfor %}done.",
locals(),
"Look: 1, 2, 3, 4, done."
)
# Loop iterables can be filtered.
def rev(l):
"""Return the reverse of `l`."""
l = l[:]
l.reverse()
return l
self.try_render(
"Look: {% for n in nums|rev %}{{n}}, {% endfor %}done.",
locals(),
"Look: 4, 3, 2, 1, done."
)
def test_empty_loops(self):
self.try_render(
"Empty: {% for n in nums %}{{n}}, {% endfor %}done.",
{'nums':[]},
"Empty: done."
)
def test_multiline_loops(self):
self.try_render(
"Look: \n{% for n in nums %}\n{{n}}, \n{% endfor %}done.",
{'nums':[1,2,3]},
"Look: \n\n1, \n\n2, \n\n3, \ndone."
)
def test_multiple_loops(self):
self.try_render(
"{% for n in nums %}{{n}}{% endfor %} and "
"{% for n in nums %}{{n}}{% endfor %}",
{'nums': [1,2,3]},
"123 and 123"
)
def test_comments(self):
# Single-line comments work:
self.try_render(
"Hello, {# Name goes here: #}{{name}}!",
{'name':'Ned'}, "Hello, Ned!"
)
# and so do multi-line comments:
self.try_render(
"Hello, {# Name\ngoes\nhere: #}{{name}}!",
{'name':'Ned'}, "Hello, Ned!"
)
def test_if(self):
self.try_render(
"Hi, {% if ned %}NED{% endif %}{% if ben %}BEN{% endif %}!",
{'ned': 1, 'ben': 0},
"Hi, NED!"
)
self.try_render(
"Hi, {% if ned %}NED{% endif %}{% if ben %}BEN{% endif %}!",
{'ned': 0, 'ben': 1},
"Hi, BEN!"
)
self.try_render(
"Hi, {% if ned %}NED{% if ben %}BEN{% endif %}{% endif %}!",
{'ned': 0, 'ben': 0},
"Hi, !"
)
self.try_render(
"Hi, {% if ned %}NED{% if ben %}BEN{% endif %}{% endif %}!",
{'ned': 1, 'ben': 0},
"Hi, NED!"
)
self.try_render(
"Hi, {% if ned %}NED{% if ben %}BEN{% endif %}{% endif %}!",
{'ned': 1, 'ben': 1},
"Hi, NEDBEN!"
)
def test_complex_if(self):
class Complex(AnyOldObject):
"""A class to try out complex data access."""
def getit(self):
"""Return it."""
return self.it
obj = Complex(it={'x':"Hello", 'y': 0})
self.try_render(
"@"
"{% if obj.getit.x %}X{% endif %}"
"{% if obj.getit.y %}Y{% endif %}"
"{% if obj.getit.y|str %}S{% endif %}"
"!",
{ 'obj': obj, 'str': str },
"@XS!"
)
def test_loop_if(self):
self.try_render(
"@{% for n in nums %}{% if n %}Z{% endif %}{{n}}{% endfor %}!",
{'nums': [0,1,2]},
"@0Z1Z2!"
)
self.try_render(
"X{%if nums%}@{% for n in nums %}{{n}}{% endfor %}{%endif%}!",
{'nums': [0,1,2]},
"X@012!"
)
self.try_render(
"X{%if nums%}@{% for n in nums %}{{n}}{% endfor %}{%endif%}!",
{'nums': []},
"X!"
)
def test_nested_loops(self):
self.try_render(
"@"
"{% for n in nums %}"
"{% for a in abc %}{{a}}{{n}}{% endfor %}"
"{% endfor %}"
"!",
{'nums': [0,1,2], 'abc': ['a', 'b', 'c']},
"@a0b0c0a1b1c1a2b2c2!"
)
def test_whitespace_handling(self):
self.try_render(
"@{% for n in nums %}\n"
" {% for a in abc %}{{a}}{{n}}{% endfor %}\n"
"{% endfor %}!\n",
{'nums': [0, 1, 2], 'abc': ['a', 'b', 'c']},
"@\n a0b0c0\n\n a1b1c1\n\n a2b2c2\n!\n"
)
self.try_render(
"@{% for n in nums -%}\n"
" {% for a in abc -%}\n"
" {# this disappears completely -#}\n"
" {{a -}}\n"
" {{n -}}\n"
" {% endfor %}\n"
"{% endfor %}!\n",
{'nums': [0, 1, 2], 'abc': ['a', 'b', 'c']},
"@a0b0c0\na1b1c1\na2b2c2\n!\n"
)
def test_non_ascii(self):
self.try_render(
u"{{where}} ollǝɥ",
{ 'where': u'ǝɹǝɥʇ' },
u"ǝɹǝɥʇ ollǝɥ"
)
def test_exception_during_evaluation(self):
# TypeError: Couldn't evaluate {{ foo.bar.baz }}:
msg = "Couldn't evaluate None.bar"
with self.assertRaisesRegex(TempliteValueError, msg):
self.try_render(
"Hey {{foo.bar.baz}} there", {'foo': None}, "Hey ??? there"
)
def test_bad_names(self):
with self.assertSynErr("Not a valid name: 'var%&!@'"):
self.try_render("Wat: {{ var%&!@ }}")
with self.assertSynErr("Not a valid name: 'filter%&!@'"):
self.try_render("Wat: {{ foo|filter%&!@ }}")
with self.assertSynErr("Not a valid name: '@'"):
self.try_render("Wat: {% for @ in x %}{% endfor %}")
def test_bogus_tag_syntax(self):
with self.assertSynErr("Don't understand tag: 'bogus'"):
self.try_render("Huh: {% bogus %}!!{% endbogus %}??")
def test_malformed_if(self):
with self.assertSynErr("Don't understand if: '{% if %}'"):
self.try_render("Buh? {% if %}hi!{% endif %}")
with self.assertSynErr("Don't understand if: '{% if this or that %}'"):
self.try_render("Buh? {% if this or that %}hi!{% endif %}")
def test_malformed_for(self):
with self.assertSynErr("Don't understand for: '{% for %}'"):
self.try_render("Weird: {% for %}loop{% endfor %}")
with self.assertSynErr("Don't understand for: '{% for x from y %}'"):
self.try_render("Weird: {% for x from y %}loop{% endfor %}")
with self.assertSynErr("Don't understand for: '{% for x, y in z %}'"):
self.try_render("Weird: {% for x, y in z %}loop{% endfor %}")
def test_bad_nesting(self):
with self.assertSynErr("Unmatched action tag: 'if'"):
self.try_render("{% if x %}X")
with self.assertSynErr("Mismatched end tag: 'for'"):
self.try_render("{% if x %}X{% endfor %}")
with self.assertSynErr("Too many ends: '{% endif %}'"):
self.try_render("{% if x %}{% endif %}{% endif %}")
def test_malformed_end(self):
with self.assertSynErr("Don't understand end: '{% end if %}'"):
self.try_render("{% if x %}X{% end if %}")
with self.assertSynErr("Don't understand end: '{% endif now %}'"):
self.try_render("{% if x %}X{% endif now %}")
| apache-2.0 | 1,905,987,912,981,123,000 | 33.670886 | 79 | 0.478094 | false |
RyanSkraba/beam | sdks/python/apache_beam/coders/typecoders.py | 1 | 8078 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Type coders registration.
This module contains functionality to define and use coders for custom classes.
Let's say we have a class Xyz and we are processing a PCollection with elements
of type Xyz. If we do not register a coder for Xyz, a default pickle-based
fallback coder will be used. This can be undesirable for two reasons. First, we
may want a faster coder or a more space efficient one. Second, the pickle-based
coder is not deterministic in the sense that objects like dictionaries or sets
are not guaranteed to be encoded in the same way every time (elements are not
really ordered).
Two (sometimes three) steps are needed to define and use a custom coder:
- define the coder class
- associate the code with the class (a.k.a. coder registration)
- typehint DoFns or transforms with the new class or composite types using
the class.
A coder class is defined by subclassing from CoderBase and defining the
encode_to_bytes and decode_from_bytes methods. The framework uses duck-typing
for coders so it is not strictly required to subclass from CoderBase as long as
the encode/decode methods are defined.
Registering a coder class is made with a register_coder() call::
from apache_beam import coders
...
coders.registry.register_coder(Xyz, XyzCoder)
Additionally, DoFns and PTransforms may need type hints. This is not always
necessary since there is functionality to infer the return types of DoFns by
analyzing the code. For instance, for the function below the return type of
'Xyz' will be inferred::
def MakeXyzs(v):
return Xyz(v)
If Xyz is inferred then its coder will be used whenever the framework needs to
serialize data (e.g., writing to the shuffler subsystem responsible for group by
key operations). If a typehint is needed it can be specified by decorating the
DoFns or using with_input_types/with_output_types methods on PTransforms. For
example, the above function can be decorated::
@with_output_types(Xyz)
def MakeXyzs(v):
return complex_operation_returning_Xyz(v)
See apache_beam.typehints.decorators module for more details.
"""
from __future__ import absolute_import
from builtins import object
from typing import Any
from typing import Dict
from typing import Iterable
from typing import List
from typing import Type
from past.builtins import unicode
from apache_beam.coders import coders
from apache_beam.typehints import typehints
__all__ = ['registry']
class CoderRegistry(object):
"""A coder registry for typehint/coder associations."""
def __init__(self, fallback_coder=None):
self._coders = {} # type: Dict[Any, Type[coders.Coder]]
self.custom_types = [] # type: List[Any]
self.register_standard_coders(fallback_coder)
def register_standard_coders(self, fallback_coder):
"""Register coders for all basic and composite types."""
self._register_coder_internal(int, coders.VarIntCoder)
self._register_coder_internal(float, coders.FloatCoder)
self._register_coder_internal(bytes, coders.BytesCoder)
self._register_coder_internal(bool, coders.BooleanCoder)
self._register_coder_internal(unicode, coders.StrUtf8Coder)
self._register_coder_internal(typehints.TupleConstraint, coders.TupleCoder)
# Default fallback coders applied in that order until the first matching
# coder found.
default_fallback_coders = [coders.ProtoCoder, coders.FastPrimitivesCoder]
self._fallback_coder = fallback_coder or FirstOf(default_fallback_coders)
def _register_coder_internal(self, typehint_type, typehint_coder_class):
# type: (Any, Type[coders.Coder]) -> None
self._coders[typehint_type] = typehint_coder_class
def register_coder(self, typehint_type, typehint_coder_class):
# type: (Any, Type[coders.Coder]) -> None
if not isinstance(typehint_coder_class, type):
raise TypeError('Coder registration requires a coder class object. '
'Received %r instead.' % typehint_coder_class)
if typehint_type not in self.custom_types:
self.custom_types.append(typehint_type)
self._register_coder_internal(typehint_type, typehint_coder_class)
def get_coder(self, typehint):
# type: (Any) -> coders.Coder
coder = self._coders.get(
typehint.__class__ if isinstance(typehint, typehints.TypeConstraint)
else typehint, None)
if isinstance(typehint, typehints.TypeConstraint) and coder is not None:
return coder.from_type_hint(typehint, self)
if coder is None:
# We use the fallback coder when there is no coder registered for a
# typehint. For example a user defined class with no coder specified.
if not hasattr(self, '_fallback_coder'):
raise RuntimeError(
'Coder registry has no fallback coder. This can happen if the '
'fast_coders module could not be imported.')
if isinstance(typehint, (typehints.IterableTypeConstraint,
typehints.ListConstraint)):
return coders.IterableCoder.from_type_hint(typehint, self)
elif typehint is None:
# In some old code, None is used for Any.
# TODO(robertwb): Clean this up.
pass
elif typehint is object or typehint == typehints.Any:
# We explicitly want the fallback coder.
pass
elif isinstance(typehint, typehints.TypeVariable):
# TODO(robertwb): Clean this up when type inference is fully enabled.
pass
else:
# TODO(robertwb): Re-enable this warning when it's actionable.
# warnings.warn('Using fallback coder for typehint: %r.' % typehint)
pass
coder = self._fallback_coder
return coder.from_type_hint(typehint, self)
def get_custom_type_coder_tuples(self, types):
"""Returns type/coder tuples for all custom types passed in."""
return [(t, self._coders[t]) for t in types if t in self.custom_types]
def verify_deterministic(self, key_coder, op_name, silent=True):
if not key_coder.is_deterministic():
error_msg = ('The key coder "%s" for %s '
'is not deterministic. This may result in incorrect '
'pipeline output. This can be fixed by adding a type '
'hint to the operation preceding the GroupByKey step, '
'and for custom key classes, by writing a '
'deterministic custom Coder. Please see the '
'documentation for more details.' % (key_coder, op_name))
return key_coder.as_deterministic_coder(op_name, error_msg)
else:
return key_coder
class FirstOf(object):
"""For internal use only; no backwards-compatibility guarantees.
A class used to get the first matching coder from a list of coders."""
def __init__(self, coders):
# type: (Iterable[Type[coders.Coder]]) -> None
self._coders = coders
def from_type_hint(self, typehint, registry):
messages = []
for coder in self._coders:
try:
return coder.from_type_hint(typehint, self)
except Exception as e:
msg = ('%s could not provide a Coder for type %s: %s' %
(coder, typehint, e))
messages.append(msg)
raise ValueError('Cannot provide coder for %s: %s' %
(typehint, ';'.join(messages)))
registry = CoderRegistry()
| apache-2.0 | -3,578,803,315,472,674,000 | 41.072917 | 80 | 0.709953 | false |
sunshinelover/chanlun | vn.trader/ctaAlgo/uiChanlunWidget.py | 1 | 68647 | # encoding: UTF-8
"""
缠论模块相关的GUI控制组件
"""
from vtGateway import VtSubscribeReq
from uiBasicWidget import QtGui, QtCore, BasicCell,BasicMonitor,TradingWidget
from eventEngine import *
from ctaBase import *
import pyqtgraph as pg
import numpy as np
import pymongo
from pymongo.errors import *
from datetime import datetime, timedelta
from ctaHistoryData import HistoryDataEngine
import time
import types
import pandas as pd
########################################################################
class MyStringAxis(pg.AxisItem):
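"""Axis item that renders integer x positions as time strings.

The candlesticks are plotted against consecutive integers and this axis maps
each integer back to the corresponding bar time. A minimal usage sketch (the
dict values are only an example):

    axis = MyStringAxis(dict(enumerate(['09:00', '09:01'])), orientation='bottom')
    pw = pg.PlotWidget(axisItems={'bottom': axis})
"""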
def __init__(self, xdict, *args, **kwargs):
pg.AxisItem.__init__(self, *args, **kwargs)
self.x_values = np.asarray(xdict.keys())
self.x_strings = xdict.values()
def tickStrings(self, values, scale, spacing):
strings = []
for v in values:
# vs is the original tick value
vs = v * scale
# if we have vs in our values, show the string
# otherwise show nothing
if vs in self.x_values:
# Find the string with x_values closest to vs
vstr = self.x_strings[np.abs(self.x_values - vs).argmin()]
else:
vstr = ""
strings.append(vstr)
return strings
########################################################################
class ChanlunEngineManager(QtGui.QWidget):
"""chanlun引擎管理组件"""
signal = QtCore.pyqtSignal(type(Event()))
# ----------------------------------------------------------------------
def __init__(self, chanlunEngine, eventEngine, mainEngine, parent=None):
"""Constructor"""
super(ChanlunEngineManager, self).__init__(parent)
self.chanlunEngine = chanlunEngine
self.eventEngine = eventEngine
self.mainEngine = mainEngine
self.penLoaded = False
self.segmentLoaded = False
self.tickLoaded = False
self.zhongShuLoaded = False
self.instrumentid = ''
self.initUi()
self.registerEvent()
# 记录日志
self.chanlunEngine.writeChanlunLog(u'缠论引擎启动成功')
# ----------------------------------------------------------------------
def initUi(self):
"""初始化界面"""
self.setWindowTitle(u'缠论策略')
# 期货代码输入框
self.codeEdit = QtGui.QLineEdit()
self.codeEdit.setPlaceholderText(u'在此输入期货代码')
self.codeEdit.setMaximumWidth(200)
self.data = pd.DataFrame() #画图所需数据, 重要
self.fenX = [] #分笔分段所需X轴坐标
self.fenY = [] #分笔分段所需Y轴坐标
self.zhongshuPos = [] #中枢的位置
self.zhongShuType = [] #中枢的方向
# 金融图
self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data)
self.TickW = None
# MongoDB数据库相关
self.__mongoConnected = False
self.__mongoConnection = None
# 调用函数
self.__connectMongo()
# 按钮
penButton = QtGui.QPushButton(u'分笔')
segmentButton = QtGui.QPushButton(u'分段')
zhongshuButton = QtGui.QPushButton(u'走势中枢')
shopButton = QtGui.QPushButton(u'买卖点')
restoreButton = QtGui.QPushButton(u'还原')
penButton.clicked.connect(self.pen)
segmentButton.clicked.connect(self.segment)
zhongshuButton.clicked.connect(self.zhongShu)
shopButton.clicked.connect(self.shop)
restoreButton.clicked.connect(self.restore)
# Chanlun组件的日志监控
self.chanlunLogMonitor = QtGui.QTextEdit()
self.chanlunLogMonitor.setReadOnly(True)
self.chanlunLogMonitor.setMaximumHeight(180)
# 设置布局
self.hbox2 = QtGui.QHBoxLayout()
self.hbox2.addWidget(self.codeEdit)
self.hbox2.addWidget(penButton)
self.hbox2.addWidget(segmentButton)
self.hbox2.addWidget(zhongshuButton)
self.hbox2.addWidget(shopButton)
self.hbox2.addWidget(restoreButton)
self.hbox2.addStretch()
tickButton = QtGui.QPushButton(u'Tick')
oneMButton = QtGui.QPushButton(u"1分")
fiveMButton = QtGui.QPushButton(u'5分')
fifteenMButton = QtGui.QPushButton(u'15分')
thirtyMButton = QtGui.QPushButton(u'30分')
sixtyMButton = QtGui.QPushButton(u'60分')
dayButton = QtGui.QPushButton(u'日')
weekButton = QtGui.QPushButton(u'周')
monthButton = QtGui.QPushButton(u'月')
oneMButton.checked = True
self.vbox1 = QtGui.QVBoxLayout()
tickButton.clicked.connect(self.openTick)
oneMButton.clicked.connect(self.oneM)
fiveMButton.clicked.connect(self.fiveM)
fifteenMButton.clicked.connect(self.fifteenM)
thirtyMButton.clicked.connect(self.thirtyM)
sixtyMButton.clicked.connect(self.sixtyM)
dayButton.clicked.connect(self.daily)
weekButton.clicked.connect(self.weekly)
monthButton.clicked.connect(self.monthly)
self.vbox2 = QtGui.QVBoxLayout()
self.vbox1.addWidget(self.PriceW)
self.vbox2.addWidget(tickButton)
self.vbox2.addWidget(oneMButton)
self.vbox2.addWidget(fiveMButton)
self.vbox2.addWidget(fifteenMButton)
self.vbox2.addWidget(thirtyMButton)
self.vbox2.addWidget(sixtyMButton)
self.vbox2.addWidget(dayButton)
self.vbox2.addWidget(weekButton)
self.vbox2.addWidget(monthButton)
self.vbox2.addStretch()
self.hbox3 = QtGui.QHBoxLayout()
self.hbox3.addStretch()
self.hbox3.addLayout(self.vbox1)
self.hbox3.addLayout(self.vbox2)
self.vbox = QtGui.QVBoxLayout()
self.vbox.addLayout(self.hbox2)
self.vbox.addLayout(self.hbox3)
self.vbox.addWidget(self.chanlunLogMonitor)
self.setLayout(self.vbox)
self.codeEdit.returnPressed.connect(self.updateSymbol)
#-----------------------------------------------------------------------
#从通联数据端获取历史数据
def downloadData(self, symbol, unit):
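"""Download bars from the DataYes (通联) client and return them as a DataFrame.

unit is an int for minute bars (1/5/15/30/60) or one of the strings
'daily' / 'weekly' / 'monthly'. The frame is indexed by bar time and has the
columns ['num', 'open', 'close', 'low', 'high']. Usage sketch (the contract
code below is only an example):

    df = self.downloadData('rb1705', 5)    # 5-minute bars of a hypothetical contract
"""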
listBar = [] #K线数据
num = 0
#从通联客户端获取K线数据
historyDataEngine = HistoryDataEngine()
# unit为int型获取分钟数据,为String类型获取日周月K线数据
if type(unit) is types.IntType:
#从通联数据端获取当日分钟数据并存入数据库
historyDataEngine.downloadFuturesIntradayBar(symbol, unit)
# 从数据库获取前几天的分钟数据
cx = self.getDbData(symbol, unit)
if cx:
for data in cx:
barOpen = data['open']
barClose = data['close']
barLow = data['low']
barHigh = data['high']
barTime = data['datetime']
listBar.append((num, barTime, barOpen, barClose, barLow, barHigh))
num += 1
elif type(unit) is types.StringType:
data = historyDataEngine.downloadFuturesBar(symbol, unit)
if data:
for d in data:
barOpen = d.get('openPrice', 0)
barClose = d.get('closePrice', 0)
barLow = d.get('lowestPrice', 0)
barHigh = d.get('highestPrice', 0)
if unit == "daily":
barTime = d.get('tradeDate', '').replace('-', '')
else:
barTime = d.get('endDate', '').replace('-', '')
listBar.append((num, barTime, barOpen, barClose, barLow, barHigh))
num += 1
if unit == "monthly" or unit == "weekly":
listBar.reverse()
else:
print "参数格式错误"
return
#将List数据转换成dataFormat类型,方便处理
df = pd.DataFrame(listBar, columns=['num', 'time', 'open', 'close', 'low', 'high'])
df.index = df['time'].tolist()
df = df.drop('time', 1)
return df
#-----------------------------------------------------------------------
#从数据库获取前两天的分钟数据
def getDbData(self, symbol, unit):
#周六周日不交易,无分钟数据
# 给数据库命名
dbname = ''
days = 7
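# NOTE: with days fixed at 7, the days == 2 weekend-adjustment branch below is never taken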
if unit == 1:
dbname = MINUTE_DB_NAME
elif unit == 5:
dbname = MINUTE5_DB_NAME
elif unit == 15:
dbname = MINUTE15_DB_NAME
elif unit == 30:
dbname = MINUTE30_DB_NAME
elif unit == 60:
dbname = MINUTE60_DB_NAME
weekday = datetime.now().weekday() # weekday() 返回的是0-6是星期一到星期日
if days == 2:
if weekday == 6:
aDay = timedelta(days=3)
elif weekday == 0 or weekday == 1:
aDay = timedelta(days=4)
else:
aDay = timedelta(days=2)
else:
aDay = timedelta(days=7)
startDate = (datetime.now() - aDay).strftime('%Y%m%d')
print startDate
if self.__mongoConnected:
collection = self.__mongoConnection[dbname][symbol]
cx = collection.find({'date': {'$gte': startDate}})
return cx
else:
return None
#----------------------------------------------------------------------------------
#"""合约变化"""
def updateSymbol(self):
# 读取组件数据
instrumentid = str(self.codeEdit.text())
self.chanlunEngine.writeChanlunLog(u'查询合约%s' % (instrumentid))
# 从通联数据客户端获取当日分钟数据
self.data = self.downloadData(instrumentid, 1)
if self.data.empty:
self.chanlunEngine.writeChanlunLog(u'合约%s 不存在' % (instrumentid))
else:
if self.tickLoaded:
self.vbox1.removeWidget(self.TickW)
self.TickW.deleteLater()
else:
self.vbox1.removeWidget(self.PriceW)
self.PriceW.deleteLater()
self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data)
self.vbox1.addWidget(self.PriceW)
# 画K线图
self.PriceW.plotHistorticData()
self.chanlunEngine.writeChanlunLog(u'打开合约%s 1分钟K线图' % (instrumentid))
self.penLoaded = False
self.segmentLoaded = False
self.tickLoaded = False
self.zhongShuLoaded = False
# # 订阅合约[仿照ctaEngine.py写的]
# # 先取消订阅之前的合约,再订阅最新输入的合约
# contract = self.mainEngine.getContract(self.instrumentid)
# if contract:
# req = VtSubscribeReq()
# req.symbol = contract.symbol
# self.mainEngine.unsubscribe(req, contract.gatewayName)
#
# contract = self.mainEngine.getContract(instrumentid)
# if contract:
# req = VtSubscribeReq()
# req.symbol = contract.symbol
# self.mainEngine.subscribe(req, contract.gatewayName)
# else:
# self.chanlunEngine.writeChanlunLog(u'交易合约%s无法找到' % (instrumentid))
#
# # 重新注册事件监听
# self.eventEngine.unregister(EVENT_TICK + self.instrumentid, self.signal.emit)
# self.eventEngine.register(EVENT_TICK + instrumentid, self.signal.emit)
# 更新目前的合约
self.instrumentid = instrumentid
def oneM(self):
"打开1分钟K线图"
self.chanlunEngine.writeChanlunLog(u'打开合约%s 1分钟K线图' % (self.instrumentid))
# 从通联数据客户端获取数据
self.data = self.downloadData(self.instrumentid, 1)
if self.tickLoaded:
self.vbox1.removeWidget(self.TickW)
self.TickW.deleteLater()
else:
self.vbox1.removeWidget(self.PriceW)
self.PriceW.deleteLater()
self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data)
self.vbox1.addWidget(self.PriceW)
# 画K线图
self.PriceW.plotHistorticData()
self.tickLoaded = False
self.penLoaded = False
self.segmentLoaded = False
self.zhongShuLoaded = False
# ----------------------------------------------------------------------
def fiveM(self):
"打开5分钟K线图"
self.chanlunEngine.writeChanlunLog(u'打开合约%s 5分钟K线图' % (self.instrumentid))
# 从通联数据客户端获取数据
self.data = self.downloadData(self.instrumentid, 5)
if self.tickLoaded:
self.vbox1.removeWidget(self.TickW)
self.TickW.deleteLater()
else:
self.vbox1.removeWidget(self.PriceW)
self.PriceW.deleteLater()
self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data)
self.vbox1.addWidget(self.PriceW)
# 画K线图
self.PriceW.plotHistorticData()
self.tickLoaded = False
self.penLoaded = False
self.segmentLoaded = False
self.zhongShuLoaded = False
# ----------------------------------------------------------------------
def fifteenM(self):
"打开15分钟K线图"
self.chanlunEngine.writeChanlunLog(u'打开合约%s 15分钟K线图' % (self.instrumentid))
# 从通联数据客户端获取数据
self.data = self.downloadData(self.instrumentid, 15)
if self.tickLoaded:
self.vbox1.removeWidget(self.TickW)
self.TickW.deleteLater()
else:
self.vbox1.removeWidget(self.PriceW)
self.PriceW.deleteLater()
self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data)
self.vbox1.addWidget(self.PriceW)
# 画K线图
self.PriceW.plotHistorticData()
self.tickLoaded = False
self.penLoaded = False
self.segmentLoaded = False
self.zhongShuLoaded = False
# ----------------------------------------------------------------------
def thirtyM(self):
"打开30分钟K线图"
self.chanlunEngine.writeChanlunLog(u'打开合约%s 30分钟K线图' % (self.instrumentid))
# 从通联数据客户端获取数据
self.data = self.downloadData(self.instrumentid, 30)
if self.tickLoaded:
self.vbox1.removeWidget(self.TickW)
self.TickW.deleteLater()
else:
self.vbox1.removeWidget(self.PriceW)
self.PriceW.deleteLater()
self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data)
self.vbox1.addWidget(self.PriceW)
# 画K线图
self.PriceW.plotHistorticData()
self.tickLoaded = False
self.penLoaded = False
self.segmentLoaded = False
self.zhongShuLoaded = False
# ----------------------------------------------------------------------
def sixtyM(self):
"打开60分钟K线图"
self.chanlunEngine.writeChanlunLog(u'打开合约%s 60分钟K线图' % (self.instrumentid))
# 从通联数据客户端获取数据
self.data = self.downloadData(self.instrumentid, 60)
if self.tickLoaded:
self.vbox1.removeWidget(self.TickW)
self.TickW.deleteLater()
else:
self.vbox1.removeWidget(self.PriceW)
self.PriceW.deleteLater()
self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data)
self.vbox1.addWidget(self.PriceW)
# 画K线图
self.PriceW.plotHistorticData()
self.tickLoaded = False
self.penLoaded = False
self.segmentLoaded = False
self.zhongShuLoaded = False
# ----------------------------------------------------------------------
def daily(self):
"""打开日K线图"""
self.chanlunEngine.writeChanlunLog(u'打开合约%s 日K线图' % (self.instrumentid))
# 从通联数据客户端获取数据
self.data = self.downloadData(self.instrumentid, "daily")
if self.tickLoaded:
self.vbox1.removeWidget(self.TickW)
self.TickW.deleteLater()
else:
self.vbox1.removeWidget(self.PriceW)
self.PriceW.deleteLater()
self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data)
self.vbox1.addWidget(self.PriceW)
# 画K线图
self.PriceW.plotHistorticData()
self.tickLoaded = False
self.penLoaded = False
self.segmentLoaded = False
self.zhongShuLoaded = False
# ----------------------------------------------------------------------
def weekly(self):
"""打开周K线图"""
self.chanlunEngine.writeChanlunLog(u'打开合约%s 周K线图' % (self.instrumentid))
# 从通联数据客户端获取数据
self.data = self.downloadData(self.instrumentid, "weekly")
if self.tickLoaded:
self.vbox1.removeWidget(self.TickW)
self.TickW.deleteLater()
else:
self.vbox1.removeWidget(self.PriceW)
self.PriceW.deleteLater()
self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data)
self.vbox1.addWidget(self.PriceW)
# 画K线图
self.PriceW.plotHistorticData()
self.tickLoaded = False
self.penLoaded = False
self.segmentLoaded = False
self.zhongShuLoaded = False
def monthly(self):
"""打开月K线图"""
self.chanlunEngine.writeChanlunLog(u'打开合约%s 月K线图' % (self.instrumentid))
# 从通联数据客户端获取数据并画图
self.data = self.downloadData(self.instrumentid, "monthly")
if self.tickLoaded:
self.vbox1.removeWidget(self.TickW)
self.TickW.deleteLater()
else:
self.vbox1.removeWidget(self.PriceW)
self.PriceW.deleteLater()
self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data)
self.vbox1.addWidget(self.PriceW)
# 画K线图
self.PriceW.plotHistorticData()
self.tickLoaded = False
self.penLoaded = False
self.segmentLoaded = False
self.zhongShuLoaded = False
# ----------------------------------------------------------------------
def openTick(self):
"""切换成tick图"""
self.chanlunEngine.writeChanlunLog(u'打开tick图')
self.vbox1.removeWidget(self.PriceW)
self.PriceW.deleteLater()
self.TickW = TickWidget(self.eventEngine, self.chanlunEngine)
self.vbox1.addWidget(self.TickW)
self.tickLoaded = True
self.penLoaded = False
self.segmentLoaded = False
self.zhongShuLoaded = False
# ----------------------------------------------------------------------
def restore(self):
"""还原初始k线状态"""
self.chanlunEngine.writeChanlunLog(u'还原加载成功')
if self.tickLoaded:
self.vbox1.removeWidget(self.TickW)
self.TickW.deleteLater()
else:
self.vbox1.removeWidget(self.PriceW)
self.PriceW.deleteLater()
self.data = self.downloadData(self.instrumentid, 1)
self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data, self)
self.vbox1.addWidget(self.PriceW)
# 画K线图
self.PriceW.plotHistorticData()
self.chanlunEngine.writeChanlunLog(u'还原为1分钟k线图')
self.penLoaded = False
self.segmentLoaded = False
self.tickLoaded = False
# ----------------------------------------------------------------------
def pen(self):
"""加载分笔"""
# 先合并K线数据,记录新建PriceW之前合并K线的数据
if not self.penLoaded:
after_fenxing = self.judgeInclude() #判断self.data中K线数据的包含关系
# 清空画布时先remove已有的Widget再新建
self.vbox1.removeWidget(self.PriceW)
self.PriceW.deleteLater()
self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, after_fenxing)
self.vbox1.addWidget(self.PriceW)
#使用合并K线的数据重新画K线图
self.plotAfterFenXing(after_fenxing)
# 找出顶和底
fenxing_data, fenxing_type = self.findTopAndLow(after_fenxing)
arrayFenxingdata = np.array(fenxing_data)
arrayTypedata = np.array(fenxing_type)
self.fenY = []
self.fenX = [m[0] for m in arrayFenxingdata]
fenbiY1 = [m[4] for m in arrayFenxingdata] # 顶分型标志最高价
fenbiY2 = [m[3] for m in arrayFenxingdata] # 底分型标志最低价
for i in xrange(len(self.fenX)):
if arrayTypedata[i] == 1:
self.fenY.append(fenbiY1[i])
else:
self.fenY.append(fenbiY2[i])
if not self.penLoaded:
if self.fenX:
self.fenX.append(self.fenX[-1])
self.fenY.append(self.fenY[-1])
print "self.fenX: ", self.fenX
print "self.fenY: ", self.fenY
self.fenbi(self.fenX, self.fenY)
self.fenX.pop()
self.fenY.pop()
self.chanlunEngine.writeChanlunLog(u'分笔加载成功')
self.penLoaded = True
# ----------------------------------------------------------------------
def segment(self):
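"""Build line segments from the strokes found by pen().

A segment starts once three consecutive strokes overlap; it ends when the
stroke end points form a fractal against the segment's direction, and the
next segment continues from that break point.
"""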
if not self.penLoaded:
self.pen() #先分笔才能分段
segmentX = [] #分段点X轴值
segmentY = [] #分段点Y轴值
temp_type = 0 #标志线段方向,向上为1,向下为-1, 未判断前三笔是否重合为0
i = 0
while i < len(self.fenX) - 4:
if temp_type == 0:
if self.fenY[i] > self.fenY[i+1] and self.fenY[i] > self.fenY[i+3]:
temp_type = -1 #向下线段,三笔重合
segmentX.append(self.fenX[i])
segmentY.append(self.fenY[i])
elif self.fenY[i] < self.fenY[i+1] and self.fenY[i] < self.fenY[i+3]:
temp_type = 1 #向上线段,三笔重合
segmentX.append(self.fenX[i])
segmentY.append(self.fenY[i])
else:
temp_type = 0
i += 1
continue
if temp_type == 1: #向上线段
j = i+1
high = [] # 记录顶
low = [] # 记录低
while j < len(self.fenX) - 1: #记录顶底
high.append(self.fenY[j])
low.append(self.fenY[j+1])
j += 2
if self.fenY[i+4] < self.fenY[i+1]: #向上线段被向下笔破坏
j = 0
while j < len(high)-2:
# 顶底出现顶分型,向上线段结束
if high[j+1] > high[j] and high[j+1] > high[j+2]:
num = i + 2 * j + 3 #线段结束点位置
segmentX.append(self.fenX[num])
segmentY.append(self.fenY[num])
i = num
temp_type = -1 #向上线段一定由向下线段结束
break
j += 1
if j == len(high)-2:
break
else: #向上线段未被向下笔破坏
j = 1
while j < len(high)-2:
# 顶底出现底分型,向上线段结束
if low[j + 1] < low[j] and low[j + 1] < low[j + 2]:
num = i + 2 * j + 1 # 线段结束点位置
segmentX.append(self.fenX[num])
segmentY.append(self.fenY[num])
i = num
temp_type = -1 # 向上线段一定由向下线段结束
break
j += 1
if j == len(high)-2:
break
elif temp_type == -1: # 向下线段
j = i + 1
high = [] # 记录顶
low = [] # 记录低
while j < len(self.fenX) - 1: # 记录顶底
high.append(self.fenY[j + 1])
low.append(self.fenY[j])
j += 2
if self.fenY[i + 4] > self.fenY[i + 1]: # 向下线段被向上笔破坏
j = 0
while j < len(high) - 2:
# 顶底出现底分型,向下线段结束
if low[j + 1] < low[j] and low[j + 1] < low[j + 2]:
num = i + 2 * j + 3 # 线段结束点位置
segmentX.append(self.fenX[num])
segmentY.append(self.fenY[num])
i = num
temp_type = 1 # 向下线段一定由向上线段结束
break
j += 1
if j == len(high) - 2:
break
else: # 向下线段未被向上笔破坏
j = 1
while j < len(high) - 2:
# 顶底出现顶分型,向下线段结束
if high[j + 1] > high[j] and high[j + 1] > high[j + 2]:
num = i + 2 * j + 1 # 线段结束点位置
segmentX.append(self.fenX[num])
segmentY.append(self.fenY[num])
i = num
temp_type = 1 # 向下线段一定由向上线段结束
break
j += 1
if j == len(high) - 2:
break
print "segmentX: ", segmentX
print "segmentY: ", segmentY
if not self.segmentLoaded:
if len(segmentX) > 1:
segmentX.append(segmentX[-1])
segmentY.append(segmentY[-1])
segmentX = [int(x) for x in segmentX]
segmentY = [int(y) for y in segmentY]
self.fenduan(segmentX, segmentY)
self.chanlunEngine.writeChanlunLog(u'分段加载成功')
self.segmentLoaded = True
# ----------------------------------------------------------------------
def updateChanlunLog(self, event):
"""更新缠论相关日志"""
log = event.dict_['data']
# print type(log)
if(log.logTime):
content = '\t'.join([log.logTime, log.logContent])
self.chanlunLogMonitor.append(content)
else:
print 0
#-----------------------------------------------------------------------
def zhongShu(self):
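"""Locate and draw the pivots (走势中枢).

A pivot is the price range shared by at least three consecutive strokes:
temp_low tracks the highest stroke low, temp_high the lowest stroke high, and
the rectangle from minX to maxX over [temp_low, temp_high] is drawn once the
overlap can no longer be extended.
"""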
if not self.penLoaded:
self.pen() # 先分笔才能画走势中枢
# temp_type = 0 # 标志中枢方向,向上为1,向下为-1
i = 0
temp_high, temp_low = 0, 0
minX, maxX = 0, 0  # x positions where the current pivot starts and ends
self.zhongshuPos = [] # 记录所有的中枢开始段和结束段的位置
self.zhongShuType = [] #记录所有中枢的方向
while i < len(self.fenX) - 4:
if (self.fenY[i] > self.fenY[i + 1] and self.fenY[i + 1] < self.fenY[i + 4]): #判断进入段方向
temp_low = max(self.fenY[i + 1], self.fenY[i + 3])
temp_high = min(self.fenY[i + 2], self.fenY[i + 4]) #记录中枢内顶的最小值与底的最大值
minX = self.fenX[i+1]
self.zhongshuPos.append(i)
self.zhongShuType.append(-1)
j = i
while i < len(self.fenX) - 4:
j = i
if self.fenY[i + 1] < self.fenY[i + 4] and self.fenY[i + 4] > temp_low and self.fenY[i + 3] < temp_high :
maxX = self.fenX[i+4]
if self.fenY[i + 3] > temp_low:
temp_low = self.fenY[i + 3]
if self.fenY[i + 4] < temp_high:
temp_high = self.fenY[i + 4]
i = i + 1
elif self.fenY[i + 1] > self.fenY[i + 4] and self.fenY[i + 4] < temp_high and self.fenY[i + 3] > temp_low :
maxX = self.fenX[i + 4]
if self.fenY[i + 3] < temp_high:
temp_high = self.fenY[i + 3]
if self.fenY[i + 4] > temp_low:
temp_low = self.fenY[i + 4]
i = i + 1
if j == i:
break
elif (self.fenY[i] < self.fenY[i + 1] and self.fenY[i + 1] > self.fenY[i + 4]):
temp_high = min(self.fenY[i + 1], self.fenY[i + 3])
temp_low = max(self.fenY[i + 2], self.fenY[i + 4])
minX = self.fenX[i + 1]
self.zhongshuPos.append(i)
self.zhongShuType.append(1)
j = i
while i < len(self.fenX) - 4:
j = i
if self.fenY[i + 1] > self.fenY[i + 4] and self.fenY[i + 4] < temp_high and self.fenY[i + 3] > temp_low:
maxX = self.fenX[i + 4]
if self.fenY[i + 3] < temp_high:
temp_high = self.fenY[i + 3]
if self.fenY[i + 4] > temp_low:
temp_low = self.fenY[i + 4]
i = i + 1
elif self.fenY[i + 1] < self.fenY[i + 4] and self.fenY[i + 4] > temp_low and self.fenY[i + 3] < temp_high:
maxX = self.fenX[i + 4]
if self.fenY[i + 3] > temp_low:
temp_low = self.fenY[i + 3]
if self.fenY[i + 4] < temp_high:
temp_high = self.fenY[i + 4]
i = i + 1
if i == j:
break
else:
i += 1
continue
# 画出当前判断出的中枢
if minX != 0 and maxX == 0:
maxX = self.fenX[i+4]
i = i + 1
self.zhongshuPos.append(i + 4)
else:
self.zhongshuPos.append(i + 3)
minY, maxY = temp_low, temp_high
print minX, minY, maxX, maxY
if int(maxY) > int(minY):
plotX = [minX, minX, maxX, maxX, minX]
plotY = [minY, maxY, maxY, minY, minY]
plotX = [int(x) for x in plotX]
plotY = [int(y) for y in plotY]
self.zhongshu(plotX, plotY)
i = i + 4
self.zhongShuLoaded = True
self.chanlunEngine.writeChanlunLog(u'走势中枢加载成功')
# ----------------------------------------------------------------------
def shop(self):
"""加载买卖点"""
if not self.zhongShuLoaded:
self.zhongShu()
i = 0
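# Divergence test: compare the projected area (price range * bar span) of the
# segment entering a pivot with that of the segment leaving it. A weaker exit
# segment marks the 1st buy/sell point, the following fractal the 2nd point,
# and the first fractal after the next pivot the 3rd point.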
while i < len(self.zhongShuType) - 1:
startPos, endPos = self.zhongshuPos[2*i], self.zhongshuPos[2*i + 1] # positions of the pivot's entry and exit segments
startY = self.fenY[startPos + 1] - self.fenY[startPos] # entry segment extent on the y axis
startX = self.fenX[startPos + 1] - self.fenX[startPos] # entry segment extent on the x axis
startK = abs(startY * startX) # projected area of the entry segment
endY = self.fenY[endPos + 1] - self.fenY[endPos] # exit segment extent on the y axis
endX = self.fenX[endPos + 1] - self.fenX[endPos] # exit segment extent on the x axis
endK = abs(endY * endX) # projected area of the exit segment
if endK < startK:
print startPos, endPos
if self.zhongShuType[i] == 1 and self.zhongShuType[i + 1] == -1:
# 一卖
self.sellpoint([self.fenX[endPos + 1]], [self.fenY[endPos + 1]], 1)
# 二卖,一卖后一个顶点
self.sellpoint([self.fenX[endPos + 3]], [self.fenY[endPos + 3]], 2)
# 三卖,一卖之后中枢结束段的第一个顶
i = i + 1
nextPos = self.zhongshuPos[2*i + 1] # 下一个中枢结束位置
if nextPos + 1 < len(self.fenY):
if self.fenY[nextPos + 1] > self.fenY[nextPos]:
self.sellpoint([self.fenX[nextPos + 1]], [self.fenY[nextPos + 1]], 3)
else:
self.sellpoint([self.fenX[nextPos]], [self.fenY[nextPos]], 3)
elif self.zhongShuType[i] == -1 and self.zhongShuType[i + 1] == 1:
# 一买
self.buypoint([self.fenX[endPos + 1]], [self.fenY[endPos + 1]], 1)
# 二买,一买后一个底点
self.buypoint([self.fenX[endPos + 3]], [self.fenY[endPos + 3]], 2)
# 三买,一买之后中枢结束段的第一个顶
i = i + 1
nextPos = self.zhongshuPos[2*i + 1] # 下一个中枢结束位置
if nextPos + 1 < len(self.fenY):
if self.fenY[nextPos + 1] < self.fenY[nextPos]:
self.buypoint([self.fenX[nextPos + 1]], [self.fenY[nextPos + 1]], 3)
else:
self.buypoint([self.fenX[nextPos]], [self.fenY[nextPos]], 3)
i = i + 1 # 接着判断之后的中枢是否出现背驰
self.chanlunEngine.writeChanlunLog(u'买卖点加载成功')
# ----------------------------------------------------------------------
def fenbi(self, fenbix, fenbiy):
self.PriceW.pw2.plotItem.plot(x=fenbix, y=fenbiy, pen=QtGui.QPen(QtGui.QColor(255, 236, 139)))
def fenduan(self, fenduanx, fenduany):
self.PriceW.pw2.plot(x=fenduanx, y=fenduany, symbol='o', pen=QtGui.QPen(QtGui.QColor(131, 111, 255)))
def zhongshu(self, zhongshux, zhongshuy):
self.PriceW.pw2.plot(x=zhongshux, y=zhongshuy, pen=QtGui.QPen(QtGui.QColor(255,165,0)))
def buypoint(self, buyx, buyy, point):
if point == 1:
self.PriceW.pw2.plot(x=buyx, y=buyy, symbolSize=18, symbolBrush=(255,0,0), symbolPen=(255,0,0), symbol='star')
elif point == 2:
self.PriceW.pw2.plot(x=buyx, y=buyy, symbolSize=18, symbolBrush=(238,130,238), symbolPen=(238,130,238),symbol='star')
elif point == 3:
self.PriceW.pw2.plot(x=buyx, y=buyy, symbolSize=18, symbolBrush=(138,43,226), symbolPen=(138,43,226),symbol='star')
def sellpoint(self, sellx, selly, point):
if point == 1:
self.PriceW.pw2.plot(x=sellx, y=selly, symbolSize=18, symbolBrush=(119,172,48), symbolPen=(119,172,48), symbol='star')
elif point == 2:
self.PriceW.pw2.plot(x=sellx, y=selly, symbolSize=18, symbolBrush=(221,221,34), symbolPen=(221,221,34),symbol='star')
elif point == 3:
self.PriceW.pw2.plot(x=sellx, y=selly, symbolSize=18, symbolBrush=(179,158,77), symbolPen=(179,158,77),symbol='star')
# ----------------------------------------------------------------------
# 判断包含关系,仿照聚框,合并K线数据
def judgeInclude(self):
## 判断包含关系
k_data = self.data
# 保存分型后dataFrame的值
after_fenxing = pd.DataFrame()
temp_data = k_data[:1]
zoushi = [3] # 3-持平 4-向下 5-向上
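# Inclusion-merge rule: when one bar's range contains the other's, the two bars
# are merged; in a down move (zoushi == 4) the merged bar keeps the lower high
# and lower low, in an up move (zoushi == 5) the higher high and higher low.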
for i in xrange(len(k_data)):
case1 = temp_data.high[-1] >= k_data.high[i] and temp_data.low[-1] <= k_data.low[i] # 第1根包含第2根
case2 = temp_data.high[-1] <= k_data.high[i] and temp_data.low[-1] >= k_data.low[i] # 第2根包含第1根
case3 = temp_data.high[-1] == k_data.high[i] and temp_data.low[-1] == k_data.low[i] # 第1根等于第2根
case4 = temp_data.high[-1] > k_data.high[i] and temp_data.low[-1] > k_data.low[i] # 向下趋势
case5 = temp_data.high[-1] < k_data.high[i] and temp_data.low[-1] < k_data.low[i] # 向上趋势
if case3:
zoushi.append(3)
continue
elif case1:
print temp_data
if zoushi[-1] == 4:
temp_data.ix[0, 4] = k_data.high[i] #向下走取高点的低点
else:
temp_data.ix[0, 3] = k_data.low[i] #向上走取低点的高点
elif case2:
temp_temp = temp_data[-1:]
temp_data = k_data[i:i + 1]
if zoushi[-1] == 4:
temp_data.ix[0, 4] = temp_temp.high[0]
else:
temp_data.ix[0, 3] = temp_temp.low[0]
elif case4:
zoushi.append(4)
after_fenxing = pd.concat([after_fenxing, temp_data], axis=0)
temp_data = k_data[i:i + 1]
elif case5:
zoushi.append(5)
after_fenxing = pd.concat([after_fenxing, temp_data], axis=0)
temp_data = k_data[i:i + 1]
return after_fenxing
# ----------------------------------------------------------------------
#画出合并后的K线图,分笔
def plotAfterFenXing(self, after_fenxing):
#判断包含关系,合并K线
for i in xrange(len(after_fenxing)):
#处理k线的最大最小值、开盘收盘价,合并后k线不显示影线。
after_fenxing.iloc[i, 0] = i
if after_fenxing.open[i] > after_fenxing.close[i]:
after_fenxing.iloc[i, 1] = after_fenxing.high[i]
after_fenxing.iloc[i, 2] = after_fenxing.low[i]
else:
after_fenxing.iloc[i, 1] = after_fenxing.low[i]
after_fenxing.iloc[i, 2] = after_fenxing.high[i]
self.PriceW.onBarAfterFenXing(i, after_fenxing.index[i], after_fenxing.open[i], after_fenxing.close[i], after_fenxing.low[i], after_fenxing.high[i])
self.PriceW.plotKlineAfterFenXing()
print "plotKLine after fenxing"
# ----------------------------------------------------------------------
# 找出顶和底
def findTopAndLow(self, after_fenxing):
temp_num = 0 # 上一个顶或底的位置
temp_high = 0 # 上一个顶的high值
temp_low = 0 # 上一个底的low值
temp_type = 0 # 上一个记录位置的类型
i = 1
fenxing_type = [] # 记录分型点的类型,1为顶分型,-1为底分型
fenxing_data = pd.DataFrame() # 分型点的DataFrame值
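# A top fractal is a bar whose high exceeds both neighbours, a bottom fractal
# one whose low is below both; confirmed fractals must alternate between tops
# and bottoms and be at least 4 merged bars apart (the i < temp_num + 4 check).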
while (i < len(after_fenxing) - 1):
case1 = after_fenxing.high[i - 1] < after_fenxing.high[i] and after_fenxing.high[i] > after_fenxing.high[i + 1] # 顶分型
case2 = after_fenxing.low[i - 1] > after_fenxing.low[i] and after_fenxing.low[i] < after_fenxing.low[i + 1] # 底分型
if case1:
if temp_type == 1: # 如果上一个分型为顶分型,则进行比较,选取高点更高的分型
if after_fenxing.high[i] <= temp_high:
i += 1
else:
temp_high = after_fenxing.high[i]
temp_num = i
temp_type = 1
i += 1
elif temp_type == 2: # 如果上一个分型为底分型,则记录上一个分型,用当前分型与后面的分型比较,选取同向更极端的分型
if temp_low >= after_fenxing.high[i]: # 如果上一个底分型的底比当前顶分型的顶高,则跳过当前顶分型。
i += 1
elif i < temp_num + 4: # 顶和底至少5k线
i += 1
else:
fenxing_type.append(-1)
fenxing_data = pd.concat([fenxing_data, after_fenxing[temp_num:temp_num + 1]], axis=0)
temp_high = after_fenxing.high[i]
temp_num = i
temp_type = 1
i += 1
else:
temp_high = after_fenxing.high[i]
temp_num = i
temp_type = 1
i += 1
elif case2:
if temp_type == 2: # 如果上一个分型为底分型,则进行比较,选取低点更低的分型
if after_fenxing.low[i] >= temp_low:
i += 1
else:
temp_low = after_fenxing.low[i]
temp_num = i
temp_type = 2
i += 1
elif temp_type == 1: # 如果上一个分型为顶分型,则记录上一个分型,用当前分型与后面的分型比较,选取同向更极端的分型
if temp_high <= after_fenxing.low[i]: # 如果上一个顶分型的底比当前底分型的底低,则跳过当前底分型。
i += 1
elif i < temp_num + 4: # 顶和底至少5k线
i += 1
else:
fenxing_type.append(1)
fenxing_data = pd.concat([fenxing_data, after_fenxing[temp_num:temp_num + 1]], axis=0)
temp_low = after_fenxing.low[i]
temp_num = i
temp_type = 2
i += 1
else:
temp_low = after_fenxing.low[i]
temp_num = i
temp_type = 2
i += 1
else:
i += 1
# if fenxing_type:
# if fenxing_type[-1] == 1 and temp_type == 2:
# fenxing_type.append(-1)
# fenxing_data = pd.concat([fenxing_data, after_fenxing[temp_num:temp_num + 1]], axis=0)
#
# if fenxing_type[-1] == -1 and temp_type == 1:
# fenxing_type.append(1)
# fenxing_data = pd.concat([fenxing_data, after_fenxing[temp_num:temp_num + 1]], axis=0)
return fenxing_data, fenxing_type
# ----------------------------------------------------------------------
# 连接MongoDB数据库
def __connectMongo(self):
try:
self.__mongoConnection = pymongo.MongoClient("localhost", 27017)
self.__mongoConnected = True
except ConnectionFailure:
pass
# ----------------------------------------------------------------------
def registerEvent(self):
"""注册事件监听"""
self.signal.connect(self.updateChanlunLog)
self.eventEngine.register(EVENT_CHANLUN_LOG, self.signal.emit)
########################################################################
class PriceWidget(QtGui.QWidget):
"""用于显示价格走势图"""
signal = QtCore.pyqtSignal(type(Event()))
symbol = ''
class CandlestickItem(pg.GraphicsObject):
def __init__(self, data):
pg.GraphicsObject.__init__(self)
self.data = data ## data must have fields: time, open, close, min, max
self.generatePicture()
def generatePicture(self):
## pre-computing a QPicture object allows paint() to run much more quickly,
## rather than re-drawing the shapes every time.
self.picture = QtGui.QPicture()
p = QtGui.QPainter(self.picture)
p.setPen(pg.mkPen(color='w', width=0.4)) # 0.4 means w*2
# w = (self.data[1][0] - self.data[0][0]) / 3.
w = 0.2
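# w is half the body width of each candle; bodies are drawn green when the bar
# closed below its open and red otherwise (Chinese-market colour convention).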
for (n, t, open, close, min, max) in self.data:
p.drawLine(QtCore.QPointF(n, min), QtCore.QPointF(n, max))
if open > close:
p.setBrush(pg.mkBrush('g'))
else:
p.setBrush(pg.mkBrush('r'))
p.drawRect(QtCore.QRectF(n-w, open, w*2, close-open))
pg.setConfigOption('leftButtonPan', False)
p.end()
def paint(self, p, *args):
p.drawPicture(0, 0, self.picture)
def boundingRect(self):
## boundingRect _must_ indicate the entire area that will be drawn on
## or else we will get artifacts and possibly crashing.
## (in this case, QPicture does all the work of computing the bounding rect for us)
return QtCore.QRectF(self.picture.boundingRect())
#----------------------------------------------------------------------
def __init__(self, eventEngine, chanlunEngine, data, parent=None):
"""Constructor"""
super(PriceWidget, self).__init__(parent)
# K线图EMA均线的参数、变量
self.EMAFastAlpha = 0.0167 # 快速EMA的参数,60
self.EMASlowAlpha = 0.0083 # 慢速EMA的参数,120
self.fastEMA = 0 # 快速EMA的数值
self.slowEMA = 0 # 慢速EMA的数值
self.listfastEMA = []
self.listslowEMA = []
# 保存K线数据的列表对象
self.listBar = []
self.listClose = []
self.listHigh = []
self.listLow = []
self.listOpen = []
# 是否完成了历史数据的读取
self.initCompleted = False
self.__eventEngine = eventEngine
self.__chanlunEngine = chanlunEngine
self.data = data #画图所需数据
# MongoDB数据库相关
self.__mongoConnected = False
self.__mongoConnection = None
# 调用函数
self.__connectMongo()
self.initUi()
# self.registerEvent()
#----------------------------------------------------------------------
def initUi(self):
"""初始化界面"""
self.setWindowTitle(u'Price')
self.vbl_1 = QtGui.QHBoxLayout()
self.initplotKline() # plotKline初始化
self.setLayout(self.vbl_1)
#----------------------------------------------------------------------
def initplotKline(self):
"""Kline"""
s = self.data.index #横坐标值
print "numbers of KLine: ", len(s)
xdict = dict(enumerate(s))
self.__axisTime = MyStringAxis(xdict, orientation='bottom')
self.pw2 = pg.PlotWidget(axisItems={'bottom': self.__axisTime}) # K线图
pw2x = self.pw2.getAxis('bottom')
pw2x.setGrid(150) # 设置默认x轴网格
pw2y = self.pw2.getAxis('left')
pw2y.setGrid(150) # 设置默认y轴网格
self.vbl_1.addWidget(self.pw2)
self.pw2.setMinimumWidth(1500)
self.pw2.setMaximumWidth(1800)
self.pw2.setDownsampling(mode='peak')
self.pw2.setClipToView(True)
self.curve5 = self.pw2.plot()
self.curve6 = self.pw2.plot()
self.candle = self.CandlestickItem(self.listBar)
self.pw2.addItem(self.candle)
## Draw an arrowhead next to the text box
# self.arrow = pg.ArrowItem()
# self.pw2.addItem(self.arrow)
# 从数据库读取一分钟数据画分钟线
def plotMin(self, symbol):
self.initCompleted = True
cx = self.__mongoMinDB[symbol].find()
print cx.count()
if cx:
for data in cx:
self.barOpen = data['open']
self.barClose = data['close']
self.barLow = data['low']
self.barHigh = data['high']
self.barOpenInterest = data['openInterest']
# print self.num, self.barOpen, self.barClose, self.barLow, self.barHigh, self.barOpenInterest
self.onBar(self.num, data['datetime'], self.barOpen, self.barClose, self.barLow, self.barHigh) # onBar expects (n, t, o, c, l, h); 'datetime' is assumed to be the bar-time field stored with the minute bars
self.num += 1
# 画历史数据K线图
def plotHistorticData(self):
self.initCompleted = True
for i in xrange(len(self.data)):
self.onBar(i, self.data.index[i], self.data.open[i], self.data.close[i], self.data.low[i], self.data.high[i])
self.plotKline()
print "plotKLine success"
#----------------------------------------------------------------------
def initHistoricalData(self):
"""初始历史数据"""
if self.symbol!='':
print "download histrical data:",self.symbol
self.initCompleted = True # 读取历史数据完成
td = timedelta(days=1) # 读取3天的历史TICK数据
# if startDate:
# cx = self.loadTick(self.symbol, startDate-td)
# else:
# today = datetime.today().replace(hour=0, minute=0, second=0, microsecond=0)
# cx = self.loadTick(self.symbol, today-td)
cx = None # the tick-loading calls above are commented out, so there is no cursor to count here
if cx:
for data in cx:
tick = Tick(data['symbol'])
tick.openPrice = data['lastPrice']
tick.highPrice = data['upperLimit']
tick.lowPrice = data['lowerLimit']
tick.lastPrice = data['lastPrice']
tick.volume = data['volume']
tick.openInterest = data['openInterest']
tick.upperLimit = data['upperLimit']
tick.lowerLimit = data['lowerLimit']
tick.time = data['time']
# tick.ms = data['UpdateMillisec']
tick.bidPrice1 = data['bidPrice1']
tick.bidPrice2 = data['bidPrice2']
tick.bidPrice3 = data['bidPrice3']
tick.bidPrice4 = data['bidPrice4']
tick.bidPrice5 = data['bidPrice5']
tick.askPrice1 = data['askPrice1']
tick.askPrice2 = data['askPrice2']
tick.askPrice3 = data['askPrice3']
tick.askPrice4 = data['askPrice4']
tick.askPrice5 = data['askPrice5']
tick.bidVolume1 = data['bidVolume1']
tick.bidVolume2 = data['bidVolume2']
tick.bidVolume3 = data['bidVolume3']
tick.bidVolume4 = data['bidVolume4']
tick.bidVolume5 = data['bidVolume5']
tick.askVolume1 = data['askVolume1']
tick.askVolume2 = data['askVolume2']
tick.askVolume3 = data['askVolume3']
tick.askVolume4 = data['askVolume4']
tick.askVolume5 = data['askVolume5']
self.onTick(tick)
print('load historic data completed')
#----------------------------------------------------------------------
def plotKline(self):
"""K线图"""
if self.initCompleted:
# 均线
self.curve5.setData(self.listfastEMA, pen=(255, 0, 0), name="Red curve")
self.curve6.setData(self.listslowEMA, pen=(0, 255, 0), name="Green curve")
# 画K线
self.pw2.removeItem(self.candle)
self.candle = self.CandlestickItem(self.listBar)
self.pw2.addItem(self.candle)
#----------------------------------------------------------------------
def plotText(self):
lenClose = len(self.listClose)
if lenClose >= 5: # Fractal Signal
if self.listClose[-1] > self.listClose[-2] and self.listClose[-3] > self.listClose[-2] and self.listClose[-4] > self.listClose[-2] and self.listClose[-5] > self.listClose[-2] and self.listfastEMA[-1] > self.listslowEMA[-1]:
## Draw an arrowhead next to the text box
# self.pw2.removeItem(self.arrow)
self.arrow = pg.ArrowItem(pos=(lenClose-1, self.listLow[-1]), angle=90, brush=(255, 0, 0))#红色
self.pw2.addItem(self.arrow)
elif self.listClose[-1] < self.listClose[-2] and self.listClose[-3] < self.listClose[-2] and self.listClose[-4] < self.listClose[-2] and self.listClose[-5] < self.listClose[-2] and self.listfastEMA[-1] < self.listslowEMA[-1]:
## Draw an arrowhead next to the text box
# self.pw2.removeItem(self.arrow)
self.arrow = pg.ArrowItem(pos=(lenClose-1, self.listHigh[-1]), angle=-90, brush=(0, 255, 0))#绿色
self.pw2.addItem(self.arrow)
#----------------------------------------------------------------------
def onBar(self, n, t, o, c, l, h):
self.listBar.append((n, t, o, c, l, h))
self.listOpen.append(o)
self.listClose.append(c)
self.listHigh.append(h)
self.listLow.append(l)
#计算K线图EMA均线
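# Recursive EMA update: ema = alpha * close + (1 - alpha) * ema, seeded with the
# first close; alpha values 0.0167 and 0.0083 roughly match 60- and 120-bar windows.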
if self.fastEMA:
self.fastEMA = c*self.EMAFastAlpha + self.fastEMA*(1-self.EMAFastAlpha)
self.slowEMA = c*self.EMASlowAlpha + self.slowEMA*(1-self.EMASlowAlpha)
else:
self.fastEMA = c
self.slowEMA = c
self.listfastEMA.append(self.fastEMA)
self.listslowEMA.append(self.slowEMA)
self.plotText() #显示开仓位置
# ----------------------------------------------------------------------
#画合并后的K线Bar
def onBarAfterFenXing(self, n, t, o, c, l, h):
self.listBar.append((n, t, o, c, l, h))
def plotKlineAfterFenXing(self):
# 画K线
self.pw2.removeItem(self.candle)
self.candle = self.CandlestickItem(self.listBar)
self.pw2.addItem(self.candle)
#----------------------------------------------------------------------
def __connectMongo(self):
"""连接MongoDB数据库"""
try:
self.__mongoConnection = pymongo.MongoClient("localhost", 27017)
self.__mongoConnected = True
self.__mongoMinDB = self.__mongoConnection['VnTrader_1Min_Db']
except ConnectionFailure:
pass
########################################################################
class TickWidget(QtGui.QWidget):
"""用于显示价格走势图"""
signal = QtCore.pyqtSignal(type(Event()))
# tick图的相关参数、变量
listlastPrice = np.empty(1000)
fastMA = 0
midMA = 0
slowMA = 0
listfastMA = np.empty(1000)
listmidMA = np.empty(1000)
listslowMA = np.empty(1000)
tickFastAlpha = 0.0333 # 快速均线的参数,30
tickMidAlpha = 0.0167 # 中速均线的参数,60
tickSlowAlpha = 0.0083 # 慢速均线的参数,120
ptr = 0
ticktime = None # tick数据时间
class CandlestickItem(pg.GraphicsObject):
def __init__(self, data):
pg.GraphicsObject.__init__(self)
self.data = data ## data must have fields: time, open, close, min, max
self.generatePicture()
def generatePicture(self):
## pre-computing a QPicture object allows paint() to run much more quickly,
## rather than re-drawing the shapes every time.
self.picture = QtGui.QPicture()
p = QtGui.QPainter(self.picture)
p.setPen(pg.mkPen(color='w', width=0.4)) # 0.4 means w*2
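# NOTE: the AxisItem configured below is never attached to a plot or to the
# generated picture, so these settings have no visible effect.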
a = pg.AxisItem('bottom', pen=None, linkView=None, parent=None, maxTickLength=-5, showValues=True)
a.setFixedWidth(1)
a.setWidth(1)
a.setLabel(show=True)
a.setGrid(grid=True)
labelStyle = {'color': '#FFF', 'font-size': '14pt'}
a.setLabel('label text', units='V', **labelStyle)
# w = (self.data[1][0] - self.data[0][0]) / 3.
w = 0.2
for (t, open, close, min, max) in self.data:
p.drawLine(QtCore.QPointF(t, min), QtCore.QPointF(t, max))
if open > close:
p.setBrush(pg.mkBrush('g'))
else:
p.setBrush(pg.mkBrush('r'))
p.drawRect(QtCore.QRectF(t-w, open, w*2, close-open))
pg.setConfigOption('leftButtonPan', False)
p.end()
def paint(self, p, *args):
p.drawPicture(0, 0, self.picture)
def boundingRect(self):
## boundingRect _must_ indicate the entire area that will be drawn on
## or else we will get artifacts and possibly crashing.
## (in this case, QPicture does all the work of computing the bounding rect for us)
return QtCore.QRectF(self.picture.boundingRect())
#----------------------------------------------------------------------
def __init__(self, eventEngine, chanlunEngine, parent=None):
"""Constructor"""
super(TickWidget, self).__init__(parent)
self.__eventEngine = eventEngine
self.__chanlunEngine = chanlunEngine
# MongoDB数据库相关
self.__mongoConnected = False
self.__mongoConnection = None
self.__mongoTickDB = None
# 调用函数
self.initUi()
self.registerEvent()
#----------------------------------------------------------------------
def initUi(self):
"""初始化界面"""
self.setWindowTitle(u'Tick')
self.vbl_1 = QtGui.QHBoxLayout()
self.initplotTick() # plotTick初始化
self.setLayout(self.vbl_1)
#----------------------------------------------------------------------
def initplotTick(self):
""""""
self.pw1 = pg.PlotWidget(name='Plot1')
self.vbl_1.addWidget(self.pw1)
self.pw1.setMinimumWidth(1500)
self.pw1.setMaximumWidth(1800)
self.pw1.setRange(xRange=[-360, 0])
self.pw1.setLimits(xMax=5)
self.pw1.setDownsampling(mode='peak')
self.pw1.setClipToView(True)
self.curve1 = self.pw1.plot()
self.curve2 = self.pw1.plot()
self.curve3 = self.pw1.plot()
self.curve4 = self.pw1.plot()
# #----------------------------------------------------------------------
# def initHistoricalData(self,startDate=None):
# """初始历史数据"""
# print "download histrical data"
# self.initCompleted = True # 读取历史数据完成
# td = timedelta(days=1) # 读取3天的历史TICK数据
#
# if startDate:
# cx = self.loadTick(self.symbol, startDate-td)
# else:
# today = datetime.today().replace(hour=0, minute=0, second=0, microsecond=0)
# cx = self.loadTick(self.symbol, today-td)
#
# print cx.count()
#
# if cx:
# for data in cx:
# tick = Tick(data['symbol'])
#
# tick.openPrice = data['lastPrice']
# tick.highPrice = data['upperLimit']
# tick.lowPrice = data['lowerLimit']
# tick.lastPrice = data['lastPrice']
#
# tick.volume = data['volume']
# tick.openInterest = data['openInterest']
#
# tick.upperLimit = data['upperLimit']
# tick.lowerLimit = data['lowerLimit']
#
# tick.time = data['time']
# # tick.ms = data['UpdateMillisec']
#
# tick.bidPrice1 = data['bidPrice1']
# tick.bidPrice2 = data['bidPrice2']
# tick.bidPrice3 = data['bidPrice3']
# tick.bidPrice4 = data['bidPrice4']
# tick.bidPrice5 = data['bidPrice5']
#
# tick.askPrice1 = data['askPrice1']
# tick.askPrice2 = data['askPrice2']
# tick.askPrice3 = data['askPrice3']
# tick.askPrice4 = data['askPrice4']
# tick.askPrice5 = data['askPrice5']
#
# tick.bidVolume1 = data['bidVolume1']
# tick.bidVolume2 = data['bidVolume2']
# tick.bidVolume3 = data['bidVolume3']
# tick.bidVolume4 = data['bidVolume4']
# tick.bidVolume5 = data['bidVolume5']
#
# tick.askVolume1 = data['askVolume1']
# tick.askVolume2 = data['askVolume2']
# tick.askVolume3 = data['askVolume3']
# tick.askVolume4 = data['askVolume4']
# tick.askVolume5 = data['askVolume5']
#
# self.onTick(tick)
#
# print('load historic data completed')
#----------------------------------------------------------------------
def plotTick(self):
"""画tick图"""
self.curve1.setData(self.listlastPrice[:self.ptr])
self.curve2.setData(self.listfastMA[:self.ptr], pen=(255, 0, 0), name="Red curve")
self.curve3.setData(self.listmidMA[:self.ptr], pen=(0, 255, 0), name="Green curve")
self.curve4.setData(self.listslowMA[:self.ptr], pen=(0, 0, 255), name="Blue curve")
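        # Shift all curves left so the most recent tick is drawn at x = 0,
        # matching the fixed xRange=[-360, 0] window set in initplotTick()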
self.curve1.setPos(-self.ptr, 0)
self.curve2.setPos(-self.ptr, 0)
self.curve3.setPos(-self.ptr, 0)
self.curve4.setPos(-self.ptr, 0)
#----------------------------------------------------------------------
def updateMarketData(self, event):
"""更新行情"""
data = event.dict_['data']
print "update", data['InstrumentID']
symbol = data['InstrumentID']
tick = Tick(symbol)
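        # Copy the CTP-style market data fields from the raw dict onto the Tick object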
tick.openPrice = data['OpenPrice']
tick.highPrice = data['HighestPrice']
tick.lowPrice = data['LowestPrice']
tick.lastPrice = data['LastPrice']
tick.volume = data['Volume']
tick.openInterest = data['OpenInterest']
tick.upperLimit = data['UpperLimitPrice']
tick.lowerLimit = data['LowerLimitPrice']
tick.time = data['UpdateTime']
tick.ms = data['UpdateMillisec']
tick.bidPrice1 = data['BidPrice1']
tick.bidPrice2 = data['BidPrice2']
tick.bidPrice3 = data['BidPrice3']
tick.bidPrice4 = data['BidPrice4']
tick.bidPrice5 = data['BidPrice5']
tick.askPrice1 = data['AskPrice1']
tick.askPrice2 = data['AskPrice2']
tick.askPrice3 = data['AskPrice3']
tick.askPrice4 = data['AskPrice4']
tick.askPrice5 = data['AskPrice5']
tick.bidVolume1 = data['BidVolume1']
tick.bidVolume2 = data['BidVolume2']
tick.bidVolume3 = data['BidVolume3']
tick.bidVolume4 = data['BidVolume4']
tick.bidVolume5 = data['BidVolume5']
tick.askVolume1 = data['AskVolume1']
tick.askVolume2 = data['AskVolume2']
tick.askVolume3 = data['AskVolume3']
tick.askVolume4 = data['AskVolume4']
tick.askVolume5 = data['AskVolume5']
        self.onTick(tick)         # update the tick chart with the new tick
        self.__recordTick(data)   # persist the raw tick dict to MongoDB
#----------------------------------------------------------------------
def onTick(self, tick):
"""tick数据更新"""
from datetime import time
        # Convert the string timestamp (plus milliseconds) into a datetime.time for easy comparison
hh, mm, ss = tick.time.split(':')
self.ticktime = time(int(hh), int(mm), int(ss), microsecond=tick.ms)
        # Update the chart data: exponential moving averages, ema = (1 - alpha) * ema + alpha * price
if self.ptr == 0:
self.fastMA = tick.lastPrice
self.midMA = tick.lastPrice
self.slowMA = tick.lastPrice
else:
self.fastMA = (1-self.tickFastAlpha) * self.fastMA + self.tickFastAlpha * tick.lastPrice
self.midMA = (1-self.tickMidAlpha) * self.midMA + self.tickMidAlpha * tick.lastPrice
self.slowMA = (1-self.tickSlowAlpha) * self.slowMA + self.tickSlowAlpha * tick.lastPrice
self.listlastPrice[self.ptr] = int(tick.lastPrice)
self.listfastMA[self.ptr] = int(self.fastMA)
self.listmidMA[self.ptr] = int(self.midMA)
self.listslowMA[self.ptr] = int(self.slowMA)
self.ptr += 1
print(self.ptr)
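        # The buffers are fixed-size numpy arrays; double their capacity when full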
if self.ptr >= self.listlastPrice.shape[0]:
tmp = self.listlastPrice
self.listlastPrice = np.empty(self.listlastPrice.shape[0] * 2)
self.listlastPrice[:tmp.shape[0]] = tmp
tmp = self.listfastMA
self.listfastMA = np.empty(self.listfastMA.shape[0] * 2)
self.listfastMA[:tmp.shape[0]] = tmp
tmp = self.listmidMA
self.listmidMA = np.empty(self.listmidMA.shape[0] * 2)
self.listmidMA[:tmp.shape[0]] = tmp
tmp = self.listslowMA
self.listslowMA = np.empty(self.listslowMA.shape[0] * 2)
self.listslowMA[:tmp.shape[0]] = tmp
        # Redraw the tick chart
        self.plotTick()
#----------------------------------------------------------------------
def __connectMongo(self):
"""连接MongoDB数据库"""
try:
self.__mongoConnection = pymongo.MongoClient("localhost", 27017)
self.__mongoConnected = True
self.__mongoTickDB = self.__mongoConnection['VnTrader_Tick_Db']
except ConnectionFailure:
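            # Leave __mongoConnected as False; ticks simply will not be recorded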
pass
#----------------------------------------------------------------------
def __recordTick(self, data):
"""将Tick数据插入到MongoDB中"""
if self.__mongoConnected:
symbol = data['InstrumentID']
data['date'] = datetime.now().strftime('%Y%m%d')
self.__mongoTickDB[symbol].insert(data)
# #----------------------------------------------------------------------
# def loadTick(self, symbol, startDate, endDate=None):
    #     """Load tick data from MongoDB."""
# cx = self.__mongoTickDB[symbol].find()
# print cx.count()
# return cx
# # if self.__mongoConnected:
# # collection = self.__mongoTickDB[symbol]
# #
    # #         # If an end date was supplied, bound the query by it
# # if endDate:
# # cx = collection.find({'date': {'$gte': startDate, '$lte': endDate}})
# # else:
# # cx = collection.find({'date': {'$gte': startDate}})
# # return cx
# # else:
# # return None
#----------------------------------------------------------------------
def registerEvent(self):
"""注册事件监听"""
print "connect"
self.signal.connect(self.updateMarketData)
self.__eventEngine.register(EVENT_MARKETDATA, self.signal.emit)
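# A minimal usage sketch (assumptions: a running event engine that emits
# EVENT_MARKETDATA events and a chanlunEngine instance, as elsewhere in this project):
#
#     app = QtGui.QApplication([])
#     tickWidget = TickWidget(eventEngine, chanlunEngine)
#     tickWidget.show()
#     app.exec_()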
class Tick:
"""Tick数据对象"""
#----------------------------------------------------------------------
def __init__(self, symbol):
"""Constructor"""
        self.symbol = symbol        # instrument/contract code
self.openPrice = 0 # OHLC
self.highPrice = 0
self.lowPrice = 0
self.lastPrice = 0
        self.volume = 0             # traded volume
        self.openInterest = 0       # open interest
        self.upperLimit = 0         # limit-up price
        self.lowerLimit = 0         # limit-down price
        self.time = ''              # update time string and milliseconds
        self.ms = 0
        self.bidPrice1 = 0          # market depth (five bid/ask levels)
self.bidPrice2 = 0
self.bidPrice3 = 0
self.bidPrice4 = 0
self.bidPrice5 = 0
self.askPrice1 = 0
self.askPrice2 = 0
self.askPrice3 = 0
self.askPrice4 = 0
self.askPrice5 = 0
self.bidVolume1 = 0
self.bidVolume2 = 0
self.bidVolume3 = 0
self.bidVolume4 = 0
self.bidVolume5 = 0
self.askVolume1 = 0
self.askVolume2 = 0
self.askVolume3 = 0
self.askVolume4 = 0
self.askVolume5 = 0 | mit | 1,005,031,568,701,190,500 | 37.755556 | 237 | 0.49587 | false |
blsmit5728/PokeAlarm | PokeAlarm/Events/MonEvent.py | 1 | 13089 | # Standard Library Imports
from datetime import datetime
# 3rd Party Imports
# Local Imports
from PokeAlarm import Unknown
from PokeAlarm.Utilities import MonUtils
from PokeAlarm.Utils import (
get_gmaps_link, get_move_type, get_move_damage, get_move_dps,
get_move_duration, get_move_energy, get_pokemon_size,
get_applemaps_link, get_time_as_str, get_seconds_remaining,
get_base_types, get_dist_as_str, get_weather_emoji,
get_type_emoji)
from . import BaseEvent
class MonEvent(BaseEvent):
""" Event representing the discovery of a Pokemon. """
def __init__(self, data):
""" Creates a new Monster Event based on the given dict. """
super(MonEvent, self).__init__('monster')
check_for_none = BaseEvent.check_for_none
# Identification
self.enc_id = data['encounter_id']
self.monster_id = int(data['pokemon_id'])
# Time Left
self.disappear_time = datetime.utcfromtimestamp(data['disappear_time'])
self.time_left = get_seconds_remaining(self.disappear_time)
# Spawn Data
self.spawn_start = check_for_none(
int, data.get('spawn_start'), Unknown.REGULAR)
self.spawn_end = check_for_none(
int, data.get('spawn_end'), Unknown.REGULAR)
self.spawn_verified = check_for_none(bool, data.get('verified'), False)
# Location
self.lat = float(data['latitude'])
self.lng = float(data['longitude'])
self.distance = Unknown.SMALL # Completed by Manager
self.direction = Unknown.TINY # Completed by Manager
self.weather_id = check_for_none(
int, data.get('weather'), Unknown.TINY)
self.boosted_weather_id = check_for_none(
int, data.get('boosted_weather')
or data.get('weather_boosted_condition'), 0)
# Encounter Stats
self.mon_lvl = check_for_none(
int, data.get('pokemon_level'), Unknown.TINY)
self.cp = check_for_none(int, data.get('cp'), Unknown.TINY)
# IVs
self.atk_iv = check_for_none(
int, data.get('individual_attack'), Unknown.TINY)
self.def_iv = check_for_none(
int, data.get('individual_defense'), Unknown.TINY)
self.sta_iv = check_for_none(
int, data.get('individual_stamina'), Unknown.TINY)
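        # IV percentage: each stat ranges 0-15, so 45 is a perfect total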
if Unknown.is_not(self.atk_iv, self.def_iv, self.sta_iv):
self.iv = \
100 * (self.atk_iv + self.def_iv + self.sta_iv) / float(45)
else:
self.iv = Unknown.SMALL
# Quick Move
self.quick_id = check_for_none(
int, data.get('move_1'), Unknown.TINY)
self.quick_type = get_move_type(self.quick_id)
self.quick_damage = get_move_damage(self.quick_id)
self.quick_dps = get_move_dps(self.quick_id)
self.quick_duration = get_move_duration(self.quick_id)
self.quick_energy = get_move_energy(self.quick_id)
# Charge Move
self.charge_id = check_for_none(
int, data.get('move_2'), Unknown.TINY)
self.charge_type = get_move_type(self.charge_id)
self.charge_damage = get_move_damage(self.charge_id)
self.charge_dps = get_move_dps(self.charge_id)
self.charge_duration = get_move_duration(self.charge_id)
self.charge_energy = get_move_energy(self.charge_id)
# Catch Probs
self.base_catch = check_for_none(
float, data.get('base_catch'), Unknown.TINY)
self.great_catch = check_for_none(
float, data.get('great_catch'), Unknown.TINY)
self.ultra_catch = check_for_none(
float, data.get('ultra_catch'), Unknown.TINY)
# Attack Rating
self.atk_grade = check_for_none(
str, data.get('atk_grade'), Unknown.TINY)
self.def_grade = check_for_none(
str, data.get('def_grade'), Unknown.TINY)
# Cosmetic
self.gender = MonUtils.get_gender_sym(
check_for_none(int, data.get('gender'), Unknown.TINY))
self.height = check_for_none(float, data.get('height'), Unknown.SMALL)
self.weight = check_for_none(float, data.get('weight'), Unknown.SMALL)
if Unknown.is_not(self.height, self.weight):
self.size_id = get_pokemon_size(
self.monster_id, self.height, self.weight)
else:
self.size_id = Unknown.SMALL
self.types = get_base_types(self.monster_id)
# Form
self.form_id = check_for_none(int, data.get('form'), 0)
# Costume
self.costume_id = check_for_none(int, data.get('costume'), 0)
# Correct this later
self.name = self.monster_id
self.geofence = Unknown.REGULAR
self.custom_dts = {}
def generate_dts(self, locale, timezone, units):
""" Return a dict with all the DTS for this event. """
time = get_time_as_str(self.disappear_time, timezone)
form_name = locale.get_form_name(self.monster_id, self.form_id)
costume_name = locale.get_costume_name(
self.monster_id, self.costume_id)
weather_name = locale.get_weather_name(self.weather_id)
boosted_weather_name = locale.get_weather_name(self.boosted_weather_id)
type1 = locale.get_type_name(self.types[0])
type2 = locale.get_type_name(self.types[1])
dts = self.custom_dts.copy()
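        # Standard DTS values below override any custom DTS entries with the same key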
dts.update({
# Identification
'encounter_id': self.enc_id,
'mon_name': locale.get_pokemon_name(self.monster_id),
'mon_id': self.monster_id,
'mon_id_3': "{:03}".format(self.monster_id),
# Time Remaining
'time_left': time[0],
'12h_time': time[1],
'24h_time': time[2],
# Spawn Data
'spawn_start': self.spawn_start,
'spawn_end': self.spawn_end,
'spawn_verified': self.spawn_verified,
# Location
'lat': self.lat,
'lng': self.lng,
'lat_5': "{:.5f}".format(self.lat),
'lng_5': "{:.5f}".format(self.lng),
'distance': (
get_dist_as_str(self.distance, units)
if Unknown.is_not(self.distance) else Unknown.SMALL),
'direction': self.direction,
'gmaps': get_gmaps_link(self.lat, self.lng),
'applemaps': get_applemaps_link(self.lat, self.lng),
'geofence': self.geofence,
# Weather
'weather_id': self.weather_id,
'weather': weather_name,
'weather_or_empty': Unknown.or_empty(weather_name),
'weather_emoji': get_weather_emoji(self.weather_id),
'boosted_weather_id': self.boosted_weather_id,
'boosted_weather': boosted_weather_name,
'boosted_weather_or_empty': (
'' if self.boosted_weather_id == 0
else Unknown.or_empty(boosted_weather_name)),
'boosted_weather_emoji':
get_weather_emoji(self.boosted_weather_id),
            'boosted_or_empty': (
                locale.get_boosted_text()
                if Unknown.is_not(self.boosted_weather_id)
                and self.boosted_weather_id != 0 else ''),
# Encounter Stats
'mon_lvl': self.mon_lvl,
'cp': self.cp,
# IVs
'iv_0': (
"{:.0f}".format(self.iv) if Unknown.is_not(self.iv)
else Unknown.TINY),
'iv': (
"{:.1f}".format(self.iv) if Unknown.is_not(self.iv)
else Unknown.SMALL),
'iv_2': (
"{:.2f}".format(self.iv) if Unknown.is_not(self.iv)
else Unknown.SMALL),
'atk': self.atk_iv,
'def': self.def_iv,
'sta': self.sta_iv,
# Type
'type1': type1,
'type1_or_empty': Unknown.or_empty(type1),
'type1_emoji': Unknown.or_empty(get_type_emoji(self.types[0])),
'type2': type2,
'type2_or_empty': Unknown.or_empty(type2),
'type2_emoji': Unknown.or_empty(get_type_emoji(self.types[1])),
'types': (
"{}/{}".format(type1, type2)
if Unknown.is_not(type2) else type1),
'types_emoji': (
"{}{}".format(
get_type_emoji(self.types[0]),
get_type_emoji(self.types[1]))
if Unknown.is_not(type2) else get_type_emoji(self.types[0])),
# Form
'form': form_name,
'form_or_empty': Unknown.or_empty(form_name),
'form_id': self.form_id,
'form_id_3': "{:03d}".format(self.form_id),
# Costume
'costume': costume_name,
'costume_or_empty': Unknown.or_empty(costume_name),
'costume_id': self.costume_id,
'costume_id_3': "{:03d}".format(self.costume_id),
# Quick Move
'quick_move': locale.get_move_name(self.quick_id),
'quick_id': self.quick_id,
'quick_type_id': self.quick_type,
'quick_type': locale.get_type_name(self.quick_type),
'quick_type_emoji': get_type_emoji(self.quick_type),
'quick_damage': self.quick_damage,
'quick_dps': self.quick_dps,
'quick_duration': self.quick_duration,
'quick_energy': self.quick_energy,
# Charge Move
'charge_move': locale.get_move_name(self.charge_id),
'charge_id': self.charge_id,
'charge_type_id': self.charge_type,
'charge_type': locale.get_type_name(self.charge_type),
'charge_type_emoji': get_type_emoji(self.charge_type),
'charge_damage': self.charge_damage,
'charge_dps': self.charge_dps,
'charge_duration': self.charge_duration,
'charge_energy': self.charge_energy,
# Cosmetic
'gender': self.gender,
'height_0': (
"{:.0f}".format(self.height) if Unknown.is_not(self.height)
else Unknown.TINY),
'height': (
"{:.1f}".format(self.height) if Unknown.is_not(self.height)
else Unknown.SMALL),
'height_2': (
"{:.2f}".format(self.height) if Unknown.is_not(self.height)
else Unknown.SMALL),
'weight_0': (
"{:.0f}".format(self.weight) if Unknown.is_not(self.weight)
else Unknown.TINY),
'weight': (
"{:.1f}".format(self.weight) if Unknown.is_not(self.weight)
else Unknown.SMALL),
'weight_2': (
"{:.2f}".format(self.weight) if Unknown.is_not(self.weight)
else Unknown.SMALL),
'size': locale.get_size_name(self.size_id),
# Attack rating
'atk_grade': (
Unknown.or_empty(self.atk_grade, Unknown.TINY)),
'def_grade': (
Unknown.or_empty(self.def_grade, Unknown.TINY)),
# Catch Prob
'base_catch_0': (
"{:.0f}".format(self.base_catch * 100)
if Unknown.is_not(self.base_catch)
else Unknown.TINY),
'base_catch': (
"{:.1f}".format(self.base_catch * 100)
if Unknown.is_not(self.base_catch)
else Unknown.SMALL),
'base_catch_2': (
"{:.2f}".format(self.base_catch * 100)
if Unknown.is_not(self.base_catch)
else Unknown.SMALL),
'great_catch_0': (
"{:.0f}".format(self.great_catch * 100)
if Unknown.is_not(self.great_catch)
else Unknown.TINY),
'great_catch': (
"{:.1f}".format(self.great_catch * 100)
if Unknown.is_not(self.great_catch)
else Unknown.SMALL),
'great_catch_2': (
"{:.2f}".format(self.great_catch * 100)
if Unknown.is_not(self.great_catch)
else Unknown.SMALL),
'ultra_catch_0': (
"{:.0f}".format(self.ultra_catch * 100)
if Unknown.is_not(self.ultra_catch)
else Unknown.TINY),
'ultra_catch': (
"{:.1f}".format(self.ultra_catch * 100)
if Unknown.is_not(self.ultra_catch)
else Unknown.SMALL),
'ultra_catch_2': (
"{:.2f}".format(self.ultra_catch * 100)
if Unknown.is_not(self.ultra_catch)
else Unknown.SMALL),
# Misc
'big_karp': (
'big' if self.monster_id == 129 and Unknown.is_not(self.weight)
and self.weight >= 13.13 else ''),
'tiny_rat': (
'tiny' if self.monster_id == 19 and Unknown.is_not(self.weight)
and self.weight <= 2.41 else '')
})
return dts
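# A rough usage sketch (hypothetical variable names; locale, timezone and units
# are normally supplied by the PokeAlarm Manager):
#
#     event = MonEvent(webhook_data)
#     dts = event.generate_dts(locale, timezone, 'metric')
#     text = "A wild {mon_name} appeared! IV: {iv}%".format(**dts)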
| agpl-3.0 | 1,737,988,250,561,061,600 | 39.150307 | 79 | 0.533578 | false |