ext (stringclasses, 9 values) | sha (stringlengths, 40-40) | content (stringlengths, 3-1.04M)
---|---|---|
py | b406c28dfd52c70a1a94cd8d99111673100f86a3 | import os
import re
import sys
import time
import fcntl
import types
import shelve
import random
import datetime
import commands
import threading
import userinterface.Client as Client
from dataservice.DDM import rucioAPI
from taskbuffer.OraDBProxy import DBProxy
from taskbuffer.TaskBuffer import taskBuffer
from taskbuffer import EventServiceUtils
from pandalogger.PandaLogger import PandaLogger
from jobdispatcher.Watcher import Watcher
from brokerage.SiteMapper import SiteMapper
from dataservice.Finisher import Finisher
from dataservice.MailUtils import MailUtils
from taskbuffer import ProcessGroups
import taskbuffer.ErrorCode
import dataservice.DDM
# password
from config import panda_config
passwd = panda_config.dbpasswd
# logger
_logger = PandaLogger().getLogger('copyArchive')
_logger.debug("===================== start =====================")
# memory checker
def _memoryCheck(msg):
try:
proc_status = '/proc/%d/status' % os.getpid()
procfile = open(proc_status)
name = ""
vmSize = ""
vmRSS = ""
# extract Name,VmSize,VmRSS
for line in procfile:
if line.startswith("Name:"):
name = line.split()[-1]
continue
if line.startswith("VmSize:"):
vmSize = ""
for item in line.split()[1:]:
vmSize += item
continue
if line.startswith("VmRSS:"):
vmRSS = ""
for item in line.split()[1:]:
vmRSS += item
continue
procfile.close()
_logger.debug('MemCheck - %s Name=%s VSZ=%s RSS=%s : %s' % (os.getpid(),name,vmSize,vmRSS,msg))
except:
errType, errValue, errTraceBack = sys.exc_info()
_logger.error("memoryCheck() : %s %s" % (errType,errValue))
_logger.debug('MemCheck - %s unknown : %s' % (os.getpid(),msg))
return
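# note: this helper parses /proc/<pid>/status, so the detailed numbers are only available on Linux;
# on other platforms the open() fails and the error branch above logs the PID as unknown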
_memoryCheck("start")
# kill old dq2 process
try:
# time limit
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=2)
# get process list
scriptName = sys.argv[0]
out = commands.getoutput('ps axo user,pid,lstart,args | grep dq2.clientapi | grep -v PYTHONPATH | grep -v grep')
for line in out.split('\n'):
if line == '':
continue
items = line.split()
# owned process
if not items[0] in ['sm','atlpan','pansrv','root']: # ['os.getlogin()']: doesn't work in cron
continue
# look for python
if re.search('python',line) == None:
continue
# PID
pid = items[1]
# start time
timeM = re.search('(\S+\s+\d+ \d+:\d+:\d+ \d+)',line)
startTime = datetime.datetime(*time.strptime(timeM.group(1),'%b %d %H:%M:%S %Y')[:6])
# kill old process
if startTime < timeLimit:
_logger.debug("old dq2 process : %s %s" % (pid,startTime))
_logger.debug(line)
commands.getoutput('kill -9 %s' % pid)
except:
errType, errValue, errTraceBack = sys.exc_info()
_logger.error("kill dq2 process : %s %s" % (errType,errValue))
# kill old process
try:
# time limit
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=7)
# get process list
scriptName = sys.argv[0]
out = commands.getoutput('ps axo user,pid,lstart,args | grep %s' % scriptName)
for line in out.split('\n'):
items = line.split()
# owned process
if not items[0] in ['sm','atlpan','pansrv','root']: # ['os.getlogin()']: doesn't work in cron
continue
# look for python
if re.search('python',line) == None:
continue
# PID
pid = items[1]
# start time
timeM = re.search('(\S+\s+\d+ \d+:\d+:\d+ \d+)',line)
startTime = datetime.datetime(*time.strptime(timeM.group(1),'%b %d %H:%M:%S %Y')[:6])
# kill old process
if startTime < timeLimit:
_logger.debug("old process : %s %s" % (pid,startTime))
_logger.debug(line)
commands.getoutput('kill -9 %s' % pid)
except:
errType, errValue, errTraceBack = sys.exc_info()
_logger.error("kill process : %s %s" % (errType,errValue))
# instantiate TB
taskBuffer.init(panda_config.dbhost,panda_config.dbpasswd,nDBConnection=1)
# instantiate sitemapper
siteMapper = SiteMapper(taskBuffer)
# send email for access requests
_logger.debug("Site Access")
try:
# get contact
contactAddr = {}
siteContactAddr = {}
sql = "SELECT name,email FROM ATLAS_PANDAMETA.cloudconfig"
status,res = taskBuffer.querySQLS(sql,{})
for cloudName,cloudEmail in res:
contactAddr[cloudName] = cloudEmail
# get requests
sql = "SELECT pandaSite,status,dn FROM ATLAS_PANDAMETA.siteaccess WHERE status IN (:status1,:status2,:status3) "
sql += "ORDER BY pandaSite,status "
varMap = {}
varMap[':status1'] = 'requested'
varMap[':status2'] = 'tobeapproved'
varMap[':status3'] = 'toberejected'
status,res = taskBuffer.querySQLS(sql,varMap)
requestsInCloud = {}
mailUtils = MailUtils()
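# requests already marked tobeapproved/toberejected are turned into a user notification plus a DB
# status update; still-pending 'requested' entries are grouped per cloud/site and mailed to the
# cloud contact further below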
# loop over all requests
for pandaSite,reqStatus,userName in res:
cloud = siteMapper.getSite(pandaSite).cloud
_logger.debug("request : '%s' site=%s status=%s cloud=%s" % (userName,pandaSite,reqStatus,cloud))
# send emails to user
if reqStatus in ['tobeapproved','toberejected']:
# set status
if reqStatus == 'tobeapproved':
newStatus = 'approved'
else:
newStatus = 'rejected'
# get mail address for user
userMailAddr = ''
sqlUM = "SELECT email FROM ATLAS_PANDAMETA.users WHERE name=:userName"
varMap = {}
varMap[':userName'] = userName
stUM,resUM = taskBuffer.querySQLS(sqlUM,varMap)
if resUM == None or len(resUM) == 0:
_logger.error("email address is unavailable for '%s'" % userName)
else:
userMailAddr = resUM[0][0]
# send
if not userMailAddr in ['',None,'None','notsend']:
_logger.debug("send update to %s" % userMailAddr)
retMail = mailUtils.sendSiteAccessUpdate(userMailAddr,newStatus,pandaSite)
_logger.debug(retMail)
# update database
sqlUp = "UPDATE ATLAS_PANDAMETA.siteaccess SET status=:newStatus "
sqlUp += "WHERE pandaSite=:pandaSite AND dn=:userName"
varMap = {}
varMap[':userName'] = userName
varMap[':newStatus'] = newStatus
varMap[':pandaSite'] = pandaSite
stUp,resUp = taskBuffer.querySQLS(sqlUp,varMap)
else:
# append cloud
if not requestsInCloud.has_key(cloud):
requestsInCloud[cloud] = {}
# append site
if not requestsInCloud[cloud].has_key(pandaSite):
requestsInCloud[cloud][pandaSite] = []
# append user
requestsInCloud[cloud][pandaSite].append(userName)
# send requests to the cloud responsible
for cloud,requestsMap in requestsInCloud.iteritems():
_logger.debug("requests for approval : cloud=%s" % cloud)
# send
if contactAddr.has_key(cloud) and (not contactAddr[cloud] in ['',None,'None']):
# get site contact
for pandaSite,userNames in requestsMap.iteritems():
if not siteContactAddr.has_key(pandaSite):
varMap = {}
varMap[':siteid'] = pandaSite
sqlSite = "SELECT email FROM ATLAS_PANDAMETA.schedconfig WHERE siteid=:siteid AND rownum<=1"
status,res = taskBuffer.querySQLS(sqlSite,varMap)
siteContactAddr[pandaSite] = res[0][0]
# append
if not siteContactAddr[pandaSite] in ['',None,'None']:
contactAddr[cloud] += ',%s' % siteContactAddr[pandaSite]
# send
_logger.debug("send request to %s" % contactAddr[cloud])
retMail = mailUtils.sendSiteAccessRequest(contactAddr[cloud],requestsMap,cloud)
_logger.debug(retMail)
# update database
if retMail:
sqlUp = "UPDATE ATLAS_PANDAMETA.siteaccess SET status=:newStatus "
sqlUp += "WHERE pandaSite=:pandaSite AND dn=:userName"
for pandaSite,userNames in requestsMap.iteritems():
for userName in userNames:
varMap = {}
varMap[':userName'] = userName
varMap[':newStatus'] = 'inprocess'
varMap[':pandaSite'] = pandaSite
stUp,resUp = taskBuffer.querySQLS(sqlUp,varMap)
else:
_logger.error("contact email address is unavailable for %s" % cloud)
except:
errType, errValue, errTraceBack = sys.exc_info()
_logger.error("Failed with %s %s" % (errType,errValue))
_logger.debug("Site Access : done")
# finalize failed jobs
_logger.debug("AnalFinalizer session")
try:
# get min PandaID for failed jobs in Active table
sql = "SELECT MIN(PandaID),prodUserName,jobDefinitionID,jediTaskID,computingSite FROM ATLAS_PANDA.jobsActive4 "
sql += "WHERE prodSourceLabel=:prodSourceLabel AND jobStatus=:jobStatus "
sql += "GROUP BY prodUserName,jobDefinitionID,jediTaskID,computingSite "
varMap = {}
varMap[':jobStatus'] = 'failed'
varMap[':prodSourceLabel'] = 'user'
status,res = taskBuffer.querySQLS(sql,varMap)
if res != None:
# loop over all user/jobdefID
for pandaID,prodUserName,jobDefinitionID,jediTaskID,computingSite in res:
# check
_logger.debug("check finalization for %s task=%s jobdefID=%s site=%s" % (prodUserName,jediTaskID,
jobDefinitionID,
computingSite))
sqlC = "SELECT COUNT(*) FROM ("
sqlC += "SELECT PandaID FROM ATLAS_PANDA.jobsActive4 "
sqlC += "WHERE prodSourceLabel=:prodSourceLabel AND prodUserName=:prodUserName "
sqlC += "AND jediTaskID=:jediTaskID "
sqlC += "AND computingSite=:computingSite "
sqlC += "AND NOT jobStatus IN (:jobStatus1,:jobStatus2) "
sqlC += "UNION "
sqlC += "SELECT PandaID FROM ATLAS_PANDA.jobsDefined4 "
sqlC += "WHERE prodSourceLabel=:prodSourceLabel AND prodUserName=:prodUserName "
sqlC += "AND jediTaskID=:jediTaskID "
sqlC += "AND computingSite=:computingSite "
sqlC += "AND NOT jobStatus IN (:jobStatus1,:jobStatus2) "
sqlC += ") "
varMap = {}
varMap[':jobStatus1'] = 'failed'
varMap[':jobStatus2'] = 'merging'
varMap[':prodSourceLabel'] = 'user'
varMap[':jediTaskID'] = jediTaskID
varMap[':computingSite'] = computingSite
varMap[':prodUserName'] = prodUserName
statC,resC = taskBuffer.querySQLS(sqlC,varMap)
# finalize if there are no non-failed jobs
if resC != None:
_logger.debug("n of non-failed jobs : %s" % resC[0][0])
if resC[0][0] == 0:
jobSpecs = taskBuffer.peekJobs([pandaID],fromDefined=False,fromArchived=False,fromWaiting=False)
jobSpec = jobSpecs[0]
if jobSpec == None:
_logger.debug("skip PandaID={0} not found in jobsActive".format(pandaID))
continue
_logger.debug("finalize %s %s" % (prodUserName,jobDefinitionID))
finalizedFlag = taskBuffer.finalizePendingJobs(prodUserName,jobDefinitionID)
_logger.debug("finalized with %s" % finalizedFlag)
if finalizedFlag and jobSpec.produceUnMerge():
# collect sub datasets
subDsNames = set()
subDsList = []
for tmpFileSpec in jobSpec.Files:
if tmpFileSpec.type in ['log','output'] and \
re.search('_sub\d+$',tmpFileSpec.destinationDBlock) != None:
if tmpFileSpec.destinationDBlock in subDsNames:
continue
subDsNames.add(tmpFileSpec.destinationDBlock)
datasetSpec = taskBuffer.queryDatasetWithMap({'name':tmpFileSpec.destinationDBlock})
subDsList.append(datasetSpec)
_logger.debug("update unmerged datasets")
taskBuffer.updateUnmergedDatasets(jobSpec,subDsList)
else:
_logger.debug("n of non-failed jobs : None")
except:
errType,errValue = sys.exc_info()[:2]
_logger.error("AnalFinalizer failed with %s %s" % (errType,errValue))
# check stuck merging jobs
_logger.debug("check stuck merging jobs")
try:
# get PandaIDs
varMap = {}
varMap[':prodSourceLabel'] = 'managed'
varMap[':jobStatus'] = 'merging'
varMap[':timeLimit'] = timeLimit
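# note: no fresh timeLimit is computed for this block; it reuses the 7-hour limit set above
# for the old-process cleanup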
sql = "SELECT distinct jediTaskID FROM ATLAS_PANDA.jobsActive4 "
sql += "WHERE prodSourceLabel=:prodSourceLabel AND jobStatus=:jobStatus and modificationTime<:timeLimit "
tmp,res = taskBuffer.querySQLS(sql,varMap)
checkedDS = set()
for jediTaskID, in res:
varMap = {}
varMap[':jediTaskID'] = jediTaskID
varMap[':dsType'] = 'trn_log'
sql = "SELECT datasetID FROM ATLAS_PANDA.JEDI_Datasets WHERE jediTaskID=:jediTaskID AND type=:dsType AND nFilesUsed=nFilesTobeUsed "
tmpP,resD = taskBuffer.querySQLS(sql,varMap)
for datasetID, in resD:
varMap = {}
varMap[':jediTaskID'] = jediTaskID
varMap[':fileStatus'] = 'ready'
varMap[':datasetID'] = datasetID
sql = "SELECT PandaID FROM ATLAS_PANDA.JEDI_Dataset_Contents "
sql += "WHERE jediTaskID=:jediTaskID AND datasetid=:datasetID AND status=:fileStatus AND PandaID=OutPandaID AND rownum<=1 "
tmpP,resP = taskBuffer.querySQLS(sql,varMap)
if resP == []:
continue
PandaID = resP[0][0]
varMap = {}
varMap[':PandaID'] = PandaID
varMap[':fileType'] = 'log'
sql = "SELECT d.status FROM ATLAS_PANDA.filesTable4 f,ATLAS_PANDA.datasets d WHERE PandaID=:PandaID AND f.type=:fileType AND d.name=f.destinationDBlock "
tmpS,resS = taskBuffer.querySQLS(sql,varMap)
if resS != None:
subStatus, = resS[0]
if subStatus in ['completed']:
jobSpecs = taskBuffer.peekJobs([PandaID],fromDefined=False,fromArchived=False,fromWaiting=False)
jobSpec = jobSpecs[0]
subDsNames = set()
subDsList = []
for tmpFileSpec in jobSpec.Files:
if tmpFileSpec.type in ['log','output'] and \
re.search('_sub\d+$',tmpFileSpec.destinationDBlock) != None:
if tmpFileSpec.destinationDBlock in subDsNames:
continue
subDsNames.add(tmpFileSpec.destinationDBlock)
datasetSpec = taskBuffer.queryDatasetWithMap({'name':tmpFileSpec.destinationDBlock})
subDsList.append(datasetSpec)
_logger.debug("update unmerged datasets for jediTaskID={0} PandaID={1}".format(jediTaskID,PandaID))
taskBuffer.updateUnmergedDatasets(jobSpec,subDsList,updateCompleted=True)
except:
errType,errValue = sys.exc_info()[:2]
_logger.error("check for stuck merging jobs failed with %s %s" % (errType,errValue))
# get sites to skip various timeout
varMap = {}
varMap[':status'] = 'paused'
sql = "SELECT siteid FROM ATLAS_PANDAMETA.schedconfig WHERE status=:status "
sitesToSkipTO = set()
status,res = taskBuffer.querySQLS(sql,varMap)
for siteid, in res:
sitesToSkipTO.add(siteid)
_logger.debug("PQs to skip timeout : {0}".format(','.join(sitesToSkipTO)))
_memoryCheck("watcher")
_logger.debug("Watcher session")
# get the list of workflows
sql = "SELECT DISTINCT workflow FROM ATLAS_PANDAMETA.schedconfig WHERE status='online' "
status, res = taskBuffer.querySQLS(sql, {})
workflow_timeout_map = {}
for workflow, in res:
timeout = taskBuffer.getConfigValue('watcher', 'HEARTBEAT_TIMEOUT_{0}'.format(workflow), 'pandaserver', 'atlas')
if timeout is not None:
workflow_timeout_map[workflow] = timeout
workflows = workflow_timeout_map.keys()
workflows.append(None)
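# None is kept as a catch-all entry so that sites whose workflow has no dedicated
# HEARTBEAT_TIMEOUT_* setting are still checked with the default timeout below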
# check heartbeat for analysis jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=2)
varMap = {}
varMap[':modificationTime'] = timeLimit
varMap[':prodSourceLabel1'] = 'panda'
varMap[':prodSourceLabel2'] = 'user'
varMap[':jobStatus1'] = 'running'
varMap[':jobStatus2'] = 'starting'
varMap[':jobStatus3'] = 'stagein'
varMap[':jobStatus4'] = 'stageout'
sql = "SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE (prodSourceLabel=:prodSourceLabel1 OR prodSourceLabel=:prodSourceLabel2) "
sql += "AND jobStatus IN (:jobStatus1,:jobStatus2,:jobStatus3,:jobStatus4) AND modificationTime<:modificationTime"
status,res = taskBuffer.querySQLS(sql,varMap)
if res == None:
_logger.debug("# of Anal Watcher : %s" % res)
else:
_logger.debug("# of Anal Watcher : %s" % len(res))
for (id,) in res:
_logger.debug("Anal Watcher %s" % id)
thr = Watcher(taskBuffer,id,single=True,sleepTime=60,sitemapper=siteMapper)
thr.start()
thr.join()
# check heartbeat for analysis jobs in transferring
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=2)
varMap = {}
varMap[':modificationTime'] = timeLimit
varMap[':prodSourceLabel1'] = 'panda'
varMap[':prodSourceLabel2'] = 'user'
varMap[':jobStatus1'] = 'transferring'
sql = "SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE prodSourceLabel IN (:prodSourceLabel1,:prodSourceLabel2) "
sql += "AND jobStatus=:jobStatus1 AND modificationTime<:modificationTime"
status,res = taskBuffer.querySQLS(sql,varMap)
if res == None:
_logger.debug("# of Transferring Anal Watcher : %s" % res)
else:
_logger.debug("# of Transferring Anal Watcher : %s" % len(res))
for (id,) in res:
_logger.debug("Trans Anal Watcher %s" % id)
thr = Watcher(taskBuffer,id,single=True,sleepTime=60,sitemapper=siteMapper)
thr.start()
thr.join()
# check heartbeat for sent jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(minutes=30)
varMap = {}
varMap[':jobStatus'] = 'sent'
varMap[':modificationTime'] = timeLimit
status,res = taskBuffer.querySQLS("SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE jobStatus=:jobStatus AND modificationTime<:modificationTime",
varMap)
if res == None:
_logger.debug("# of Sent Watcher : %s" % res)
else:
_logger.debug("# of Sent Watcher : %s" % len(res))
for (id,) in res:
_logger.debug("Sent Watcher %s" % id)
thr = Watcher(taskBuffer,id,single=True,sleepTime=30,sitemapper=siteMapper)
thr.start()
thr.join()
# check heartbeat for 'holding' analysis/ddm jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=3)
# get XMLs
xmlIDs = []
xmlFiles = os.listdir(panda_config.logdir)
for file in xmlFiles:
match = re.search('^(\d+)_([^_]+)_.{36}$',file)
if match != None:
id = match.group(1)
xmlIDs.append(int(id))
sql = "SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE jobStatus=:jobStatus AND (modificationTime<:modificationTime OR (endTime IS NOT NULL AND endTime<:endTime)) AND (prodSourceLabel=:prodSourceLabel1 OR prodSourceLabel=:prodSourceLabel2 OR prodSourceLabel=:prodSourceLabel3) AND stateChangeTime != modificationTime"
varMap = {}
varMap[':modificationTime'] = timeLimit
varMap[':endTime'] = timeLimit
varMap[':jobStatus'] = 'holding'
varMap[':prodSourceLabel1'] = 'panda'
varMap[':prodSourceLabel2'] = 'user'
varMap[':prodSourceLabel3'] = 'ddm'
status,res = taskBuffer.querySQLS(sql,varMap)
if res == None:
_logger.debug("# of Holding Anal/DDM Watcher : %s" % res)
else:
_logger.debug("# of Holding Anal/DDM Watcher : %s - XMLs : %s" % (len(res),len(xmlIDs)))
for (id,) in res:
_logger.debug("Holding Anal/DDM Watcher %s" % id)
if int(id) in xmlIDs:
_logger.debug(" found XML -> skip %s" % id)
continue
thr = Watcher(taskBuffer,id,single=True,sleepTime=180,sitemapper=siteMapper)
thr.start()
thr.join()
# check heartbeat for high prio production jobs
timeOutVal = 3
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=timeOutVal)
sql = "SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE jobStatus=:jobStatus AND currentPriority>:pLimit "
sql += "AND (modificationTime<:modificationTime OR (endTime IS NOT NULL AND endTime<:endTime))"
varMap = {}
varMap[':modificationTime'] = timeLimit
varMap[':endTime'] = timeLimit
varMap[':jobStatus'] = 'holding'
varMap[':pLimit'] = 800
status,res = taskBuffer.querySQLS(sql,varMap)
if res == None:
_logger.debug("# of High prio Holding Watcher : %s" % res)
else:
_logger.debug("# of High prio Holding Watcher : %s" % len(res))
for (id,) in res:
_logger.debug("High prio Holding Watcher %s" % id)
thr = Watcher(taskBuffer,id,single=True,sleepTime=60*timeOutVal,sitemapper=siteMapper)
thr.start()
thr.join()
# check heartbeat for production jobs
timeOutVal = 48
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=timeOutVal)
sql = "SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE jobStatus=:jobStatus AND (modificationTime<:modificationTime OR (endTime IS NOT NULL AND endTime<:endTime))"
varMap = {}
varMap[':modificationTime'] = timeLimit
varMap[':endTime'] = timeLimit
varMap[':jobStatus'] = 'holding'
status,res = taskBuffer.querySQLS(sql,varMap)
if res == None:
_logger.debug("# of Holding Watcher : %s" % res)
else:
_logger.debug("# of Holding Watcher : %s" % len(res))
for (id,) in res:
_logger.debug("Holding Watcher %s" % id)
thr = Watcher(taskBuffer,id,single=True,sleepTime=60*timeOutVal,sitemapper=siteMapper)
thr.start()
thr.join()
# check heartbeat for production jobs with internal stage-out
default_timeOutVal = 4
sql = "SELECT PandaID,jobStatus,jobSubStatus FROM ATLAS_PANDA.jobsActive4 j,ATLAS_PANDAMETA.schedconfig s "
sql += "WHERE j.computingSite=s.siteid AND jobStatus=:jobStatus1 AND jobSubStatus IS NOT NULL AND modificationTime<:modificationTime "
for workflow in workflows:
varMap = {}
varMap[':modificationTime'] = timeLimit
varMap[':jobStatus1'] = 'transferring'
sqlX = sql
if workflow is None:
timeOutVal = default_timeOutVal
if len(workflows) > 1:
sqlX += "AND (s.workflow IS NULL OR s.workflow NOT IN ("
for ng_workflow in workflows[:-1]:
tmp_key = ':w_{0}'.format(ng_workflow)
varMap[tmp_key] = ng_workflow
sqlX += '{0},'.format(tmp_key)
sqlX = sqlX[:-1]
sqlX += ")) "
else:
timeOutVal = workflow_timeout_map[workflow]
tmp_key = ':w_{0}'.format(workflow)
sqlX += "AND s.workflow={0} ".format(tmp_key)
varMap[tmp_key] = workflow
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=timeOutVal)
varMap[':modificationTime'] = timeLimit
status,res = taskBuffer.querySQLS(sqlX, varMap)
if res == None:
_logger.debug("# of Internal Staging Watcher with workflow={0}: {1}".format(workflow, res))
else:
_logger.debug("# of Internal Staging Watcher with workflow={0}: {1}".format(workflow, len(res)))
for pandaID, jobStatus, jobSubStatus in res:
_logger.debug("Internal Staging Watcher %s %s:%s" % (pandaID, jobStatus, jobSubStatus))
thr = Watcher(taskBuffer,pandaID,single=True,sleepTime=60*timeOutVal,sitemapper=siteMapper)
thr.start()
thr.join()
# check heartbeat for production jobs
default_timeOutVal = 2
sql = "SELECT PandaID,jobStatus,j.computingSite FROM ATLAS_PANDA.jobsActive4 j, ATLAS_PANDAMETA.schedconfig s "
sql += "WHERE j.computingSite=s.siteid AND jobStatus IN (:jobStatus1,:jobStatus2,:jobStatus3,:jobStatus4) AND modificationTime<:modificationTime "
for workflow in workflows:
varMap = {}
varMap[':jobStatus1'] = 'running'
varMap[':jobStatus2'] = 'starting'
varMap[':jobStatus3'] = 'stagein'
varMap[':jobStatus4'] = 'stageout'
sqlX = sql
if workflow is None:
timeOutVal = default_timeOutVal
if len(workflows) > 1:
sqlX += "AND (s.workflow IS NULL OR s.workflow NOT IN ("
for ng_workflow in workflows[:-1]:
tmp_key = ':w_{0}'.format(ng_workflow)
varMap[tmp_key] = ng_workflow
sqlX += '{0},'.format(tmp_key)
sqlX = sqlX[:-1]
sqlX += ")) "
else:
timeOutVal = workflow_timeout_map[workflow]
tmp_key = ':w_{0}'.format(workflow)
sqlX += "AND s.workflow={0} ".format(tmp_key)
varMap[tmp_key] = workflow
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=timeOutVal)
varMap[':modificationTime'] = timeLimit
status,res = taskBuffer.querySQLS(sqlX, varMap)
if res == None:
_logger.debug("# of General Watcher with workflow={0}: {1}".format(workflow, res))
else:
_logger.debug("# of General Watcher with workflow={0}: {1}".format(workflow, len(res)))
for pandaID,jobStatus,computingSite in res:
if computingSite in sitesToSkipTO:
_logger.debug("skip General Watcher for PandaID={0} at {1} since timeout is disabled for {2}".format(pandaID,computingSite,jobStatus))
continue
_logger.debug("General Watcher %s" % pandaID)
thr = Watcher(taskBuffer,pandaID,single=True,sleepTime=60*timeOutVal,sitemapper=siteMapper)
thr.start()
thr.join()
_memoryCheck("reassign")
# kill long-waiting jobs in defined table
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=7)
status,res = taskBuffer.querySQLS("SELECT PandaID,cloud,prodSourceLabel FROM ATLAS_PANDA.jobsDefined4 WHERE creationTime<:creationTime",
{':creationTime':timeLimit})
jobs=[]
dashFileMap = {}
if res != None:
for pandaID,cloud,prodSourceLabel in res:
# collect PandaIDs
jobs.append(pandaID)
if len(jobs):
_logger.debug("killJobs for Defined (%s)" % str(jobs))
Client.killJobs(jobs,2)
# kill long-waiting jobs in active table
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=7)
varMap = {}
varMap[':jobStatus'] = 'activated'
varMap[':creationTime'] = timeLimit
status,res = taskBuffer.querySQLS("SELECT PandaID from ATLAS_PANDA.jobsActive4 WHERE jobStatus=:jobStatus AND creationTime<:creationTime",
varMap)
jobs=[]
if res != None:
for (id,) in res:
jobs.append(id)
if len(jobs):
_logger.debug("killJobs for Active (%s)" % str(jobs))
Client.killJobs(jobs,2)
# kill long-waiting ddm jobs for dispatch
_logger.debug("kill PandaMovers")
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=12)
sql = "SELECT PandaID from ATLAS_PANDA.jobsActive4 WHERE prodSourceLabel=:prodSourceLabel AND transferType=:transferType AND creationTime<:creationTime"
varMap = {}
varMap[':creationTime'] = timeLimit
varMap[':prodSourceLabel'] = 'ddm'
varMap[':transferType'] = 'dis'
_logger.debug(sql+str(varMap))
status,res = taskBuffer.querySQLS(sql,varMap)
_logger.debug(res)
jobs=[]
if res != None:
for (id,) in res:
jobs.append(id)
if len(jobs):
_logger.debug("kill DDM Jobs (%s)" % str(jobs))
Client.killJobs(jobs,2)
# kill hang-up movers
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=3)
sql = "SELECT PandaID from ATLAS_PANDA.jobsActive4 WHERE prodSourceLabel=:prodSourceLabel AND transferType=:transferType AND jobStatus=:jobStatus AND startTime<:startTime"
varMap = {}
varMap[':startTime'] = timeLimit
varMap[':prodSourceLabel'] = 'ddm'
varMap[':transferType'] = 'dis'
varMap[':jobStatus'] = 'running'
_logger.debug(sql+str(varMap))
status,res = taskBuffer.querySQLS(sql,varMap)
_logger.debug(res)
jobs = []
movers = []
if res != None:
for id, in res:
movers.append(id)
# get dispatch dataset
sql = 'SELECT name FROM ATLAS_PANDA.Datasets WHERE MoverID=:MoverID'
stDS,resDS = taskBuffer.querySQLS(sql,{':MoverID':id})
if resDS != None:
disDS = resDS[0][0]
# get PandaIDs associated to the dis dataset
sql = "SELECT PandaID FROM ATLAS_PANDA.jobsDefined4 WHERE jobStatus=:jobStatus AND dispatchDBlock=:dispatchDBlock"
varMap = {}
varMap[':jobStatus'] = 'assigned'
varMap[':dispatchDBlock'] = disDS
stP,resP = taskBuffer.querySQLS(sql,varMap)
if resP != None:
for pandaID, in resP:
jobs.append(pandaID)
# kill movers
if len(movers):
_logger.debug("kill hangup DDM Jobs (%s)" % str(movers))
Client.killJobs(movers,2)
# reassign jobs
if len(jobs):
nJob = 100
iJob = 0
while iJob < len(jobs):
_logger.debug('reassignJobs for hangup movers (%s)' % jobs[iJob:iJob+nJob])
taskBuffer.reassignJobs(jobs[iJob:iJob+nJob],joinThr=True)
iJob += nJob
# reassign activated jobs in inactive sites
inactiveTimeLimitSite = 2
inactiveTimeLimitJob = 4
inactivePrioLimit = 800
timeLimitSite = datetime.datetime.utcnow() - datetime.timedelta(hours=inactiveTimeLimitSite)
timeLimitJob = datetime.datetime.utcnow() - datetime.timedelta(hours=inactiveTimeLimitJob)
# get PandaIDs
sql = 'SELECT distinct computingSite FROM ATLAS_PANDA.jobsActive4 '
sql += 'WHERE prodSourceLabel=:prodSourceLabel '
sql += 'AND ((modificationTime<:timeLimit AND jobStatus=:jobStatus1) '
sql += 'OR (stateChangeTime<:timeLimit AND jobStatus=:jobStatus2)) '
sql += 'AND lockedby=:lockedby AND currentPriority>=:prioLimit '
sql += 'AND NOT processingType IN (:pType1) AND relocationFlag<>:rFlag1 '
varMap = {}
varMap[':prodSourceLabel'] = 'managed'
varMap[':jobStatus1'] = 'activated'
varMap[':jobStatus2'] = 'starting'
varMap[':lockedby'] = 'jedi'
varMap[':timeLimit'] = timeLimitJob
varMap[':prioLimit'] = inactivePrioLimit
varMap[':pType1'] = 'pmerge'
varMap[':rFlag1'] = 2
stDS,resDS = taskBuffer.querySQLS(sql,varMap)
sqlSS = 'SELECT laststart FROM ATLAS_PANDAMETA.siteData '
sqlSS += 'WHERE site=:site AND flag=:flag AND hours=:hours AND laststart<:laststart '
sqlPI = 'SELECT PandaID,eventService,attemptNr FROM ATLAS_PANDA.jobsActive4 '
sqlPI += 'WHERE prodSourceLabel=:prodSourceLabel AND jobStatus IN (:jobStatus1,:jobStatus2) '
sqlPI += 'AND (modificationTime<:timeLimit OR stateChangeTime<:timeLimit) '
sqlPI += 'AND lockedby=:lockedby AND currentPriority>=:prioLimit '
sqlPI += 'AND computingSite=:site AND NOT processingType IN (:pType1) AND relocationFlag<>:rFlag1 '
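# a site counts as inactive when its recorded production laststart is older than
# inactiveTimeLimitSite hours (checked against ATLAS_PANDAMETA.siteData below)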
for tmpSite, in resDS:
if tmpSite in sitesToSkipTO:
_logger.debug('skip reassignJobs at inactive site %s since timeout is disabled' % (tmpSite))
continue
# check if the site is inactive
varMap = {}
varMap[':site'] = tmpSite
varMap[':flag'] = 'production'
varMap[':hours'] = 3
varMap[':laststart'] = timeLimitSite
stSS,resSS = taskBuffer.querySQLS(sqlSS,varMap)
if stSS != None and len(resSS) > 0:
# get jobs
varMap = {}
varMap[':prodSourceLabel'] = 'managed'
varMap[':jobStatus1'] = 'activated'
varMap[':jobStatus2'] = 'starting'
varMap[':lockedby'] = 'jedi'
varMap[':timeLimit'] = timeLimitJob
varMap[':prioLimit'] = inactivePrioLimit
varMap[':site'] = tmpSite
varMap[':pType1'] = 'pmerge'
varMap[':rFlag1'] = 2
stPI,resPI = taskBuffer.querySQLS(sqlPI,varMap)
jediJobs = []
# reassign
_logger.debug('reassignJobs for JEDI at inactive site %s laststart=%s' % (tmpSite,resSS[0][0]))
if resPI != None:
for pandaID, eventService, attemptNr in resPI:
if eventService in [EventServiceUtils.esMergeJobFlagNumber]:
_logger.debug('retrying es merge %s at inactive site %s' % (pandaID,tmpSite))
taskBuffer.retryJob(pandaID,{},getNewPandaID=True,attemptNr=attemptNr,
recoverableEsMerge=True)
jediJobs.append(pandaID)
if len(jediJobs) != 0:
nJob = 100
iJob = 0
while iJob < len(jediJobs):
_logger.debug('reassignJobs for JEDI at inactive site %s (%s)' % (tmpSite,jediJobs[iJob:iJob+nJob]))
Client.killJobs(jediJobs[iJob:iJob+nJob],51,keepUnmerged=True)
iJob += nJob
# reassign defined jobs in defined table
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=4)
# get PandaIDs
status,res = taskBuffer.lockJobsForReassign("ATLAS_PANDA.jobsDefined4",timeLimit,['defined'],['managed'],[],[],[],
True)
jobs = []
jediJobs = []
if res != None:
for (id,lockedby) in res:
if lockedby == 'jedi':
jediJobs.append(id)
else:
jobs.append(id)
# reassign
_logger.debug('reassignJobs for defined jobs -> #%s' % len(jobs))
if len(jobs) > 0:
nJob = 100
iJob = 0
while iJob < len(jobs):
_logger.debug('reassignJobs for defined jobs (%s)' % jobs[iJob:iJob+nJob])
taskBuffer.reassignJobs(jobs[iJob:iJob+nJob],joinThr=True)
iJob += nJob
_logger.debug('reassignJobs for JEDI defined jobs -> #%s' % len(jediJobs))
if len(jediJobs) != 0:
nJob = 100
iJob = 0
while iJob < len(jediJobs):
_logger.debug('reassignJobs for JEDI defined jobs (%s)' % jediJobs[iJob:iJob+nJob])
Client.killJobs(jediJobs[iJob:iJob+nJob],51,keepUnmerged=True)
iJob += nJob
# reassign long-waiting jobs in defined table
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=12)
status,res = taskBuffer.lockJobsForReassign("ATLAS_PANDA.jobsDefined4",timeLimit,[],['managed'],[],[],[],
True)
jobs = []
jediJobs = []
if res != None:
for (id,lockedby) in res:
if lockedby == 'jedi':
jediJobs.append(id)
else:
jobs.append(id)
# reassign
_logger.debug('reassignJobs for long in defined table -> #%s' % len(jobs))
if len(jobs) > 0:
nJob = 100
iJob = 0
while iJob < len(jobs):
_logger.debug('reassignJobs for long in defined table (%s)' % jobs[iJob:iJob+nJob])
taskBuffer.reassignJobs(jobs[iJob:iJob+nJob],joinThr=True)
iJob += nJob
_logger.debug('reassignJobs for long JEDI in defined table -> #%s' % len(jediJobs))
if len(jediJobs) != 0:
nJob = 100
iJob = 0
while iJob < len(jediJobs):
_logger.debug('reassignJobs for long JEDI in defined table (%s)' % jediJobs[iJob:iJob+nJob])
Client.killJobs(jediJobs[iJob:iJob+nJob],51,keepUnmerged=True)
iJob += nJob
# reassign too long-standing evgen/simul jobs with active state at T1
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=6)
for tmpCloud in siteMapper.getCloudList():
# ignore special clouds
if tmpCloud in ['CERN','OSG']:
continue
status,res = taskBuffer.lockJobsForReassign("ATLAS_PANDA.jobsActive4",timeLimit,['activated'],['managed'],
['evgen','simul'],[siteMapper.getCloud(tmpCloud)['tier1']],[],
True,onlyReassignable=True)
jobs = []
jediJobs = []
if res != None:
for (id,lockedby) in res:
if lockedby == 'jedi':
jediJobs.append(id)
else:
jobs.append(id)
_logger.debug('reassignJobs for Active T1 evgensimul in %s -> #%s' % (tmpCloud,len(jobs)))
if len(jobs) != 0:
nJob = 100
iJob = 0
while iJob < len(jobs):
_logger.debug('reassignJobs for Active T1 evgensimul (%s)' % jobs[iJob:iJob+nJob])
taskBuffer.reassignJobs(jobs[iJob:iJob+nJob],joinThr=True)
iJob += nJob
_logger.debug('reassignJobs for Active T1 JEDI evgensimul in %s -> #%s' % (tmpCloud,len(jediJobs)))
if len(jediJobs) != 0:
nJob = 100
iJob = 0
while iJob < len(jediJobs):
_logger.debug('reassignJobs for Active T1 JEDI evgensimul (%s)' % jediJobs[iJob:iJob+nJob])
Client.killJobs(jediJobs[iJob:iJob+nJob],51,keepUnmerged=True)
iJob += nJob
# reassign too long-standing evgen/simul jobs with active state at T2
try:
_logger.debug('looking for stuck T2s to reassign evgensimul')
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=6)
varMap = {}
varMap[':jobStatus1'] = 'activated'
varMap[':jobStatus2'] = 'running'
varMap[':prodSourceLabel'] = 'managed'
varMap[':processingType1'] = 'evgen'
varMap[':processingType2'] = 'simul'
status,res = taskBuffer.querySQLS("SELECT cloud,computingSite,jobStatus,COUNT(*) FROM ATLAS_PANDA.jobsActive4 WHERE jobStatus IN (:jobStatus1,:jobStatus2) AND prodSourceLabel=:prodSourceLabel AND processingType IN (:processingType1,:processingType2) GROUP BY cloud,computingSite,jobStatus",
varMap)
if res != None:
# get ratio of activated/running
siteStatData = {}
for tmpCloud,tmpComputingSite,tmpJobStatus,tmpCount in res:
# skip T1
if tmpComputingSite == siteMapper.getCloud(tmpCloud)['tier1']:
continue
# skip if timeout is disabled
if tmpComputingSite in sitesToSkipTO:
continue
# add cloud/site
tmpKey = (tmpCloud,tmpComputingSite)
if not siteStatData.has_key(tmpKey):
siteStatData[tmpKey] = {'activated':0,'running':0}
# add the number of jobs
if siteStatData[tmpKey].has_key(tmpJobStatus):
siteStatData[tmpKey][tmpJobStatus] += tmpCount
# look for stuck site
stuckThr = 10
stuckSites = []
for tmpKey,tmpStatData in siteStatData.iteritems():
if tmpStatData['running'] == 0 or \
float(tmpStatData['activated'])/float(tmpStatData['running']) > stuckThr:
tmpCloud,tmpComputingSite = tmpKey
_logger.debug(' %s:%s %s/%s > %s' % (tmpCloud,tmpComputingSite,tmpStatData['activated'],tmpStatData['running'],stuckThr))
# get stuck jobs
status,res = taskBuffer.lockJobsForReassign("ATLAS_PANDA.jobsActive4",timeLimit,['activated'],['managed'],
['evgen','simul'],[tmpComputingSite],[tmpCloud],True,
onlyReassignable=True)
jobs = []
jediJobs = []
if res != None:
for (id,lockedby) in res:
if lockedby == 'jedi':
jediJobs.append(id)
else:
jobs.append(id)
_logger.debug('reassignJobs for Active T2 evgensimul %s:%s -> #%s' % (tmpCloud,tmpComputingSite,len(jobs)))
if len(jobs) > 0:
nJob = 100
iJob = 0
while iJob < len(jobs):
_logger.debug('reassignJobs for Active T2 evgensimul (%s)' % jobs[iJob:iJob+nJob])
taskBuffer.reassignJobs(jobs[iJob:iJob+nJob],joinThr=True)
iJob += nJob
_logger.debug('reassignJobs for Active T2 JEDI evgensimul %s:%s -> #%s' % (tmpCloud,tmpComputingSite,len(jediJobs)))
if len(jediJobs) > 0:
nJob = 100
iJob = 0
while iJob < len(jediJobs):
_logger.debug('reassignJobs for Active T2 JEDI evgensimul (%s)' % jediJobs[iJob:iJob+nJob])
Client.killJobs(jediJobs[iJob:iJob+nJob],51,keepUnmerged=True)
iJob += nJob
except:
errType,errValue = sys.exc_info()[:2]
_logger.error("failed to reassign T2 evgensimul with %s:%s" % (errType,errValue))
# reassign too long activated jobs in active table
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=2)
status,res = taskBuffer.lockJobsForReassign("ATLAS_PANDA.jobsActive4",timeLimit,['activated'],['managed'],[],[],[],True,
onlyReassignable=True,getEventService=True)
jobs = []
jediJobs = []
if res != None:
for pandaID, lockedby, eventService, attemptNr, computingSite in res:
if computingSite in sitesToSkipTO:
_logger.debug('skip reassignJobs for PandaID={0} for long activated in active table since timeout is disabled at {1}'.format(pandaID,computingSite))
continue
if lockedby == 'jedi':
if eventService in [EventServiceUtils.esMergeJobFlagNumber]:
_logger.debug('retrying {0} in long activated'.format(pandaID))
taskBuffer.retryJob(pandaID,{},getNewPandaID=True,attemptNr=attemptNr,
recoverableEsMerge=True)
jediJobs.append(pandaID)
else:
jobs.append(pandaID)
_logger.debug('reassignJobs for long activated in active table -> #%s' % len(jobs))
if len(jobs) != 0:
nJob = 100
iJob = 0
while iJob < len(jobs):
_logger.debug('reassignJobs for long activated in active table (%s)' % jobs[iJob:iJob+nJob])
taskBuffer.reassignJobs(jobs[iJob:iJob+nJob],joinThr=True)
iJob += nJob
_logger.debug('reassignJobs for long activated JEDI in active table -> #%s' % len(jediJobs))
if len(jediJobs) != 0:
nJob = 100
iJob = 0
while iJob < len(jediJobs):
_logger.debug('reassignJobs for long activated JEDI in active table (%s)' % jediJobs[iJob:iJob+nJob])
Client.killJobs(jediJobs[iJob:iJob+nJob],51,keepUnmerged=True)
iJob += nJob
# reassign too long starting jobs in active table
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=48)
status,res = taskBuffer.lockJobsForReassign("ATLAS_PANDA.jobsActive4",timeLimit,['starting'],['managed'],[],[],[],True,
onlyReassignable=True,useStateChangeTime=True,getEventService=True)
jobs = []
jediJobs = []
if res != None:
for pandaID, lockedby, eventService, attemptNr, computingSite in res:
if computingSite in sitesToSkipTO:
_logger.debug('skip reassignJobs for PandaID={0} for long starting in active table since timeout is disabled at {1}'.format(pandaID,computingSite))
continue
if lockedby == 'jedi':
jediJobs.append(pandaID)
else:
jobs.append(pandaID)
_logger.debug('reassignJobs for long starting in active table -> #%s' % len(jobs))
if len(jobs) != 0:
nJob = 100
iJob = 0
while iJob < len(jobs):
_logger.debug('reassignJobs for long starting in active table (%s)' % jobs[iJob:iJob+nJob])
taskBuffer.reassignJobs(jobs[iJob:iJob+nJob],joinThr=True)
iJob += nJob
_logger.debug('reassignJobs for long starting JEDI in active table -> #%s' % len(jediJobs))
if len(jediJobs) != 0:
nJob = 100
iJob = 0
while iJob < len(jediJobs):
_logger.debug('reassignJobs for long starting JEDI in active table (%s)' % jediJobs[iJob:iJob+nJob])
Client.killJobs(jediJobs[iJob:iJob+nJob],51,keepUnmerged=True)
iJob += nJob
# kill too long-standing analysis jobs in active table
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=7)
varMap = {}
varMap[':prodSourceLabel1'] = 'test'
varMap[':prodSourceLabel2'] = 'panda'
varMap[':prodSourceLabel3'] = 'user'
varMap[':modificationTime'] = timeLimit
status,res = taskBuffer.querySQLS("SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE (prodSourceLabel=:prodSourceLabel1 OR prodSourceLabel=:prodSourceLabel2 OR prodSourceLabel=:prodSourceLabel3) AND modificationTime<:modificationTime ORDER BY PandaID",
varMap)
jobs = []
if res != None:
for (id,) in res:
jobs.append(id)
# kill
if len(jobs):
Client.killJobs(jobs,2)
_logger.debug("killJobs for Anal Active (%s)" % str(jobs))
# kill too long pending jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=1)
varMap = {}
varMap[':jobStatus'] = 'pending'
varMap[':creationTime'] = timeLimit
status,res = taskBuffer.querySQLS("SELECT PandaID FROM ATLAS_PANDA.jobsWaiting4 WHERE jobStatus=:jobStatus AND creationTime<:creationTime",
varMap)
jobs = []
if res != None:
for (id,) in res:
jobs.append(id)
# kill
if len(jobs):
nJob = 100
iJob = 0
while iJob < len(jobs):
_logger.debug("killJobs for Pending (%s)" % str(jobs[iJob:iJob+nJob]))
Client.killJobs(jobs[iJob:iJob+nJob],4)
iJob += nJob
# kick waiting ES merge jobs which were generated from fake co-jumbo
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(minutes=10)
varMap = {}
varMap[':jobStatus'] = 'waiting'
varMap[':creationTime'] = timeLimit
varMap[':esMerge'] = EventServiceUtils.esMergeJobFlagNumber
sql = "SELECT PandaID,computingSite FROM ATLAS_PANDA.jobsWaiting4 WHERE jobStatus=:jobStatus AND creationTime<:creationTime "
sql += "AND eventService=:esMerge ORDER BY jediTaskID "
status,res = taskBuffer.querySQLS(sql, varMap)
jobsMap = {}
if res != None:
for id,site in res:
if site not in jobsMap:
jobsMap[site] = []
jobsMap[site].append(id)
# kick
if len(jobsMap):
for site, jobs in jobsMap.iteritems():
nJob = 100
iJob = 0
while iJob < len(jobs):
_logger.debug("kick waiting ES merge (%s)" % str(jobs[iJob:iJob+nJob]))
Client.reassignJobs(jobs[iJob:iJob+nJob], )
iJob += nJob
# kill too long waiting jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=1)
varMap = {}
varMap[':jobStatus'] = 'waiting'
varMap[':creationTime'] = timeLimit
varMap[':coJumbo'] = EventServiceUtils.coJumboJobFlagNumber
sql = "SELECT PandaID FROM ATLAS_PANDA.jobsWaiting4 WHERE jobStatus=:jobStatus AND creationTime<:creationTime "
sql += "AND (eventService IS NULL OR eventService<>:coJumbo) "
status,res = taskBuffer.querySQLS(sql, varMap)
jobs = []
if res != None:
for (id,) in res:
jobs.append(id)
# kill
if len(jobs):
nJob = 100
iJob = 0
while iJob < len(jobs):
_logger.debug("killJobs for Waiting (%s)" % str(jobs[iJob:iJob+nJob]))
Client.killJobs(jobs[iJob:iJob+nJob],4)
iJob += nJob
# kill too long running ES jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=24)
varMap = {}
varMap[':jobStatus1'] = 'running'
varMap[':jobStatus2'] = 'starting'
varMap[':timeLimit'] = timeLimit
varMap[':esJob'] = EventServiceUtils.esJobFlagNumber
varMap[':coJumbo'] = EventServiceUtils.coJumboJobFlagNumber
sql = "SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE jobStatus IN (:jobStatus1,:jobStatus2) AND stateChangeTime<:timeLimit "
sql += "AND eventService IN (:esJob,:coJumbo) AND currentPriority>=900 "
status,res = taskBuffer.querySQLS(sql, varMap)
jobs = []
if res != None:
for (id,) in res:
jobs.append(id)
# kill
if len(jobs):
nJob = 100
iJob = 0
while iJob < len(jobs):
_logger.debug("killJobs for long running ES jobs (%s)" % str(jobs[iJob:iJob+nJob]))
Client.killJobs(jobs[iJob:iJob+nJob], 2, keepUnmerged=True, jobSubStatus='es_toolong')
iJob += nJob
# kill too long running ES merge jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=24)
varMap = {}
varMap[':jobStatus1'] = 'running'
varMap[':jobStatus2'] = 'starting'
varMap[':timeLimit'] = timeLimit
varMap[':esMergeJob'] = EventServiceUtils.esMergeJobFlagNumber
sql = "SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE jobStatus IN (:jobStatus1,:jobStatus2) AND stateChangeTime<:timeLimit "
sql += "AND eventService=:esMergeJob "
status,res = taskBuffer.querySQLS(sql, varMap)
jobs = []
if res != None:
for (id,) in res:
jobs.append(id)
# kill
if len(jobs):
nJob = 100
iJob = 0
while iJob < len(jobs):
_logger.debug("killJobs for long running ES merge jobs (%s)" % str(jobs[iJob:iJob+nJob]))
Client.killJobs(jobs[iJob:iJob+nJob], 2)
iJob += nJob
# kill too long waiting jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=7)
sql = "SELECT PandaID FROM ATLAS_PANDA.jobsWaiting4 WHERE ((creationTime<:timeLimit AND (eventService IS NULL OR eventService<>:coJumbo)) "
sql += "OR modificationTime<:timeLimit) "
varMap = {}
varMap[':timeLimit'] = timeLimit
varMap[':coJumbo'] = EventServiceUtils.coJumboJobFlagNumber
status,res = taskBuffer.querySQLS(sql, varMap)
jobs = []
if res != None:
for (id,) in res:
jobs.append(id)
# kill
if len(jobs):
Client.killJobs(jobs,4)
_logger.debug("killJobs in jobsWaiting (%s)" % str(jobs))
# reassign long waiting jobs
"""
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(minutes=30)
status,res = taskBuffer.lockJobsForReassign("ATLAS_PANDA.jobsWaiting4",timeLimit,['waiting'],['managed'],[],[],[],True)
jobs = []
jediJobs = []
if res != None:
for (id,lockedby) in res:
if lockedby == 'jedi':
jediJobs.append(id)
else:
jobs.append(id)
_logger.debug('reassignJobs for Waiting -> #%s' % len(jobs))
if len(jobs):
nJob = 100
iJob = 0
while iJob < len(jobs):
_logger.debug('reassignJobs for Waiting (%s)' % jobs[iJob:iJob+nJob])
taskBuffer.reassignJobs(jobs[iJob:iJob+nJob],joinThr=True)
iJob += nJob
_logger.debug('reassignJobs for JEDI Waiting -> #%s' % len(jediJobs))
if len(jediJobs) != 0:
nJob = 100
iJob = 0
while iJob < len(jediJobs):
_logger.debug('reassignJobs for JEDI Waiting (%s)' % jediJobs[iJob:iJob+nJob])
Client.killJobs(jediJobs[iJob:iJob+nJob],51,keepUnmerged=True)
iJob += nJob
"""
# kill too long running jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=21)
status,res = taskBuffer.querySQLS("SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE creationTime<:creationTime",
{':creationTime':timeLimit})
jobs = []
if res != None:
for (id,) in res:
jobs.append(id)
# kill
if len(jobs):
nJob = 100
iJob = 0
while iJob < len(jobs):
# set tobekill
_logger.debug('killJobs for Running (%s)' % jobs[iJob:iJob+nJob])
Client.killJobs(jobs[iJob:iJob+nJob],2)
# run watcher
for id in jobs[iJob:iJob+nJob]:
thr = Watcher(taskBuffer,id,single=True,sitemapper=siteMapper,sleepTime=60*24*21)
thr.start()
thr.join()
time.sleep(1)
iJob += nJob
time.sleep(10)
# kill too long waiting ddm jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=5)
varMap = {}
varMap[':prodSourceLabel'] = 'ddm'
varMap[':creationTime'] = timeLimit
status,res = taskBuffer.querySQLS("SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE prodSourceLabel=:prodSourceLabel AND creationTime<:creationTime",
varMap)
jobs = []
if res != None:
for (id,) in res:
jobs.append(id)
# kill
if len(jobs):
Client.killJobs(jobs,2)
_logger.debug("killJobs for DDM (%s)" % str(jobs))
# kill too long throttled jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=7)
varMap = {}
varMap[':jobStatus'] = 'throttled'
varMap[':creationTime'] = timeLimit
status,res = taskBuffer.querySQLS("SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE jobStatus=:jobStatus AND creationTime<:creationTime ",
varMap)
jobs = []
if res != None:
for (id,) in res:
jobs.append(id)
# kill
if len(jobs):
Client.killJobs(jobs,2)
_logger.debug("killJobs for throttled (%s)" % str(jobs))
# check if merge job is valid
_logger.debug('kill invalid pmerge')
varMap = {}
varMap[':processingType'] = 'pmerge'
varMap[':timeLimit'] = datetime.datetime.utcnow() - datetime.timedelta(minutes=30)
sql = "SELECT PandaID,jediTaskID FROM ATLAS_PANDA.jobsDefined4 WHERE processingType=:processingType AND modificationTime<:timeLimit "
sql += "UNION "
sql += "SELECT PandaID,jediTaskID FROM ATLAS_PANDA.jobsActive4 WHERE processingType=:processingType AND modificationTime<:timeLimit "
status,res = taskBuffer.querySQLS(sql,varMap)
nPmerge = 0
badPmerge = 0
_logger.debug('check {0} pmerge'.format(len(res)))
for pandaID,jediTaskID in res:
nPmerge += 1
isValid,tmpMsg = taskBuffer.isValidMergeJob(pandaID,jediTaskID)
if isValid == False:
_logger.debug("kill pmerge {0} since {1} gone".format(pandaID,tmpMsg))
taskBuffer.killJobs([pandaID],'killed since pre-merge job {0} gone'.format(tmpMsg),
'52',True)
badPmerge += 1
_logger.debug('killed invalid pmerge {0}/{1}'.format(badPmerge,nPmerge))
# cleanup of jumbo jobs
_logger.debug('jumbo job cleanup')
res = taskBuffer.cleanupJumboJobs()
_logger.debug(res)
_memoryCheck("delete XML")
# delete old files in DA cache
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=7)
files = os.listdir(panda_config.cache_dir)
for file in files:
# skip special test file
if file == 'sources.72c48dc5-f055-43e5-a86e-4ae9f8ea3497.tar.gz':
continue
if file == 'sources.090f3f51-fc81-4e80-9749-a5e4b2bd58de.tar.gz':
continue
try:
# get timestamp
timestamp = datetime.datetime.fromtimestamp(os.stat('%s/%s' % (panda_config.cache_dir,file)).st_mtime)
# delete
if timestamp < timeLimit:
_logger.debug("delete %s " % file)
os.remove('%s/%s' % (panda_config.cache_dir,file))
except:
pass
_memoryCheck("delete core")
# delete core
dirName = '%s/..' % panda_config.logdir
for file in os.listdir(dirName):
if file.startswith('core.'):
_logger.debug("delete %s " % file)
try:
os.remove('%s/%s' % (dirName,file))
except:
pass
# update email DB
_memoryCheck("email")
_logger.debug("Update emails")
# lock file
_lockGetMail = open(panda_config.lockfile_getMail, 'w')
# lock email DB
fcntl.flock(_lockGetMail.fileno(), fcntl.LOCK_EX)
# open email DB
pDB = shelve.open(panda_config.emailDB)
# read
mailMap = {}
for name,addr in pDB.iteritems():
mailMap[name] = addr
# close DB
pDB.close()
# release file lock
fcntl.flock(_lockGetMail.fileno(), fcntl.LOCK_UN)
# set email address
for name,addr in mailMap.iteritems():
# remove _
name = re.sub('_$','',name)
status,res = taskBuffer.querySQLS("SELECT email FROM ATLAS_PANDAMETA.users WHERE name=:name",{':name':name})
# failed or not found
if status == -1 or len(res) == 0:
_logger.error("%s not found in user DB" % name)
continue
# already set
if not res[0][0] in ['','None',None]:
continue
# update email
_logger.debug("set '%s' to %s" % (name,addr))
status,res = taskBuffer.querySQLS("UPDATE ATLAS_PANDAMETA.users SET email=:addr WHERE name=:name",{':addr':addr,':name':name})
_memoryCheck("end")
_logger.debug("===================== end =====================")
|
py | b406c2c5648b07b59cae93ff56f15e102bc37a1c | from .base import BaseRenderer
class LiveChatLegacyPaidMessageRenderer(BaseRenderer):
def __init__(self, item):
super().__init__(item, "newSponsorEvent")
def get_snippet(self):
message = self.get_message(self.renderer)
return {
"type": self.chattype,
"liveChatId": "",
"authorChannelId": self.renderer.get("authorExternalChannelId"),
"publishedAt": self.get_publishedat(self.renderer.get("timestampUsec", 0)),
"hasDisplayContent": True,
"displayMessage": message,
}
def get_authordetails(self):
authorExternalChannelId = self.renderer.get("authorExternalChannelId")
# parse subscriber type
isVerified, isChatOwner, _, isChatModerator = (
self.get_badges(self.renderer)
)
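# the third badge slot (presumably the sponsor/member flag) is discarded here since this
# legacy paid-message renderer hard-codes isChatSponsor to True below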
return {
"channelId": authorExternalChannelId,
"channelUrl": "http://www.youtube.com/channel/" + authorExternalChannelId,
"displayName": self.renderer["authorName"]["simpleText"],
"profileImageUrl": self.renderer["authorPhoto"]["thumbnails"][1]["url"],
"isVerified": isVerified,
"isChatOwner": isChatOwner,
"isChatSponsor": True,
"isChatModerator": isChatModerator
}
def get_message(self, renderer):
message = (renderer["eventText"]["runs"][0]["text"]
) + ' / ' + (renderer["detailText"]["simpleText"])
return message
|
py | b406c33fd7d551953a4d410231380597e78d744f | import numpy as np
import common.transformations.orientation as orient
FULL_FRAME_SIZE = (1164, 874)
W, H = FULL_FRAME_SIZE[0], FULL_FRAME_SIZE[1]
eon_focal_length = FOCAL = 910.0
# aka 'K' aka camera_frame_from_view_frame
eon_intrinsics = np.array([
[FOCAL, 0., W/2.],
[ 0., FOCAL, H/2.],
[ 0., 0., 1.]])
leon_dcam_intrinsics = np.array([
[650, 0, 816//2],
[ 0, 650, 612//2],
[ 0, 0, 1]])
eon_dcam_intrinsics = np.array([
[860, 0, 1152//2],
[ 0, 860, 864//2],
[ 0, 0, 1]])
# aka 'K_inv' aka view_frame_from_camera_frame
eon_intrinsics_inv = np.linalg.inv(eon_intrinsics)
# device/mesh : x->forward, y-> right, z->down
# view : x->right, y->down, z->forward
device_frame_from_view_frame = np.array([
[ 0., 0., 1.],
[ 1., 0., 0.],
[ 0., 1., 0.]
])
view_frame_from_device_frame = device_frame_from_view_frame.T
def get_calib_from_vp(vp):
vp_norm = normalize(vp)
yaw_calib = np.arctan(vp_norm[0])
pitch_calib = -np.arctan(vp_norm[1]*np.cos(yaw_calib))
roll_calib = 0
return roll_calib, pitch_calib, yaw_calib
# aka 'extrinsic_matrix'
# road : x->forward, y -> left, z->up
def get_view_frame_from_road_frame(roll, pitch, yaw, height):
device_from_road = orient.rot_from_euler([roll, pitch, yaw]).dot(np.diag([1, -1, -1]))
view_from_road = view_frame_from_device_frame.dot(device_from_road)
return np.hstack((view_from_road, [[0], [height], [0]]))
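# the diag([1, -1, -1]) factor flips the road frame (y->left, z->up) into the device frame
# convention (y->right, z->down) before the euler rotation is applied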
# aka 'extrinsic_matrix'
def get_view_frame_from_calib_frame(roll, pitch, yaw, height):
device_from_calib = orient.rot_from_euler([roll, pitch, yaw])
view_from_calib = view_frame_from_device_frame.dot(device_from_calib)
return np.hstack((view_from_calib, [[0], [height], [0]]))
def vp_from_ke(m):
"""
Computes the vanishing point from the product of the intrinsic and extrinsic
matrices C = KE.
The vanishing point is defined as lim x->infinity C (x, 0, 0, 1).T
"""
return (m[0, 0]/m[2, 0], m[1, 0]/m[2, 0])
def vp_from_rpy(rpy):
e = get_view_frame_from_road_frame(rpy[0], rpy[1], rpy[2], 1.22)
ke = np.dot(eon_intrinsics, e)
return vp_from_ke(ke)
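# sanity check: with rpy = [0, 0, 0] the first column of the extrinsic is (0, 0, 1), so
# ke[:, 0] = K.(0, 0, 1) = (W/2, H/2, 1) and the vanishing point sits at the image centre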
def roll_from_ke(m):
# note: different from calibration.h/RollAnglefromKE: I think that one's just wrong
return np.arctan2(-(m[1, 0] - m[1, 1] * m[2, 0] / m[2, 1]),
-(m[0, 0] - m[0, 1] * m[2, 0] / m[2, 1]))
def normalize(img_pts, intrinsics=eon_intrinsics):
# normalizes image coordinates
# accepts single pt or array of pts
intrinsics_inv = np.linalg.inv(intrinsics)
img_pts = np.array(img_pts)
input_shape = img_pts.shape
img_pts = np.atleast_2d(img_pts)
img_pts = np.hstack((img_pts, np.ones((img_pts.shape[0], 1))))
img_pts_normalized = img_pts.dot(intrinsics_inv.T)
img_pts_normalized[(img_pts < 0).any(axis=1)] = np.nan
return img_pts_normalized[:, :2].reshape(input_shape)
def denormalize(img_pts, intrinsics=eon_intrinsics):
# denormalizes image coordinates
# accepts single pt or array of pts
img_pts = np.array(img_pts)
input_shape = img_pts.shape
img_pts = np.atleast_2d(img_pts)
img_pts = np.hstack((img_pts, np.ones((img_pts.shape[0], 1))))
img_pts_denormalized = img_pts.dot(intrinsics.T)
img_pts_denormalized[img_pts_denormalized[:, 0] > W] = np.nan
img_pts_denormalized[img_pts_denormalized[:, 0] < 0] = np.nan
img_pts_denormalized[img_pts_denormalized[:, 1] > H] = np.nan
img_pts_denormalized[img_pts_denormalized[:, 1] < 0] = np.nan
return img_pts_denormalized[:, :2].reshape(input_shape)
def device_from_ecef(pos_ecef, orientation_ecef, pt_ecef):
# device from ecef frame
# device frame is x -> forward, y-> right, z -> down
# accepts single pt or array of pts
input_shape = pt_ecef.shape
pt_ecef = np.atleast_2d(pt_ecef)
ecef_from_device_rot = orient.rotations_from_quats(orientation_ecef)
device_from_ecef_rot = ecef_from_device_rot.T
pt_ecef_rel = pt_ecef - pos_ecef
pt_device = np.einsum('jk,ik->ij', device_from_ecef_rot, pt_ecef_rel)
return pt_device.reshape(input_shape)
def img_from_device(pt_device):
# img coordinates from pts in device frame
# first transforms to view frame, then to img coords
# accepts single pt or array of pts
input_shape = pt_device.shape
pt_device = np.atleast_2d(pt_device)
pt_view = np.einsum('jk,ik->ij', view_frame_from_device_frame, pt_device)
# This function should never return negative depths
pt_view[pt_view[:, 2] < 0] = np.nan
pt_img = pt_view/pt_view[:, 2:3]
return pt_img.reshape(input_shape)[:, :2]
def get_camera_frame_from_calib_frame(camera_frame_from_road_frame):
camera_frame_from_ground = camera_frame_from_road_frame[:, (0, 1, 3)]
calib_frame_from_ground = np.dot(eon_intrinsics,
get_view_frame_from_road_frame(0, 0, 0, 1.22))[:, (0, 1, 3)]
ground_from_calib_frame = np.linalg.inv(calib_frame_from_ground)
camera_frame_from_calib_frame = np.dot(camera_frame_from_ground, ground_from_calib_frame)
return camera_frame_from_calib_frame
def pretransform_from_calib(calib):
roll, pitch, yaw, height = calib
view_frame_from_road_frame = get_view_frame_from_road_frame(roll, pitch, yaw, height)
camera_frame_from_road_frame = np.dot(eon_intrinsics, view_frame_from_road_frame)
camera_frame_from_calib_frame = get_camera_frame_from_calib_frame(camera_frame_from_road_frame)
return np.linalg.inv(camera_frame_from_calib_frame)
|
py | b406c38ad0a10d78bde206e2dd77eb1998d97be8 | import os
from setuptools import (
setup,
find_packages,
)
version = "1.0a1"
shortdesc = "AGX generator for buildout"
longdesc = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()
longdesc += open(os.path.join(os.path.dirname(__file__), 'LICENSE.rst')).read()
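# the ##code-section / ##/code-section pairs below presumably mark regions that the AGX
# generator preserves when this setup.py is regenerated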
setup(name="agx.generator.buildout",
version=version,
description=shortdesc,
long_description=longdesc,
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Programming Language :: Python',
],
keywords="AGX, Code Generation",
author="BlueDynamics Alliance",
author_email="[email protected]",
url="http://github.com/bluedynamics/agx.generator.buildout",
license="GNU General Public Licence",
packages=find_packages("src"),
package_dir={"": "src"},
namespace_packages=["agx", "agx.generator"],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
##code-section dependencies
##/code-section dependencies
],
extras_require = dict(
##code-section extras_require
test=[
'interlude',
]
##/code-section extras_require
),
entry_points="""
##code-section entry_points
[agx.generator]
register = agx.generator.buildout:register
##/code-section entry_points
""",
##code-section additionals
##/code-section additionals
)
|
py | b406c5781720fb9e9b61d9fad1f10499f34621f9 | # src models/ShopModel.py
from marshmallow import fields, Schema
from . import db
class ShopModel(db.Model):
"""
ShopModel
"""
# table name
__tablename__ = 'shops'
id = db.Column(db.Integer, primary_key=True)
type = db.Column(db.String(255), nullable=True)
node_id = db.Column(db.Integer, db.ForeignKey('nodes.id'), nullable=False)
# class constructor
def __init__(self, data):
"""
Class constructor
"""
self.type = data.get('type')
self.node_id = data.get('node_id')
def save(self):
db.session.add(self)
db.session.commit()
def update(self, data):
for key, item in data.items():
setattr(self, key, item)
db.session.commit()
def delete(self):
db.session.delete(self)
db.session.commit()
@staticmethod
def get_all_shops():
return ShopModel.query.all()
@staticmethod
def get_one_shop(id):
return ShopModel.query.get(id)
class ShopSchema(Schema):
"""
Shop Schema
"""
id = fields.Int(dump_only=True)
type=fields.Str(required=True)
node_id = fields.Int(required=True)
|
py | b406c644e6470b0b5da001623054f7b95e0215f8 | """
Files Pipeline
See documentation in topics/media-pipeline.rst
"""
import functools
import hashlib
import logging
import mimetypes
import os
import time
from collections import defaultdict
from contextlib import suppress
from email.utils import mktime_tz
from email.utils import parsedate_tz
from ftplib import FTP
from io import BytesIO
from urllib.parse import urlparse
from itemadapter import ItemAdapter
from twisted.internet import defer
from twisted.internet import threads
from scrapy.exceptions import IgnoreRequest
from scrapy.exceptions import NotConfigured
from scrapy.http import Request
from scrapy.pipelines.media import MediaPipeline
from scrapy.settings import Settings
from scrapy.utils.boto import is_botocore
from scrapy.utils.datatypes import CaselessDict
from scrapy.utils.ftp import ftp_store_file
from scrapy.utils.log import failure_to_exc_info
from scrapy.utils.misc import md5sum
from scrapy.utils.python import to_bytes
from scrapy.utils.request import referer_str
logger = logging.getLogger(__name__)
class FileException(Exception):
"""General media error exception"""
class FSFilesStore:
def __init__(self, basedir):
if "://" in basedir:
basedir = basedir.split("://", 1)[1]
self.basedir = basedir
self._mkdir(self.basedir)
self.created_directories = defaultdict(set)
def persist_file(self, path, buf, info, meta=None, headers=None):
absolute_path = self._get_filesystem_path(path)
self._mkdir(os.path.dirname(absolute_path), info)
with open(absolute_path, "wb") as f:
f.write(buf.getvalue())
def stat_file(self, path, info):
absolute_path = self._get_filesystem_path(path)
try:
last_modified = os.path.getmtime(absolute_path)
except os.error:
return {}
with open(absolute_path, "rb") as f:
checksum = md5sum(f)
return {"last_modified": last_modified, "checksum": checksum}
def _get_filesystem_path(self, path):
path_comps = path.split("/")
return os.path.join(self.basedir, *path_comps)
def _mkdir(self, dirname, domain=None):
seen = self.created_directories[domain] if domain else set()
if dirname not in seen:
if not os.path.exists(dirname):
os.makedirs(dirname)
seen.add(dirname)
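# Editor's note (hedged sketch, not part of the original source): FSFilesStore
# simply maps the slash-separated key onto the local filesystem. For example,
# with basedir "/data/files", persist_file("full/ab12cd.pdf", buf, info) would
# write "/data/files/full/ab12cd.pdf" (creating "full/" on demand via _mkdir),
# and stat_file() later returns that file's mtime and md5 checksum.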
class S3FilesStore:
AWS_ACCESS_KEY_ID = None
AWS_SECRET_ACCESS_KEY = None
AWS_ENDPOINT_URL = None
AWS_REGION_NAME = None
AWS_USE_SSL = None
AWS_VERIFY = None
    # Overridden from settings.FILES_STORE_S3_ACL in FilesPipeline.from_settings
POLICY = "private"
HEADERS = {
"Cache-Control": "max-age=172800",
}
def __init__(self, uri):
self.is_botocore = is_botocore()
if self.is_botocore:
import botocore.session
session = botocore.session.get_session()
self.s3_client = session.create_client(
"s3",
aws_access_key_id=self.AWS_ACCESS_KEY_ID,
aws_secret_access_key=self.AWS_SECRET_ACCESS_KEY,
endpoint_url=self.AWS_ENDPOINT_URL,
region_name=self.AWS_REGION_NAME,
use_ssl=self.AWS_USE_SSL,
verify=self.AWS_VERIFY,
)
else:
from boto.s3.connection import S3Connection
self.S3Connection = S3Connection
if not uri.startswith("s3://"):
raise ValueError("Incorrect URI scheme in %s, expected 's3'" % uri)
self.bucket, self.prefix = uri[5:].split("/", 1)
def stat_file(self, path, info):
def _onsuccess(boto_key):
if self.is_botocore:
checksum = boto_key["ETag"].strip('"')
last_modified = boto_key["LastModified"]
modified_stamp = time.mktime(last_modified.timetuple())
else:
checksum = boto_key.etag.strip('"')
last_modified = boto_key.last_modified
modified_tuple = parsedate_tz(last_modified)
modified_stamp = int(mktime_tz(modified_tuple))
return {"checksum": checksum, "last_modified": modified_stamp}
return self._get_boto_key(path).addCallback(_onsuccess)
def _get_boto_bucket(self):
# disable ssl (is_secure=False) because of this python bug:
# https://bugs.python.org/issue5103
c = self.S3Connection(self.AWS_ACCESS_KEY_ID,
self.AWS_SECRET_ACCESS_KEY,
is_secure=False)
return c.get_bucket(self.bucket, validate=False)
def _get_boto_key(self, path):
key_name = "%s%s" % (self.prefix, path)
if self.is_botocore:
return threads.deferToThread(self.s3_client.head_object,
Bucket=self.bucket,
Key=key_name)
else:
b = self._get_boto_bucket()
return threads.deferToThread(b.get_key, key_name)
def persist_file(self, path, buf, info, meta=None, headers=None):
"""Upload file to S3 storage"""
key_name = "%s%s" % (self.prefix, path)
buf.seek(0)
if self.is_botocore:
extra = self._headers_to_botocore_kwargs(self.HEADERS)
if headers:
extra.update(self._headers_to_botocore_kwargs(headers))
return threads.deferToThread(
self.s3_client.put_object,
Bucket=self.bucket,
Key=key_name,
Body=buf,
Metadata={k: str(v)
for k, v in (meta or {}).items()},
ACL=self.POLICY,
**extra)
else:
b = self._get_boto_bucket()
k = b.new_key(key_name)
if meta:
for metakey, metavalue in meta.items():
k.set_metadata(metakey, str(metavalue))
h = self.HEADERS.copy()
if headers:
h.update(headers)
return threads.deferToThread(
k.set_contents_from_string,
buf.getvalue(),
headers=h,
policy=self.POLICY,
)
def _headers_to_botocore_kwargs(self, headers):
""" Convert headers to botocore keyword agruments.
"""
# This is required while we need to support both boto and botocore.
mapping = CaselessDict({
"Content-Type":
"ContentType",
"Cache-Control":
"CacheControl",
"Content-Disposition":
"ContentDisposition",
"Content-Encoding":
"ContentEncoding",
"Content-Language":
"ContentLanguage",
"Content-Length":
"ContentLength",
"Content-MD5":
"ContentMD5",
"Expires":
"Expires",
"X-Amz-Grant-Full-Control":
"GrantFullControl",
"X-Amz-Grant-Read":
"GrantRead",
"X-Amz-Grant-Read-ACP":
"GrantReadACP",
"X-Amz-Grant-Write-ACP":
"GrantWriteACP",
"X-Amz-Object-Lock-Legal-Hold":
"ObjectLockLegalHoldStatus",
"X-Amz-Object-Lock-Mode":
"ObjectLockMode",
"X-Amz-Object-Lock-Retain-Until-Date":
"ObjectLockRetainUntilDate",
"X-Amz-Request-Payer":
"RequestPayer",
"X-Amz-Server-Side-Encryption":
"ServerSideEncryption",
"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id":
"SSEKMSKeyId",
"X-Amz-Server-Side-Encryption-Context":
"SSEKMSEncryptionContext",
"X-Amz-Server-Side-Encryption-Customer-Algorithm":
"SSECustomerAlgorithm",
"X-Amz-Server-Side-Encryption-Customer-Key":
"SSECustomerKey",
"X-Amz-Server-Side-Encryption-Customer-Key-Md5":
"SSECustomerKeyMD5",
"X-Amz-Storage-Class":
"StorageClass",
"X-Amz-Tagging":
"Tagging",
"X-Amz-Website-Redirect-Location":
"WebsiteRedirectLocation",
})
extra = {}
for key, value in headers.items():
try:
kwarg = mapping[key]
except KeyError:
raise TypeError('Header "%s" is not supported by botocore' %
key)
else:
extra[kwarg] = value
return extra
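    # Editor's note (hedged example with illustrative values): given the
    # mapping above, a headers dict such as
    # {"Cache-Control": "max-age=100", "Content-Type": "text/html"} becomes
    # the botocore kwargs {"CacheControl": "max-age=100",
    # "ContentType": "text/html"}, while an unrecognised header name raises
    # TypeError instead of being passed through.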
class GCSFilesStore:
GCS_PROJECT_ID = None
CACHE_CONTROL = "max-age=172800"
# The bucket's default object ACL will be applied to the object.
    # Overridden from settings.FILES_STORE_GCS_ACL in FilesPipeline.from_settings.
POLICY = None
def __init__(self, uri):
from google.cloud import storage
client = storage.Client(project=self.GCS_PROJECT_ID)
bucket, prefix = uri[5:].split("/", 1)
self.bucket = client.bucket(bucket)
self.prefix = prefix
permissions = self.bucket.test_iam_permissions(
["storage.objects.get", "storage.objects.create"])
if "storage.objects.get" not in permissions:
logger.warning(
"No 'storage.objects.get' permission for GSC bucket %(bucket)s. "
"Checking if files are up to date will be impossible. Files will be downloaded every time.",
{"bucket": bucket},
)
if "storage.objects.create" not in permissions:
logger.error(
"No 'storage.objects.create' permission for GSC bucket %(bucket)s. Saving files will be impossible!",
{"bucket": bucket},
)
def stat_file(self, path, info):
def _onsuccess(blob):
if blob:
checksum = blob.md5_hash
last_modified = time.mktime(blob.updated.timetuple())
return {"checksum": checksum, "last_modified": last_modified}
else:
return {}
return threads.deferToThread(self.bucket.get_blob,
path).addCallback(_onsuccess)
def _get_content_type(self, headers):
if headers and "Content-Type" in headers:
return headers["Content-Type"]
else:
return "application/octet-stream"
def persist_file(self, path, buf, info, meta=None, headers=None):
blob = self.bucket.blob(self.prefix + path)
blob.cache_control = self.CACHE_CONTROL
blob.metadata = {k: str(v) for k, v in (meta or {}).items()}
return threads.deferToThread(
blob.upload_from_string,
data=buf.getvalue(),
content_type=self._get_content_type(headers),
predefined_acl=self.POLICY,
)
class FTPFilesStore:
FTP_USERNAME = None
FTP_PASSWORD = None
USE_ACTIVE_MODE = None
def __init__(self, uri):
if not uri.startswith("ftp://"):
raise ValueError("Incorrect URI scheme in %s, expected 'ftp'" %
uri)
u = urlparse(uri)
        self.host = u.hostname
        self.port = int(u.port or 21)
self.username = u.username or self.FTP_USERNAME
self.password = u.password or self.FTP_PASSWORD
self.basedir = u.path.rstrip("/")
def persist_file(self, path, buf, info, meta=None, headers=None):
path = "%s/%s" % (self.basedir, path)
return threads.deferToThread(
ftp_store_file,
path=path,
file=buf,
host=self.host,
port=self.port,
username=self.username,
password=self.password,
use_active_mode=self.USE_ACTIVE_MODE,
)
def stat_file(self, path, info):
def _stat_file(path):
try:
ftp = FTP()
ftp.connect(self.host, self.port)
ftp.login(self.username, self.password)
if self.USE_ACTIVE_MODE:
ftp.set_pasv(False)
file_path = "%s/%s" % (self.basedir, path)
last_modified = float(
ftp.voidcmd("MDTM %s" % file_path)[4:].strip())
m = hashlib.md5()
ftp.retrbinary("RETR %s" % file_path, m.update)
return {
"last_modified": last_modified,
"checksum": m.hexdigest()
}
# The file doesn't exist
except Exception:
return {}
return threads.deferToThread(_stat_file, path)
class FilesPipeline(MediaPipeline):
"""Abstract pipeline that implement the file downloading
This pipeline tries to minimize network transfers and file processing,
doing stat of the files and determining if file is new, uptodate or
expired.
``new`` files are those that pipeline never processed and needs to be
downloaded from supplier site the first time.
``uptodate`` files are the ones that the pipeline processed and are still
valid files.
``expired`` files are those that pipeline already processed but the last
modification was made long time ago, so a reprocessing is recommended to
refresh it in case of change.
"""
MEDIA_NAME = "file"
EXPIRES = 90
STORE_SCHEMES = {
"": FSFilesStore,
"file": FSFilesStore,
"s3": S3FilesStore,
"gs": GCSFilesStore,
"ftp": FTPFilesStore,
}
DEFAULT_FILES_URLS_FIELD = "file_urls"
DEFAULT_FILES_RESULT_FIELD = "files"
def __init__(self, store_uri, download_func=None, settings=None):
if not store_uri:
raise NotConfigured
if isinstance(settings, dict) or settings is None:
settings = Settings(settings)
cls_name = "FilesPipeline"
self.store = self._get_store(store_uri)
resolve = functools.partial(self._key_for_pipe,
base_class_name=cls_name,
settings=settings)
self.expires = settings.getint(resolve("FILES_EXPIRES"), self.EXPIRES)
if not hasattr(self, "FILES_URLS_FIELD"):
self.FILES_URLS_FIELD = self.DEFAULT_FILES_URLS_FIELD
if not hasattr(self, "FILES_RESULT_FIELD"):
self.FILES_RESULT_FIELD = self.DEFAULT_FILES_RESULT_FIELD
self.files_urls_field = settings.get(resolve("FILES_URLS_FIELD"),
self.FILES_URLS_FIELD)
self.files_result_field = settings.get(resolve("FILES_RESULT_FIELD"),
self.FILES_RESULT_FIELD)
super().__init__(download_func=download_func, settings=settings)
@classmethod
def from_settings(cls, settings):
s3store = cls.STORE_SCHEMES["s3"]
s3store.AWS_ACCESS_KEY_ID = settings["AWS_ACCESS_KEY_ID"]
s3store.AWS_SECRET_ACCESS_KEY = settings["AWS_SECRET_ACCESS_KEY"]
s3store.AWS_ENDPOINT_URL = settings["AWS_ENDPOINT_URL"]
s3store.AWS_REGION_NAME = settings["AWS_REGION_NAME"]
s3store.AWS_USE_SSL = settings["AWS_USE_SSL"]
s3store.AWS_VERIFY = settings["AWS_VERIFY"]
s3store.POLICY = settings["FILES_STORE_S3_ACL"]
gcs_store = cls.STORE_SCHEMES["gs"]
gcs_store.GCS_PROJECT_ID = settings["GCS_PROJECT_ID"]
gcs_store.POLICY = settings["FILES_STORE_GCS_ACL"] or None
ftp_store = cls.STORE_SCHEMES["ftp"]
ftp_store.FTP_USERNAME = settings["FTP_USER"]
ftp_store.FTP_PASSWORD = settings["FTP_PASSWORD"]
ftp_store.USE_ACTIVE_MODE = settings.getbool("FEED_STORAGE_FTP_ACTIVE")
store_uri = settings["FILES_STORE"]
return cls(store_uri, settings=settings)
def _get_store(self, uri):
if os.path.isabs(uri): # to support win32 paths like: C:\\some\dir
scheme = "file"
else:
scheme = urlparse(uri).scheme
store_cls = self.STORE_SCHEMES[scheme]
return store_cls(uri)
def media_to_download(self, request, info):
def _onsuccess(result):
if not result:
                return  # returning None forces download
last_modified = result.get("last_modified", None)
if not last_modified:
                return  # returning None forces download
age_seconds = time.time() - last_modified
age_days = age_seconds / 60 / 60 / 24
if age_days > self.expires:
                return  # returning None forces download
referer = referer_str(request)
logger.debug(
"File (uptodate): Downloaded %(medianame)s from %(request)s "
"referred in <%(referer)s>",
{
"medianame": self.MEDIA_NAME,
"request": request,
"referer": referer
},
extra={"spider": info.spider},
)
self.inc_stats(info.spider, "uptodate")
checksum = result.get("checksum", None)
return {
"url": request.url,
"path": path,
"checksum": checksum,
"status": "uptodate",
}
path = self.file_path(request, info=info)
dfd = defer.maybeDeferred(self.store.stat_file, path, info)
dfd.addCallbacks(_onsuccess, lambda _: None)
dfd.addErrback(lambda f: logger.error(
self.__class__.__name__ + ".store.stat_file",
exc_info=failure_to_exc_info(f),
extra={"spider": info.spider},
))
return dfd
def media_failed(self, failure, request, info):
if not isinstance(failure.value, IgnoreRequest):
referer = referer_str(request)
logger.warning(
"File (unknown-error): Error downloading %(medianame)s from "
"%(request)s referred in <%(referer)s>: %(exception)s",
{
"medianame": self.MEDIA_NAME,
"request": request,
"referer": referer,
"exception": failure.value,
},
extra={"spider": info.spider},
)
raise FileException
def media_downloaded(self, response, request, info):
referer = referer_str(request)
if response.status != 200:
logger.warning(
"File (code: %(status)s): Error downloading file from "
"%(request)s referred in <%(referer)s>",
{
"status": response.status,
"request": request,
"referer": referer
},
extra={"spider": info.spider},
)
raise FileException("download-error")
if not response.body:
logger.warning(
"File (empty-content): Empty file from %(request)s referred "
"in <%(referer)s>: no-content",
{
"request": request,
"referer": referer
},
extra={"spider": info.spider},
)
raise FileException("empty-content")
status = "cached" if "cached" in response.flags else "downloaded"
logger.debug(
"File (%(status)s): Downloaded file from %(request)s referred in "
"<%(referer)s>",
{
"status": status,
"request": request,
"referer": referer
},
extra={"spider": info.spider},
)
self.inc_stats(info.spider, status)
try:
path = self.file_path(request, response=response, info=info)
checksum = self.file_downloaded(response, request, info)
except FileException as exc:
logger.warning(
"File (error): Error processing file from %(request)s "
"referred in <%(referer)s>: %(errormsg)s",
{
"request": request,
"referer": referer,
"errormsg": str(exc)
},
extra={"spider": info.spider},
exc_info=True,
)
raise
except Exception as exc:
logger.error(
"File (unknown-error): Error processing file from %(request)s "
"referred in <%(referer)s>",
{
"request": request,
"referer": referer
},
exc_info=True,
extra={"spider": info.spider},
)
raise FileException(str(exc))
return {
"url": request.url,
"path": path,
"checksum": checksum,
"status": status,
}
def inc_stats(self, spider, status):
spider.crawler.stats.inc_value("file_count", spider=spider)
spider.crawler.stats.inc_value("file_status_count/%s" % status,
spider=spider)
# Overridable Interface
def get_media_requests(self, item, info):
urls = ItemAdapter(item).get(self.files_urls_field, [])
return [Request(u) for u in urls]
def file_downloaded(self, response, request, info):
path = self.file_path(request, response=response, info=info)
buf = BytesIO(response.body)
checksum = md5sum(buf)
buf.seek(0)
self.store.persist_file(path, buf, info)
return checksum
def item_completed(self, results, item, info):
with suppress(KeyError):
ItemAdapter(item)[self.files_result_field] = [
x for ok, x in results if ok
]
return item
def file_path(self, request, response=None, info=None):
media_guid = hashlib.sha1(to_bytes(request.url)).hexdigest()
media_ext = os.path.splitext(request.url)[1]
        # Handle empty or unusual extensions by guessing the mime type and
        # then the extension, defaulting to an empty string otherwise
if media_ext not in mimetypes.types_map:
media_ext = ""
media_type = mimetypes.guess_type(request.url)[0]
if media_type:
media_ext = mimetypes.guess_extension(media_type)
return "full/%s%s" % (media_guid, media_ext)
|
py | b406c6bf0894a926d6d363aec3664e47233766a5 | #!/usr/bin/env python
# Created by "Thieu" at 08:33, 17/03/2022 ----------%
# Email: [email protected] %
# Github: https://github.com/thieu1995 %
# --------------------------------------------------%
|
py | b406c742592ae53955336c64635d4738bcde6c58 | # Generated by Django 4.0 on 2021-12-08 20:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='customuser',
name='phone_number',
field=models.CharField(blank=True, max_length=15, null=True),
),
]
|
py | b406c74481365d3235187da3685ac2265dcc9e2d | from praw import Reddit
old = Reddit(username='', password='', client_id='', client_secret='',
user_agent='Transferring NSFW subs from old account to new account (old account)')
new = Reddit(username='', password='', client_id='', client_secret='',
user_agent='Transferring NSFW subs from old account to new account (new account)')
for sub in old.user.me().subreddits():
    if sub.over18:
        new.subreddit(sub.display_name).subscribe()
|
py | b406c7e427a58972b1f66858f235c82c5a9f0cd4 | import click
import frappe
import os
import json
import importlib
import frappe.utils
import traceback
import warnings
click.disable_unicode_literals_warning = True
def main():
commands = get_app_groups()
commands.update({
'get-frappe-commands': get_frappe_commands,
'get-frappe-help': get_frappe_help
})
click.Group(commands=commands)(prog_name='bench')
def get_app_groups():
'''Get all app groups, put them in main group "frappe" since bench is
designed to only handle that'''
commands = dict()
for app in get_apps():
app_commands = get_app_commands(app)
if app_commands:
commands.update(app_commands)
ret = dict(frappe=click.group(name='frappe', commands=commands)(app_group))
return ret
def get_app_group(app):
app_commands = get_app_commands(app)
if app_commands:
return click.group(name=app, commands=app_commands)(app_group)
@click.option('--site')
@click.option('--profile', is_flag=True, default=False, help='Profile')
@click.option('--verbose', is_flag=True, default=False, help='Verbose')
@click.option('--force', is_flag=True, default=False, help='Force')
@click.pass_context
def app_group(ctx, site=False, force=False, verbose=False, profile=False):
ctx.obj = {
'sites': get_sites(site),
'force': force,
'verbose': verbose,
'profile': profile
}
if ctx.info_name == 'frappe':
ctx.info_name = ''
def get_sites(site_arg):
if site_arg == 'all':
return frappe.utils.get_sites()
elif site_arg:
return [site_arg]
elif os.environ.get('FRAPPE_SITE'):
return [os.environ.get('FRAPPE_SITE')]
elif os.path.exists('currentsite.txt'):
with open('currentsite.txt') as f:
site = f.read().strip()
if site:
return [site]
return []
def get_app_commands(app):
if os.path.exists(os.path.join('..', 'apps', app, app, 'commands.py'))\
or os.path.exists(os.path.join('..', 'apps', app, app, 'commands', '__init__.py')):
try:
app_command_module = importlib.import_module(app + '.commands')
except Exception:
traceback.print_exc()
return []
else:
return []
ret = {}
for command in getattr(app_command_module, 'commands', []):
ret[command.name] = command
return ret
@click.command('get-frappe-commands')
def get_frappe_commands():
commands = list(get_app_commands('frappe'))
for app in get_apps():
app_commands = get_app_commands(app)
if app_commands:
commands.extend(list(app_commands))
print(json.dumps(commands))
@click.command('get-frappe-help')
def get_frappe_help():
print(click.Context(get_app_groups()['frappe']).get_help())
def get_apps():
return frappe.get_all_apps(with_internal_apps=False, sites_path='.')
if __name__ == "__main__":
if not frappe._dev_server:
warnings.simplefilter('ignore')
main()
|
py | b406c8b75f6b20b1d69d30c6a25cea81355c0df4 | # Copyright 2011 Nicholas Bray
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from shader import sampler, vec
from .. stubcollector import stubgenerator
vec4 = vec.vec4
@stubgenerator
def makeSamplerFunc(collector):
llfunc = collector.llfunc
export = collector.export
replaceAttr = collector.replaceAttr
attachPtr = collector.attachPtr
@export
@replaceAttr(sampler.sampler2D, 'texture')
@replaceAttr(sampler.samplerCube, 'texture')
@llfunc(descriptive=True)
def texture(self, P, bias=None):
return vec4(allocate(float), allocate(float), allocate(float), allocate(float))
@export
@replaceAttr(sampler.sampler2D, 'textureLod')
@replaceAttr(sampler.samplerCube, 'textureLod')
@llfunc(descriptive=True)
def textureLod(self, P, lod):
return vec4(allocate(float), allocate(float), allocate(float), allocate(float))
|
py | b406c8eeccfaf65c12f9ecbb163dc2e550896139 | import tarfile
import gzip
import itertools
import random
import numpy as np
"""
SRL Dataset reader
"""
UNK_IDX = 0
word_dict_file = './data/embedding/vocab.txt'
embedding_file='./data/embedding/wordVectors.txt'
word_dict_srl_file = './data/srl/train/words_SRL.txt'
prop_SRL_file='./data/srl/train/prop_SRL.txt'
predicate_file = './data/srl/train/verbDict_SRL.txt'
test_word_dict_srl_file = './data/srl/test/words_SRL.txt'
test_prop_SRL_file='./data/srl/test/prop_SRL.txt'
test_predicate_file = predicate_file
label_dict_file = './data/srl/train/targetDict.txt'
train_list_file = './data/srl/train.list'
test_list_file = './data/srl/test.list'
# there is a line to line mapping between embedding file and word_dict_file
conll_empty='_'
def load_dict(filename):
d = dict()
with open(filename, 'r') as f:
for i, line in enumerate(f):
d[line.strip()] = i
return d
# this is a test function. not done yet
# def generateconllCorpus(words_file, props_file):
# sentences = []
# labels = []
# one_seg = []
# counter=0
# with open(words_file) as w, open(props_file) as p:
# for word, label in itertools.izip(w, p):
# word = word.strip()
# label = label.strip().split()
# if len(label) == 0: # end of sentence
# dummy=0
# w.close()
# p.close()
# return reader
def corpus_reader(words_file, props_file):
"""
Read one corpus. It returns an iterator. Each element of
    this iterator is a tuple including sentence and labels. The sentence
    consists of a list of word IDs. The labels are a list of label IDs.
    :return: an iterator of data.
:rtype: iterator
"""
def reader():
sentences = []
labels = []
one_seg = []
counter=0
with open(words_file) as w, open(props_file) as p:
for word, label in itertools.izip(w, p):
word = word.strip()
label = label.strip().split()
if len(label) == 0: # end of sentence
for i in xrange(len(one_seg[0])):
a_kind_lable = [x[i] for x in one_seg]
labels.append(a_kind_lable)
if len(labels) >= 1:
verb_list = []
for x in labels[0]:
if x != conll_empty:
verb_list.append(x)
for i, lbl in enumerate(labels[1:]):
cur_tag = 'O'
is_in_bracket = False
lbl_seq = []
verb_word = ''
for l in lbl:
if l == '*' and is_in_bracket == False:
lbl_seq.append('O')
elif l == '*' and is_in_bracket == True:
lbl_seq.append('I-' + cur_tag)
elif l == '*)':
lbl_seq.append('I-' + cur_tag)
is_in_bracket = False
elif l.find('(') != -1 and l.find(')') != -1:
cur_tag = l[1:l.find('*')]
lbl_seq.append('B-' + cur_tag)
is_in_bracket = False
elif l.find('(') != -1 and l.find(')') == -1:
cur_tag = l[1:l.find('*')]
lbl_seq.append('B-' + cur_tag)
is_in_bracket = True
else:
raise RuntimeError('Unexpected label: %s' %
l)
#print sentences, verb_list[i], lbl_seq
yield sentences, verb_list[i], lbl_seq
sentences = []
labels = []
one_seg = []
counter+=1
else:
sentences.append(word)
one_seg.append(label)
w.close()
p.close()
return reader
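# Editor's note (hedged worked example with illustrative values): the loop
# above converts one bracket-style CoNLL props column into BIO tags, e.g. the
# label column ["(A0*", "*", "*)", "(V*)", "*"] yields
# ['B-A0', 'I-A0', 'I-A0', 'B-V', 'O']: "(X*" opens tag X, a bare "*"
# continues it while inside a bracket (otherwise 'O'), and "*)" closes it.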
def reader_creator(corpus_reader,
word_dict=None,
predicate_dict=None,
label_dict=None):
def reader():
counter=0
for sentence, predicate, labels in corpus_reader():
counter+=1
sen_len = len(sentence)
if 'B-V' not in labels:
print 'B-V not present : ', predicate,labels
verb_index = labels.index('B-V')
mark = [0] * len(labels)
if verb_index > 0:
mark[verb_index - 1] = 1
ctx_n1 = sentence[verb_index - 1]
else:
ctx_n1 = 'bos'
if verb_index > 1:
mark[verb_index - 2] = 1
ctx_n2 = sentence[verb_index - 2]
else:
ctx_n2 = 'bos'
mark[verb_index] = 1
ctx_0 = sentence[verb_index]
if verb_index < len(labels) - 1:
mark[verb_index + 1] = 1
ctx_p1 = sentence[verb_index + 1]
else:
ctx_p1 = 'eos'
if verb_index < len(labels) - 2:
mark[verb_index + 2] = 1
ctx_p2 = sentence[verb_index + 2]
else:
ctx_p2 = 'eos'
word_idx = [word_dict.get(w, UNK_IDX) for w in sentence]
ctx_n2_idx = [word_dict.get(ctx_n2, UNK_IDX)] * sen_len
ctx_n1_idx = [word_dict.get(ctx_n1, UNK_IDX)] * sen_len
ctx_0_idx = [word_dict.get(ctx_0, UNK_IDX)] * sen_len
ctx_p1_idx = [word_dict.get(ctx_p1, UNK_IDX)] * sen_len
ctx_p2_idx = [word_dict.get(ctx_p2, UNK_IDX)] * sen_len
# we are changing from predicate_dict to word_dict (embedding) for testing
if predicate in word_dict:
pred_idx = [word_dict.get(predicate)] * sen_len
else:
print "predicate %s not in dictionary. using UNK_IDX " % predicate
pred_idx = [word_dict.get(predicate,UNK_IDX)] * sen_len
label_idx = [label_dict.get(w) for w in labels]
# print 'sentence id: ', counter
# for string in sentence:
# print string
# print '/n'
# print predicate,labels
# print ''
# #print counter, word_idx, label_idx
# print word_idx, ctx_n2_idx, ctx_n1_idx, \
# ctx_0_idx, ctx_p1_idx, ctx_p2_idx, pred_idx, mark, label_idx
yield word_idx, ctx_n2_idx, ctx_n1_idx, \
ctx_0_idx, ctx_p1_idx, ctx_p2_idx, pred_idx, mark, label_idx
return reader
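# Editor's note (hedged example with an illustrative sentence): for
# ["He", "gave", "her", "a", "book"] with 'B-V' at index 1 ("gave"),
# reader_creator() picks ctx_n2='bos', ctx_n1='He', ctx_0='gave',
# ctx_p1='her', ctx_p2='a' and mark=[1, 1, 1, 1, 0]; each ctx_* word is then
# repeated sen_len times as word-dictionary indices alongside pred_idx and
# the label indices.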
def get_dict():
word_dict = load_dict(word_dict_file)
verb_dict = load_dict(predicate_file)
label_dict = load_dict(label_dict_file)
return word_dict, verb_dict, label_dict
def get_test_dict():
word_dict = load_dict(word_dict_file)
verb_dict = load_dict(predicate_file)
label_dict = load_dict(label_dict_file)
return word_dict, verb_dict, label_dict
def get_embedding(emb_file=embedding_file):
"""
Get the trained word vector.
"""
return np.loadtxt(emb_file, dtype=float)
def shuffle(reader, buf_size):
"""
Creates a data reader whose data output is shuffled.
    Output from the iterator created by the original reader will be
    buffered into a shuffle buffer and then shuffled. The size of the shuffle
    buffer is determined by the argument buf_size.
:param reader: the original reader whose output will be shuffled.
:type reader: callable
:param buf_size: shuffle buffer size.
:type buf_size: int
:return: the new reader whose output is shuffled.
:rtype: callable
"""
def data_reader():
buf = []
for e in reader():
buf.append(e)
if len(buf) >= buf_size:
random.shuffle(buf)
for b in buf:
yield b
buf = []
if len(buf) > 0:
random.shuffle(buf)
for b in buf:
yield b
return data_reader
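# Editor's note (hedged usage sketch; the buffer size is an arbitrary choice):
#     shuffled = shuffle(train(), buf_size=8192)
#     for sample in shuffled():
#         word_idx, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, pred_idx, mark, label_idx = sample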
def train():
word_dict, verb_dict, label_dict = get_dict()
reader = corpus_reader(
words_file=word_dict_srl_file,
props_file=prop_SRL_file)
return reader_creator(reader, word_dict, verb_dict, label_dict)
def test():
word_dict, verb_dict, label_dict = get_test_dict()
reader = corpus_reader(
words_file=test_word_dict_srl_file,
props_file=test_prop_SRL_file)
return reader_creator(reader, word_dict, verb_dict, label_dict)
def main():
#reader = corpus_reader(word_dict_srl_file,prop_SRL_file)
#word_dict, verb_dict, label_dict = get_dict()
reader1 = corpus_reader(
words_file=word_dict_srl_file,
props_file=prop_SRL_file)
c=1
for x in reader1():
c+=1
print c
target = open("train/train_data.txt", 'w')
    reader = train()
counter=0
for x in reader():
target.write(str(x))
target.write(str("\n"))
counter+=1
target.close()
print 'total train sentences : ', counter
reader2 = corpus_reader(
words_file=word_dict_srl_file,
props_file=prop_SRL_file)
c=1
for x in reader1():
c+=1
print c
target = open("out/test_data.txt", 'w')
    reader = test()
counter=0
for x in reader():
target.write(str(x))
target.write(str("\n"))
counter+=1
target.close()
print 'total test sentences : ', counter
if __name__ == '__main__':
main() |
py | b406c9114969c4c6b820c5bc0bb4d667e304da74 | import os
import shutil
def json_to_dataset_list(img_dir,labelme_env_name):
    # get the file names inside the folder
FileNameList = os.listdir(img_dir)
print(FileNameList)
    # activate the labelme conda environment
os.system("activate " + labelme_env_name)
for i in range(len(FileNameList)):
        # check whether the current file is a json file
if (os.path.splitext(FileNameList[i])[1] == ".json"):
json_file = img_dir + "/" + FileNameList[i]
# print(json_file)
            # convert this json file to a png dataset
os.system("labelme_json_to_dataset " + json_file)
def remove_other_file(remove_path):
# remove that dir and file below
filelist = os.listdir(remove_path)
print(filelist)
for filename in filelist:
filename = remove_path + filename
print(filename)
if os.path.isfile(filename):
os.remove(filename)
        elif os.path.isdir(filename):  # not strictly needed, since no extra directories are created here
shutil.rmtree(filename)
else:
pass
if os.path.isdir(remove_path):
shutil.rmtree(remove_path)
def get_label_png(img_dir, save_path):
try:
os.makedirs(save_path, exist_ok=True)
print("create dir " + save_path)
except:
pass
# FileNameList = os.listdir(img_dir)
for root, dirs, files in os.walk(img_dir):
# print(root)
if dirs:
# print(dirs)
for inter_dir in dirs:
ori_path = img_dir + '/' + inter_dir + '/label.png'
new_name = save_path + '/' + inter_dir.split('_json')[0] + '.png'
remove_path = img_dir + '/' + inter_dir+ '/'
# print(new_name)
os.rename(ori_path, new_name)
remove_other_file(remove_path)
if __name__ == '__main__':
img_dir = './base_jsons'
save_path = './base_annotations'
labelme_env_name = 'labelme' # your conda env name with labelme
json_to_dataset_list(img_dir,labelme_env_name)
get_label_png(img_dir, save_path)
|
py | b406c975d0b81074ff78c1cb982b1351042a8c33 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.dev20160107235441 on 2016-10-03 18:44
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('groups', '0002_auto_20161003_1842'),
]
operations = [
migrations.AlterField(
model_name='group',
name='user_name',
field=models.CharField(default=123, max_length=400),
preserve_default=False,
),
]
|
py | b406ca5c879d0ed3fe9700487b17c828b1f2058a | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# * "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
spark-submit --master yarn --executor-memory 16G --driver-memory 24G --num-executors 10 --executor-cores 5 --jars spark-tensorflow-connector_2.11-1.15.0.jar --conf spark.hadoop.hive.exec.dynamic.partition=true --conf spark.hadoop.hive.exec.dynamic.partition.mode=nonstrict pipeline/main_trainready_generator.py config.yml
input: trainready table
output: dataset readable by trainer in tfrecord format
"""
from pyspark.sql.functions import lit, udf, explode
from pyspark.sql.types import IntegerType, ArrayType, StructType, StructField
from util import save_pickle_file, resolve_placeholder, load_config
def generate_tfrecord(hive_context, tf_stat_path, keyword_table, cutting_date, length, trainready_table, tfrecords_hdfs_path_train, tfrecords_hdfs_path_test):
def str_to_intlist(table):
ji = []
for k in [table[j].decode().split(",") for j in range(len(table))]:
s = []
for a in k:
b = int(a.split(":")[1])
s.append(b)
ji.append(s)
return ji
def list_of_list_toint(table):
ji = []
for k in [table[j].decode().split(",") for j in range(len(table))]:
s = [int(a) for a in k]
ji.append(s)
return ji
def padding(kwlist,length):
diff = length-len(kwlist)
temp_list = [0 for i in range(diff)]
padded_keyword = kwlist + temp_list
return padded_keyword
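    # Editor's note (hedged examples of the helpers above, illustrative values):
    # str_to_intlist([b"521:0,88:3", b"7:1"]) -> [[0, 3], [1]] (keeps only the
    # count after each ':'),
    # list_of_list_toint([b"521,88", b"7"]) -> [[521, 88], [7]],
    # and padding([521, 88], 5) -> [521, 88, 0, 0, 0].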
    def generating_dataframe(df):
df = df.withColumn("sl", udf(lambda x: len(x), IntegerType())(df.keyword_list))
df = df.where(df.sl > 5)
df = df.withColumn('max_length', lit(df.agg({'sl': 'max'}).collect()[0][0]))
df = df.withColumn('keyword_list_padded',
udf(padding, ArrayType(IntegerType()))(df.keyword_list, df.max_length))
return df
def generate_tf_statistics(testsetDF, trainDF, keyword_df, tf_stat_path):
tfrecords_statistics = {}
tfrecords_statistics['test_dataset_count'] = testsetDF.count()
tfrecords_statistics['train_dataset_count'] = trainDF.count()
tfrecords_statistics['user_count'] = trainDF.select('aid').distinct().count()
tfrecords_statistics['item_count'] = keyword_df.distinct().count() + 1
save_pickle_file(tfrecords_statistics, tf_stat_path)
def create_trainset(aid_index, click_counts, keyword_int):
def flatten(lst):
f = [y for x in lst for y in x]
return f
t_set = []
for m in range(len(click_counts)):
for n in range(len(click_counts[m])):
if (click_counts[m][n] != 0):
pos = (aid_index, flatten(keyword_int[m + 1:m + 1 + length]), keyword_int[m][n], 1)
if len(pos[1]) >= 1:
t_set.append(pos)
elif (m % 5 == 0 and n % 2 == 0):
neg = (aid_index, flatten(keyword_int[m + 1:m + 1 + length]), keyword_int[m][n], 0)
if len(neg[1]) >= 1:
t_set.append(neg)
return t_set
schema = StructType([
StructField("aid_index", IntegerType(), True),
StructField("keyword_list", ArrayType(IntegerType()), True),
StructField("keyword", IntegerType(), True),
StructField("label", IntegerType(), True)
])
command = """SELECT * FROM {}"""
df = hive_context.sql(command.format(trainready_table))
df = df.withColumn('interval_starting_time', df['interval_starting_time'].cast(ArrayType(IntegerType())))
df = df.withColumn('_kwi', udf(list_of_list_toint, ArrayType(ArrayType(IntegerType())))(df.kwi))
df = df.withColumn('click_counts', udf(str_to_intlist, ArrayType(ArrayType(IntegerType())))(df['kwi_click_counts']))
df = df.withColumn('total_click', udf(lambda x: sum([item for sublist in x for item in sublist]), IntegerType())(df.click_counts))
df = df.where(df.total_click != 0)
df = df.withColumn('indicing', udf(lambda y: len([x for x in y if x >= cutting_date]), IntegerType())(df.interval_starting_time))
df = df.withColumn('keyword_int_train', udf(lambda x, y: x[y:],ArrayType(ArrayType(IntegerType())))(df._kwi, df.indicing))
df = df.withColumn('keyword_int_test', udf(lambda x, y: x[:y],ArrayType(ArrayType(IntegerType())))(df._kwi, df.indicing))
df = df.withColumn('click_counts_train', udf(lambda x, y: x[y:],ArrayType(ArrayType(IntegerType())))(df.click_counts, df.indicing))
df = df.withColumn('click_counts_test', udf(lambda x, y: x[:y],ArrayType(ArrayType(IntegerType())))(df.click_counts, df.indicing))
df = df.withColumn('train_set', udf(create_trainset, ArrayType(schema))(df.aid_index, df.click_counts_train,df.keyword_int_train))
df = df.withColumn('test_set', udf(create_trainset, ArrayType(schema))(df.aid_index, df.click_counts_test, df.keyword_int_test))
trainDF = df.select(df.aid_index, explode(df.train_set).alias('dataset'))
testDF = df.select(df.aid_index, explode(df.test_set).alias('dataset'))
train_set = trainDF.select('aid_index', trainDF.dataset['aid_index'].alias('aid'), trainDF.dataset['keyword_list'].alias('keyword_list'), trainDF.dataset['keyword'].alias('keyword'), trainDF.dataset['label'].alias('label'))
test_set = testDF.select('aid_index', testDF.dataset['aid_index'].alias('aid'), testDF.dataset['keyword_list'].alias('keyword_list'), testDF.dataset['keyword'].alias('keyword'), testDF.dataset['label'].alias('label'))
train_set = generating_dataframe(train_set)
train_set.write.option("header", "true").option("encoding", "UTF-8").mode("overwrite").format('hive').saveAsTable(tfrecords_hdfs_path_train)
train_set.write.format("tfrecords").option("recordType", "Example").mode("overwrite").save(tfrecords_hdfs_path_train)
testsetDF = generating_dataframe(test_set)
testsetDF.write.format("tfrecords").option("recordType", "Example").mode("overwrite").save(tfrecords_hdfs_path_test)
command = "SELECT * from {}"
keyword_df = hive_context.sql(command.format(keyword_table))
generate_tf_statistics(testsetDF, trainDF, keyword_df, tf_stat_path)
def run(hive_context, cfg):
cfgp = cfg['pipeline']
cfg_train = cfg['pipeline']['main_trainready']
trainready_table = cfg_train['trainready_output_table']
cfg_tfrecord = cfg['pipeline']['tfrecords']
tfrecords_hdfs_path_train = cfg_tfrecord['tfrecords_hdfs_path_train']
tfrecords_hdfs_path_test = cfg_tfrecord['tfrecords_hdfs_path_test']
cutting_date = cfg['pipeline']['cutting_date']
length = cfg['pipeline']['length']
tf_stat_path = cfgp['tfrecords']['tfrecords_statistics_path']
keyword_table = cfgp['main_keywords']['keyword_output_table']
generate_tfrecord(hive_context, tf_stat_path, keyword_table, cutting_date, length, trainready_table, tfrecords_hdfs_path_train, tfrecords_hdfs_path_test)
if __name__ == "__main__":
"""
    This program performs the following:
adds normalized data by adding index of features
groups data into time_intervals and dids (labeled by did)
"""
sc, hive_context, cfg = load_config(description="pre-processing train ready data")
resolve_placeholder(cfg)
run(hive_context=hive_context, cfg=cfg)
sc.stop()
|
py | b406caaa10ec9d1cb597be1cc2dc83f42f52e4fe | from typing import cast
from mypy.nodes import Node, TypeInfo, CoerceExpr, JavaCast
from mypy.types import (
Type, Instance, Void, NoneTyp, AnyType
)
from mypy.sametypes import is_same_type
from mypy.subtypes import is_proper_subtype
from mypy.rttypevars import translate_runtime_type_vars_in_context
def coerce(expr: Node, target_type: Type, source_type: Type, context: TypeInfo,
is_wrapper_class: bool = False, is_java: bool = False) -> Node:
"""Build an expression that coerces expr from source_type to target_type.
Return bare expr if the coercion is trivial (always a no-op).
"""
if is_trivial_coercion(target_type, source_type, is_java):
res = expr
else:
# Translate type variables to expressions that fetch the value of a
# runtime type variable.
target = translate_runtime_type_vars_in_context(target_type, context,
is_java)
source = translate_runtime_type_vars_in_context(source_type, context,
is_java)
res = CoerceExpr(expr, target, source, is_wrapper_class)
if is_java and ((isinstance(source_type, Instance) and
(cast(Instance, source_type)).erased)
or (isinstance(res, CoerceExpr) and
isinstance(target_type, Instance))):
res = JavaCast(res, target_type)
return res
def is_trivial_coercion(target_type: Type, source_type: Type,
is_java: bool) -> bool:
"""Is an implicit coercion from source_type to target_type a no-op?
Note that we omit coercions of form any <= C, unless C is a primitive that
may have a special representation.
"""
# FIX: Replace type vars in source type with any?
if isinstance(source_type, Void) or is_same_type(target_type, source_type):
return True
# Coercions from a primitive type to any other type are non-trivial, since
# we may have to change the representation.
if not is_java and is_special_primitive(source_type):
return False
return (is_proper_subtype(source_type, target_type)
or isinstance(source_type, NoneTyp)
or isinstance(target_type, AnyType))
def is_special_primitive(type: Type) -> bool:
"""Is type a primitive with a special runtime representation?
    There need to be explicit coercions to/from special primitive types. For
    example, floats need to be boxed/unboxed. The special primitive types include
int, float and bool.
"""
return (isinstance(type, Instance)
and (cast(Instance, type)).type.fullname() in ['builtins.int',
'builtins.float',
'builtins.bool'])
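# Editor's note (hedged illustration of the rules above): coercing an int
# expression to 'object' is not trivial in non-Java mode, because int is a
# special primitive that may need boxing, whereas coercing a str expression
# to 'object' is trivial since str is a proper subtype with no special
# runtime representation.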
|
py | b406cd088b1a36001a4f64b0305df1568cd9ef2b | #!/usr/bin/env python
""" Script generated from simulation of the fc_asm_mscm_rdriv_690k test case.
Usage:
fc_asm_mscm_rdriv_690k.py [--no_fw_load][--no_reset]
Options:
--help Shows this help message.
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from docopt import docopt
import sys
import io
import os
import time
import struct
import subprocess
import ctypes
from collections import OrderedDict
import threading
from newton_control_main import newton as newton
if __name__ == "__main__":
performReset = True
loadFirmware = True
args = docopt(__doc__, version='0.1')
rc = newton.adi_newton_config( 0 )
if rc != 0:
print( "ERROR: newton.adi_newton_config return an error (" + str( rc ) + ")." )
sys.exit( rc )
if args['--no_fw_load']:
loadFirmware = False
if args['--no_reset']:
performReset = False
if performReset == True:
newton.adi_reset_newton( newton.PIN_MODE_HSP_DEBUG )
if loadFirmware == True:
cmd_file = os.path.expanduser( "./tests/dms_eval_tests/fc_asm_mscm_rdriv_690k/fc_asm_mscm_rdriv_690k.txt" )
cmd_file_bytes = cmd_file.encode(encoding='utf-8')
newton.adi_load_command_file( cmd_file_bytes )
newton.adi_write_register( 0x000C, 0x00c5 ) # useqControlRegister
newton.adi_check_register_py( 0x0142, 0x0500 ) # pll_status
newton.adi_write_register( 0x0244, 0x0020 ) # SCRATCHPAD[34]
newton.adi_write_register( 0x0014, 0x3918 ) # digPwrDown
newton.adi_write_register( 0x0146, 0x007b ) # power_down_adc_others
newton.adi_write_register( 0x0e00, 0x0082 ) # de_control
newton.adi_write_register( 0x0e5a, 0x0003 ) # array_init_vec_dark
newton.adi_check_register_py( 0x0032, 0x0000 ) # errorStatus
newton.adi_check_register_py( 0x0256, 0x0001 ) # SCRATCHPAD[43]
newton.adi_check_register_py( 0x0032, 0x0000 ) # errorStatus
newton.adi_check_register_py( 0x0256, 0x0001 ) # SCRATCHPAD[43]
newton.adi_check_register_py( 0x0032, 0x0000 ) # errorStatus
newton.adi_check_register_py( 0x0256, 0x0001 ) # SCRATCHPAD[43]
newton.adi_check_register_py( 0x0032, 0x0000 ) # errorStatus
newton.adi_check_register_py( 0x0256, 0x0001 ) # SCRATCHPAD[43]
newton.adi_check_register_py( 0x0032, 0x0000 ) # errorStatus
newton.adi_check_register_py( 0x0256, 0x0001 ) # SCRATCHPAD[43]
newton.adi_check_register_py( 0x0032, 0x0000 ) # errorStatus
newton.adi_check_register_py( 0x0256, 0x0001 ) # SCRATCHPAD[43]
newton.adi_check_register_py( 0x0032, 0x0000 ) # errorStatus
newton.adi_check_register_py( 0x0256, 0x0002 ) # SCRATCHPAD[43]
newton.adi_write_register( 0x0200, 0x0005 ) # SCRATCHPAD[0]
newton.adi_check_register_py( 0x0032, 0x0000 ) # errorStatus
newton.adi_check_register_py( 0x0256, 0x0002 ) # SCRATCHPAD[43]
|
py | b406cebe0508fc4ec565a25a9ec39ab8c9d594cb | from pypy.interpreter.mixedmodule import MixedModule
class Module(MixedModule):
"""Implementation module for SSL socket operations.
See the socket module for documentation."""
interpleveldefs = {
'sslwrap': 'interp_ssl.sslwrap',
'SSLError': 'interp_ssl.get_error(space)',
'_test_decode_cert': 'interp_ssl._test_decode_cert',
}
appleveldefs = {
}
@classmethod
def buildloaders(cls):
# init the SSL module
from pypy.module._ssl.interp_ssl import constants, HAVE_OPENSSL_RAND
for constant, value in constants.iteritems():
Module.interpleveldefs[constant] = "space.wrap(%r)" % (value,)
if HAVE_OPENSSL_RAND:
Module.interpleveldefs['RAND_add'] = "interp_ssl.RAND_add"
Module.interpleveldefs['RAND_status'] = "interp_ssl.RAND_status"
Module.interpleveldefs['RAND_egd'] = "interp_ssl.RAND_egd"
super(Module, cls).buildloaders()
def startup(self, space):
from pypy.rlib.ropenssl import init_ssl
init_ssl()
from pypy.module._ssl.interp_ssl import setup_ssl_threads
setup_ssl_threads()
|
py | b406cf093bb7264f5a7aa5847b55e1bdada4b607 | # -*- coding: utf-8 -*-
"""
Created on Sun Jun 13 22:00:43 2021
@author: ali_d
"""
#school
import numpy as np
import pandas as pd
# plotly
from plotly.offline import init_notebook_mode, iplot, plot
import plotly as py
init_notebook_mode(connected=True)
import plotly.graph_objs as go
from wordcloud import WordCloud
# matplotlib
import matplotlib.pyplot as plt
from plotly import tools
data = pd.read_csv("cwurData.csv")
#data1 = pd.read_csv("education_expenditure_supplementary_data.csv")
data2 = pd.read_csv("educational_attainment_supplementary_data.csv")
data3 = pd.read_csv("school_and_country_table.csv")
data4 = pd.read_csv("shanghaiData.csv")
data5 = pd.read_csv("timesData.csv")
# Bar Charts
df2014 = data5[data5.year == 2014].iloc[:100,:]
df2015 = data5[data5.year == 2015].iloc[:100,:]
df2016 = data5[data5.year == 2016].iloc[:100,:]
df2014 = data5[data5.year == 2014].iloc[:3,:]
print(df2014)
# prepare data frames
trace1 = go.Bar(
x = df2014.university_name,
y = df2014.citations,
name = "citations",
marker = dict(color = 'rgba(255, 174, 255, 0.5)',
line=dict(color='rgb(0,0,0)',width=1.5)),
text = df2014.country)
# create trace2
trace2 = go.Bar(
x = df2014.university_name,
y = df2014.teaching,
name = "teaching",
marker = dict(color = 'rgba(255, 255, 128, 0.5)',
line=dict(color='rgb(0,0,0)',width=1.5)),
text = df2014.country)
data = [trace1, trace2]
layout = go.Layout(barmode = "group")
fig = go.Figure(data = data, layout = layout)
plot(fig)
#%%
a = go.Bar(
x = df2014.university_name,
y = df2014.citations,
name = "citations",
marker = dict(color = 'rgba(100, 44, 55, 0.5)',
line=dict(color='rgb(0,0,0)',width=1.5)),
text = df2014.country)
b = go.Bar(
x = df2014.university_name,
y = df2014.teaching,
name = "teaching",
marker = dict(color = 'rgba(65, 55, 122, 0.5)',
line=dict(color='rgb(0,0,0)',width=1.5)),
text = df2014.country)
data = [a, b]
layout = go.Layout(barmode = "group")
fig = go.Figure(data = data, layout = layout)
plot(fig)
#%% prepare data frames
df2014 = data5[data5.year == 2014].iloc[:3,:]
x = df2014.university_name
trace1 = {
'x': x,
'y': df2014.citations,
'name': 'citation',
'type': 'bar'
};
trace2 = {
'x': x,
'y': df2014.teaching,
'name': 'teaching',
'type': 'bar'
};
data = [trace1,trace2]
layout = {
'xaxis': {'title': 'Top 3 universities'},
'barmode': 'relative',
'title': 'citations and teaching of top 3 universities in 2014'
};
fig = go.Figure(data = data,layout =layout)
plot(fig)
#%%
df2014 = data5[data5.year == 2014].iloc[:3,:]
x = df2014.university_name
trace1 = {
'x': x,
'y': df2014.international,
'name': 'international',
'type': 'bar'
};
trace2 = {
'x': x,
'y': df2014.total_score,
'name': 'total_score',
'type': 'bar',
};
data = [trace1,trace2]
layout = {
'xaxis': {'title': 'Top 3 universities'},
'barmode': 'relative',
'title': 'total_score and total_score of top 3 universities in 2014'
};
fig = go.Figure(data = data,layout =layout)
plot(fig)
#%%
df2016 = data5[data5.year == 2016].iloc[:7,:]
y_saving = [each for each in df2016.research]
y_net_worth = [float(each) for each in df2016.income]
x_saving = [each for each in df2016.university_name]
x_net_worth = [each for each in df2016.university_name]
trace0 = go.Bar(
x=y_saving,
y=x_saving,
marker=dict(color='rgba(171, 50, 96, 0.6)',line=dict(color='rgba(171, 50, 96, 1.0)',width=1)),
name='research',
orientation='h',
)
trace1 = go.Scatter(
x=y_net_worth,
y=x_net_worth,
mode='lines+markers',
line=dict(color='rgb(63, 72, 204)'),
name='income',
)
layout = dict(
title='Citations and income',
yaxis=dict(showticklabels=True,domain=[0, 0.85]),
yaxis2=dict(showline=True,showticklabels=False,linecolor='rgba(102, 102, 102, 0.8)',linewidth=2,domain=[0, 0.85]),
xaxis=dict(zeroline=False,showline=False,showticklabels=True,showgrid=True,domain=[0, 0.42]),
xaxis2=dict(zeroline=False,showline=False,showticklabels=True,showgrid=True,domain=[0.47, 1],side='top',dtick=25),
legend=dict(x=0.029,y=1.038,font=dict(size=10) ),
margin=dict(l=200, r=20,t=70,b=70),
paper_bgcolor='rgb(248, 248, 255)',
plot_bgcolor='rgb(248, 248, 255)',
)
annotations = []
y_s = np.round(y_saving, decimals=2)
y_nw = np.rint(y_net_worth)
# Adding labels
for ydn, yd, xd in zip(y_nw, y_s, x_saving):
# labeling the scatter savings
annotations.append(dict(xref='x2', yref='y2', y=xd, x=ydn - 4,text='{:,}'.format(ydn),font=dict(family='Arial', size=12,color='rgb(63, 72, 204)'),showarrow=False))
# labeling the bar net worth
annotations.append(dict(xref='x1', yref='y1', y=xd, x=yd + 3,text=str(yd),font=dict(family='Arial', size=12,color='rgb(171, 50, 96)'),showarrow=False))
layout['annotations'] = annotations
# Creating two subplots
fig = tools.make_subplots(rows=1, cols=2, specs=[[{}, {}]], shared_xaxes=True,
shared_yaxes=False, vertical_spacing=0.001)
fig.append_trace(trace0, 1, 1)
fig.append_trace(trace1, 1, 2)
fig['layout'].update(layout)
plot(fig)
#%% Pie Charts
df2016 = data5[data5.year == 2016].iloc[:7,:]
pie1 = df2016.num_students
pie1_list = [float(each.replace(',', '.')) for each in df2016.num_students]
labels = df2016.university_name
#figure
fig = {
"data": [
{
"values": pie1_list,
"labels": labels,
"domain": {"x": [0, .5]},
"name": "Number Of Students Rates",
"hoverinfo":"label+percent+name",
"hole": .3,
"type": "pie"
},],
"layout": {
"title":"Universities Number of Students rates",
"annotations": [
{ "font": { "size": 20},
"showarrow": False,
"text": "Number of Students",
"x": 0.20,
"y": 1
},
]
}
}
plot(fig)
#%%
df2016 = data5[data5.year == 2016].iloc[:7,:]
pie1 = df2016.num_students
pie1_list = [float(each.replace(',', '.')) for each in df2016.num_students]
labels = df2016.university_name
#figure
fig = {
"data": [
{
"values": pie1_list,
"labels": labels,
"domain": {"x": [0, .5]},
"name": "Number Of Students Rates",
"hoverinfo":"label+percent+name",
"hole": .3,
"type": "pie"
},],
"layout": {
"title":"Universities Number of Students rates",
"annotations": [
{ "font": { "size": 30},
"showarrow": True,
"text": "Number of Students",
"x": 0.20,
"y": 1
},
]
}
}
plot(fig)
#%%
df2016 = data5[data5.year == 2016].iloc[:7,:]
pie1 = df2016.num_students
pie1_list = [float(each.replace(',', '.')) for each in df2016.num_students]
labels = df2016.income
#figure
fig = {
"data": [
{
"values": pie1_list,
"labels": labels,
"domain": {"x": [0, .5]},
"name": "Number Of Students Rates",
"hoverinfo":"label+percent+name",
"hole": .3,
"type": "pie"
},],
"layout": {
"title":"Universities Number of Students rates",
"annotations": [
{ "font": { "size": 20},
"showarrow": False,
"text": "Number of Students",
"x": 0.20,
"y": 1
},
]
}
}
plot(fig)
|
py | b406cfe790197b433483c5025c059382d7199bb7 | import os
import json
import json5
import argparse
import dconfjson
import subprocess
from glob import glob
from collections import OrderedDict
#dconf reset -f /org/gnome/terminal/legacy/profiles:/
#python setup.py sdist bdist_wheel
parser = argparse.ArgumentParser(description='Import visual studio themes as linux terminal theme profiles')
parser.add_argument("-v", "--verbose", help="increase output verbosity", action="store_true")
args = parser.parse_args()
def uuidgen():
cmd = "(uuidgen)"
tmp = subprocess.Popen( cmd, shell=True, stdout=subprocess.PIPE )
(out, err) = tmp.communicate()
return out.decode("utf-8").strip()
def dset(dconf_path):
spath = "/org/gnome/terminal/legacy/profiles:/"
cmd = ("dconf load %s < %s" % (spath, dconf_path))
p = subprocess.Popen( cmd, shell=True, stdout=subprocess.PIPE)
(out, error) = p.communicate()
print(out.decode("utf-8"))
return
def dconf_get():
spath = "/org/gnome/terminal/legacy/profiles:/"
cmd = ("dconf dump %s /" % spath)
p = subprocess.Popen( cmd, shell=True, stdout=subprocess.PIPE )
(out, err) = p.communicate()
return out
def hex2rgb(color):
tmp = color.lstrip("#")
if len(tmp) == 3: # for example #AAA
tmp = "".join([c+c for c in tmp])
rgb = tuple( int(tmp[i:i+2], 16) for i in (0, 2, 4) )
    elif len(tmp) > 6:  # for example #AAAAAAFF (digits beyond the first 6 are ignored)
rgb = tuple( int(tmp[:6][i:i+2], 16) for i in (0, 2, 4) )
elif len(tmp) == 6: # for example #AAAAAA
rgb = tuple( int(tmp[i:i+2], 16) for i in (0, 2, 4) )
else:
if args.verbose:
print("could not convert color: %s to rgb", color)
return None
return ("rgb(%d,%d,%d)" % rgb)
def importer():
# get current dconf
s_out = dconf_get().decode("utf-8")
if s_out:
confjson = dconfjson.dconf_json(s_out)
else:
confjson = dconfjson.dconf_json(EMPTY_CONF)
extensions_folders = []
# get vscode default themes
# snap
extensions_folders.append("/snap/code/current/usr/share/code/resources/app/extensions/")
# dnf(fedora),
extensions_folders.append("/usr/share/code/resources/app/extensions/")
# get installed vscode theme extensions
extensions_folders.append("%s/.vscode/extensions/" % os.path.expanduser("~"))
new_profile_added = False
theme_files = []
for extensions_folder in extensions_folders:
for extension in glob("%s*" % extensions_folder):
extension_name = os.path.basename(extension)
try:
with open("%s/package.json" % extension) as fin:
a = json.load(fin)
for theme in a["contributes"]["themes"]:
if "path" in theme:
path = extension + theme["path"][1:]
uiTheme = "vs-light"
if "uiTheme" in theme:
uiTheme = theme["uiTheme"]
if path.endswith(".json"):
theme_files.append([path, uiTheme])
except:
None
for [theme_file, uiTheme] in theme_files:
# load input data from vscode theme json
try:
with open(theme_file) as fin:
data = json5.load(fin)
except:
if args.verbose:
print("Could not read json config for: %s" % theme_file)
continue
if "name" in data:
name = data["name"]
else:
name = " ".join(os.path.basename(theme_file).split("-")[:-1])
if "colors" in data:
colors = data["colors"]
else:
if args.verbose:
print("%sCould not find colors -- %s%s" % ("\033[91m", "\033[0m", name))
continue
if uiTheme == "vs-dark":
mapping = TERMINAL_COLOR_MAP_DARK.copy()
else:
mapping = TERMINAL_COLOR_MAP_LIGHT.copy()
# map data to gnome terminal settings
for key in mapping:
if key in data["colors"]:
if data["colors"][key] != None:
mapping[key] = data["colors"][key]
# set background color
if "terminal.background" in data["colors"]:
tmp = data["colors"]["terminal.background"]
elif uiTheme == "vs-light" and "terminal.ansiWhite" in data["colors"]:
tmp = data["colors"]["terminal.ansiWhite"]
elif "editor.background" in data["colors"]:
tmp = data["colors"]["editor.background"]
mapping["terminal.background"] = tmp
        # map the json color settings onto the terminal palette
palette = []
for key in mapping:
if key == "terminal.background":
break
palette.append(hex2rgb(mapping[key]))
background = mapping["terminal.background"]
foreground = mapping["terminal.foreground"]
# check if theme already exists
exists = False
for key in confjson:
if "visible-name" in confjson[key]:
if confjson[key]["visible-name"][1:-1] == name:
exists = True
uuid = key
if args.verbose:
print("%(y)sProfile exists --%(e)s %(n)s %(y)s-- %(f)s%(e)s" % {"y":"\033[93m", "e":"\033[0m", "n":name, "f":theme_file})
if not exists:
# add uuid to list
uuid = uuidgen()
confjson[""][" "]["list"] = "%s, '%s']" % (confjson[""][" "]["list"][:-1], uuid)
# add settings under uuid key
tmp_confjson= DCONF_DEFAULT.copy()
tmp_confjson["visible-name"] = "'%s'" % name
tmp_confjson["palette"] = palette
tmp_confjson["background-color"] = "'%s'" % hex2rgb(background)
tmp_confjson["foreground-color"] = "'%s'" % hex2rgb(foreground)
confjson[":%s " % uuid] = tmp_confjson
print("%sInstalled%s %s" % ("\033[92m", "\033[0m", name))
new_profile_added = True
if not new_profile_added:
print("%sNo new profiles added%s" % ("\033[93m", "\033[0m"))
#check if first uuid is empty (happens when there were no previous profiles)
if confjson[""][" "]["list"][1] == ",":
confjson[""][" "]["list"] = "[" + confjson[""][" "]["list"][2:]
dconf = dconfjson.json_dconf(confjson)
# write to file
with open("output.conf", "w") as fout:
fout.write(dconf)
# load into gnome settings
dset("output.conf")
# Default colors
TERMINAL_COLOR_MAP_LIGHT = OrderedDict([
("terminal.ansiBlack", "#000000)"),
("terminal.ansiRed", "#cd3131"),
("terminal.ansiGreen", "#00bc00"),
("terminal.ansiYellow", "#949800"),
("terminal.ansiBlue", "#0451a5"),
("terminal.ansiMagenta", "#bc05bc"),
("terminal.ansiCyan", "#0598bc"),
("terminal.ansiWhite", "#555555"),
("terminal.ansiBrightBlack", "#666666"),
("terminal.ansiBrightRed", "#cd3131"),
("terminal.ansiBrightGreen", "#14ce14"),
("terminal.ansiBrightYellow", "#b5ba00"),
("terminal.ansiBrightBlue", "#0451a5"),
("terminal.ansiBrightMagenta", "#bc05bc"),
("terminal.ansiBrightCyan", "#0598bc"),
("terminal.ansiBrightWhite", "#a5a5a5"),
("terminal.background", "#ffffff"),
("terminal.foreground", "#333333")
])
TERMINAL_COLOR_MAP_DARK = OrderedDict([
("terminal.ansiBlack", "#000000)"),
("terminal.ansiRed", "#cd3131"),
("terminal.ansiGreen", "#0dbc79"),
("terminal.ansiYellow", "#e5e510"),
("terminal.ansiBlue", "#2472c8"),
("terminal.ansiMagenta", "#bc3fbc"),
("terminal.ansiCyan", "#11a8cd"),
("terminal.ansiWhite", "#e5e5e5"),
("terminal.ansiBrightBlack", "#666666"),
("terminal.ansiBrightRed", "#f14c4c"),
("terminal.ansiBrightGreen", "#23d18b"),
("terminal.ansiBrightYellow", "#f5f543"),
("terminal.ansiBrightBlue", "#3b8eea"),
("terminal.ansiBrightMagenta", "#d670d6"),
("terminal.ansiBrightCyan", "#29b8db"),
("terminal.ansiBrightWhite", "#e5e5e5"),
("terminal.background", "#1e1e1e"),
("terminal.foreground", "#cccccc")
])
DCONF_DEFAULT = OrderedDict([
("foreground-color", "\'rgb(239,239,227)\'"),
("visible-name", None),
("palette", None),
("cursor-background-color", "\'rgb(0,0,0)\'"),
("cursor-colors-set", "false"),
("highlight-colors-set", "false"),
("use-theme-colors", "false"),
("cursor-foreground-color", "\'rgb(255,255,255)\'"),
("bold-color-same-as-fg", "true"),
("bold-color", "\'rgb(0,0,0)\'"),
("background-color", "\'rgb(46,52,54)\'")
])
EMPTY_CONF = "[/]\nlist=[]\ndefault=''" |
py | b406d11f88ab4aba65c9f2bad68b12cc8fc3f538 | """Test for Torstens's quorum enumeration"""
from .quorums import enumerate_quorums
from .quorum_lachowski import contains_slice
def test_enumerate_quorums():
"""Test enumerate_quorums() with simple example"""
slices_by_node = {
1: [{1, 2, 3, 7}],
2: [{1, 2, 3, 7}],
3: [{1, 2, 3, 7}],
4: [{4, 5, 6, 7}],
5: [{4, 5, 6, 7}],
6: [{4, 5, 6, 7}],
7: [{7}],
}
def ex28_fbas(nodes_subset, node) -> bool:
return contains_slice(nodes_subset, slices_by_node, node)
quorums = list(enumerate_quorums((ex28_fbas, {1, 2, 3, 4, 5, 6, 7})))
assert set(quorums) == set(
[frozenset({7}),
frozenset({4, 5, 6, 7}),
frozenset({1, 2, 3, 7}),
frozenset({1, 2, 3, 4, 5, 6, 7})]
)
def test_enumerate_quorums_stellar_core():
"""Test enumerate_quorums() with stellar core style fbas"""
# init test:
stellar_core_orgs = [
{'name': "B", 'nodes': ["1", "2", "3"], 'limit': 2},
{'name': "A", 'nodes': ["1", "2", "3"], 'limit': 2},
{'name': "C", 'nodes': ["1", "2", "3"], 'limit': 2},
{'name': "D", 'nodes': ["1", "2", "3"], 'limit': 2},
{'name': "E", 'nodes': ["1", "2", "3"], 'limit': 2},
{'name': "F", 'nodes': ["1", "2", "3", "4", "5"], 'limit': 3}
]
stellar_core_nodes = set()
for org in stellar_core_orgs:
name, nodes = org['name'], org['nodes']
for node in nodes:
stellar_core_nodes.add(name + node)
threshold = 5
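    # a node subset passes the FBAS check iff at least `threshold` (5 of 6) orgs
    # each have `limit` of their own nodes inside the subset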
def stellar_core(subset: set, _: str) -> bool:
sufficient_orgs = 0
for org in stellar_core_orgs:
name, nodes, limit = org['name'], org['nodes'], org['limit']
sufficient_nodes = 0
for org_node in nodes:
node = str(name + org_node)
if node in subset:
sufficient_nodes += 1
if sufficient_nodes >= limit:
sufficient_orgs += 1
return sufficient_orgs >= threshold
quorums = list(enumerate_quorums((stellar_core, stellar_core_nodes)))
assert len(quorums) == 114688
|
py | b406d134dcaac7a7d8e6a66ab042dbbe7de4e210 | import yaml
import json
import os
import torch
from easydict import EasyDict
def load_config_from_yaml(path):
"""
Method to load the config file for
neural network training
:param path: yaml-filepath with configs stored
:return: easydict containing config
"""
    with open(path) as f:
        c = yaml.safe_load(f)
config = EasyDict(c)
return config
def load_config_from_json(path):
"""
Method to load the config file
from json files.
:param path: path to json file
:return: easydict containing config
"""
with open(path, 'r') as file:
data = json.load(file)
config = EasyDict(data)
return config
def load_experiment(path):
"""
Method to load experiment from path
:param path: path to experiment folder
:return: easydict containing config
"""
path = os.path.join(path, 'config.json')
config = load_config_from_json(path)
return config
def load_config(path):
"""
Wrapper method around different methods
loading config file based on file ending.
"""
if path[-4:] == 'yaml':
return load_config_from_yaml(path)
elif path[-4:] == 'json':
return load_config_from_json(path)
else:
raise ValueError('Unsupported file format for config')
def load_model(file, model):
checkpoint = file
if not os.path.exists(checkpoint):
raise FileNotFoundError("File doesn't exist {}".format(checkpoint))
try:
if torch.cuda.is_available():
checkpoint = torch.load(checkpoint)
else:
checkpoint = torch.load(checkpoint, map_location=torch.device('cpu'))
model.load_state_dict(checkpoint['state_dict'])
except:
print('loading model partly')
pretrained_dict = {k: v for k, v in checkpoint['state_dict'].items() if k in model.state_dict()}
        # state_dict() returns a copy, so update a local dict and load it back
        model_dict = model.state_dict()
        model_dict.update(pretrained_dict)
        model.load_state_dict(model_dict)
def load_pipeline(file, model):
checkpoint = file
if not os.path.exists(checkpoint):
raise FileNotFoundError("File doesn't exist {}".format(checkpoint))
try:
if torch.cuda.is_available():
checkpoint = torch.load(checkpoint)
else:
checkpoint = torch.load(checkpoint, map_location=torch.device('cpu'))
model.load_state_dict(checkpoint['pipeline_state_dict'])
except:
print('loading model partly')
pretrained_dict = {k: v for k, v in checkpoint['pipeline_state_dict'].items() if k in model.state_dict()}
        # state_dict() returns a copy, so update a local dict and load it back
        model_dict = model.state_dict()
        model_dict.update(pretrained_dict)
        model.load_state_dict(model_dict)
def load_checkpoint(checkpoint, model, optimizer=None):
"""Loads model parameters (state_dict) from file_path.
If optimizer is provided, loads state_dict of
optimizer assuming it is present in checkpoint.
Args:
checkpoint: (string) filename which needs to be loaded
model: (torch.nn.Module) model for which the parameters are loaded
optimizer: (torch.optim) optional: resume optimizer from checkpoint
"""
if not os.path.exists(checkpoint):
raise FileNotFoundError("File doesn't exist {}".format(checkpoint))
try:
if torch.cuda.is_available():
checkpoint = torch.load(checkpoint)
else:
checkpoint = torch.load(checkpoint, map_location=torch.device('cpu'))
model.load_state_dict(checkpoint['state_dict'])
except:
print('loading model partly')
pretrained_dict = {k: v for k, v in checkpoint['state_dict'].items() if k in model.state_dict()}
        # state_dict() returns a copy, so update a local dict and load it back
        model_dict = model.state_dict()
        model_dict.update(pretrained_dict)
        model.load_state_dict(model_dict)
if optimizer:
optimizer.load_state_dict(checkpoint['optim_dict'])
return checkpoint
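# Illustrative usage sketch (not part of the original module); the paths and the
# MyNetwork class below are placeholders:
#
#   config = load_config('configs/train.yaml')
#   model = MyNetwork(config)
#   optimizer = torch.optim.Adam(model.parameters())
#   load_checkpoint('runs/exp1/best.pth.tar', model, optimizer)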
|
py | b406d1bd014afc24d2cbaefa7aee0ef846075e4d | """Integer Field Type"""
# standard library
from typing import TYPE_CHECKING, Any, Callable, Dict, Union
# third-party
from pydantic.types import OptionalInt
# first-party
from tcex.input.field_types.exception import InvalidIntegerValue, InvalidType, InvalidVariableType
if TYPE_CHECKING: # pragma: no cover
# third-party
from pydantic.fields import ModelField
# first-party
from tcex.input.input import StringVariable
class Integer(int):
"""Integer Field Type"""
ge: OptionalInt = None
gt: OptionalInt = None
le: OptionalInt = None
lt: OptionalInt = None
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
"""Modify the field schema."""
def update_not_none(mapping: Dict[Any, Any], **update: Any) -> None:
mapping.update({k: v for k, v in update.items() if v is not None})
update_not_none(
field_schema,
exclusiveMinimum=cls.gt,
exclusiveMaximum=cls.lt,
minimum=cls.ge,
maximum=cls.le,
)
@classmethod
def __get_validators__(cls) -> Callable:
"""Run validators / modifiers on input."""
yield cls.validate_variable_type
yield cls.validate_type
yield cls.validate_value
@classmethod
def validate_type(cls, value: Union[int, str, 'StringVariable'], field: 'ModelField') -> int:
"""Raise exception if value is not a String type."""
if not isinstance(value, (int, str)):
raise InvalidType(
field_name=field.name, expected_types='(int, str)', provided_type=type(value)
)
return value
@classmethod
def validate_value(cls, value: Union[int, str, 'StringVariable'], field: 'ModelField') -> int:
"""Raise exception if value does not meet criteria."""
if isinstance(value, str):
value = int(value)
if cls.ge is not None and not value >= cls.ge:
raise InvalidIntegerValue(
field_name=field.name, operation='greater than or equal to', constraint=cls.ge
)
if cls.gt is not None and not value > cls.gt:
raise InvalidIntegerValue(
field_name=field.name, operation='greater than', constraint=cls.gt
)
if cls.le is not None and not value <= cls.le:
raise InvalidIntegerValue(
field_name=field.name, operation='less than or equal to', constraint=cls.le
)
if cls.lt is not None and not value < cls.lt:
raise InvalidIntegerValue(
field_name=field.name, operation='less than', constraint=cls.lt
)
return value
@classmethod
def validate_variable_type(
cls, value: Union[int, str, 'StringVariable'], field: 'ModelField'
) -> int:
"""Raise exception if value is not a String type."""
if hasattr(value, '_variable_type') and value._variable_type != 'String':
raise InvalidVariableType(
field_name=field.name, expected_type='String', provided_type=value._variable_type
)
return value
def integer(
gt: OptionalInt = None,
ge: OptionalInt = None,
lt: OptionalInt = None,
le: OptionalInt = None,
) -> type:
"""Return configured instance of String."""
namespace = dict(
gt=gt,
ge=ge,
lt=lt,
le=le,
)
return type('ConstrainedInteger', (Integer,), namespace)
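# Illustrative usage sketch (not part of the original module); assumes a pydantic
# model so the validators receive a ModelField:
#
#   from pydantic import BaseModel
#
#   class AppInputs(BaseModel):
#       count: integer(ge=1, le=10)
#
#   AppInputs(count='5')   # str input is coerced to int and passes the bounds
#   AppInputs(count=0)     # fails validation with InvalidIntegerValue (violates ge=1)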
|
py | b406d20330d35f056bf86df3a7ab2ee63246904e | import sys
from imp import reload
import configloader
import os
import hashlib
import datetime
import time
import requests
import rsa
import base64
from urllib import parse
import aiohttp
import asyncio
reload(sys)
def CurrentTime():
currenttime = int(time.mktime(datetime.datetime.now().timetuple()))
return str(currenttime)
class bilibili():
instance = None
def __new__(cls, *args, **kw):
if not cls.instance:
cls.instance = super(bilibili, cls).__new__(cls, *args, **kw)
fileDir = os.path.dirname(os.path.realpath('__file__'))
file_bilibili = fileDir + "/conf/bilibili.conf"
cls.instance.dic_bilibili = configloader.load_bilibili(file_bilibili)
cls.instance.bili_session = None
return cls.instance
@property
def bili_section(self):
if self.bili_session is None:
self.bili_session = aiohttp.ClientSession()
# print(0)
return self.bili_session
def calc_sign(self, str):
str = str + self.dic_bilibili['app_secret']
hash = hashlib.md5()
hash.update(str.encode('utf-8'))
sign = hash.hexdigest()
return sign
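    # e.g. calc_sign('access_key=...&appkey=...&ts=1514736000') returns the hex MD5
    # digest of that query string concatenated with app_secret; callers append it
    # to the request as '&sign=<digest>'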
def cnn_captcha(self,img):
url = "http://101.236.6.31:8080/code"
data = {"image": img}
        response = requests.post(url, data=data)
        captcha = response.text
        print("A captcha appeared for this login; recognition result: %s" % (captcha))
return captcha
def calc_name_passw(self,key, Hash, username, password):
pubkey = rsa.PublicKey.load_pkcs1_openssl_pem(key.encode())
password = base64.b64encode(rsa.encrypt((Hash + password).encode('utf-8'), pubkey))
password = parse.quote_plus(password)
username = parse.quote_plus(username)
return username, password
async def replay_request(self,response):
json_response = await response.json(content_type=None)
if json_response['code'] == 1024:
            print('Bilibili server error; pausing all requests for 5s before retrying, please wait')
await asyncio.sleep(5)
return True
else:
return False
async def bili_section_post(self, url, headers=None, data=None):
while True:
try:
response = await self.bili_section.post(url, headers=headers, data=data)
tag = await self.replay_request(response)
if tag:
continue
return response
except :
                #print('Network is unstable, retrying; please report to the developer!!!!')
#print(sys.exc_info()[0], sys.exc_info()[1])
continue
async def bili_section_get(self, url, headers=None, data=None, params=None):
while True:
try:
response = await self.bili_section.get(url, headers=headers, data=data, params=params)
tag = await self.replay_request(response)
if tag:
continue
return response
except :
                #print('Network is unstable, retrying; please report to the developer!!!!')
#print(sys.exc_info()[0], sys.exc_info()[1])
continue
    # 1:900 exchange (coin to silver)
async def request_doublegain_coin2silver(self):
#url: "/exchange/coin2silver",
data = {'coin': 10}
url = "https://api.live.bilibili.com/exchange/coin2silver"
response = await self.bili_section_post(url, data=data, headers=self.dic_bilibili['pcheaders'])
return response
async def post_watching_history(self, room_id):
data = {
"room_id": room_id,
"csrf_token": self.dic_bilibili['csrf']
}
url = "https://api.live.bilibili.com/room/v1/Room/room_entry_action"
response = await self.bili_section_post(url, data=data, headers=self.dic_bilibili['pcheaders'])
return response
async def silver2coin_web(self):
url = "https://api.live.bilibili.com/exchange/silver2coin"
response = await self.bili_section_post(url, headers=self.dic_bilibili['pcheaders'])
return response
async def silver2coin_app(self):
temp_params = 'access_key=' + self.dic_bilibili['access_key'] + '&actionKey=' + self.dic_bilibili[
'actionKey'] + '&appkey=' + self.dic_bilibili['appkey'] + '&build=' + self.dic_bilibili[
'build'] + '&device=' + self.dic_bilibili['device'] + '&mobi_app=' + self.dic_bilibili[
'mobi_app'] + '&platform=' + self.dic_bilibili['platform'] + '&ts=' + CurrentTime()
sign = self.calc_sign(temp_params)
app_url = "https://api.live.bilibili.com/AppExchange/silver2coin?" + temp_params + "&sign=" + sign
response1 = await self.bili_section_post(app_url, headers=self.dic_bilibili['appheaders'])
return response1
async def request_check_room(self, roomid):
url = "https://api.live.bilibili.com/room/v1/Room/room_init?id=" + str(roomid)
response = await self.bili_section_get(url, headers=self.dic_bilibili['pcheaders'])
return response
async def request_fetch_bag_list(self):
url = "https://api.live.bilibili.com/gift/v2/gift/bag_list"
response = await self.bili_section_get(url, headers=self.dic_bilibili['pcheaders'])
return response
async def request_check_taskinfo(self):
url = 'https://api.live.bilibili.com/i/api/taskInfo'
response = await self.bili_section_get(url, headers=self.dic_bilibili['pcheaders'])
return response
async def request_send_gift_web(self, giftid, giftnum, bagid, ruid, biz_id):
url = "https://api.live.bilibili.com/gift/v2/live/bag_send"
data = {
'uid': self.dic_bilibili['uid'],
'gift_id': giftid,
'ruid': ruid,
'gift_num': giftnum,
'bag_id': bagid,
'platform': 'pc',
'biz_code': 'live',
'biz_id': biz_id,
'rnd': CurrentTime(),
'storm_beat_id': '0',
'metadata': '',
'price': '0',
'csrf_token': self.dic_bilibili['csrf']
}
response = await self.bili_section_post(url, headers=self.dic_bilibili['pcheaders'], data=data)
return response
async def request_fetch_user_info(self):
url = "https://api.live.bilibili.com/i/api/liveinfo"
response = await self.bili_section_get(url, headers=self.dic_bilibili['pcheaders'])
return response
async def request_fetch_user_infor_ios(self):
        # of the long query string, only these few parameters actually matter
url = 'https://api.live.bilibili.com/mobile/getUser?access_key={}&platform=ios'.format(self.dic_bilibili['access_key'])
response = await self.bili_section_get(url)
return response
async def request_fetch_liveuser_info(self, real_roomid):
url = 'https://api.live.bilibili.com/live_user/v1/UserInfo/get_anchor_in_room?roomid={}'.format(real_roomid)
response = await self.bili_section_get(url)
return response
def request_load_img(self, url):
return requests.get(url)
async def request_fetchmedal(self):
url = 'https://api.live.bilibili.com/i/api/medal?page=1&pageSize=50'
response = await self.bili_section_post(url, headers=self.dic_bilibili['pcheaders'])
return response
def request_getkey(self):
url = 'https://passport.bilibili.com/api/oauth2/getKey'
temp_params = 'appkey=' + self.dic_bilibili['appkey']
sign = self.calc_sign(temp_params)
params = {'appkey': self.dic_bilibili['appkey'], 'sign': sign}
response = requests.post(url, data=params)
return response
async def get_gift_of_events_web(self, text1, text2, raffleid):
headers = {
'Accept': 'application/json, text/plain, */*',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36',
'cookie': self.dic_bilibili['cookie'],
'referer': text2
}
pc_url = 'https://api.live.bilibili.com/activity/v1/Raffle/join?roomid=' + str(
text1) + '&raffleId=' + str(raffleid)
pc_response = await self.bili_section_get(pc_url, headers=headers)
return pc_response
async def get_gift_of_events_app(self, text1, text2, raffleid):
headers = {
'Accept': 'application/json, text/plain, */*',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36',
'cookie': self.dic_bilibili['cookie'],
'referer': text2
}
temp_params = 'access_key=' + self.dic_bilibili['access_key'] + '&actionKey=' + self.dic_bilibili[
'actionKey'] + '&appkey=' + self.dic_bilibili['appkey'] + '&build=' + self.dic_bilibili[
'build'] + '&device=' + self.dic_bilibili['device'] + '&event_type='+self.dic_bilibili['activity_name']+'-'+ str(
raffleid) + '&mobi_app=' + self.dic_bilibili['mobi_app'] + '&platform=' + self.dic_bilibili[
'platform'] + '&room_id=' + str(
text1) + '&ts=' + CurrentTime()
params = temp_params + self.dic_bilibili['app_secret']
sign = self.calc_sign(temp_params)
true_url = 'https://api.live.bilibili.com/YunYing/roomEvent?' + temp_params + '&sign=' + sign
response1 = await self.bili_section_get(true_url, params=params, headers=headers)
return response1
async def get_gift_of_TV(self, real_roomid, raffleid):
temp_params = 'access_key=' + self.dic_bilibili['access_key'] + '&actionKey=' + self.dic_bilibili[
'actionKey'] + '&appkey=' + self.dic_bilibili['appkey'] + '&build=' + self.dic_bilibili[
'build'] + '&device=' + self.dic_bilibili['device'] + '&id=' + str(
raffleid) + '&mobi_app=' + self.dic_bilibili['mobi_app'] + '&platform=' + self.dic_bilibili[
'platform'] + '&roomid=' + str(
real_roomid) + '&ts=' + CurrentTime()
sign = self.calc_sign(temp_params)
true_url = 'https://api.live.bilibili.com/AppSmallTV/join?' + temp_params + '&sign=' + sign
response2 = await self.bili_section_get(true_url, headers=self.dic_bilibili['appheaders'])
return response2
async def get_gift_of_captain(self, roomid, id):
join_url = "https://api.live.bilibili.com/lottery/v1/lottery/join"
payload = {"roomid": roomid, "id": id, "type": "guard", "csrf_token": self.dic_bilibili['csrf']}
response2 = await self.bili_section_post(join_url, data=payload, headers=self.dic_bilibili['pcheaders'])
return response2
async def get_giftlist_of_events(self, text1):
headers = {
'Accept': 'application/json, text/plain, */*',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36',
'Accept-Language': 'zh-CN,zh;q=0.9',
            'accept-encoding': 'gzip, deflate',
'Host': 'api.live.bilibili.com',
}
url = 'https://api.live.bilibili.com/activity/v1/Raffle/check?roomid=' + str(text1)
response = await self.bili_section_get(url, headers=headers)
return response
async def get_giftlist_of_TV(self, real_roomid):
temp_params = 'access_key=' + self.dic_bilibili['access_key'] + '&actionKey=' + self.dic_bilibili[
'actionKey'] + '&appkey=' + self.dic_bilibili['appkey'] + '&build=' + self.dic_bilibili[
'build'] + '&device=' + self.dic_bilibili['device'] + \
'&mobi_app=' + self.dic_bilibili['mobi_app'] + '&platform=' + self.dic_bilibili[
'platform'] + '&roomid=' + str(
real_roomid) + '&ts=' + CurrentTime()
sign = self.calc_sign(temp_params)
check_url = 'https://api.live.bilibili.com/AppSmallTV/index?' + temp_params + '&sign=' + sign
headers = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36",
}
response = await self.bili_section_get(check_url, headers=headers)
return response
async def get_giftlist_of_captain(self, roomid):
true_url = 'https://api.live.bilibili.com/lottery/v1/lottery/check?roomid=' + str(roomid)
headers = {
"Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q = 0.8",
"Accept-Encoding":"gzip,async deflate,br",
"Accept-Language":"zh-CN",
"DNT": "1",
"Cookie":"LIVE_BUVID=AUTO7715232653604550",
"Connection":"keep-alive",
"Cache-Control":"max-age =0",
"Host":"api.live.bilibili.com",
"Upgrade-Insecure-Requests": "1",
"User-Agent":'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:59.0) Gecko/20100101 Firefox/59.0'
}
response1 = await self.bili_section_get(true_url,headers=headers)
return response1
def get_giftids_raffle(self, str):
return self.dic_bilibili['giftids_raffle'][str]
def get_giftids_raffle_keys(self):
return self.dic_bilibili['giftids_raffle'].keys()
async def get_activity_result(self, activity_roomid, activity_raffleid):
url = "https://api.live.bilibili.com/activity/v1/Raffle/notice?roomid=" + str(
activity_roomid) + "&raffleId=" + str(activity_raffleid)
headers = {
'Accept': 'application/json, text/plain, */*',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36',
'Accept-Language': 'zh-CN,zh;q=0.9',
            'accept-encoding': 'gzip, deflate',
'Host': 'api.live.bilibili.com',
'cookie': self.dic_bilibili['cookie'],
}
response = await self.bili_section_get(url, headers=headers)
return response
async def get_TV_result(self, TV_roomid, TV_raffleid):
url = "https://api.live.bilibili.com/gift/v2/smalltv/notice?roomid=" + str(TV_roomid) + "&raffleId=" + str(
TV_raffleid)
headers = {
'Accept': 'application/json, text/plain, */*',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36',
'Accept-Language': 'zh-CN,zh;q=0.9',
            'accept-encoding': 'gzip, deflate',
'Host': 'api.live.bilibili.com',
'cookie': self.dic_bilibili['cookie'],
}
response = await self.bili_section_get(url, headers=headers)
return response
async def pcpost_heartbeat(self):
url = 'https://api.live.bilibili.com/User/userOnlineHeart'
response = await self.bili_section_post(url, headers=self.dic_bilibili['pcheaders'])
return response
    # send the app heartbeat packet
async def apppost_heartbeat(self):
time = CurrentTime()
temp_params = 'access_key=' + self.dic_bilibili['access_key'] + '&actionKey=' + self.dic_bilibili[
'actionKey'] + '&appkey=' + self.dic_bilibili['appkey'] + '&build=' + self.dic_bilibili[
'build'] + '&device=' + self.dic_bilibili['device'] + '&mobi_app=' + self.dic_bilibili[
'mobi_app'] + '&platform=' + self.dic_bilibili['platform'] + '&ts=' + time
sign = self.calc_sign(temp_params)
url = 'https://api.live.bilibili.com/mobile/userOnlineHeart?' + temp_params + '&sign=' + sign
payload = {'roomid': 23058, 'scale': 'xhdpi'}
response = await self.bili_section_post(url, data=payload, headers=self.dic_bilibili['appheaders'])
return response
    # heartbeat gift
async def heart_gift(self):
url = "https://api.live.bilibili.com/gift/v2/live/heart_gift_receive?roomid=3&area_v2_id=34"
response = await self.bili_section_get(url, headers=self.dic_bilibili['pcheaders'])
return response
async def get_lotterylist(self, i):
url = "https://api.live.bilibili.com/lottery/v1/box/getStatus?aid=" + str(i)
response = await self.bili_section_get(url, headers=self.dic_bilibili['pcheaders'])
return response
async def get_gift_of_lottery(self, i, g):
url1 = 'https://api.live.bilibili.com/lottery/v1/box/draw?aid=' + str(i) + '&number=' + str(g + 1)
response1 = await self.bili_section_get(url1, headers=self.dic_bilibili['pcheaders'])
return response1
async def get_time_about_silver(self):
time = CurrentTime()
temp_params = 'access_key=' + self.dic_bilibili['access_key'] + '&actionKey=' + self.dic_bilibili[
'actionKey'] + '&appkey=' + self.dic_bilibili['appkey'] + '&build=' + self.dic_bilibili[
'build'] + '&device=' + self.dic_bilibili['device'] + '&mobi_app=' + self.dic_bilibili[
'mobi_app'] + '&platform=' + self.dic_bilibili['platform'] + '&ts=' + time
sign = self.calc_sign(temp_params)
GetTask_url = 'https://api.live.bilibili.com/mobile/freeSilverCurrentTask?' + temp_params + '&sign=' + sign
response = await self.bili_section_get(GetTask_url, headers=self.dic_bilibili['appheaders'])
return response
async def get_silver(self, timestart, timeend):
time = CurrentTime()
temp_params = 'access_key=' + self.dic_bilibili['access_key'] + '&actionKey=' + self.dic_bilibili[
'actionKey'] + '&appkey=' + self.dic_bilibili['appkey'] + '&build=' + self.dic_bilibili[
'build'] + '&device=' + self.dic_bilibili['device'] + '&mobi_app=' + self.dic_bilibili[
'mobi_app'] + '&platform=' + self.dic_bilibili[
'platform'] + '&time_end=' + timeend + '&time_start=' + timestart + '&ts=' + time
sign = self.calc_sign(temp_params)
url = 'https://api.live.bilibili.com/mobile/freeSilverAward?' + temp_params + '&sign=' + sign
response = await self.bili_section_get(url, headers=self.dic_bilibili['appheaders'])
return response
async def get_dailybag(self):
url = 'https://api.live.bilibili.com/gift/v2/live/receive_daily_bag'
response = await self.bili_section_get(url, headers=self.dic_bilibili['pcheaders'])
return response
async def get_dosign(self):
url = 'https://api.live.bilibili.com/sign/doSign'
response = await self.bili_section_get(url, headers=self.dic_bilibili['pcheaders'])
return response
async def get_dailytask(self):
url = 'https://api.live.bilibili.com/activity/v1/task/receive_award'
payload2 = {'task_id': 'double_watch_task'}
response2 = await self.bili_section_post(url, data=payload2, headers=self.dic_bilibili['appheaders'])
return response2
def get_grouplist(self):
url = "https://api.vc.bilibili.com/link_group/v1/member/my_groups"
pcheaders = self.dic_bilibili['pcheaders'].copy()
pcheaders['Host'] = "api.vc.bilibili.com"
response = requests.get(url, headers=pcheaders)
return response
def assign_group(self, i1, i2):
temp_params = "_device=" + self.dic_bilibili[
'device'] + "&_hwid=SX1NL0wuHCsaKRt4BHhIfRguTXxOfj5WN1BkBTdLfhstTn9NfUouFiUV&access_key=" + \
self.dic_bilibili['access_key'] + "&appkey=" + self.dic_bilibili['appkey'] + "&build=" + \
self.dic_bilibili['build'] + "&group_id=" + str(i1) + "&mobi_app=" + self.dic_bilibili[
'mobi_app'] + "&owner_id=" + str(i2) + "&platform=" + self.dic_bilibili[
'platform'] + "&src=xiaomi&trace_id=20171224024300024&ts=" + CurrentTime() + "&version=5.20.1.520001"
sign = self.calc_sign(temp_params)
url = "https://api.vc.bilibili.com/link_setting/v1/link_setting/sign_in?" + temp_params + "&sign=" + sign
appheaders = self.dic_bilibili['appheaders'].copy()
appheaders['Host'] = "api.vc.bilibili.com"
response = requests.get(url, headers=appheaders)
return response
async def gift_list(self):
url = "https://api.live.bilibili.com/gift/v2/live/room_gift_list?roomid=2721650&area_v2_id=86"
res = await self.bili_section_get(url)
return res
async def check_activity_exist(self):
url = "https://api.live.bilibili.com/activity/v1/Common/roomInfo?roomid=128666&ruid=18174739"
response = await self.bili_section_get(url)
return response
async def query_guard(self,name):
search_url = "https://search.bilibili.com/live?keyword=" + str(name) + "&page=1&search_type=live_user"
response = await self.bili_section_get(search_url)
return response |
py | b406d22ed7543d4c3f75bbc4a9c8a53274cd12f6 | from typing import List
import heapq
def solution(people: List[int], limit: int) -> int:
"""[summary]
사람들의 몸무게와 보트의 적재한계랑으로 필요한 최소보트수를 산출
Args:
people (List[int]): 사람들의 몸무게, l:[1:50000], e:[40:240]
limit (int): 구명보트 제한 무게, lim:[40:240]
단, 보트의 제한무게는 사람들 중 최대 무게보다 크게 주어지므로, 구하지 못하는 사람은 없다.
Returns:
int: 필요한 보트의 최소값
"""
boats = []
people.sort(reverse=True)
answer = 0
for person in people:
if not boats:
heapq.heappush(boats, -(limit - person))
answer += 1
continue
if person + boats[0] > 0:
heapq.heappush(boats, -(limit - person))
answer += 1
else:
heapq.heappop(boats)
return answer
if __name__ == "__main__":
p, l = [70, 50, 80, 50], 100
print(solution(p, l))
|
py | b406d3328f32771abed915fe99d87b2eb0b55cd8 | class TargetType:
"""
    This class is used to define the target variable type we're dealing with
"""
REGRESSION = 0
BINARY_CLASSIFICATION = 1
MULTICLASS_CLASSIFICATION = 2 |
py | b406d39b90f766807c812f1226830d635a494914 | # cmu_112_graphics.py
# version 0.8.6
# Pre-release for CMU 15-112-f20
# Requires Python 3.6 or later
import sys
if ((sys.version_info[0] != 3) or (sys.version_info[1] < 6)):
raise Exception('cmu_112_graphics.py requires Python version 3.6 or later.')
# Track version and file update timestamp
import datetime
MAJOR_VERSION = 0
MINOR_VERSION = 8.6 # version 0.8.6
LAST_UPDATED = datetime.date(year=2020, month=2, day=24)
# Pending changes:
# * Fix Windows-only bug: Position popup dialog box over app window (already works fine on Macs)
# * Add documentation
# * integrate sounds (probably from pyGame)
# * Improved methodIsOverridden to TopLevelApp and ModalApp
# * Save to animated gif and/or mp4 (with audio capture?)
# Deferred changes:
# * replace/augment tkinter canvas with PIL/Pillow imageDraw (perhaps with our own fn names)
# * use snake_case and CapWords
# Changes in v0.8.6
#  * f20
# Changes in v0.8.5
#  * Support loadImage from Modes
# Changes in v0.8.3 + v0.8.4
# * Use default empty Mode if none is provided
# * Add KeyRelease event binding
# * Drop user32.SetProcessDPIAware (caused window to be really tiny on some Windows machines)
# Changes in v0.8.1 + v0.8.2
# * print version number and last-updated date on load
# * restrict modifiers to just control key (was confusing with NumLock, etc)
# * replace hasModifiers with 'control-' prefix, as in 'control-A'
# * replace app._paused with app.paused, etc (use app._ for private variables)
# * use improved ImageGrabber import for linux
# Changes in v0.8.0
# * suppress more modifier keys (Super_L, Super_R, ...)
# * raise exception on event.keysym or event.char + works with key = 'Enter'
# * remove tryToInstall
# Changes in v0.7.4
# * renamed drawAll back to redrawAll :-)
# Changes in v0.7.3
# * Ignore mousepress-drag-release and defer configure events for drags in titlebar
# * Extend deferredRedrawAll to 100ms with replace=True and do not draw while deferred
# (together these hopefully fix Windows-only bug: file dialog makes window not moveable)
# * changed sizeChanged to not take event (use app.width and app.height)
# Changes in v0.7.2
# * Singleton App._theRoot instance (hopefully fixes all those pesky Tkinter errors-on-exit)
# * Use user32.SetProcessDPIAware to get resolution of screen grabs right on Windows-only (fine on Macs)
# * Replaces showGraphics() with runApp(...), which is a veneer for App(...) [more intuitive for pre-OOP part of course]
# * Fixes/updates images:
# * disallows loading images in redrawAll (raises exception)
# * eliminates cache from loadImage
# * eliminates app.getTkinterImage, so user now directly calls ImageTk.PhotoImage(image))
# * also create_image allows magic pilImage=image instead of image=ImageTk.PhotoImage(app.image)
# Changes in v0.7.1
# * Added keyboard shortcut:
# * cmd/ctrl/alt-x: hard exit (uses os._exit() to exit shell without tkinter error messages)
# * Fixed bug: shortcut keys stopped working after an MVC violation (or other exception)
# * In app.saveSnapshot(), add .png to path if missing
# * Added: Print scripts to copy-paste into shell to install missing modules (more automated approaches proved too brittle)
# Changes in v0.7
# * Added some image handling (requires PIL (retained) and pyscreenshot (later removed):
# * app.loadImage() # loads PIL/Pillow image from file, with file dialog, or from URL (http or https)
# * app.scaleImage() # scales a PIL/Pillow image
# * app.getTkinterImage() # converts PIL/Pillow image to Tkinter PhotoImage for use in create_image(...)
# * app.getSnapshot() # get a snapshot of the canvas as a PIL/Pillow image
# * app.saveSnapshot() # get and save a snapshot
# * Added app._paused, app.togglePaused(), and paused highlighting (red outline around canvas when paused)
# * Added keyboard shortcuts:
# * cmd/ctrl/alt-s: save a snapshot
# * cmd/ctrl/alt-p: pause/unpause
# * cmd/ctrl/alt-q: quit
# Changes in v0.6:
# * Added fnPrefix option to TopLevelApp (so multiple TopLevelApp's can be in one file)
# * Added showGraphics(drawFn) (for graphics-only drawings before we introduce animations)
# Changes in v0.5:
# * Added:
# * app.winx and app.winy (and add winx,winy parameters to app.__init__, and sets these on configure events)
# * app.setSize(width, height)
# * app.setPosition(x, y)
# * app.quit()
# * app.showMessage(message)
# * app.getUserInput(prompt)
# * App.lastUpdated (instance of datetime.date)
# * Show popup dialog box on all exceptions (not just for MVC violations)
# * Draw (in canvas) "Exception! App Stopped! (See console for details)" for any exception
# * Replace callUserMethod() with more-general @_safeMethod decorator (also handles exceptions outside user methods)
# * Only include lines from user's code (and not our framework nor tkinter) in stack traces
# * Require Python version (3.6 or greater)
# Changes in v0.4:
# * Added __setattr__ to enforce Type 1A MVC Violations (setting app.x in redrawAll) with better stack trace
# * Added app._deferredRedrawAll() (avoids resizing drawing/crashing bug on some platforms)
# * Added deferredMethodCall() and app._afterIdMap to generalize afterId handling
# * Use (_ is None) instead of (_ == None)
# Changes in v0.3:
# * Fixed "event not defined" bug in sizeChanged handlers.
# * draw "MVC Violation" on Type 2 violation (calling draw methods outside redrawAll)
# Changes in v0.2:
# * Handles another MVC violation (now detects drawing on canvas outside of redrawAll)
# * App stops running when an exception occurs (in user code) (stops cascading errors)
# Changes in v0.1:
# * OOPy + supports inheritance + supports multiple apps in one file + etc
# * uses import instead of copy-paste-edit starter code + no "do not edit code below here!"
# * no longer uses Struct (which was non-Pythonic and a confusing way to sort-of use OOP)
# * Includes an early version of MVC violation handling (detects model changes in redrawAll)
# * added events:
# * appStarted (no init-vs-__init__ confusion)
# * appStopped (for cleanup)
# * keyReleased (well, sort of works) + mouseReleased
# * mouseMoved + mouseDragged
# * sizeChanged (when resizing window)
# * improved key names (just use event.key instead of event.char and/or event.keysym + use names for 'Enter', 'Escape', ...)
# * improved function names (renamed redrawAll to drawAll)
# * improved (if not perfect) exiting without that irksome Tkinter error/bug
# * app has a title in the titlebar (also shows window's dimensions)
# * supports Modes and ModalApp (see ModalApp and Mode, and also see TestModalApp example)
# * supports TopLevelApp (using top-level functions instead of subclasses and methods)
# * supports version checking with App.majorVersion, App.minorVersion, and App.version
# * logs drawing calls to support autograding views (still must write that autograder, but this is a very helpful first step)
from tkinter import *
from tkinter import messagebox, simpledialog, filedialog
import inspect, copy, traceback
import sys, os
from io import BytesIO
def failedImport(importName, installName=None):
installName = installName or importName
print('**********************************************************')
print(f'** Cannot import {importName} -- it seems you need to install {installName}')
print(f'** This may result in limited functionality or even a runtime error.')
print('**********************************************************')
print()
try: from PIL import Image, ImageTk
except ModuleNotFoundError: failedImport('PIL', 'pillow')
if sys.platform.startswith('linux'):
try: import pyscreenshot as ImageGrabber
except ModuleNotFoundError: failedImport('pyscreenshot')
else:
try: from PIL import ImageGrab as ImageGrabber
except ModuleNotFoundError: pass # Our PIL warning is already printed above
try: import requests
except ModuleNotFoundError: failedImport('requests')
def getHash(obj):
# This is used to detect MVC violations in redrawAll
# @TODO: Make this more robust and efficient
try:
return getHash(obj.__dict__)
except:
if (isinstance(obj, list)): return getHash(tuple([getHash(v) for v in obj]))
elif (isinstance(obj, set)): return getHash(sorted(obj))
elif (isinstance(obj, dict)): return getHash(tuple([obj[key] for key in sorted(obj)]))
else:
try: return hash(obj)
except: return getHash(repr(obj))
class WrappedCanvas(Canvas):
# Enforces MVC: no drawing outside calls to redrawAll
# Logs draw calls (for autograder) in canvas.loggedDrawingCalls
def __init__(wrappedCanvas, app):
wrappedCanvas.loggedDrawingCalls = [ ]
wrappedCanvas.logDrawingCalls = True
wrappedCanvas.inRedrawAll = False
wrappedCanvas.app = app
super().__init__(app._root, width=app.width, height=app.height)
def log(self, methodName, args, kwargs):
if (not self.inRedrawAll):
self.app._mvcViolation('you may not use the canvas (the view) outside of redrawAll')
if (self.logDrawingCalls):
self.loggedDrawingCalls.append((methodName, args, kwargs))
def create_arc(self, *args, **kwargs): self.log('create_arc', args, kwargs); return super().create_arc(*args, **kwargs)
def create_bitmap(self, *args, **kwargs): self.log('create_bitmap', args, kwargs); return super().create_bitmap(*args, **kwargs)
def create_line(self, *args, **kwargs): self.log('create_line', args, kwargs); return super().create_line(*args, **kwargs)
def create_oval(self, *args, **kwargs): self.log('create_oval', args, kwargs); return super().create_oval(*args, **kwargs)
def create_polygon(self, *args, **kwargs): self.log('create_polygon', args, kwargs); return super().create_polygon(*args, **kwargs)
def create_rectangle(self, *args, **kwargs): self.log('create_rectangle', args, kwargs); return super().create_rectangle(*args, **kwargs)
def create_text(self, *args, **kwargs): self.log('create_text', args, kwargs); return super().create_text(*args, **kwargs)
def create_window(self, *args, **kwargs): self.log('create_window', args, kwargs); return super().create_window(*args, **kwargs)
def create_image(self, *args, **kwargs):
self.log('create_image', args, kwargs);
usesImage = 'image' in kwargs
usesPilImage = 'pilImage' in kwargs
if ((not usesImage) and (not usesPilImage)):
raise Exception('create_image requires an image to draw')
elif (usesImage and usesPilImage):
raise Exception('create_image cannot use both an image and a pilImage')
elif (usesPilImage):
pilImage = kwargs['pilImage']
del kwargs['pilImage']
if (not isinstance(pilImage, Image.Image)):
raise Exception('create_image: pilImage value is not an instance of a PIL/Pillow image')
image = ImageTk.PhotoImage(pilImage)
else:
image = kwargs['image']
if (isinstance(image, Image.Image)):
raise Exception('create_image: image must not be an instance of a PIL/Pillow image\n' +
'You perhaps meant to convert from PIL to Tkinter, like so:\n' +
' canvas.create_image(x, y, image=ImageTk.PhotoImage(image))')
kwargs['image'] = image
return super().create_image(*args, **kwargs)
class App(object):
majorVersion = MAJOR_VERSION
minorVersion = MINOR_VERSION
version = f'{majorVersion}.{minorVersion}'
lastUpdated = LAST_UPDATED
_theRoot = None # singleton Tkinter root object
####################################
# User Methods:
####################################
def redrawAll(app, canvas): pass # draw (view) the model in the canvas
def appStarted(app): pass # initialize the model (app.xyz)
def appStopped(app): pass # cleanup after app is done running
def keyPressed(app, event): pass # use event.key
def keyReleased(app, event): pass # use event.key
def mousePressed(app, event): pass # use event.x and event.y
def mouseReleased(app, event): pass # use event.x and event.y
def mouseMoved(app, event): pass # use event.x and event.y
def mouseDragged(app, event): pass # use event.x and event.y
def timerFired(app): pass # respond to timer events
def sizeChanged(app): pass # respond to window size changes
####################################
# Implementation:
####################################
def __init__(app, width=300, height=300, x=0, y=0, title=None, autorun=True, mvcCheck=True, logDrawingCalls=True):
app.winx, app.winy, app.width, app.height = x, y, width, height
app.timerDelay = 100 # milliseconds
app.mouseMovedDelay = 50 # ditto
app._title = title
app._mvcCheck = mvcCheck
app._logDrawingCalls = logDrawingCalls
app._running = app._paused = False
app._mousePressedOutsideWindow = False
if autorun: app.run()
def setSize(app, width, height):
app._root.geometry(f'{width}x{height}')
def setPosition(app, x, y):
app._root.geometry(f'+{x}+{y}')
def showMessage(app, message):
messagebox.showinfo('showMessage', message, parent=app._root)
def getUserInput(app, prompt):
return simpledialog.askstring('getUserInput', prompt)
def loadImage(app, path=None):
if (app._canvas.inRedrawAll):
raise Exception('Cannot call loadImage in redrawAll')
if (path is None):
path = filedialog.askopenfilename(initialdir=os.getcwd(), title='Select file: ',filetypes = (('Image files','*.png *.gif *.jpg'),('all files','*.*')))
if (not path): return None
if (path.startswith('http')):
response = requests.request('GET', path) # path is a URL!
image = Image.open(BytesIO(response.content))
else:
image = Image.open(path)
return image
def scaleImage(app, image, scale, antialias=False):
# antialiasing is higher-quality but slower
resample = Image.ANTIALIAS if antialias else Image.NEAREST
return image.resize((round(image.width*scale), round(image.height*scale)), resample=resample)
def getSnapshot(app):
app._showRootWindow()
x0 = app._root.winfo_rootx() + app._canvas.winfo_x()
y0 = app._root.winfo_rooty() + app._canvas.winfo_y()
result = ImageGrabber.grab((x0,y0,x0+app.width,y0+app.height))
return result
def saveSnapshot(app):
path = filedialog.asksaveasfilename(initialdir=os.getcwd(), title='Select file: ',filetypes = (('png files','*.png'),('all files','*.*')))
if (path):
# defer call to let filedialog close (and not grab those pixels)
if (not path.endswith('.png')): path += '.png'
app._deferredMethodCall(afterId='saveSnapshot', afterDelay=0, afterFn=lambda:app.getSnapshot().save(path))
def _togglePaused(app):
app._paused = not app._paused
def quit(app):
app._running = False
app._root.quit() # break out of root.mainloop() without closing window!
def __setattr__(app, attr, val):
d = app.__dict__
d[attr] = val
canvas = d.get('_canvas', None)
        if (d.get('_running', False) and
            d.get('_mvcCheck', False) and
(canvas is not None) and
canvas.inRedrawAll):
app._mvcViolation(f'you may not change app.{attr} in the model while in redrawAll (the view)')
def _printUserTraceback(app, exception, tb):
stack = traceback.extract_tb(tb)
lines = traceback.format_list(stack)
inRedrawAllWrapper = False
printLines = [ ]
for line in lines:
if (('"cmu_112_graphics.py"' not in line) and
('/cmu_112_graphics.py' not in line) and
('\\cmu_112_graphics.py' not in line) and
('/tkinter/' not in line) and
('\\tkinter\\' not in line)):
printLines.append(line)
if ('redrawAllWrapper' in line):
inRedrawAllWrapper = True
if (len(printLines) == 0):
# No user code in trace, so we have to use all the code (bummer),
# but not if we are in a redrawAllWrapper...
if inRedrawAllWrapper:
printLines = [' No traceback available. Error occurred in redrawAll.\n']
else:
printLines = lines
print('Traceback (most recent call last):')
for line in printLines: print(line, end='')
print(f'Exception: {exception}')
def _safeMethod(appMethod):
def m(*args, **kwargs):
app = args[0]
try:
return appMethod(*args, **kwargs)
except Exception as e:
app._running = False
app._printUserTraceback(e, sys.exc_info()[2])
if ('_canvas' in app.__dict__):
app._canvas.inRedrawAll = True # not really, but stops recursive MVC Violations!
app._canvas.create_rectangle(0, 0, app.width, app.height, fill=None, width=10, outline='red')
app._canvas.create_rectangle(10, app.height-50, app.width-10, app.height-10,
fill='white', outline='red', width=4)
app._canvas.create_text(app.width/2, app.height-40, text=f'Exception! App Stopped!', fill='red', font='Arial 12 bold')
app._canvas.create_text(app.width/2, app.height-20, text=f'See console for details', fill='red', font='Arial 12 bold')
app._canvas.update()
app.showMessage(f'Exception: {e}\nClick ok then see console for details.')
return m
def _methodIsOverridden(app, methodName):
return (getattr(type(app), methodName) is not getattr(App, methodName))
def _mvcViolation(app, errMsg):
app._running = False
raise Exception('MVC Violation: ' + errMsg)
@_safeMethod
def _redrawAllWrapper(app):
if (not app._running): return
if ('deferredRedrawAll' in app._afterIdMap): return # wait for pending call
app._canvas.inRedrawAll = True
app._canvas.delete(ALL)
width,outline = (10,'red') if app._paused else (0,'white')
app._canvas.create_rectangle(0, 0, app.width, app.height, fill='white', width=width, outline=outline)
app._canvas.loggedDrawingCalls = [ ]
app._canvas.logDrawingCalls = app._logDrawingCalls
hash1 = getHash(app) if app._mvcCheck else None
try:
app.redrawAll(app._canvas)
hash2 = getHash(app) if app._mvcCheck else None
if (hash1 != hash2):
app._mvcViolation('you may not change the app state (the model) in redrawAll (the view)')
finally:
app._canvas.inRedrawAll = False
app._canvas.update()
def _deferredMethodCall(app, afterId, afterDelay, afterFn, replace=False):
def afterFnWrapper():
app._afterIdMap.pop(afterId, None)
afterFn()
id = app._afterIdMap.get(afterId, None)
if ((id is None) or replace):
if id: app._root.after_cancel(id)
app._afterIdMap[afterId] = app._root.after(afterDelay, afterFnWrapper)
def _deferredRedrawAll(app):
app._deferredMethodCall(afterId='deferredRedrawAll', afterDelay=100, afterFn=app._redrawAllWrapper, replace=True)
@_safeMethod
def _appStartedWrapper(app):
app.appStarted()
app._redrawAllWrapper()
_keyNameMap = { '\t':'Tab', '\n':'Enter', '\r':'Enter', '\b':'Backspace',
chr(127):'Delete', chr(27):'Escape', ' ':'Space' }
@staticmethod
def _useEventKey(attr):
raise Exception(f'Use event.key instead of event.{attr}')
@staticmethod
def _getEventKeyInfo(event, keysym, char):
key = c = char
hasControlKey = (event.state & 0x4 != 0)
if ((c in [None, '']) or (len(c) > 1) or (ord(c) > 255)):
key = keysym
if (key.endswith('_L') or
key.endswith('_R') or
key.endswith('_Lock')):
key = 'Modifier_Key'
elif (c in App._keyNameMap):
key = App._keyNameMap[c]
elif ((len(c) == 1) and (1 <= ord(c) <= 26)):
key = chr(ord('a')-1 + ord(c))
hasControlKey = True
if hasControlKey and (len(key) == 1):
# don't add control- prefix to Enter, Tab, Escape, ...
key = 'control-' + key
return key
class KeyEventWrapper(Event):
def __init__(self, event):
keysym, char = event.keysym, event.char
del event.keysym
del event.char
for key in event.__dict__:
if (not key.startswith('__')):
self.__dict__[key] = event.__dict__[key]
self.key = App._getEventKeyInfo(event, keysym, char)
keysym = property(lambda *args: App._useEventKey('keysym'),
lambda *args: App._useEventKey('keysym'))
char = property(lambda *args: App._useEventKey('char'),
lambda *args: App._useEventKey('char'))
@_safeMethod
def _keyPressedWrapper(app, event):
event = App.KeyEventWrapper(event)
if (event.key == 'control-s'):
app.saveSnapshot()
elif (event.key == 'control-p'):
app._togglePaused()
app._redrawAllWrapper()
elif (event.key == 'control-q'):
app.quit()
elif (event.key == 'control-x'):
os._exit(0) # hard exit avoids tkinter error messages
elif (app._running and
(not app._paused) and
app._methodIsOverridden('keyPressed') and
(not event.key == 'Modifier_Key')):
app.keyPressed(event)
app._redrawAllWrapper()
@_safeMethod
def _keyReleasedWrapper(app, event):
if (not app._running) or app._paused or (not app._methodIsOverridden('keyReleased')): return
event = App.KeyEventWrapper(event)
if (not event.key == 'Modifier_Key'):
app.keyReleased(event)
app._redrawAllWrapper()
@_safeMethod
def _mousePressedWrapper(app, event):
if (not app._running) or app._paused: return
if ((event.x < 0) or (event.x > app.width) or
(event.y < 0) or (event.y > app.height)):
app._mousePressedOutsideWindow = True
else:
app._mousePressedOutsideWindow = False
app._mouseIsPressed = True
app._lastMousePosn = (event.x, event.y)
if (app._methodIsOverridden('mousePressed')):
app.mousePressed(event)
app._redrawAllWrapper()
@_safeMethod
def _mouseReleasedWrapper(app, event):
if (not app._running) or app._paused: return
app._mouseIsPressed = False
if app._mousePressedOutsideWindow:
app._mousePressedOutsideWindow = False
app._sizeChangedWrapper()
else:
app._lastMousePosn = (event.x, event.y)
if (app._methodIsOverridden('mouseReleased')):
app.mouseReleased(event)
app._redrawAllWrapper()
@_safeMethod
def _timerFiredWrapper(app):
if (not app._running) or (not app._methodIsOverridden('timerFired')): return
if (not app._paused):
app.timerFired()
app._redrawAllWrapper()
app._deferredMethodCall(afterId='_timerFiredWrapper', afterDelay=app.timerDelay, afterFn=app._timerFiredWrapper)
@_safeMethod
def _sizeChangedWrapper(app, event=None):
if (not app._running): return
if (event and ((event.width < 2) or (event.height < 2))): return
if (app._mousePressedOutsideWindow): return
app.width,app.height,app.winx,app.winy = [int(v) for v in app._root.winfo_geometry().replace('x','+').split('+')]
if (app._lastWindowDims is None):
app._lastWindowDims = (app.width, app.height, app.winx, app.winy)
else:
newDims =(app.width, app.height, app.winx, app.winy)
if (app._lastWindowDims != newDims):
app._lastWindowDims = newDims
app.updateTitle()
app.sizeChanged()
app._deferredRedrawAll() # avoid resize crashing on some platforms
@_safeMethod
def _mouseMotionWrapper(app):
if (not app._running): return
mouseMovedExists = app._methodIsOverridden('mouseMoved')
mouseDraggedExists = app._methodIsOverridden('mouseDragged')
if ((not app._paused) and
(not app._mousePressedOutsideWindow) and
(((not app._mouseIsPressed) and mouseMovedExists) or
(app._mouseIsPressed and mouseDraggedExists))):
class MouseMotionEvent(object): pass
event = MouseMotionEvent()
root = app._root
event.x = root.winfo_pointerx() - root.winfo_rootx()
event.y = root.winfo_pointery() - root.winfo_rooty()
if ((app._lastMousePosn != (event.x, event.y)) and
(event.x >= 0) and (event.x <= app.width) and
(event.y >= 0) and (event.y <= app.height)):
if (app._mouseIsPressed): app.mouseDragged(event)
else: app.mouseMoved(event)
app._lastMousePosn = (event.x, event.y)
app._redrawAllWrapper()
if (mouseMovedExists or mouseDraggedExists):
app._deferredMethodCall(afterId='mouseMotionWrapper', afterDelay=app.mouseMovedDelay, afterFn=app._mouseMotionWrapper)
def updateTitle(app):
app._title = app._title or type(app).__name__
app._root.title(f'{app._title} ({app.width} x {app.height})')
def getQuitMessage(app):
appLabel = type(app).__name__
if (app._title != appLabel):
if (app._title.startswith(appLabel)):
appLabel = app._title
else:
appLabel += f" '{app._title}'"
return f"*** Closing {appLabel}. Bye! ***\n"
def _showRootWindow(app):
root = app._root
root.update(); root.deiconify(); root.lift(); root.focus()
def _hideRootWindow(app):
root = app._root
root.withdraw()
@_safeMethod
def run(app):
app._mouseIsPressed = False
app._lastMousePosn = (-1, -1)
app._lastWindowDims= None # set in sizeChangedWrapper
app._afterIdMap = dict()
# create the singleton root window
if (App._theRoot is None):
App._theRoot = Tk()
App._theRoot.createcommand('exit', lambda: '') # when user enters cmd-q, ignore here (handled in keyPressed)
App._theRoot.protocol('WM_DELETE_WINDOW', lambda: App._theRoot.app.quit()) # when user presses 'x' in title bar
App._theRoot.bind("<Button-1>", lambda event: App._theRoot.app._mousePressedWrapper(event))
App._theRoot.bind("<B1-ButtonRelease>", lambda event: App._theRoot.app._mouseReleasedWrapper(event))
App._theRoot.bind("<KeyPress>", lambda event: App._theRoot.app._keyPressedWrapper(event))
App._theRoot.bind("<KeyRelease>", lambda event: App._theRoot.app._keyReleasedWrapper(event))
App._theRoot.bind("<Configure>", lambda event: App._theRoot.app._sizeChangedWrapper(event))
else:
App._theRoot.canvas.destroy()
app._root = root = App._theRoot # singleton root!
root.app = app
root.geometry(f'{app.width}x{app.height}+{app.winx}+{app.winy}')
app.updateTitle()
# create the canvas
root.canvas = app._canvas = WrappedCanvas(app)
app._canvas.pack(fill=BOTH, expand=YES)
# initialize, start the timer, and launch the app
app._running = True
app._paused = False
app._appStartedWrapper()
app._timerFiredWrapper()
app._mouseMotionWrapper()
app._showRootWindow()
root.mainloop()
app._hideRootWindow()
app._running = False
for afterId in app._afterIdMap: app._root.after_cancel(app._afterIdMap[afterId])
app._afterIdMap.clear() # for safety
app.appStopped()
print(app.getQuitMessage())
####################################
# TopLevelApp:
# (with top-level functions not subclassses and methods)
####################################
class TopLevelApp(App):
_apps = dict() # maps fnPrefix to app
def __init__(app, fnPrefix='', **kwargs):
if (fnPrefix in TopLevelApp._apps):
print(f'Quitting previous version of {fnPrefix} TopLevelApp.')
TopLevelApp._apps[fnPrefix].quit()
if ((fnPrefix != '') and ('title' not in kwargs)):
kwargs['title'] = f"TopLevelApp '{fnPrefix}'"
TopLevelApp._apps[fnPrefix] = app
app._fnPrefix = fnPrefix
app._callersGlobals = inspect.stack()[1][0].f_globals
super().__init__(**kwargs)
def _callFn(app, fn, *args):
fn = app._fnPrefix + fn
if (fn in app._callersGlobals): app._callersGlobals[fn](*args)
def redrawAll(app, canvas): app._callFn('redrawAll', app, canvas)
def appStarted(app): app._callFn('appStarted', app)
def appStopped(app): app._callFn('appStopped', app)
def keyPressed(app, event): app._callFn('keyPressed', app, event)
def keyReleased(app, event): app._callFn('keyReleased', app, event)
def mousePressed(app, event): app._callFn('mousePressed', app, event)
def mouseReleased(app, event): app._callFn('mouseReleased', app, event)
def mouseMoved(app, event): app._callFn('mouseMoved', app, event)
def mouseDragged(app, event): app._callFn('mouseDragged', app, event)
def timerFired(app): app._callFn('timerFired', app)
def sizeChanged(app): app._callFn('sizeChanged', app)
####################################
# ModalApp + Mode:
####################################
class ModalApp(App):
def __init__(app, activeMode=None, **kwargs):
app._running = False
app._activeMode = None
app.setActiveMode(activeMode)
super().__init__(**kwargs)
def setActiveMode(app, mode):
if (mode == None): mode = Mode() # default empty mode
if (not isinstance(mode, Mode)): raise Exception('activeMode must be a mode!')
if (mode.app not in [None, app]): raise Exception('Modes cannot be added to two different apps!')
if (app._activeMode != mode):
mode.app = app
if (app._activeMode != None): app._activeMode.modeDeactivated()
app._activeMode = mode
if (app._running): app.startActiveMode()
def startActiveMode(app):
app._activeMode.width, app._activeMode.height = app.width, app.height
if (not app._activeMode._appStartedCalled):
app._activeMode.appStarted() # called once per mode
app._activeMode._appStartedCalled = True
app._activeMode.modeActivated() # called each time a mode is activated
app._redrawAllWrapper()
def redrawAll(app, canvas):
if (app._activeMode != None): app._activeMode.redrawAll(canvas)
def appStarted(app):
if (app._activeMode != None): app.startActiveMode()
def appStopped(app):
if (app._activeMode != None): app._activeMode.modeDeactivated()
def keyPressed(app, event):
if (app._activeMode != None): app._activeMode.keyPressed(event)
def keyReleased(app, event):
if (app._activeMode != None): app._activeMode.keyReleased(event)
def mousePressed(app, event):
if (app._activeMode != None): app._activeMode.mousePressed(event)
def mouseReleased(app, event):
if (app._activeMode != None): app._activeMode.mouseReleased(event)
def mouseMoved(app, event):
if (app._activeMode != None): app._activeMode.mouseMoved(event)
def mouseDragged(app, event):
if (app._activeMode != None): app._activeMode.mouseDragged(event)
def timerFired(app):
if (app._activeMode != None): app._activeMode.timerFired()
def sizeChanged(app):
if (app._activeMode != None):
app._activeMode.width, app._activeMode.height = app.width, app.height
app._activeMode.sizeChanged()
class Mode(App):
def __init__(mode, **kwargs):
mode.app = None
mode._appStartedCalled = False
super().__init__(autorun=False, **kwargs)
def modeActivated(mode): pass
def modeDeactivated(mode): pass
def loadImage(mode, path=None): return mode.app.loadImage(path)
####################################
# runApp()
####################################
'''
def showGraphics(drawFn, **kwargs):
class GraphicsApp(App):
def __init__(app, **kwargs):
if ('title' not in kwargs):
kwargs['title'] = drawFn.__name__
super().__init__(**kwargs)
def redrawAll(app, canvas):
drawFn(app, canvas)
app = GraphicsApp(**kwargs)
'''
runApp = TopLevelApp
print(f'Loaded cmu_112_graphics version {App.version} (last updated {App.lastUpdated})')
if (__name__ == '__main__'):
try: import cmu_112_graphics_tests
except: pass
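# Minimal usage sketch (illustrative, not part of this file): subclass App and
# override the event methods listed in the "User Methods" section above.
#
#   from cmu_112_graphics import App
#
#   class DotApp(App):
#       def appStarted(app):
#           app.cx, app.cy = app.width/2, app.height/2
#       def mousePressed(app, event):
#           app.cx, app.cy = event.x, event.y
#       def redrawAll(app, canvas):
#           canvas.create_oval(app.cx-10, app.cy-10, app.cx+10, app.cy+10, fill='blue')
#
#   DotApp(width=400, height=400)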
|
py | b406d42704942b66a5cb5adb0cb210f7ad7fd9d2 | # coding: utf-8
# Copyright 2013 The Font Bakery Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# See AUTHORS.txt for the list of Authors and LICENSE.txt for the License.
from bakery_cli.scripts import genmetadata
class Metadata(object):
def __init__(self, bakery):
self.project_root = bakery.project_root
self.builddir = bakery.build_dir
self.bakery = bakery
def execute(self, pipedata, prefix=""):
task = self.bakery.logging_task('Generate METADATA.json (fontbakery-build-metadata.py)')
if self.bakery.forcerun:
return
self.bakery.logging_cmd('fontbakery-build-metadata.py %s' % self.builddir)
try:
# reassign ansiprint to our own method
genmetadata.ansiprint = self.ansiprint
genmetadata.run(self.builddir)
self.bakery.logging_task_done(task)
except Exception as e:
self.bakery.logging_err(e.message)
self.bakery.logging_task_done(task, failed=True)
raise
def ansiprint(self, message, color):
self.bakery.logging_raw(u'{}\n'.format(message.encode('unicode_escape', 'ignore')))
|
py | b406d441e073990926d181593128383f307fcdd9 | from pathlib import Path
import pytest
import sentencepiece as spm
import spacy
from spacy.vocab import Vocab
from camphr.lang.sentencepiece import EXTS, SentencePieceLang
@pytest.fixture
def nlp(spiece_path):
return SentencePieceLang(Vocab(), meta={"tokenizer": {"model_path": spiece_path}})
TESTCASE = [
(" New York ", ["New", "Y", "or", "k"]),
("今日はいい天気だった", ["今日は", "いい", "天気", "だった"]),
(" 今日は\tいい天気 だった", ["今日は", "いい", "天気", "だった"]),
]
@pytest.mark.parametrize("text,tokens", TESTCASE)
def test_sentencepiece(nlp, text: str, tokens, spiece: spm.SentencePieceProcessor):
doc = nlp(text)
assert doc.text == text.replace(" ", " ").replace("\t", " ").strip()
for token, expected in zip(doc, tokens):
assert token.text == expected
# doc test
pieces_ = spiece.encode_as_pieces(text)
assert doc._.get(EXTS.pieces_) == pieces_
def test_serialize(nlp: SentencePieceLang, tmpdir: Path):
nlp.to_disk(str(tmpdir))
nlp2 = spacy.load(str(tmpdir))
text, tokens = TESTCASE[0]
doc = nlp2(text)
assert doc.text == text.replace(" ", " ").replace("\t", " ").strip()
for token, expected in zip(doc, tokens):
assert token.text == expected
|
py | b406d453a1d70553b7cf488918ba85c4208dc5a7 | from django.db import models
from django.core.exceptions import ValidationError
from mighty.models.base import Base
from mighty.applications.address import translates as _, fields, get_address_backend
from mighty.applications.address.apps import AddressConfig as conf
address_backend = get_address_backend()
CHOICES_WAYS = sorted(list(_.WAYS), key=lambda x: x[1])
class AddressNoBase(models.Model):
backend_id = models.CharField(_.address, max_length=255, null=True, blank=True)
address = models.CharField(_.address, max_length=255, null=True, blank=True)
complement = models.CharField(_.complement, max_length=255, null=True, blank=True)
locality = models.CharField(_.locality, max_length=255, null=True, blank=True)
postal_code = models.CharField(_.postal_code, max_length=255, null=True, blank=True)
state = models.CharField(_.state, max_length=255, null=True, blank=True)
state_code = models.CharField(_.state_code, max_length=255, null=True, blank=True)
country = models.CharField(_.country, max_length=255, default=conf.Default.country, blank=True, null=True)
country_code = models.CharField(_.country_code, max_length=255, default=conf.Default.country_code, blank=True, null=True)
cedex = models.CharField(_.cedex, max_length=255, null=True, blank=True)
cedex_code = models.CharField(_.cedex_code, max_length=255, null=True, blank=True)
special = models.CharField(max_length=255, null=True, blank=True)
index = models.CharField(max_length=255, null=True, blank=True)
latitude = models.DecimalField(max_digits=9, decimal_places=6, null=True, blank=True)
longitude = models.DecimalField(max_digits=9, decimal_places=6, null=True, blank=True)
source = models.CharField(max_length=255, null=True, blank=True)
raw = models.CharField(max_length=255, null=True, blank=True)
enable_clean_fields = False
class Meta:
abstract = True
verbose_name = _.v_address
verbose_name_plural = _.vp_address
def fill_from_raw(self):
address = address_backend.get_location(self.raw)
if address:
self.backend_id = address["id"]
self.source = address["source"]
for field in fields:
setattr(self, field, address[field])
def fill_raw(self):
if not self.address_is_empty:
if self.country_code:
formatting = 'format_%s' % self.country_code.lower()
self.raw = getattr(self, formatting)() if hasattr(self, formatting) else self.format_universal()
else:
self.raw = self.format_universal()
def erase_to_new(self, *args, **kwargs):
self.backend_id = kwargs.get('backend_id', None)
self.source = kwargs.get('source', 'FROMUSER')
self.raw = None
for field in fields:
setattr(self, field, kwargs.get(field, None))
@property
def has_state_or_postal_code(self):
return True if self.postal_code or self.state_code else False
def clean_state_or_postal_code(self):
if not self.has_state_or_postal_code:
raise ValidationError(_.validate_postal_state_code)
@property
def has_address(self):
return True if self.address else False
def clean_address(self):
if not self.has_address:
raise ValidationError(code='invalid_address', message='invalid address')
@property
def has_locality(self):
return True if self.locality else False
def clean_locality(self):
if not self.has_locality:
raise ValidationError(code='invalid_locality', message='invalid locality')
def clean_address_fields(self):
if self.enable_clean_fields:
self.clean_address()
#self.clean_locality()
self.clean_state_or_postal_code()
@property
def address_is_usable(self):
try:
self.clean_address()
self.clean_locality()
self.clean_state_or_postal_code()
except ValidationError:
return False
return True
@property
def address_is_empty(self):
nb_fields = sum([1 if getattr(self, field) and field != 'raw' else 0 for field in fields])
return (nb_fields < 2)
def only_raw(self):
if self.address_is_empty and self.raw:
self.fill_from_raw()
def save(self, *args, **kwargs):
self.fill_raw()
self.clean_address_fields()
super().save(*args, **kwargs)
@property
def state_or_postal_code(self):
return self.postal_code if self.postal_code else self.state_code
@property
def city(self):
formatting = 'city_%s' % self.country_code.lower()
return getattr(self, formatting) if hasattr(self, formatting) else self.city_default
@property
def city_fr(self):
cedex = "CEDEX %s" % self.cedex_code if self.cedex_code else ""
return " ".join([str(ad) for ad in [self.state_or_postal_code, self.locality, cedex] if ad]).strip()
@property
def city_default(self):
return " ".join([str(ad) for ad in [self.state_or_postal_code, self.locality] if ad]).strip()
@property
def representation(self):
return self.raw
@property
def raw_address(self):
return self.raw
@property
def fields_used(self):
return fields
def format_universal(self):
tpl = ""
if self.address: tpl += "%(address)s"
if self.postal_code: tpl += ", %(postal_code)s" if len(tpl) else "%(postal_code)s"
if self.locality: tpl += ", %(locality)s" if len(tpl) else "%(locality)s"
if self.state: tpl += ", %(state)s" if len(tpl) else "%(state)s"
if self.country: tpl += ", %(country)s" if len(tpl) else "%(country)s"
return tpl % ({field: getattr(self, field) for field in self.fields_used})
class Address(AddressNoBase, Base):
search_fields = ['locality', 'postal_code']
class Meta:
abstract = True
verbose_name = _.v_address
verbose_name_plural = _.vp_address |
py | b406d4917370d218775d2f8cd466d426b6994896 | """
Code to detect keyboard interrupts and print the requisite velocities to the terminal
"""
from pynput import keyboard
def on_press(key):
try:
print('alphanumeric key {0} pressed'.format(
key.char))
except AttributeError:
print('special key {0} pressed'.format(
key))
def on_release(key):
print('{0} released'.format(
key))
if key == keyboard.Key.esc:
# Stop listener
return False
# Collect events until released
with keyboard.Listener(
on_press=on_press,
on_release=on_release) as listener:
listener.join() |
py | b406d56701c975c184a93a9ac2efb099aace3a7c | #! /usr/bin/env python
import codecs
import logging
import unittest
from sys import maxunicode
import pyslet.unicode5 as unicode5
from pyslet.py2 import (
byte,
character,
is_text,
join_bytes,
py2,
range3,
u8,
ul)
MAX_CHAR = 0x10FFFF
if maxunicode < MAX_CHAR:
MAX_CHAR = maxunicode
CHINESE_TEST = u8(b'\xe8\x8b\xb1\xe5\x9b\xbd')
def suite():
return unittest.TestSuite((
unittest.makeSuite(EncodingTests, 'test'),
unittest.makeSuite(CharClassTests, 'test'),
unittest.makeSuite(UCDTests, 'test'),
unittest.makeSuite(ParserTests, 'test')
))
class EncodingTests(unittest.TestCase):
def test_detection(self):
test_string = u"Caf\xe9"
for codec, bom in (
('utf_8', codecs.BOM_UTF8),
('utf_32_be', codecs.BOM_UTF32_BE),
('utf_32_le', codecs.BOM_UTF32_LE),
('utf_16_be', codecs.BOM_UTF16_BE),
('utf_16_le', codecs.BOM_UTF16_LE)):
data = test_string.encode(codec)
detected = unicode5.detect_encoding(data)
self.assertTrue(detected == codec,
"%s detected as %s" % (codec, detected))
# and once with the BOM
if codec == 'utf_8':
codec = 'utf_8_sig'
data = bom + data
detected = unicode5.detect_encoding(data)
self.assertTrue(detected == codec,
"%s with BOM detected as %s" % (codec, detected))
class CharClassTests(unittest.TestCase):
def test_constructor(self):
c = unicode5.CharClass()
if MAX_CHAR < 0x10FFFF:
logging.warn("unicode5 tests truncated to character(0x%X) by "
"narrow python build" % MAX_CHAR)
for code in range3(MAX_CHAR + 1):
self.assertFalse(c.test(character(code)))
c = unicode5.CharClass('a')
self.assertTrue(self.class_test(c) == 'a')
c = unicode5.CharClass(('a', 'z'))
self.assertTrue(self.class_test(c) == 'abcdefghijklmnopqrstuvwxyz')
c = unicode5.CharClass('abcxyz')
self.assertTrue(
len(c.ranges) == 2, "No range optimization: %s" % repr(c.ranges))
self.assertTrue(self.class_test(c) == 'abcxyz')
cc = unicode5.CharClass(c)
self.assertTrue(self.class_test(cc) == 'abcxyz')
c = unicode5.CharClass(('a', 'c'), ('e', 'g'), 'd')
self.assertTrue(
len(c.ranges) == 1, "Missing range optimization: %s"
% repr(c.ranges))
def test_complex_constructors(self):
init_tests = [
[[], ""],
[[['a', 'z']], "abcdefghijklmnopqrstuvwxyz"],
[[['a', 'd'], ['f', 'k']], "abcdfghijk"],
[[['b', 'b']], "b"],
[[['a', 'b'], ['c', 'd'], ['e', 'f'], ['g', 'h'],
['i', 'j'], ['k', 'k']], "abcdefghijk"],
[[['a', 'b'], ['d', 'f'], ['h', 'h']], "abdefh"],
[[['h', 'h'], ['d', 'f'], ['a', 'b']], "abdefh"],
]
for test in init_tests:
c = unicode5.CharClass(*test[0])
result = self.class_test(c)
self.assertTrue(result == test[1],
"CharClass test: expected %s, found %s" %
(test[1], result))
def test_add(self):
c = unicode5.CharClass(u"ac")
c.add_char(u"b")
self.assertTrue(self.class_test(c) == "abc", "add_char")
c.add_range(u"b", u"e")
self.assertTrue(self.class_test(c) == "abcde", "add_range")
c.add_class(unicode5.CharClass(["m", "s"]))
self.assertTrue(self.class_test(c) == "abcdemnopqrs", "add_class")
def test_subtraction(self):
c = unicode5.CharClass("abc")
c.subtract_char("b")
result = self.class_test(c)
self.assertTrue(result == "ac", "subtract_char: %s" % result)
c.subtract_range("b", "d")
self.assertTrue(self.class_test(c) == "a", "subtract_range")
tests = [
[[], [], ""],
[[['a', 'b']], [['c', 'd']], "ab"],
[[['a', 'b']], [['b', 'c']], "a"],
[[['a', 'c']], [['b', 'd']], "a"],
[[['a', 'd']], [['b', 'd']], "a"],
[[['a', 'd']], [['b', 'c']], "ad"],
[[['a', 'c']], [['a', 'd']], ""],
[[['a', 'c']], [['a', 'c']], ""],
[[['a', 'd']], [['a', 'b']], "cd"],
[[['b', 'c']], [['a', 'd']], ""],
[[['b', 'c']], [['a', 'c']], ""],
[[['b', 'd']], [['a', 'c']], "d"],
[[['a', 'z']], [['f', 'h'], ['s', 'u']],
"abcdeijklmnopqrvwxyz"],
[[['a', 'e'], ['i', 'r'], ['v', 'z']],
[['m', 'x']], "abcdeijklyz"]
]
for test in tests:
c1 = unicode5.CharClass(*test[0])
c2 = unicode5.CharClass(*test[1])
c3 = unicode5.CharClass(c1)
c3.subtract_class(c2)
result = self.class_test(c3)
self.assertTrue(result == test[2],
"Subtract: %s - %s, found %s" %
(repr(c1), repr(c2), repr(c3)))
def test_negate_char_class(self):
"""Check the Negation function"""
min_char = character(0)
max_char = character(maxunicode)
char_class_tests = [
[[], [[min_char, max_char]]],
[[['b', 'c']], [[min_char, 'a'], ['d', max_char]]],
[[['b', 'c'], ['e', 'f']], [
[min_char, 'a'], ['d', 'd'], ['g', max_char]]]
]
for test in char_class_tests:
c1 = unicode5.CharClass(*test[0])
c2 = unicode5.CharClass(c1)
c2.negate()
c3 = unicode5.CharClass(*test[1])
self.assertTrue(c2 == c3, "%s negated to %s, expected %s" %
(repr(c1), repr(c2), repr(c3)))
c2.negate()
self.assertTrue(c2 == c1,
"%s double negation got %s" % (repr(c1), repr(c2)))
def test_representation(self):
repr_tests = [
[[], "CharClass()", ""],
[[['a', 'z']],
"CharClass((u'a',u'z'))" if py2 else "CharClass(('a','z'))",
"a-z"],
[[['a', 'd'], ['f', 'k']],
"CharClass((u'a',u'd'), (u'f',u'k'))" if py2 else
"CharClass(('a','d'), ('f','k'))", "a-df-k"],
[[['-', '-']],
"CharClass(u'-')" if py2 else "CharClass('-')", "\\-"],
[[['[', ']']],
"CharClass((u'[',u']'))" if py2 else "CharClass(('[',']'))",
"[-\\]"],
[[['\\', '\\']],
"CharClass(u'\\\\')" if py2 else "CharClass('\\\\')",
"\\\\"],
]
for test in repr_tests:
c = unicode5.CharClass(*test[0])
self.assertTrue(repr(c) == test[1],
"CharClass repr test: expected %s, found %s" %
(test[1], repr(c)))
result = c.format_re()
self.assertTrue(result == test[2],
"CharClass Re test: expected %s, found %s" %
(test[2], result))
def class_test(self, cclass):
result = []
for c in range(ord('a'), ord('z') + 1):
if cclass.test(character(c)):
result.append(character(c))
result = ''.join(result)
return result
class UCDTests(unittest.TestCase):
"""Tests of the Unicode Category classes"""
def test_ucd_classes(self):
class_cc = unicode5.CharClass.ucd_category('Cc')
class_c = unicode5.CharClass.ucd_category('C')
for code in range3(0x20):
self.assertTrue(class_cc.test(character(code)))
self.assertTrue(class_c.test(character(code)))
for code in range3(0x7F, 0xA0):
self.assertTrue(class_cc.test(character(code)))
self.assertTrue(class_c.test(character(code)))
self.assertFalse(class_cc.test(character(0xAD)))
self.assertTrue(class_c.test(character(0xAD)))
self.assertTrue(
unicode5.CharClass.ucd_category('Cf').test(character(0xAD)))
def test_ucd_blocks(self):
class_basic_latin = unicode5.CharClass.ucd_block('Basic Latin')
self.assertTrue(class_basic_latin is unicode5.CharClass.ucd_block(
'basiclatin'), "block name normalization")
for code in range3(0x80):
self.assertTrue(class_basic_latin.test(character(code)))
self.assertFalse(class_basic_latin.test(character(0x80)))
# randomly pick one of the other blocks
class_basic_latin = unicode5.CharClass.ucd_block('Arrows')
self.assertFalse(class_basic_latin.test(character(0x2150)))
self.assertTrue(class_basic_latin.test(character(0x2190)))
class ParserTests(unittest.TestCase):
def test_constructor(self):
p = unicode5.BasicParser("hello")
self.assertTrue(p.raw == py2)
self.assertTrue(p.src == "hello")
self.assertTrue(p.pos == 0)
self.assertTrue(p.the_char == "h")
p = unicode5.BasicParser(b"hello")
self.assertTrue(p.raw)
self.assertTrue(isinstance(p.src, bytes))
self.assertTrue(isinstance(p.the_char, type(byte("h"))))
p = unicode5.BasicParser(u"hello")
self.assertFalse(p.raw)
self.assertTrue(is_text(p.src))
p = unicode5.BasicParser(bytearray(b"hello"))
self.assertTrue(p.raw)
self.assertTrue(isinstance(p.src, bytearray))
def test_setpos(self):
p = unicode5.BasicParser(u"hello")
save_pos1 = p.pos
p.parse("hell")
save_pos2 = p.pos
p.setpos(save_pos1)
self.assertTrue(p.pos == 0)
self.assertTrue(p.the_char == u"h")
p.setpos(save_pos2)
self.assertTrue(p.pos == 4)
self.assertTrue(p.the_char == u"o")
def test_nextchar(self):
p = unicode5.BasicParser(u"hello")
for c in u"hello":
self.assertTrue(p.the_char == c)
p.next_char()
def test_parser_error(self):
p = unicode5.BasicParser("hello")
# called with no argument, no previous error...
try:
p.parser_error()
self.fail("No parser error raised")
except unicode5.ParserError as e:
self.assertTrue(e.production == '')
self.assertTrue(e.pos == 0)
self.assertTrue(e.left == "")
self.assertTrue(e.right == "hello")
self.assertTrue(str(e) ==
"ParserError: at [0]", str(e))
self.assertTrue(isinstance(e, ValueError))
last_e = e
p.next_char()
# called with a character string argument, raises a new parser error
try:
p.parser_error('test')
self.fail("No parser error raised")
except unicode5.ParserError as e:
self.assertTrue(e.production == 'test')
self.assertTrue(e.pos == 1)
self.assertTrue(e.left == "h")
self.assertTrue(e.right == "ello")
self.assertTrue(str(e) ==
"ParserError: expected test at [1]")
last_e = e
# called with no argument re-raises the previous most advanced
# error (based on parser pos)
try:
p.parser_error()
self.fail("No parser error raised")
except unicode5.ParserError as e:
self.assertTrue(e is last_e)
self.assertTrue(p.pos == e.pos)
p.next_char()
# called with no argument beyond the previous most advanced
# error (based on parser pos)
try:
p.parser_error()
self.fail("No parser error raised")
except unicode5.ParserError as e:
self.assertTrue(e.production == '')
self.assertTrue(e.pos == 2)
self.assertTrue(e.left == "he")
self.assertTrue(e.right == "llo")
self.assertTrue(str(e) ==
"ParserError: at [2]")
last_e = e
p.next_char()
try:
p.parser_error('testA')
self.fail("No syntax error raised")
except unicode5.ParserError as e:
self.assertTrue(e.production == 'testA')
self.assertTrue(e.pos == 3)
self.assertTrue(e.left == "hel")
self.assertTrue(e.right == "lo")
self.assertTrue(str(e) ==
"ParserError: expected testA at [3]")
test_a = e
try:
p.parser_error('testB')
self.fail("No syntax error raised")
except unicode5.ParserError as e:
self.assertTrue(e.production == 'testB')
self.assertTrue(e.pos == 3)
self.assertTrue(e.left == "hel")
self.assertTrue(e.right == "lo")
self.assertTrue(str(e) ==
"ParserError: expected testB at [3]")
p.setpos(1)
try:
p.parser_error('testC')
self.fail("No syntax error raised")
except unicode5.ParserError as e:
self.assertTrue(e.production == 'testC')
self.assertTrue(e.pos == 1)
self.assertTrue(e.left == "h")
self.assertTrue(e.right == "ello")
self.assertTrue(str(e) ==
"ParserError: expected testC at [1]")
# most advanced error now test_a or test_b, we return the first
try:
p.parser_error()
self.fail("No parser error raised")
except unicode5.ParserError as e:
self.assertTrue(e is test_a)
self.assertTrue(p.pos == e.pos)
def test_require_production(self):
p = unicode5.BasicParser("hello")
x = object()
self.assertTrue(p.require_production(x, "object") is x)
self.assertTrue(p.require_production(x) is x)
self.assertTrue(p.require_production(False, "Boolean") is False)
self.assertTrue(p.require_production(0, "zero") == 0)
try:
p.require_production(None, "something")
self.fail("None failed to raise ParserError")
except unicode5.ParserError:
pass
def test_require_production_end(self):
p = unicode5.BasicParser("hello")
x = object()
try:
p.require_production_end(x, "something")
self.fail("None failed to raise ParserError")
except unicode5.ParserError:
pass
p.setpos(5)
try:
self.assertTrue(p.require_production_end(x, "object") is x)
except unicode5.ParserError:
self.fail("ParserError raised at end")
try:
p.require_production_end(None, "something")
self.fail("None failed to raise ParserError")
except unicode5.ParserError:
pass
def test_parse_production(self):
p = unicode5.BasicParser("hello")
x = object()
self.assertTrue(
p.parse_production(p.require_production, x, production="object"))
self.assertFalse(
p.parse_production(p.require_production, None,
production="something"))
def test_peek(self):
p = unicode5.BasicParser("hello")
self.assertTrue(p.peek(4) == "hell")
self.assertTrue(p.peek(0) == "")
self.assertTrue(p.peek(6) == "hello")
p.next_char()
self.assertTrue(p.peek(4) == "ello")
self.assertTrue(p.peek(0) == "")
self.assertTrue(p.peek(6) == "ello")
def test_match_end(self):
p = unicode5.BasicParser("hello")
for i in range3(5):
self.assertFalse(p.match_end())
p.next_char()
self.assertTrue(p.match_end())
def test_require_end(self):
p = unicode5.BasicParser("hello")
for i in range3(5):
try:
p.require_end()
self.fail("require_end failed to raise exception")
except unicode5.ParserError as e:
self.assertTrue(e.production == ul("end"))
p.next_char()
p.require_end()
def test_match(self):
p = unicode5.BasicParser(ul("hello"))
p.next_char()
save_pos = p.pos
self.assertTrue(p.match(ul("ell")))
self.assertTrue(p.pos == save_pos)
self.assertFalse(p.match(ul("elL")))
self.assertTrue(p.pos == save_pos)
self.assertFalse(p.match(ul("hell")))
self.assertTrue(p.pos == save_pos)
p = unicode5.BasicParser(b"hello")
p.next_char()
self.assertTrue(p.match(b"ell"))
self.assertFalse(p.match(b"elL"))
self.assertFalse(p.match(b"hell"))
def test_parse(self):
p = unicode5.BasicParser(ul("hello"))
p.next_char()
match = ul("ell")
save_pos = p.pos
self.assertTrue(p.parse(match) == match)
self.assertTrue(p.pos == save_pos + 3)
p.setpos(save_pos)
self.assertTrue(p.parse(ul("elL")) is None)
self.assertTrue(p.pos == save_pos)
self.assertTrue(p.parse(ul("hell")) is None)
self.assertTrue(p.pos == save_pos)
p = unicode5.BasicParser(b"hello")
p.next_char()
save_pos = p.pos
self.assertTrue(p.parse(b"ell") == b"ell")
p.setpos(save_pos)
self.assertTrue(p.parse(b"elL") is None)
self.assertTrue(p.pos == save_pos)
self.assertTrue(p.parse(b"hell") is None)
self.assertTrue(p.pos == save_pos)
def test_require(self):
p = unicode5.BasicParser(ul("hello"))
p.next_char()
match = ul("ell")
save_pos = p.pos
result = p.require(match)
self.assertTrue(result == match, result)
self.assertTrue(p.pos == save_pos + 3)
p.setpos(save_pos)
try:
p.require(ul("elL"))
self.fail("match string")
except unicode5.ParserError as e:
self.assertTrue(str(e) == "ParserError: expected elL at [1]",
str(e))
pass
try:
p.require(ul("elL"), "mixed")
self.fail("false match")
except unicode5.ParserError as e:
self.assertTrue(str(e) == "ParserError: expected mixed at [1]",
str(e))
pass
self.assertTrue(p.pos == save_pos)
# binary tests
p = unicode5.BasicParser(b"hello")
p.next_char()
save_pos = p.pos
self.assertTrue(p.require(b"ell") == b"ell")
p.setpos(save_pos)
try:
p.require(b"elL")
self.fail("false match")
except unicode5.ParserError as e:
self.assertTrue(str(e) == "ParserError: expected b'elL' at [1]",
str(e))
pass
self.assertTrue(p.pos == save_pos)
def test_match_insensitive(self):
p = unicode5.BasicParser(ul("heLLo"))
p.next_char()
save_pos = p.pos
self.assertTrue(p.match_insensitive(ul("ell")))
self.assertTrue(p.pos == save_pos)
self.assertFalse(p.match_insensitive(ul("hell")))
self.assertTrue(p.pos == save_pos)
p = unicode5.BasicParser(b"heLLo")
p.next_char()
self.assertTrue(p.match_insensitive(b"ell"))
self.assertFalse(p.match_insensitive(b"hell"))
def test_parse_insensitive(self):
p = unicode5.BasicParser(ul("heLLo"))
p.next_char()
match = ul("ell")
save_pos = p.pos
self.assertTrue(p.parse_insensitive(match) == ul("eLL"))
self.assertTrue(p.pos == save_pos + 3)
p.setpos(save_pos)
self.assertTrue(p.parse_insensitive(ul("hell")) is None)
self.assertTrue(p.pos == save_pos)
p = unicode5.BasicParser(b"heLLo")
p.next_char()
save_pos = p.pos
self.assertTrue(p.parse_insensitive(b"ell") == b"eLL")
p.setpos(save_pos)
self.assertTrue(p.parse_insensitive(b"hell") is None)
self.assertTrue(p.pos == save_pos)
def test_parse_until(self):
p = unicode5.BasicParser(ul("hello"))
self.assertTrue(p.parse_until(ul("ell")) == ul("h"))
self.assertTrue(p.pos == 1)
self.assertTrue(p.parse_until(ul("elL")) == ul("ello"))
self.assertTrue(p.pos == 5)
p.setpos(0)
self.assertTrue(p.parse_until(ul("hell")) is ul(""))
self.assertTrue(p.pos == 0)
# binary
p = unicode5.BasicParser(b"hello")
self.assertTrue(p.parse_until(b"ell") == b"h")
self.assertTrue(p.pos == 1)
self.assertTrue(p.parse_until(b"elL") == b"ello")
self.assertTrue(p.pos == 5)
p.setpos(0)
self.assertTrue(p.parse_until(b"hell") is b"")
self.assertTrue(p.pos == 0)
def test_match_one(self):
p = unicode5.BasicParser(ul("hello"))
self.assertTrue(p.match_one(ul("hello")))
self.assertTrue(p.match_one(ul("h")))
self.assertFalse(p.match_one(ul("e")))
p = unicode5.BasicParser(b"hello")
self.assertTrue(p.match_one(b"hello"))
self.assertTrue(p.match_one(b"h"))
self.assertFalse(p.match_one(b"e"))
def test_parse_one(self):
p = unicode5.BasicParser(ul("hello"))
self.assertTrue(p.parse_one(ul("hello")) == ul("h"))
self.assertTrue(p.pos == 1)
p.setpos(0)
self.assertTrue(p.parse_one(ul("h")) == ul("h"))
self.assertTrue(p.pos == 1)
p.setpos(0)
self.assertTrue(p.parse_one(ul("e")) is None)
self.assertTrue(p.pos == 0)
p = unicode5.BasicParser(b"hello")
self.assertTrue(p.parse_one(b"olleh") == byte(b"h"))
self.assertTrue(p.pos == 1)
p.setpos(0)
self.assertTrue(p.parse_one(b"h") == byte(b"h"))
p.setpos(0)
self.assertTrue(p.parse_one(b"e") is None)
def test_match_digit(self):
p = unicode5.BasicParser(ul("2p"))
self.assertTrue(p.match_digit())
p.next_char()
self.assertFalse(p.match_digit())
p.next_char()
self.assertFalse(p.match_digit())
# test Arabic digits, should not match!
p = unicode5.BasicParser(
u8(b'\xd9\xa0\xd9\xa1\xd9\xa2\xd9\xa3\xd9\xa4\xd9\xa5'
b'\xd9\xa6\xd9\xa7\xd9\xa8\xd9\xa9'))
for i in range3(10):
self.assertFalse(p.match_digit())
p.next_char()
p = unicode5.BasicParser(b"2p")
self.assertTrue(p.match_digit())
p.next_char()
self.assertFalse(p.match_digit())
p.next_char()
self.assertFalse(p.match_digit())
def test_parse_digit(self):
p = unicode5.BasicParser(ul("2p"))
self.assertTrue(p.parse_digit() == ul("2"))
self.assertTrue(p.pos == 1)
self.assertTrue(p.parse_digit() is None)
p.next_char()
self.assertTrue(p.parse_digit() is None)
# test Arabic digits, should not parse!
p = unicode5.BasicParser(
u8(b'\xd9\xa0\xd9\xa1\xd9\xa2\xd9\xa3\xd9\xa4\xd9\xa5'
b'\xd9\xa6\xd9\xa7\xd9\xa8\xd9\xa9'))
for i in range3(10):
self.assertTrue(p.parse_digit() is None)
p.next_char()
# test binary forms
p = unicode5.BasicParser(b"2p")
self.assertTrue(p.parse_digit() == byte(b"2"))
self.assertTrue(p.parse_digit() is None)
p.next_char()
self.assertTrue(p.parse_digit() is None)
def test_parse_digit_value(self):
p = unicode5.BasicParser(ul("2p"))
self.assertTrue(p.parse_digit_value() == 2)
self.assertTrue(p.pos == 1)
self.assertTrue(p.parse_digit_value() is None)
p.next_char()
self.assertTrue(p.parse_digit_value() is None)
# test Arabic digits, should not parse!
p = unicode5.BasicParser(
u8(b'\xd9\xa0\xd9\xa1\xd9\xa2\xd9\xa3\xd9\xa4\xd9\xa5'
b'\xd9\xa6\xd9\xa7\xd9\xa8\xd9\xa9'))
for i in range3(10):
self.assertTrue(p.parse_digit_value() is None)
p.next_char()
# test binary forms
p = unicode5.BasicParser(b"2p")
self.assertTrue(p.parse_digit_value() == 2)
self.assertTrue(p.parse_digit_value() is None)
p.next_char()
self.assertTrue(p.parse_digit_value() is None)
def test_parse_digits(self):
p = unicode5.BasicParser(ul("23p"))
# min value of 0
self.assertTrue(p.parse_digits(0) == ul("23"))
self.assertTrue(p.pos == 2)
# min value of 2, should fail
p.setpos(1)
self.assertTrue(p.parse_digits(2) is None)
# shouldn't move the parser
self.assertTrue(p.pos == 1)
# min value of 0, should throw an error
try:
p.parse_digits(-1)
self.fail("min=-1 didn't raise exception")
except ValueError:
# and it shouldn't move the parser
self.assertTrue(p.pos == 1)
# min value > max, should throw an error
try:
p.parse_digits(3, 1)
self.fail("min > max didn't raise exception")
except ValueError:
# and it shouldn't move the parser
self.assertTrue(p.pos == 1)
# check we can exceed ordinary integer sizes
istr = ul("123456789" + "0" * 256)
p = unicode5.BasicParser(istr)
self.assertTrue(len(p.parse_digits(0, 256)) == 256)
# and check that runs of 0 don't mean a thing
self.assertTrue(p.parse_digits(0, 256) == ul("000000000"))
# test Arabic digits, should not parse!
p = unicode5.BasicParser(
u8(b'\xd9\xa0\xd9\xa1\xd9\xa2\xd9\xa3\xd9\xa4\xd9\xa5'
b'\xd9\xa6\xd9\xa7\xd9\xa8\xd9\xa9'))
for i in range3(10):
self.assertTrue(p.parse_digits(1) is None)
p.next_char()
# test binary forms
p = unicode5.BasicParser(b"234p")
# unlike parse_digit we return a string, even if only one digit
self.assertTrue(p.parse_digits(1, 1) == b"2")
self.assertTrue(p.parse_digits(1) == b"34")
p.next_char()
self.assertTrue(p.parse_digits(1) is None)
self.assertTrue(p.parse_digits(0) == b"")
def test_parse_integer(self):
p = unicode5.BasicParser(ul("23p"))
# all defaults, unbounded
self.assertTrue(p.parse_integer() == 23)
self.assertTrue(p.pos == 2)
p.setpos(1)
# provide a minimum value
self.assertTrue(p.parse_integer(4) is None)
self.assertTrue(p.parse_integer(2) == 3)
p.setpos(1)
# provide a minimum and maximum value
self.assertTrue(p.parse_integer(0, 2) is None)
self.assertTrue(p.parse_integer(1, 4) == 3)
p.setpos(0)
# min value < 0, should throw an error
try:
p.parse_integer(-1)
self.fail("min = -1 didn't raise exception")
except ValueError:
# and it shouldn't move the parser
self.assertTrue(p.pos == 0)
# min value > max, should throw an error
try:
p.parse_integer(3, 1)
self.fail("min > max didn't raise exception")
except ValueError:
# and it shouldn't move the parser
self.assertTrue(p.pos == 0)
# check we can exceed ordinary integer sizes
istr = ul("123456789" + "0" * 256)
p = unicode5.BasicParser(istr)
# test max digits
self.assertTrue(p.parse_integer(0, None, 10) == 1234567890)
# check wide zeros
self.assertTrue(p.parse_integer(0, None, 10) == 0)
self.assertTrue(p.pos == 20)
p.setpos(0)
# check large numbers
self.assertTrue(p.parse_integer(0, None, 15) == 123456789000000)
# test Arabic digits, should not parse!
p = unicode5.BasicParser(
u8(b'\xd9\xa0\xd9\xa1\xd9\xa2\xd9\xa3\xd9\xa4\xd9\xa5'
b'\xd9\xa6\xd9\xa7\xd9\xa8\xd9\xa9'))
for i in range3(10):
self.assertTrue(p.parse_integer() is None)
p.next_char()
# test binary forms
p = unicode5.BasicParser(b"234p")
self.assertTrue(p.parse_integer(max_digits=1) == 2)
self.assertTrue(p.parse_integer(0, 2) is None)
self.assertTrue(p.parse_integer() == 34)
p.next_char()
self.assertTrue(p.parse_integer() is None)
def test_match_hex_digit(self):
p = unicode5.BasicParser(
u8(b"0123456789abcdefghijklmnopqrstuvwxyz"
b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
b"\xd9\xa0\xd9\xa1\xd9\xa2\xd9\xa3\xd9\xa4\xd9\xa5"
b"\xd9\xa6\xd9\xa7\xd9\xa8\xd9\xa9"))
result = []
while p.the_char is not None:
if p.match_hex_digit():
result.append(p.the_char)
p.next_char()
self.assertTrue(ul('').join(result) ==
ul('0123456789abcdefABCDEF'))
# and now binary
p = unicode5.BasicParser(
b"0123456789abcdefghijklmnopqrstuvwxyz"
b"ABCDEFGHIJKLMNOPQRSTUVWXYZ")
result = []
while p.the_char is not None:
if p.match_hex_digit():
result.append(p.the_char)
p.next_char()
self.assertTrue(join_bytes(result) ==
b'0123456789abcdefABCDEF')
def test_parse_hex_digit(self):
p = unicode5.BasicParser(
u8(b"0123456789abcdefghijklmnopqrstuvwxyz"
b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
b"\xd9\xa0\xd9\xa1\xd9\xa2\xd9\xa3\xd9\xa4\xd9\xa5"
b"\xd9\xa6\xd9\xa7\xd9\xa8\xd9\xa9"))
result = []
while p.the_char is not None:
digit = p.parse_hex_digit()
if digit is not None:
result.append(digit)
else:
p.next_char()
self.assertTrue(ul('').join(result) ==
ul('0123456789abcdefABCDEF'))
# and now binary
p = unicode5.BasicParser(
b"0123456789abcdefghijklmnopqrstuvwxyz"
b"ABCDEFGHIJKLMNOPQRSTUVWXYZ")
result = []
while p.the_char is not None:
digit = p.parse_hex_digit()
if digit is not None:
result.append(digit)
else:
p.next_char()
self.assertTrue(join_bytes(result) ==
b'0123456789abcdefABCDEF')
def test_parse_hex_digits(self):
src = ul("23.FG.fg.0.00.abcdefABCDEF0123456789")
p = unicode5.BasicParser(src)
pb = unicode5.BasicParser(src.encode('ascii'))
# min value of 0, should throw an error
try:
p.parse_hex_digits(-1)
self.fail("min=-1 didn't raise exception")
except ValueError:
# and it shouldn't move the parser
self.assertTrue(p.pos == 0)
# min value > max, should throw an error
try:
p.parse_hex_digits(3, 1)
self.fail("min > max didn't raise exception")
except ValueError:
# and it shouldn't move the parser
self.assertTrue(p.pos == 0)
# check min value of 1
result = [ul("23"), ul("F"), ul("f"), ul("0"), ul("00"),
ul("abcdefABCDEF0123456789")]
i = 0
while p.the_char is not None:
resulti = p.parse_hex_digits(1)
bresulti = pb.parse_hex_digits(1)
if resulti is not None:
self.assertTrue(resulti == result[i], resulti)
self.assertTrue(bresulti == result[i].encode('ascii'),
bresulti)
i += 1
p.next_char()
pb.next_char()
self.assertTrue(i == len(result))
# min value of 2
p.setpos(0)
pb.setpos(0)
result = [ul("23"), ul("00"), ul("abcdefABCDEF0123456789")]
i = 0
while p.the_char is not None:
resulti = p.parse_hex_digits(2)
bresulti = pb.parse_hex_digits(2)
if resulti is not None:
self.assertTrue(resulti == result[i], resulti)
self.assertTrue(bresulti == result[i].encode('ascii'),
bresulti)
i += 1
p.next_char()
pb.next_char()
self.assertTrue(i == len(result))
p.setpos(0)
pb.setpos(0)
result = [ul("23"), ul("00"), ul("abcde"), ul("ABCDE"), ul("01234"),
ul("6789")]
i = 0
while p.the_char is not None:
resulti = p.parse_hex_digits(2, 5)
bresulti = pb.parse_hex_digits(2, 5)
if resulti is not None:
self.assertTrue(resulti == result[i], resulti)
self.assertTrue(bresulti == result[i].encode('ascii'),
bresulti)
i += 1
p.next_char()
pb.next_char()
self.assertTrue(i == len(result))
# check we can exceed ordinary integer sizes
istr = ul("123456789aBcDeF" + "0" * 256)
p = unicode5.BasicParser(istr)
self.assertTrue(len(p.parse_hex_digits(1, 256)) == 256)
# and check that runs of 0 don't mean a thing
self.assertTrue(p.parse_hex_digits(1, 256) == ul("000000000000000"))
# test Arabic digits, should not parse!
p = unicode5.BasicParser(
u8(b'\xd9\xa0\xd9\xa1\xd9\xa2\xd9\xa3\xd9\xa4\xd9\xa5'
b'\xd9\xa6\xd9\xa7\xd9\xa8\xd9\xa9'))
for i in range3(10):
self.assertTrue(p.parse_hex_digits(1) is None)
p.next_char()
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
unittest.main()
|
py | b406d779c402672f2f2afecd873f003db1c93238 | from __future__ import annotations
from spark_auto_mapper_fhir.fhir_types.uri import FhirUri
from spark_auto_mapper_fhir.value_sets.generic_type import GenericTypeCode
from spark_auto_mapper.type_definitions.defined_types import AutoMapperTextInputType
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class RoleClassRelationshipFormal(GenericTypeCode):
"""
v3.RoleClassRelationshipFormal
From: http://terminology.hl7.org/ValueSet/v3-RoleClassRelationshipFormal in v3-codesystems.xml
A relationship between two entities that is formally recognized, frequently
by a contract or similar agreement.
"""
def __init__(self, value: AutoMapperTextInputType):
super().__init__(value=value)
"""
http://terminology.hl7.org/CodeSystem/v3-RoleClass
"""
codeset: FhirUri = "http://terminology.hl7.org/CodeSystem/v3-RoleClass"
class RoleClassRelationshipFormalValues:
"""
Corresponds to the Role class
From: http://terminology.hl7.org/CodeSystem/v3-RoleClass in v3-codesystems.xml
"""
Role = RoleClassRelationshipFormal("ROL")
"""
The player of the role is a child of the scoping entity, in a generic sense.
From: http://terminology.hl7.org/CodeSystem/v3-RoleClass in v3-codesystems.xml
"""
Child = RoleClassRelationshipFormal("CHILD")
"""
A role played by an entity that receives credentials from the scoping entity.
From: http://terminology.hl7.org/CodeSystem/v3-RoleClass in v3-codesystems.xml
"""
CredentialedEntity = RoleClassRelationshipFormal("CRED")
"""
nurse practitioner
From: http://terminology.hl7.org/CodeSystem/v3-RoleClass in v3-codesystems.xml
"""
NursePractitioner = RoleClassRelationshipFormal("NURPRAC")
"""
nurse
From: http://terminology.hl7.org/CodeSystem/v3-RoleClass in v3-codesystems.xml
"""
Nurse = RoleClassRelationshipFormal("NURS")
"""
physician assistant
From: http://terminology.hl7.org/CodeSystem/v3-RoleClass in v3-codesystems.xml
"""
PhysicianAssistant = RoleClassRelationshipFormal("PA")
"""
physician
From: http://terminology.hl7.org/CodeSystem/v3-RoleClass in v3-codesystems.xml
"""
Physician = RoleClassRelationshipFormal("PHYS")
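# (Added illustration, not part of the generated file.) The constants above are
# plain GenericTypeCode values, so a mapping can reference them directly, e.g.
#
#     role = RoleClassRelationshipFormalValues.NursePractitioner   # carries the code "NURPRAC"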
|
py | b406d7b9d75586c3a99cf85d67c64acdda2c8a64 | import argparse
import os
from util import util
import torch
import models
import data
class BaseOptions():
"""This class defines options used during both training and test time.
It also implements several helper functions such as parsing, printing, and saving the options.
It also gathers additional options defined in <modify_commandline_options> functions in both dataset class and model class.
"""
    def __init__(self):
        """Reset the class; indicates the class hasn't been initialized"""
self.initialized = False
def initialize(self, parser):
"""Define the common options that are used in both training and test."""
# basic parameters
parser.add_argument('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
parser.add_argument('--dlib_path', type=str, default='shape_predictor_68_face_landmarks.dat', help='path of dlib 68 landmarks predictor')
# model parameters
parser.add_argument('--model', type=str, default='cycle_gan', help='chooses which model to use. [cycle_gan | pix2pix | test | colorization]')
parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels: 3 for RGB and 1 for grayscale')
parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels: 3 for RGB and 1 for grayscale')
parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in the last conv layer')
parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer')
parser.add_argument('--netD', type=str, default='basic', help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator')
parser.add_argument('--netG', type=str, default='resnet_9blocks', help='specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]')
parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')
parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization [instance | batch | none]')
parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal | xavier | kaiming | orthogonal]')
parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator')
# dataset parameters
parser.add_argument('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]')
parser.add_argument('--direction', type=str, default='AtoB', help='AtoB or BtoA')
parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data')
parser.add_argument('--batch_size', type=int, default=1, help='input batch size')
parser.add_argument('--load_size', type=int, default=286, help='scale images to this size')
parser.add_argument('--crop_size', type=int, default=256, help='then crop to this size')
parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
parser.add_argument('--preprocess', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]')
parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
parser.add_argument('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML')
# additional parameters
parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
parser.add_argument('--load_iter', type=int, default='0', help='which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]')
parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')
parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}')
self.initialized = True
return parser
    def gather_options(self):
        """Initialize our parser with basic options (only once).
Add additional model-specific and dataset-specific options.
These options are defined in the <modify_commandline_options> function
in model and dataset classes.
"""
if not self.initialized: # check if it has been initialized
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser = self.initialize(parser)
# get the basic options
opt, _ = parser.parse_known_args()
# modify model-related parser options
model_name = opt.model
model_option_setter = models.get_option_setter(model_name)
parser = model_option_setter(parser, self.isTrain)
opt, _ = parser.parse_known_args() # parse again with new defaults
# modify dataset-related parser options
dataset_name = opt.dataset_mode
dataset_option_setter = data.get_option_setter(dataset_name)
parser = dataset_option_setter(parser, self.isTrain)
# save and return the parser
self.parser = parser
return parser.parse_args()
def print_options(self, opt):
"""Print and save options
        It will print both current options and default values (if different).
It will save options into a text file / [checkpoints_dir] / opt.txt
"""
message = ''
message += '----------------- Options ---------------\n'
for k, v in sorted(vars(opt).items()):
comment = ''
default = self.parser.get_default(k)
if v != default:
comment = '\t[default: %s]' % str(default)
message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
message += '----------------- End -------------------'
print(message)
# save to the disk
expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
util.mkdirs(expr_dir)
file_name = os.path.join(expr_dir, '{}_opt.txt'.format(opt.phase))
with open(file_name, 'wt') as opt_file:
opt_file.write(message)
opt_file.write('\n')
def parse(self):
"""Parse our options, create checkpoints directory suffix, and set up gpu device."""
opt = self.gather_options()
opt.isTrain = self.isTrain # train or test
# process opt.suffix
if opt.suffix:
suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else ''
opt.name = opt.name + suffix
self.print_options(opt)
# set gpu ids
str_ids = opt.gpu_ids.split(',')
opt.gpu_ids = []
for str_id in str_ids:
id = int(str_id)
if id >= 0:
opt.gpu_ids.append(id)
if len(opt.gpu_ids) > 0:
torch.cuda.set_device(opt.gpu_ids[0])
self.opt = opt
return self.opt
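# ----------------------------------------------------------------------------
# Minimal usage sketch (added for illustration; not part of the original file).
# BaseOptions is not used directly: in the surrounding project a subclass such
# as TrainOptions/TestOptions sets self.isTrain and is then parsed. The import
# path below is an assumption about that project layout, and a valid
# --dataroot must be supplied on the command line.
#
#     from options.train_options import TrainOptions   # assumed module path
#     opt = TrainOptions().parse()     # parses CLI args, prints and saves them
#     print(opt.name, opt.gpu_ids, opt.batch_size)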
|
py | b406d87f848e36ac8124d2e265b6233d4f315594 | from marshmallow import Schema, fields, validate
class ProfileSchema(Schema):
id = fields.Int(dump_only=True)
name = fields.Str(
validate=validate.Length(min=2, max=128),
required=True,
error_messages={"required": "Name is required."}
)
avatar = fields.Url(validate=validate.Length(max=255), dump_only=True)
bio = fields.Str(validate=validate.Length(max=255))
dob = fields.DateTime()
created_on = fields.DateTime(dump_only=True)
updated_on = fields.DateTime(dump_only=True)
auth = fields.Nested(
'UserSchema', only=(
'username', 'is_active', 'is_admin',), load_only=True)
|
py | b406d8a63d136d7b58060ec3b43d38ce0eb739d7 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from django.conf.urls import url
from .views import HealthCheckView
urlpatterns = [url(r"^healthcheck/$", HealthCheckView.as_view(), name="healthcheck")]
|
py | b406d8e83a87ca412e5155d795ed6daa86728007 | from transformers import pipeline
models = {
'kb-ner': 'KB/bert-base-swedish-cased-ner',
"swedish-wikiann": "birgermoell/ner-swedish-wikiann"
}
def named_entity_recognition(text, model='KB/bert-base-swedish-cased-ner', tokenizer='KB/bert-base-swedish-cased-ner'):
"""
Named entity recognition
"""
# Load the model
ner = pipeline('ner', model=model, tokenizer=tokenizer)
# Get the result
result = ner(text)
    # Return the result
return result
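# Note (added): the pipeline returns a list of dicts, typically with keys such
# as "word", "entity" and "score" (the exact keys depend on the installed
# transformers version), e.g. [{"word": "KTH", "entity": "ORG", "score": 0.99, ...}].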
# ner = named_entity_recognition('KTH är ett universitet i Stockholm')
# print(ner) |
py | b406d94b8e137d2a8d8146c77d706f37d8850cbf | import sys
import random
from PySide6 import QtWidgets
from PySide6.QtGui import QCloseEvent
app = QtWidgets.QApplication(sys.argv)
width = app.primaryScreen().size().width()
height = app.primaryScreen().size().height()
windows = [] # list to put windows in to get around scoping
score = 0
class Head(QtWidgets.QWidget):
def __init__(self):
super().__init__()
self.setWindowTitle("Hydra!")
self.setStyleSheet("background-color: black;")
self.layout = QtWidgets.QVBoxLayout(self)
self.text = QtWidgets.QLabel("Cut off one head . . .")
self.text.setStyleSheet("color: white;")
self.layout.addWidget(self.text)
self.setFixedSize(200, 50)
self.move(random.randint(0, width), random.randint(0, height))
self.show()
def closeEvent(self, _e: QCloseEvent):
global score
score += 1
message = QtWidgets.QMessageBox()
message.setWindowTitle("Hydra!")
message.setText("Two more shall take its place!\nScore: " + str(score))
message.setStyleSheet("background-color: red;")
message.exec()
create_window()
create_window()
windows.remove(self) # remove the old window to plug memory leak
def create_window():
window = Head()
windows.append(window)
create_window()
sys.exit(app.exec())
|
py | b406da2f2074b4698d79170a23c759d680a7a34a | from __future__ import annotations
from typing import TYPE_CHECKING, Any, Sequence, Union
from packaging.version import parse
from pandas import DataFrame, Series
if TYPE_CHECKING:
import numpy as np
if parse(np.__version__) < parse("1.22.0"):
raise NotImplementedError(
"NumPy 1.22.0 or later required for type checking"
)
from numpy.typing import (
ArrayLike as ArrayLike,
DTypeLike,
NDArray,
_FloatLike_co,
_UIntLike_co,
)
_ExtendedFloatLike_co = Union[_FloatLike_co, _UIntLike_co]
NumericArray = NDArray[Any, np.dtype[_ExtendedFloatLike_co]]
Float64Array = NDArray[Any, np.double]
ArrayLike1D = Union[Sequence[Union[float, int]], NumericArray, Series]
ArrayLike2D = Union[
Sequence[Sequence[Union[float, int]]], NumericArray, DataFrame
]
else:
ArrayLike = Any
DTypeLike = Any
Float64Array = Any
NumericArray = Any
ArrayLike1D = Any
ArrayLike2D = Any
NDArray = Any
__all__ = [
"ArrayLike",
"DTypeLike",
"Float64Array",
"ArrayLike1D",
"ArrayLike2D",
"NDArray",
"NumericArray",
]
|
py | b406da3f8d953af5019af87b21d383958071b08c | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyMock(PythonPackage):
"""mock is a library for testing in Python. It allows you to replace parts
of your system under test with mock objects and make assertions about how
they have been used."""
homepage = "https://github.com/testing-cabal/mock"
url = "https://pypi.io/packages/source/m/mock/mock-3.0.5.tar.gz"
version('3.0.5', sha256='83657d894c90d5681d62155c82bda9c1187827525880eda8ff5df4ec813437c3')
version('2.0.0', sha256='b158b6df76edd239b8208d481dc46b6afd45a846b7812ff0ce58971cf5bc8bba')
version('1.3.0', sha256='1e247dbecc6ce057299eb7ee019ad68314bb93152e81d9a6110d35f4d5eca0f6')
depends_on('[email protected]:2.8,3.4:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'), when='@2.0.0:')
depends_on('py-funcsigs@1:', type=('build', 'run'), when='^python@:3.2')
|
py | b406da53fea151f048be528003d31ce59ae4d31f | import re
import functools
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from scrapy.log import msg
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
from sutils import comas2dots, first
# TODO: categories_nextpage_xpath
# TODO: create parent spider class
class CariboomSpider(BaseSpider):
name = 'cariboom.com'
allowed_domains = [name]
start_urls = ('http://www.cariboom.com',)
categories_xpath = "//div[@id='custommenu']//a/@href"
categories_nextpage_xpath = None
products_xpath = "//div[@class='category-products']/ol/li"
products_nextpage_xpath = "//*[@id='contenu']/div[2]/div[2]/div/ol/li[3]/@onclick"
products_nextpage_re = "='(.+)'"
product_name = ("./div[@class='title']/h2/a/text()", )
product_price = ("./div[@class='prix_sans_promo']/div[@class='prix_vente_sans_promo']/text()",
"./div[@class='prix']/div[@class='prix_vente']/text()",)
product_url = ("./div[@class='title']/h2/a/@href", )
def parse(self, response):
base_url = get_base_url(response)
hxs = HtmlXPathSelector(response)
if self.categories_xpath:
categories = hxs.select(self.categories_xpath).extract()
for url in categories:
yield Request(urljoin_rfc(base_url, url),
callback=self.parse_page)
def parse_page(self, response):
base_url = get_base_url(response)
base_url_func = functools.partial(urljoin_rfc, base_url)
hxs = HtmlXPathSelector(response)
# products next page
if self.products_nextpage_xpath:
if not self.products_nextpage_re:
url = hxs.select(self.products_nextpage_xpath).extract()
else:
url = hxs.select(self.products_nextpage_xpath).re(
self.products_nextpage_re)
if url:
yield Request(urljoin_rfc(base_url, url[0]),
callback=self.parse_page)
# products
if self.products_xpath:
for z in hxs.select(self.products_xpath):
loader = ProductLoader(selector=z, item=Product())
if self.product_name:
for xpath in self.product_name:
loader.add_xpath('name', xpath)
#loader.add_xpath('name', "./div[@class='margue']/text()")
if self.product_url:
for xpath in self.product_url:
loader.add_xpath('url', xpath, first, base_url_func)
if self.product_price:
for xpath in self.product_price:
loader.add_xpath('price', xpath, comas2dots)
yield loader.load_item()
|
py | b406dbd795f1c9e1604fabb73da7735903275847 | from bokeh.models import PrintfTickFormatter
from bokeh.plotting import figure, output_file, show
output_file("gridlines.html")
p = figure(width=400, height=400)
p.circle([1,2,3,4,5], [2,5,8,2,7], size=10)
p.xaxis[0].formatter = PrintfTickFormatter(format="%4.1e")
p.yaxis[0].formatter = PrintfTickFormatter(format="%5.3f mu")
show(p)
|
py | b406dc42af4728a09a3328969529abdcc3277f92 | # -*- coding: utf-8 -*-
""" This module provides the base interface for the BabelNet API (v5).
BabelNet key is required. Register at https://babelnet.org/register
There are daily usage limits for free users (up to 1000 queries per day
by default, can be extended to 50,000 for academic users by request).
Commercial version also available.
The current functionality includes:
- Basic retrieval of urllib queries, given a set of arguments;
- Determining whether a word entry exists;
- Automatically formatting the dictionary language argument as required.
- counting the number of queries performed in this session
- retrieving BabelNet nodes and edges;
Todo:
- cache saved between sessions;
- authorization error for babelnet on invalid key;
- querying offline dump of BabelNet.
"""
import urllib
import urllib.request
import json
import gzip
import functools
from ldt.dicts.dictionary import Dictionary
from ldt.helpers.resources import lookup_language_by_code
from ldt.load_config import config
from ldt.helpers.exceptions import AuthorizationError
class BaseBabelNet(Dictionary):
"""The class providing the base BabelNet interface.
Note:
The language argument used for BabelNet API is in 2-letter-code
format, capitalized. LDT assumes the letter codes are the same as
`2-letter codes used in Wiktionary
<https://en.wiktionary.org/wiki/Wiktionary:List_of_languages>`_.
LDT provides on-the-fly conversion as needed.
Todo:
* the exceptions on daily limit exceeded & invalid key
"""
def __init__(self, babelnet_key=config["babelnet_key"], **kw): #pylint:
# disable=unused-argument
""" Initializing the BabelNet class.
Unlike the basic Dictionary class, BabelNet checks the language
argument upon initialization and converts it to the 2-letter code if
necessary. Exception is raised if the BabelNet key is not supplied.
Args:
babelnet_key (str): the BabelNet user key (registration at
`BabelNet <https://babelnet.org/register>`_)
"""
super(BaseBabelNet, self).__init__()
self.queries = 0
if len(self.language) > 2:
self.language = lookup_language_by_code(self.language, reverse=True)
self._language = self.language.upper()
if babelnet_key:
self.babelnet_key = babelnet_key
else:
raise AuthorizationError("Please provide a BabelNet key. If you "
"don't have one, register at "
"https://babelnet.org/register ")
def _set_language(self, language):
"""This method ensures the language arg is a 2-letter code."""
if len(language) > 2:
language = lookup_language_by_code(language, reverse=True)
self._language = language.upper()
def is_a_word(self, word):
"""Determining whether an entry exists in the resource.
        While the Wiktionary module can rely on an internal cache of page titles to
determine whether an entry exists without performing the full query,
that is not possible for BabelNet. So this method actually just
wraps the :meth:`get_ids` method for consistency, and should not be
used to first determine whether a word is worth querying. However,
extra pings should be saved with the cache mechanism.
Args:
word (str): the word to look up
Returns:
(bool): True if the word entry was found.
"""
if self.get_ids(word):
return True
return False
@functools.lru_cache(maxsize=config["cache_size"])
def query(self, url):
"""Helper method for querying BabelNet
Args:
url (str): the url from which data should be retrieved
Returns:
(dict): the data loaded from the retrieved json representation
"""
request = urllib.request.Request(url)
request.add_header('Accept-encoding', 'gzip')
# print(request.get_full_url())
try:
response = urllib.request.urlopen(request)
self.queries += 1
except urllib.error.HTTPError:
print("Cannot reach BabelNet")
return None
if response.info().get('Content-Encoding') == 'gzip':
gz_data = gzip.decompress(response.read()).decode('utf-8')
data = json.loads(gz_data)
return data
@functools.lru_cache(maxsize=config["cache_size"])
def get_ids(self, word, full=False):
"""Returns the list of BabelNet IDS for a given word
Args:
word (str): the word to look up
Returns:
(list): a list of BabelNet ids of the 'bn:00516031n' format
"""
service_url = 'https://babelnet.io/v5/getSynsetIds'
res = []
params = {'lemma': word, 'searchLang': self.language,
'key': self.babelnet_key}
url = service_url + '?' + urllib.parse.urlencode(params)
data = self.query(url)
if full:
return data
for result in data:
res.append(result["id"])
return res
@functools.lru_cache(maxsize=config["cache_size"])
def get_lemmas(self, babelnet_id):
""" Getting lemmas associated with a babelnet_id.
Args:
babelnet_id (str): the id to lookup (e.g. 'bn:00516031n')
Returns:
(list): the list of BabelNet's "simple lemmas" associated with
the queried id
"""
res = {}
service_url = 'https://babelnet.io/v5/getSynset'
params = {'id': babelnet_id, 'searchLang': self.language,
'key': self.babelnet_key}
url = service_url + '?' + urllib.parse.urlencode(params)
data = self.query(url)
senses = data.get("senses")
res = []
for sense in senses:
if sense["properties"]["language"] == self.language:
res.append(sense["properties"]["simpleLemma"])
# if self.lowercasing:
# res = [w.lower() for w in res]
# #todo mwu
res = list(set(res))
res = self.post_process(res)
return res
@functools.lru_cache(maxsize=config["cache_size"])
def get_edges(self, babelnet_id):
""" Getting babelnet_ids related to the given babelnet_id.
Args:
babelnet_id (str): the id to lookup (e.g. 'bn:00516031n')
Returns:
(dict): the dictionary of babelnet ids categorized by relation to
the queried id ("other", "hypernyms", "hyponyms", "meronyms",
"holonyms", "synonyms", "antonyms" are supported)
"""
# returns a dict of ids: list of [types/relation_groups] of words the
# target bnet_id is related to
res = {}
service_url = 'https://babelnet.io/v5/getOutgoingEdges'
params = {'id': babelnet_id, 'searchLang': self.language,
'key': self.babelnet_key}
url = service_url + '?' + urllib.parse.urlencode(params)
data = self.query(url)
res = {"other": [], "hypernyms": [], "hyponyms": [], "meronyms": [],
"holonyms": [], "synonyms": [], "antonyms": []}
#do synonyms work that way?
for result in data:
if self.language == result["language"]:
pointer = result['pointer']
relation = pointer.get('name').lower()
group = pointer.get('relationGroup').lower()
target = result.get('target')
for rel in res:
if group.startswith(rel[:-1]):
res[rel].append(target)
# retrieve lemma instead
if relation.startswith(rel[:-1]):
res[rel].append(target)
return res
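    # Illustrative usage sketch (not part of the original module). The wrapper's
    # class name and constructor are not shown here, so the instance below is a
    # hypothetical `bn` created with a valid BabelNet API key and language:
    #
    #   ids = bn.get_ids("spam")          # e.g. ['bn:00516031n', ...]
    #   lemmas = bn.get_lemmas(ids[0])    # post-processed "simple lemmas" for that synset
    #   edges = bn.get_edges(ids[0])      # {'hypernyms': [...], 'hyponyms': [...], ...}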
|
py | b406dd39b93bdbc236fab041e7eb328d870429bc | #!/usr/bin/env python
import Settings
import json
def init():
global ignoredNumbers
ignoredNumbers = "ignoredNumbers"
global icao
icao = "icao"
global oIgnoredNumbers
    # Load the list of ignored ICAO numbers (the file is closed automatically by the with-block)
    with open(Settings.filePathIgnroeNumbers) as ignoredNumbersFile:
        oIgnoredNumbers = json.load(ignoredNumbersFile)
return
def dumpList():
for oIgnoredNumber in oIgnoredNumbers[ignoredNumbers]:
print "", oIgnoredNumber[icao]
return
def isIgnored(i_strIcao):
for oIgnoredNumber in oIgnoredNumbers[ignoredNumbers]:
if oIgnoredNumber[icao] == i_strIcao:
return True
return False
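# Expected structure of the ignored-numbers file (inferred from the accesses above;
# the concrete ICAO values are only illustrations):
#
#   {
#       "ignoredNumbers": [
#           {"icao": "A1B2C3"},
#           {"icao": "4CA7B6"}
#       ]
#   }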
|
py | b406de626487b3b550505905287ac9d8dac443b3 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
""" Testing
"""
import numpy as np
from nose.tools import assert_true, assert_false, \
assert_equal, assert_raises
from numpy.testing import assert_array_equal, assert_array_almost_equal
from nipy.fixes.scipy.stats.models.utils import matrix_rank
def test_matrix_rank():
# Full rank matrix
yield assert_equal, 4, matrix_rank(np.eye(4))
I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
yield assert_equal, matrix_rank(I), 3
# All zeros - zero rank
yield assert_equal, matrix_rank(np.zeros((4,4))), 0
# 1 dimension - rank 1 unless all 0
yield assert_equal, matrix_rank(np.ones((4,))), 1
yield assert_equal, matrix_rank(np.zeros((4,))), 0
# accepts array-like
yield assert_equal, matrix_rank([1]), 1
|
py | b406ded13bbed851ff6c159357be58053eb06fe5 | ################################################################################
# - Linear regression
# - Now we declare the data.
# - The data below was generated under the assumption y = 2x, so we already know the answer is W = 2, b = 0.
# - The goal is for the model to find these values of W and b on its own.
import myutil as mu
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
torch.manual_seed(1)
# Data
x_train = torch.FloatTensor([[1], [2], [3]])
y_train = torch.FloatTensor([[2], [4], [6]])
model = nn.Linear(in_features=1, out_features=1)
mu.log("model", model)
optimizer = optim.SGD(model.parameters(), lr=0.01)
nb_epochs = 2000
mu.plt_init()
for epoch in range(nb_epochs + 1):
hypothesis = model(x_train)
cost = F.mse_loss(hypothesis, y_train)
    # compute accuracy
accuracy = mu.get_regression_accuracy(hypothesis, y_train)
optimizer.zero_grad()
cost.backward()
optimizer.step()
if epoch % 100 == 0:
mu.log_epoch(epoch, nb_epochs, cost, accuracy)
mu.plt_show()
################################################################################
# - Training is complete, and the cost is now very small.
# - Let's check whether the values of W and b have been optimized as well.
# - We feed an arbitrary input x = 4 into the model and check the predicted y.
# - Since the true relationship is y = 2x, a prediction close to 8 means W and b are reasonably well optimized.
# - The predicted y is in fact about 7.9989, which is very close to 8.
# - Now let's print the values of W and b after training.
# - W is close to 2 and b is close to 0.
mu.log("model", model)
new_var = torch.FloatTensor([[4.0]])
pred_y = model(new_var)
mu.log("new_var.item()", new_var.item())
mu.log("pred_y.item()", pred_y.item())
|
py | b406def22f7a9e47143779574f9783d2c7e3723d | """
A simple testing framework for lldb using python's unit testing framework.
Tests for lldb are written as python scripts which take advantage of the script
bridging provided by LLDB.framework to interact with lldb core.
A specific naming pattern is followed by the .py script to be recognized as
a module which implements a test scenario, namely, Test*.py.
To specify the directories where "Test*.py" python test scripts are located,
you need to pass in a list of directory names. By default, the current
working directory is searched if nothing is specified on the command line.
Type:
./dotest.py -h
for available options.
"""
from __future__ import absolute_import
from __future__ import print_function
# System modules
import atexit
import os
import errno
import logging
import platform
import re
import signal
import socket
import subprocess
import sys
# Third-party modules
import six
import unittest2
# LLDB Modules
import lldbsuite
from . import configuration
from . import dotest_args
from . import lldbtest_config
from . import test_categories
from lldbsuite.test_event import formatter
from . import test_result
from lldbsuite.test_event.event_builder import EventBuilder
from ..support import seven
def is_exe(fpath):
"""Returns true if fpath is an executable."""
    if fpath is None:
return False
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
def which(program):
"""Returns the full path to a program; None otherwise."""
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
class _WritelnDecorator(object):
"""Used to decorate file-like objects with a handy 'writeln' method"""
def __init__(self, stream):
self.stream = stream
def __getattr__(self, attr):
if attr in ('stream', '__getstate__'):
raise AttributeError(attr)
return getattr(self.stream, attr)
def writeln(self, arg=None):
if arg:
self.write(arg)
self.write('\n') # text-mode streams translate to \r\n if needed
#
# Global variables:
#
def usage(parser):
parser.print_help()
if configuration.verbose > 0:
print("""
Examples:
This is an example of using the -f option to pinpoint to a specific test class
and test method to be run:
$ ./dotest.py -f ClassTypesTestCase.test_with_dsym_and_run_command
----------------------------------------------------------------------
Collected 1 test
test_with_dsym_and_run_command (TestClassTypes.ClassTypesTestCase)
Test 'frame variable this' when stopped on a class constructor. ... ok
----------------------------------------------------------------------
Ran 1 test in 1.396s
OK
And this is an example of using the -p option to run a single file (the filename
matches the pattern 'ObjC' and it happens to be 'TestObjCMethods.py'):
$ ./dotest.py -v -p ObjC
----------------------------------------------------------------------
Collected 4 tests
test_break_with_dsym (TestObjCMethods.FoundationTestCase)
Test setting objc breakpoints using '_regexp-break' and 'breakpoint set'. ... ok
test_break_with_dwarf (TestObjCMethods.FoundationTestCase)
Test setting objc breakpoints using '_regexp-break' and 'breakpoint set'. ... ok
test_data_type_and_expr_with_dsym (TestObjCMethods.FoundationTestCase)
Lookup objective-c data types and evaluate expressions. ... ok
test_data_type_and_expr_with_dwarf (TestObjCMethods.FoundationTestCase)
Lookup objective-c data types and evaluate expressions. ... ok
----------------------------------------------------------------------
Ran 4 tests in 16.661s
OK
Running of this script also sets up the LLDB_TEST environment variable so that
individual test cases can locate their supporting files correctly. The script
tries to set up Python's search paths for modules by looking at the build tree
relative to this script. See also the '-i' option in the following example.
Finally, this is an example of using the lldb.py module distributed/installed by
Xcode4 to run against the tests under the 'forward' directory, and with the '-w'
option to add some delay between two tests. It uses ARCH=x86_64 to specify that
as the architecture and CC=clang to specify the compiler used for the test run:
$ PYTHONPATH=/Xcode4/Library/PrivateFrameworks/LLDB.framework/Versions/A/Resources/Python ARCH=x86_64 CC=clang ./dotest.py -v -w -i forward
Session logs for test failures/errors will go into directory '2010-11-11-13_56_16'
----------------------------------------------------------------------
Collected 2 tests
test_with_dsym_and_run_command (TestForwardDeclaration.ForwardDeclarationTestCase)
Display *bar_ptr when stopped on a function with forward declaration of struct bar. ... ok
test_with_dwarf_and_run_command (TestForwardDeclaration.ForwardDeclarationTestCase)
Display *bar_ptr when stopped on a function with forward declaration of struct bar. ... ok
----------------------------------------------------------------------
Ran 2 tests in 5.659s
OK
The 'Session ...' verbiage is recently introduced (see also the '-s' option) to
notify the directory containing the session logs for test failures or errors.
In case there is any test failure/error, a similar message is appended at the
end of the stderr output for your convenience.
ENABLING LOGS FROM TESTS
Option 1:
Writing logs into different files per test case::
This option is particularly useful when multiple dotest instances are created
by dosep.py
$ ./dotest.py --channel "lldb all"
$ ./dotest.py --channel "lldb all" --channel "gdb-remote packets"
These log files are written to:
<session-dir>/<test-id>-host.log (logs from lldb host process)
<session-dir>/<test-id>-server.log (logs from debugserver/lldb-server)
<session-dir>/<test-id>-<test-result>.log (console logs)
By default, logs from successful runs are deleted. Use the --log-success flag
to create reference logs for debugging.
$ ./dotest.py --log-success
Option 2: (DEPRECATED)
The following options can only enable logs from the host lldb process.
Only categories from the "lldb" or "gdb-remote" channels can be enabled
They also do not automatically enable logs in locally running debug servers.
Also, logs from all test case are written into each log file
o LLDB_LOG: if defined, specifies the log file pathname for the 'lldb' subsystem
with a default option of 'event process' if LLDB_LOG_OPTION is not defined.
o GDB_REMOTE_LOG: if defined, specifies the log file pathname for the
'process.gdb-remote' subsystem with a default option of 'packets' if
GDB_REMOTE_LOG_OPTION is not defined.
""")
sys.exit(0)
def parseExclusion(exclusion_file):
"""Parse an exclusion file, of the following format, where
'skip files', 'skip methods', 'xfail files', and 'xfail methods'
are the possible list heading values:
skip files
<file name>
<file name>
xfail methods
<method name>
"""
excl_type = None
with open(exclusion_file) as f:
for line in f:
line = line.strip()
if not excl_type:
excl_type = line
continue
if not line:
excl_type = None
elif excl_type == 'skip':
if not configuration.skip_tests:
configuration.skip_tests = []
configuration.skip_tests.append(line)
elif excl_type == 'xfail':
if not configuration.xfail_tests:
configuration.xfail_tests = []
configuration.xfail_tests.append(line)
def parseOptionsAndInitTestdirs():
"""Initialize the list of directories containing our unittest scripts.
'-h/--help as the first option prints out usage info and exit the program.
"""
do_help = False
platform_system = platform.system()
platform_machine = platform.machine()
parser = dotest_args.create_parser()
args = dotest_args.parse_args(parser, sys.argv[1:])
if args.unset_env_varnames:
for env_var in args.unset_env_varnames:
if env_var in os.environ:
# From Python Doc: When unsetenv() is supported, deletion of items in os.environ
# is automatically translated into a corresponding call to
# unsetenv().
del os.environ[env_var]
# os.unsetenv(env_var)
if args.set_env_vars:
for env_var in args.set_env_vars:
parts = env_var.split('=', 1)
if len(parts) == 1:
os.environ[parts[0]] = ""
else:
os.environ[parts[0]] = parts[1]
# only print the args if being verbose (and parsable is off)
if args.v and not args.q:
print(sys.argv)
if args.h:
do_help = True
if args.compiler:
configuration.compiler = os.path.realpath(args.compiler)
if not is_exe(configuration.compiler):
configuration.compiler = which(args.compiler)
if not is_exe(configuration.compiler):
logging.error(
'%s is not a valid compiler executable; aborting...',
args.compiler)
sys.exit(-1)
else:
        # Use a compiler appropriate for the Apple SDK if one was
        # specified
if platform_system == 'Darwin' and args.apple_sdk:
configuration.compiler = seven.get_command_output(
'xcrun -sdk "%s" -find clang 2> /dev/null' %
(args.apple_sdk))
else:
# 'clang' on ubuntu 14.04 is 3.4 so we try clang-3.5 first
candidateCompilers = ['clang-3.5', 'clang', 'gcc']
for candidate in candidateCompilers:
if which(candidate):
configuration.compiler = candidate
break
if args.dsymutil:
os.environ['DSYMUTIL'] = args.dsymutil
elif platform_system == 'Darwin':
os.environ['DSYMUTIL'] = seven.get_command_output(
'xcrun -find -toolchain default dsymutil')
if args.filecheck:
# The lldb-dotest script produced by the CMake build passes in a path
# to a working FileCheck binary. So does one specific Xcode project
# target. However, when invoking dotest.py directly, a valid --filecheck
# option needs to be given.
configuration.filecheck = os.path.abspath(args.filecheck)
else:
outputPaths = get_llvm_bin_dirs()
for outputPath in outputPaths:
candidatePath = os.path.join(outputPath, 'FileCheck')
if is_exe(candidatePath):
configuration.filecheck = candidatePath
break
if not configuration.get_filecheck_path():
logging.warning('No valid FileCheck executable; some tests may fail...')
logging.warning('(Double-check the --filecheck argument to dotest.py)')
if args.channels:
lldbtest_config.channels = args.channels
if args.log_success:
lldbtest_config.log_success = args.log_success
if args.out_of_tree_debugserver:
lldbtest_config.out_of_tree_debugserver = args.out_of_tree_debugserver
# Set SDKROOT if we are using an Apple SDK
if platform_system == 'Darwin' and args.apple_sdk:
os.environ['SDKROOT'] = seven.get_command_output(
'xcrun --sdk "%s" --show-sdk-path 2> /dev/null' %
(args.apple_sdk))
if args.arch:
configuration.arch = args.arch
if configuration.arch.startswith(
'arm') and platform_system == 'Darwin' and not args.apple_sdk:
os.environ['SDKROOT'] = seven.get_command_output(
'xcrun --sdk iphoneos.internal --show-sdk-path 2> /dev/null')
if not os.path.exists(os.environ['SDKROOT']):
os.environ['SDKROOT'] = seven.get_command_output(
'xcrun --sdk iphoneos --show-sdk-path 2> /dev/null')
else:
configuration.arch = platform_machine
if args.categoriesList:
configuration.categoriesList = set(
test_categories.validate(
args.categoriesList, False))
configuration.useCategories = True
else:
configuration.categoriesList = []
if args.skipCategories:
configuration.skipCategories += test_categories.validate(
args.skipCategories, False)
if args.E:
cflags_extras = args.E
os.environ['CFLAGS_EXTRAS'] = cflags_extras
if args.d:
sys.stdout.write(
"Suspending the process %d to wait for debugger to attach...\n" %
os.getpid())
sys.stdout.flush()
os.kill(os.getpid(), signal.SIGSTOP)
if args.f:
if any([x.startswith('-') for x in args.f]):
usage(parser)
configuration.filters.extend(args.f)
# Shut off multiprocessing mode when additional filters are specified.
# The rational is that the user is probably going after a very specific
# test and doesn't need a bunch of parallel test runners all looking for
# it in a frenzy. Also, '-v' now spits out all test run output even
# on success, so the standard recipe for redoing a failing test (with -v
# and a -f to filter to the specific test) now causes all test scanning
# (in parallel) to print results for do-nothing runs in a very distracting
# manner. If we really need filtered parallel runs in the future, consider
# adding a --no-output-on-success that prevents -v from setting
# output-on-success.
configuration.no_multiprocess_test_runner = True
if args.l:
configuration.skip_long_running_test = False
if args.framework:
configuration.lldbFrameworkPath = args.framework
if args.executable:
# lldb executable is passed explicitly
lldbtest_config.lldbExec = os.path.realpath(args.executable)
if not is_exe(lldbtest_config.lldbExec):
lldbtest_config.lldbExec = which(args.executable)
if not is_exe(lldbtest_config.lldbExec):
logging.error(
'%s is not a valid executable to test; aborting...',
args.executable)
sys.exit(-1)
if args.server:
os.environ['LLDB_DEBUGSERVER_PATH'] = args.server
if args.excluded:
for excl_file in args.excluded:
parseExclusion(excl_file)
if args.p:
if args.p.startswith('-'):
usage(parser)
configuration.regexp = args.p
if args.q:
configuration.parsable = True
if args.s:
if args.s.startswith('-'):
usage(parser)
configuration.sdir_name = args.s
configuration.session_file_format = args.session_file_format
if args.t:
os.environ['LLDB_COMMAND_TRACE'] = 'YES'
if args.v:
configuration.verbose = 2
# argparse makes sure we have a number
if args.sharp:
configuration.count = args.sharp
if sys.platform.startswith('win32'):
os.environ['LLDB_DISABLE_CRASH_DIALOG'] = str(
args.disable_crash_dialog)
os.environ['LLDB_LAUNCH_INFERIORS_WITHOUT_CONSOLE'] = str(True)
if do_help:
usage(parser)
if args.no_multiprocess:
configuration.no_multiprocess_test_runner = True
if args.inferior:
configuration.is_inferior_test_runner = True
if args.num_threads:
configuration.num_threads = args.num_threads
if args.test_subdir:
configuration.exclusive_test_subdir = args.test_subdir
if args.test_runner_name:
configuration.test_runner_name = args.test_runner_name
# Capture test results-related args.
if args.curses and not args.inferior:
# Act as if the following args were set.
args.results_formatter = "lldbsuite.test_event.formatter.curses.Curses"
args.results_file = "stdout"
if args.results_file:
configuration.results_filename = args.results_file
if args.results_port:
configuration.results_port = args.results_port
if args.results_file and args.results_port:
sys.stderr.write(
"only one of --results-file and --results-port should "
"be specified\n")
        usage(parser)
if args.results_formatter:
configuration.results_formatter_name = args.results_formatter
if args.results_formatter_options:
configuration.results_formatter_options = args.results_formatter_options
# Default to using the BasicResultsFormatter if no formatter is specified
# and we're not a test inferior.
if not args.inferior and configuration.results_formatter_name is None:
configuration.results_formatter_name = (
"lldbsuite.test_event.formatter.results_formatter.ResultsFormatter")
# rerun-related arguments
configuration.rerun_all_issues = args.rerun_all_issues
configuration.rerun_max_file_threshold = args.rerun_max_file_threshold
if args.lldb_platform_name:
configuration.lldb_platform_name = args.lldb_platform_name
if args.lldb_platform_url:
configuration.lldb_platform_url = args.lldb_platform_url
if args.lldb_platform_working_dir:
configuration.lldb_platform_working_dir = args.lldb_platform_working_dir
if args.test_build_dir:
configuration.test_build_dir = args.test_build_dir
if args.event_add_entries and len(args.event_add_entries) > 0:
entries = {}
# Parse out key=val pairs, separated by comma
for keyval in args.event_add_entries.split(","):
key_val_entry = keyval.split("=")
if len(key_val_entry) == 2:
(key, val) = key_val_entry
val_parts = val.split(':')
if len(val_parts) > 1:
(val, val_type) = val_parts
if val_type == 'int':
val = int(val)
entries[key] = val
# Tell the event builder to create all events with these
# key/val pairs in them.
if len(entries) > 0:
EventBuilder.add_entries_to_all_events(entries)
# Gather all the dirs passed on the command line.
if len(args.args) > 0:
configuration.testdirs = [os.path.realpath(os.path.abspath(x)) for x in args.args]
# Shut off multiprocessing mode when test directories are specified.
configuration.no_multiprocess_test_runner = True
lldbtest_config.codesign_identity = args.codesign_identity
#print("testdirs:", testdirs)
def getXcodeOutputPaths(lldbRootDirectory):
result = []
# These are for xcode build directories.
xcode3_build_dir = ['build']
xcode4_build_dir = ['build', 'lldb', 'Build', 'Products']
configurations = [
['Debug'],
['DebugClang'],
['Release'],
['BuildAndIntegration']]
xcode_build_dirs = [xcode3_build_dir, xcode4_build_dir]
for configuration in configurations:
for xcode_build_dir in xcode_build_dirs:
outputPath = os.path.join(
lldbRootDirectory, *(xcode_build_dir + configuration))
result.append(outputPath)
return result
def createSocketToLocalPort(port):
def socket_closer(s):
"""Close down an opened socket properly."""
s.shutdown(socket.SHUT_RDWR)
s.close()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("localhost", port))
return (sock, lambda: socket_closer(sock))
def setupTestResults():
"""Sets up test results-related objects based on arg settings."""
# Setup the results formatter configuration.
formatter_config = formatter.FormatterConfig()
formatter_config.filename = configuration.results_filename
formatter_config.formatter_name = configuration.results_formatter_name
formatter_config.formatter_options = (
configuration.results_formatter_options)
formatter_config.port = configuration.results_port
# Create the results formatter.
formatter_spec = formatter.create_results_formatter(
formatter_config)
if formatter_spec is not None and formatter_spec.formatter is not None:
configuration.results_formatter_object = formatter_spec.formatter
# Send an initialize message to the formatter.
initialize_event = EventBuilder.bare_event("initialize")
if isMultiprocessTestRunner():
if (configuration.test_runner_name is not None and
configuration.test_runner_name == "serial"):
# Only one worker queue here.
worker_count = 1
else:
# Workers will be the number of threads specified.
worker_count = configuration.num_threads
else:
worker_count = 1
initialize_event["worker_count"] = worker_count
formatter_spec.formatter.handle_event(initialize_event)
# Make sure we clean up the formatter on shutdown.
if formatter_spec.cleanup_func is not None:
atexit.register(formatter_spec.cleanup_func)
def getOutputPaths(lldbRootDirectory):
"""
Returns typical build output paths for the lldb executable
lldbDirectory - path to the root of the lldb svn/git repo
"""
result = []
if sys.platform == 'darwin':
result.extend(getXcodeOutputPaths(lldbRootDirectory))
# cmake builds? look for build or build/host folder next to llvm directory
# lldb is located in llvm/tools/lldb so we need to go up three levels
llvmParentDir = os.path.abspath(
os.path.join(
lldbRootDirectory,
os.pardir,
os.pardir,
os.pardir))
result.append(os.path.join(llvmParentDir, 'build', 'bin'))
result.append(os.path.join(llvmParentDir, 'build', 'host', 'bin'))
# some cmake developers keep their build directory beside their lldb
# directory
lldbParentDir = os.path.abspath(os.path.join(lldbRootDirectory, os.pardir))
result.append(os.path.join(lldbParentDir, 'build', 'bin'))
result.append(os.path.join(lldbParentDir, 'build', 'host', 'bin'))
return result
def get_llvm_bin_dirs():
"""
Returns an array of paths that may have the llvm/clang/etc binaries
in them, relative to this current file.
Returns an empty array if none are found.
"""
result = []
lldb_root_path = os.path.join(
os.path.dirname(__file__), "..", "..", "..", "..")
paths_to_try = [
"llvm-build/Release+Asserts/x86_64/bin",
"llvm-build/Debug+Asserts/x86_64/bin",
"llvm-build/Release/x86_64/bin",
"llvm-build/Debug/x86_64/bin",
"llvm-build/Ninja-DebugAssert/llvm-macosx-x86_64/bin",
"llvm-build/Ninja-DebugAssert+asan/llvm-macosx-x86_64/bin",
"llvm-build/Ninja-ReleaseAssert/llvm-macosx-x86_64/bin",
"llvm-build/Ninja-ReleaseAssert+asan/llvm-macosx-x86_64/bin",
"llvm-build/Ninja-RelWithDebInfoAssert/llvm-macosx-x86_64/bin",
"llvm-build/Ninja-RelWithDebInfoAssert+asan/llvm-macosx-x86_64/bin",
]
for p in paths_to_try:
path = os.path.join(lldb_root_path, p)
if os.path.exists(path):
result.append(path)
return result
def setupSysPath():
"""
Add LLDB.framework/Resources/Python to the search paths for modules.
As a side effect, we also discover the 'lldb' executable and export it here.
"""
# Get the directory containing the current script.
if "DOTEST_PROFILE" in os.environ and "DOTEST_SCRIPT_DIR" in os.environ:
scriptPath = os.environ["DOTEST_SCRIPT_DIR"]
else:
scriptPath = os.path.dirname(os.path.realpath(__file__))
if not scriptPath.endswith('test'):
print("This script expects to reside in lldb's test directory.")
sys.exit(-1)
os.environ["LLDB_TEST"] = scriptPath
# Set up the root build directory.
builddir = configuration.test_build_dir
if not configuration.test_build_dir:
raise Exception("test_build_dir is not set")
os.environ["LLDB_BUILD"] = os.path.abspath(configuration.test_build_dir)
# Set up the LLDB_SRC environment variable, so that the tests can locate
# the LLDB source code.
os.environ["LLDB_SRC"] = lldbsuite.lldb_root
pluginPath = os.path.join(scriptPath, 'plugins')
toolsLLDBMIPath = os.path.join(scriptPath, 'tools', 'lldb-mi')
toolsLLDBVSCode = os.path.join(scriptPath, 'tools', 'lldb-vscode')
toolsLLDBServerPath = os.path.join(scriptPath, 'tools', 'lldb-server')
# Insert script dir, plugin dir, lldb-mi dir and lldb-server dir to the
# sys.path.
sys.path.insert(0, pluginPath)
# Adding test/tools/lldb-mi to the path makes it easy
sys.path.insert(0, toolsLLDBMIPath)
# to "import lldbmi_testcase" from the MI tests
# Adding test/tools/lldb-vscode to the path makes it easy to
# "import lldb_vscode_testcase" from the VSCode tests
sys.path.insert(0, toolsLLDBVSCode)
# Adding test/tools/lldb-server to the path makes it easy
sys.path.insert(0, toolsLLDBServerPath)
# to "import lldbgdbserverutils" from the lldb-server tests
# This is the root of the lldb git/svn checkout
# When this changes over to a package instead of a standalone script, this
# will be `lldbsuite.lldb_root`
lldbRootDirectory = lldbsuite.lldb_root
# Some of the tests can invoke the 'lldb' command directly.
# We'll try to locate the appropriate executable right here.
# The lldb executable can be set from the command line
# if it's not set, we try to find it now
# first, we try the environment
if not lldbtest_config.lldbExec:
# First, you can define an environment variable LLDB_EXEC specifying the
# full pathname of the lldb executable.
if "LLDB_EXEC" in os.environ:
lldbtest_config.lldbExec = os.environ["LLDB_EXEC"]
if not lldbtest_config.lldbExec:
outputPaths = getOutputPaths(lldbRootDirectory)
for outputPath in outputPaths:
candidatePath = os.path.join(outputPath, 'lldb')
if is_exe(candidatePath):
lldbtest_config.lldbExec = candidatePath
break
if not lldbtest_config.lldbExec:
# Last, check the path
lldbtest_config.lldbExec = which('lldb')
if lldbtest_config.lldbExec and not is_exe(lldbtest_config.lldbExec):
print(
"'{}' is not a path to a valid executable".format(
lldbtest_config.lldbExec))
lldbtest_config.lldbExec = None
if not lldbtest_config.lldbExec:
print("The 'lldb' executable cannot be located. Some of the tests may not be run as a result.")
sys.exit(-1)
# confusingly, this is the "bin" directory
lldbLibDir = os.path.dirname(lldbtest_config.lldbExec)
os.environ["LLDB_LIB_DIR"] = lldbLibDir
lldbImpLibDir = os.path.join(
lldbLibDir,
'..',
'lib') if sys.platform.startswith('win32') else lldbLibDir
os.environ["LLDB_IMPLIB_DIR"] = lldbImpLibDir
print("LLDB library dir:", os.environ["LLDB_LIB_DIR"])
print("LLDB import library dir:", os.environ["LLDB_IMPLIB_DIR"])
os.system('%s -v' % lldbtest_config.lldbExec)
# Assume lldb-mi is in same place as lldb
# If not found, disable the lldb-mi tests
# TODO: Append .exe on Windows
# - this will be in a separate commit in case the mi tests fail horribly
lldbDir = os.path.dirname(lldbtest_config.lldbExec)
lldbMiExec = os.path.join(lldbDir, "lldb-mi")
if is_exe(lldbMiExec):
os.environ["LLDBMI_EXEC"] = lldbMiExec
else:
if not configuration.shouldSkipBecauseOfCategories(["lldb-mi"]):
print(
"The 'lldb-mi' executable cannot be located. The lldb-mi tests can not be run as a result.")
configuration.skipCategories.append("lldb-mi")
lldbVSCodeExec = os.path.join(lldbDir, "lldb-vscode")
if is_exe(lldbVSCodeExec):
os.environ["LLDBVSCODE_EXEC"] = lldbVSCodeExec
else:
if not configuration.shouldSkipBecauseOfCategories(["lldb-vscode"]):
print(
"The 'lldb-vscode' executable cannot be located. The lldb-vscode tests can not be run as a result.")
configuration.skipCategories.append("lldb-vscode")
lldbPythonDir = None # The directory that contains 'lldb/__init__.py'
if not configuration.lldbFrameworkPath and os.path.exists(os.path.join(lldbLibDir, "LLDB.framework")):
configuration.lldbFrameworkPath = os.path.join(lldbLibDir, "LLDB.framework")
if configuration.lldbFrameworkPath:
lldbtest_config.lldbFrameworkPath = configuration.lldbFrameworkPath
candidatePath = os.path.join(
configuration.lldbFrameworkPath, 'Resources', 'Python')
if os.path.isfile(os.path.join(candidatePath, 'lldb/__init__.py')):
lldbPythonDir = candidatePath
if not lldbPythonDir:
print(
'Resources/Python/lldb/__init__.py was not found in ' +
configuration.lldbFrameworkPath)
sys.exit(-1)
else:
# If our lldb supports the -P option, use it to find the python path:
init_in_python_dir = os.path.join('lldb', '__init__.py')
lldb_dash_p_result = subprocess.check_output(
[lldbtest_config.lldbExec, "-P"], stderr=subprocess.STDOUT, universal_newlines=True)
if lldb_dash_p_result and not lldb_dash_p_result.startswith(
("<", "lldb: invalid option:")) and not lldb_dash_p_result.startswith("Traceback"):
lines = lldb_dash_p_result.splitlines()
# Workaround for readline vs libedit issue on FreeBSD. If stdout
# is not a terminal Python executes
# rl_variable_bind ("enable-meta-key", "off");
# This produces a warning with FreeBSD's libedit because the
# enable-meta-key variable is unknown. Not an issue on Apple
# because cpython commit f0ab6f9f0603 added a #ifndef __APPLE__
# around the call. See http://bugs.python.org/issue19884 for more
# information. For now we just discard the warning output.
if len(lines) >= 1 and lines[0].startswith(
"bind: Invalid command"):
lines.pop(0)
# Taking the last line because lldb outputs
# 'Cannot read termcap database;\nusing dumb terminal settings.\n'
# before the path
if len(lines) >= 1 and os.path.isfile(
os.path.join(lines[-1], init_in_python_dir)):
lldbPythonDir = lines[-1]
if "freebsd" in sys.platform or "linux" in sys.platform:
os.environ['LLDB_LIB_DIR'] = os.path.join(
lldbPythonDir, '..', '..')
if not lldbPythonDir:
if platform.system() == "Darwin":
python_resource_dir = ['LLDB.framework', 'Resources', 'Python']
outputPaths = getXcodeOutputPaths(lldbRootDirectory)
for outputPath in outputPaths:
candidatePath = os.path.join(
outputPath, *python_resource_dir)
if os.path.isfile(
os.path.join(
candidatePath,
init_in_python_dir)):
lldbPythonDir = candidatePath
break
if not lldbPythonDir:
print("lldb.py is not found, some tests may fail.")
else:
print(
"Unable to load lldb extension module. Possible reasons for this include:")
print(" 1) LLDB was built with LLDB_DISABLE_PYTHON=1")
print(
" 2) PYTHONPATH and PYTHONHOME are not set correctly. PYTHONHOME should refer to")
print(
" the version of Python that LLDB built and linked against, and PYTHONPATH")
print(
" should contain the Lib directory for the same python distro, as well as the")
print(" location of LLDB\'s site-packages folder.")
print(
" 3) A different version of Python than that which was built against is exported in")
print(" the system\'s PATH environment variable, causing conflicts.")
print(
" 4) The executable '%s' could not be found. Please check " %
lldbtest_config.lldbExec)
print(" that it exists and is executable.")
if lldbPythonDir:
lldbPythonDir = os.path.normpath(lldbPythonDir)
# Some of the code that uses this path assumes it hasn't resolved the Versions... link.
# If the path we've constructed looks like that, then we'll strip out
# the Versions/A part.
(before, frameWithVersion, after) = lldbPythonDir.rpartition(
"LLDB.framework/Versions/A")
if frameWithVersion != "":
lldbPythonDir = before + "LLDB.framework" + after
lldbPythonDir = os.path.abspath(lldbPythonDir)
# If tests need to find LLDB_FRAMEWORK, now they can do it
os.environ["LLDB_FRAMEWORK"] = os.path.dirname(
os.path.dirname(lldbPythonDir))
# This is to locate the lldb.py module. Insert it right after
# sys.path[0].
sys.path[1:1] = [lldbPythonDir]
def visit_file(dir, name):
# Try to match the regexp pattern, if specified.
if configuration.regexp:
if not re.search(configuration.regexp, name):
# We didn't match the regex, we're done.
return
if configuration.skip_tests:
for file_regexp in configuration.skip_tests:
if re.search(file_regexp, name):
return
# We found a match for our test. Add it to the suite.
# Update the sys.path first.
if not sys.path.count(dir):
sys.path.insert(0, dir)
base = os.path.splitext(name)[0]
# Thoroughly check the filterspec against the base module and admit
# the (base, filterspec) combination only when it makes sense.
filterspec = None
for filterspec in configuration.filters:
# Optimistically set the flag to True.
filtered = True
module = __import__(base)
parts = filterspec.split('.')
obj = module
for part in parts:
try:
parent, obj = obj, getattr(obj, part)
except AttributeError:
# The filterspec has failed.
filtered = False
break
# If filtered, we have a good filterspec. Add it.
if filtered:
# print("adding filter spec %s to module %s" % (filterspec, module))
configuration.suite.addTests(
unittest2.defaultTestLoader.loadTestsFromName(
filterspec, module))
continue
# Forgo this module if the (base, filterspec) combo is invalid
if configuration.filters and not filtered:
return
if not filterspec or not filtered:
# Add the entire file's worth of tests since we're not filtered.
# Also the fail-over case when the filterspec branch
# (base, filterspec) combo doesn't make sense.
configuration.suite.addTests(
unittest2.defaultTestLoader.loadTestsFromName(base))
# TODO: This should be replaced with a call to find_test_files_in_dir_tree.
def visit(prefix, dir, names):
"""Visitor function for os.path.walk(path, visit, arg)."""
dir_components = set(dir.split(os.sep))
excluded_components = set(['.svn', '.git'])
if dir_components.intersection(excluded_components):
return
# Gather all the Python test file names that follow the Test*.py pattern.
python_test_files = [
name
for name in names
if name.endswith('.py') and name.startswith(prefix)]
# Visit all the python test files.
for name in python_test_files:
try:
# Ensure we error out if we have multiple tests with the same
# base name.
# Future improvement: find all the places where we work with base
# names and convert to full paths. We have directory structure
# to disambiguate these, so we shouldn't need this constraint.
if name in configuration.all_tests:
raise Exception("Found multiple tests with the name %s" % name)
configuration.all_tests.add(name)
# Run the relevant tests in the python file.
visit_file(dir, name)
except Exception as ex:
# Convert this exception to a test event error for the file.
test_filename = os.path.abspath(os.path.join(dir, name))
if configuration.results_formatter_object is not None:
# Grab the backtrace for the exception.
import traceback
backtrace = traceback.format_exc()
# Generate the test event.
configuration.results_formatter_object.handle_event(
EventBuilder.event_for_job_test_add_error(
test_filename, ex, backtrace))
raise
def disabledynamics():
import lldb
ci = lldb.DBG.GetCommandInterpreter()
res = lldb.SBCommandReturnObject()
ci.HandleCommand(
"setting set target.prefer-dynamic-value no-dynamic-values",
res,
False)
if not res.Succeeded():
raise Exception('disabling dynamic type support failed')
def lldbLoggings():
    """Check and do lldb loggings if necessary."""
    import lldb
# Turn on logging for debugging purposes if ${LLDB_LOG} environment variable is
# defined. Use ${LLDB_LOG} to specify the log file.
ci = lldb.DBG.GetCommandInterpreter()
res = lldb.SBCommandReturnObject()
if ("LLDB_LOG" in os.environ):
open(os.environ["LLDB_LOG"], 'w').close()
if ("LLDB_LOG_OPTION" in os.environ):
lldb_log_option = os.environ["LLDB_LOG_OPTION"]
else:
lldb_log_option = "event process expr state api"
ci.HandleCommand(
"log enable -n -f " +
os.environ["LLDB_LOG"] +
" lldb " +
lldb_log_option,
res)
if not res.Succeeded():
raise Exception('log enable failed (check LLDB_LOG env variable)')
if ("LLDB_LINUX_LOG" in os.environ):
open(os.environ["LLDB_LINUX_LOG"], 'w').close()
if ("LLDB_LINUX_LOG_OPTION" in os.environ):
lldb_log_option = os.environ["LLDB_LINUX_LOG_OPTION"]
else:
lldb_log_option = "event process expr state api"
ci.HandleCommand(
"log enable -n -f " +
os.environ["LLDB_LINUX_LOG"] +
" linux " +
lldb_log_option,
res)
if not res.Succeeded():
raise Exception(
'log enable failed (check LLDB_LINUX_LOG env variable)')
# Ditto for gdb-remote logging if ${GDB_REMOTE_LOG} environment variable is defined.
# Use ${GDB_REMOTE_LOG} to specify the log file.
if ("GDB_REMOTE_LOG" in os.environ):
if ("GDB_REMOTE_LOG_OPTION" in os.environ):
gdb_remote_log_option = os.environ["GDB_REMOTE_LOG_OPTION"]
else:
gdb_remote_log_option = "packets process"
ci.HandleCommand(
"log enable -n -f " + os.environ["GDB_REMOTE_LOG"] + " gdb-remote "
+ gdb_remote_log_option,
res)
if not res.Succeeded():
raise Exception(
'log enable failed (check GDB_REMOTE_LOG env variable)')
def getMyCommandLine():
return ' '.join(sys.argv)
# ======================================== #
# #
# Execution of the test driver starts here #
# #
# ======================================== #
def checkDsymForUUIDIsNotOn():
cmd = ["defaults", "read", "com.apple.DebugSymbols"]
process = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
cmd_output = process.stdout.read()
output_str = cmd_output.decode("utf-8")
if "DBGFileMappedPaths = " in output_str:
print("%s =>" % ' '.join(cmd))
print(output_str)
print(
"Disable automatic lookup and caching of dSYMs before running the test suite!")
print("Exiting...")
sys.exit(0)
def exitTestSuite(exitCode=None):
import lldb
lldb.SBDebugger.Terminate()
if exitCode:
sys.exit(exitCode)
def isMultiprocessTestRunner():
# We're not multiprocess when we're either explicitly
# the inferior (as specified by the multiprocess test
# runner) OR we've been told to skip using the multiprocess
# test runner
return not (
configuration.is_inferior_test_runner or configuration.no_multiprocess_test_runner)
def getVersionForSDK(sdk):
sdk = str.lower(sdk)
full_path = seven.get_command_output('xcrun -sdk %s --show-sdk-path' % sdk)
basename = os.path.basename(full_path)
basename = os.path.splitext(basename)[0]
basename = str.lower(basename)
ver = basename.replace(sdk, '')
return ver
def getPathForSDK(sdk):
sdk = str.lower(sdk)
full_path = seven.get_command_output('xcrun -sdk %s --show-sdk-path' % sdk)
if os.path.exists(full_path):
return full_path
return None
def setDefaultTripleForPlatform():
if configuration.lldb_platform_name == 'ios-simulator':
triple_str = 'x86_64-apple-ios%s' % (
getVersionForSDK('iphonesimulator'))
os.environ['TRIPLE'] = triple_str
return {'TRIPLE': triple_str}
return {}
def checkCompiler():
# Add some intervention here to sanity check that the compiler requested is sane.
# If found not to be an executable program, we abort.
c = configuration.compiler
if which(c):
return
if not sys.platform.startswith("darwin"):
raise Exception(c + " is not a valid compiler")
pipe = subprocess.Popen(
['xcrun', '-find', c], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
cmd_output = pipe.stdout.read()
if not cmd_output or "not found" in cmd_output:
raise Exception(c + " is not a valid compiler")
configuration.compiler = cmd_output.split('\n')[0]
print("'xcrun -find %s' returning %s" % (c, configuration.compiler))
def canRunLibcxxTests():
from lldbsuite.test import lldbplatformutil
platform = lldbplatformutil.getPlatform()
if lldbplatformutil.target_is_android() or lldbplatformutil.platformIsDarwin():
return True, "libc++ always present"
if platform == "linux":
if not os.path.isdir("/usr/include/c++/v1"):
return False, "Unable to find libc++ installation"
return True, "Headers found, let's hope they work"
return False, "Don't know how to build with libc++ on %s" % platform
def checkLibcxxSupport():
result, reason = canRunLibcxxTests()
if result:
return # libc++ supported
if "libc++" in configuration.categoriesList:
return # libc++ category explicitly requested, let it run.
print("Libc++ tests will not be run because: " + reason)
configuration.skipCategories.append("libc++")
def canRunLibstdcxxTests():
from lldbsuite.test import lldbplatformutil
platform = lldbplatformutil.getPlatform()
if platform == "linux":
return True, "libstdcxx always present"
return False, "Don't know how to build with libstdcxx on %s" % platform
def checkLibstdcxxSupport():
result, reason = canRunLibstdcxxTests()
if result:
return # libstdcxx supported
if "libstdcxx" in configuration.categoriesList:
return # libstdcxx category explicitly requested, let it run.
print("libstdcxx tests will not be run because: " + reason)
configuration.skipCategories.append("libstdcxx")
def checkDebugInfoSupport():
import lldb
platform = lldb.DBG.GetSelectedPlatform().GetTriple().split('-')[2]
compiler = configuration.compiler
skipped = []
for cat in test_categories.debug_info_categories:
if cat in configuration.categoriesList:
continue # Category explicitly requested, let it run.
if test_categories.is_supported_on_platform(cat, platform, compiler):
continue
configuration.skipCategories.append(cat)
skipped.append(cat)
if skipped:
print("Skipping following debug info categories:", skipped)
def run_suite():
# On MacOS X, check to make sure that domain for com.apple.DebugSymbols defaults
# does not exist before proceeding to running the test suite.
if sys.platform.startswith("darwin"):
checkDsymForUUIDIsNotOn()
#
# Start the actions by first parsing the options while setting up the test
# directories, followed by setting up the search paths for lldb utilities;
# then, we walk the directory trees and collect the tests into our test suite.
#
parseOptionsAndInitTestdirs()
# Setup test results (test results formatter and output handling).
setupTestResults()
# If we are running as the multiprocess test runner, kick off the
# multiprocess test runner here.
if isMultiprocessTestRunner():
from . import dosep
dosep.main(
configuration.num_threads,
configuration.test_runner_name,
configuration.results_formatter_object)
raise Exception("should never get here")
elif configuration.is_inferior_test_runner:
# Shut off Ctrl-C processing in inferiors. The parallel
# test runner handles this more holistically.
signal.signal(signal.SIGINT, signal.SIG_IGN)
setupSysPath()
#
# If '-l' is specified, do not skip the long running tests.
if not configuration.skip_long_running_test:
os.environ["LLDB_SKIP_LONG_RUNNING_TEST"] = "NO"
# For the time being, let's bracket the test runner within the
# lldb.SBDebugger.Initialize()/Terminate() pair.
import lldb
# Create a singleton SBDebugger in the lldb namespace.
lldb.DBG = lldb.SBDebugger.Create()
if configuration.lldb_platform_name:
print("Setting up remote platform '%s'" %
(configuration.lldb_platform_name))
lldb.remote_platform = lldb.SBPlatform(
configuration.lldb_platform_name)
if not lldb.remote_platform.IsValid():
print(
"error: unable to create the LLDB platform named '%s'." %
(configuration.lldb_platform_name))
exitTestSuite(1)
if configuration.lldb_platform_url:
# We must connect to a remote platform if a LLDB platform URL was
# specified
print(
"Connecting to remote platform '%s' at '%s'..." %
(configuration.lldb_platform_name, configuration.lldb_platform_url))
platform_connect_options = lldb.SBPlatformConnectOptions(
configuration.lldb_platform_url)
err = lldb.remote_platform.ConnectRemote(platform_connect_options)
if err.Success():
print("Connected.")
else:
print("error: failed to connect to remote platform using URL '%s': %s" % (
configuration.lldb_platform_url, err))
exitTestSuite(1)
else:
configuration.lldb_platform_url = None
platform_changes = setDefaultTripleForPlatform()
first = True
for key in platform_changes:
if first:
print("Environment variables setup for platform support:")
first = False
print("%s = %s" % (key, platform_changes[key]))
if configuration.lldb_platform_working_dir:
print("Setting remote platform working directory to '%s'..." %
(configuration.lldb_platform_working_dir))
error = lldb.remote_platform.MakeDirectory(
configuration.lldb_platform_working_dir, 448) # 448 = 0o700
if error.Fail():
raise Exception("making remote directory '%s': %s" % (
configuration.lldb_platform_working_dir, error))
if not lldb.remote_platform.SetWorkingDirectory(
configuration.lldb_platform_working_dir):
raise Exception("failed to set working directory '%s'" % configuration.lldb_platform_working_dir)
lldb.DBG.SetSelectedPlatform(lldb.remote_platform)
else:
lldb.remote_platform = None
configuration.lldb_platform_working_dir = None
configuration.lldb_platform_url = None
# Set up the working directory.
# Note that it's not dotest's job to clean this directory.
import lldbsuite.test.lldbutil as lldbutil
build_dir = configuration.test_build_dir
lldbutil.mkdir_p(build_dir)
target_platform = lldb.DBG.GetSelectedPlatform().GetTriple().split('-')[2]
checkLibcxxSupport()
checkLibstdcxxSupport()
checkDebugInfoSupport()
# Don't do debugserver tests on anything except OS X.
configuration.dont_do_debugserver_test = "linux" in target_platform or "freebsd" in target_platform or "windows" in target_platform
# Don't do lldb-server (llgs) tests on anything except Linux.
configuration.dont_do_llgs_test = not ("linux" in target_platform)
# Collect tests from the specified testing directories. If a test
# subdirectory filter is explicitly specified, limit the search to that
# subdirectory.
exclusive_test_subdir = configuration.get_absolute_path_to_exclusive_test_subdir()
if exclusive_test_subdir:
dirs_to_search = [exclusive_test_subdir]
else:
dirs_to_search = configuration.testdirs
for testdir in dirs_to_search:
for (dirpath, dirnames, filenames) in os.walk(testdir):
visit('Test', dirpath, filenames)
#
# Now that we have loaded all the test cases, run the whole test suite.
#
# Turn on lldb loggings if necessary.
lldbLoggings()
# Disable default dynamic types for testing purposes
disabledynamics()
# Install the control-c handler.
unittest2.signals.installHandler()
# If sdir_name is not specified through the '-s sdir_name' option, get a
# timestamp string and export it as LLDB_SESSION_DIR environment var. This will
# be used when/if we want to dump the session info of individual test cases
# later on.
#
# See also TestBase.dumpSessionInfo() in lldbtest.py.
import datetime
# The windows platforms don't like ':' in the pathname.
timestamp_started = datetime.datetime.now().strftime("%Y-%m-%d-%H_%M_%S")
if not configuration.sdir_name:
configuration.sdir_name = timestamp_started
os.environ["LLDB_SESSION_DIRNAME"] = os.path.join(
os.getcwd(), configuration.sdir_name)
sys.stderr.write(
"\nSession logs for test failures/errors/unexpected successes"
" will go into directory '%s'\n" %
configuration.sdir_name)
sys.stderr.write("Command invoked: %s\n" % getMyCommandLine())
if not os.path.isdir(configuration.sdir_name):
try:
os.mkdir(configuration.sdir_name)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
#
# Invoke the default TextTestRunner to run the test suite
#
checkCompiler()
if not configuration.parsable:
print("compiler=%s" % configuration.compiler)
# Iterating over all possible architecture and compiler combinations.
os.environ["ARCH"] = configuration.arch
os.environ["CC"] = configuration.compiler
configString = "arch=%s compiler=%s" % (configuration.arch,
configuration.compiler)
# Translate ' ' to '-' for pathname component.
if six.PY2:
import string
tbl = string.maketrans(' ', '-')
else:
tbl = str.maketrans(' ', '-')
configPostfix = configString.translate(tbl)
# Output the configuration.
if not configuration.parsable:
sys.stderr.write("\nConfiguration: " + configString + "\n")
# First, write out the number of collected test cases.
if not configuration.parsable:
sys.stderr.write(configuration.separator + "\n")
sys.stderr.write(
"Collected %d test%s\n\n" %
(configuration.suite.countTestCases(),
configuration.suite.countTestCases() != 1 and "s" or ""))
if configuration.parsable:
v = 0
else:
v = configuration.verbose
# Invoke the test runner.
if configuration.count == 1:
result = unittest2.TextTestRunner(
stream=sys.stderr,
verbosity=v,
resultclass=test_result.LLDBTestResult).run(
configuration.suite)
else:
# We are invoking the same test suite more than once. In this case,
        # mark __ignore_singleton__ flag as True so the singleton pattern is
# not enforced.
test_result.LLDBTestResult.__ignore_singleton__ = True
for i in range(configuration.count):
result = unittest2.TextTestRunner(
stream=sys.stderr,
verbosity=v,
resultclass=test_result.LLDBTestResult).run(
configuration.suite)
configuration.failed = not result.wasSuccessful()
if configuration.sdir_has_content and not configuration.parsable:
sys.stderr.write(
"Session logs for test failures/errors/unexpected successes"
" can be found in directory '%s'\n" %
configuration.sdir_name)
if configuration.useCategories and len(
configuration.failuresPerCategory) > 0:
sys.stderr.write("Failures per category:\n")
for category in configuration.failuresPerCategory:
sys.stderr.write(
"%s - %d\n" %
(category, configuration.failuresPerCategory[category]))
# Terminate the test suite if ${LLDB_TESTSUITE_FORCE_FINISH} is defined.
# This should not be necessary now.
if ("LLDB_TESTSUITE_FORCE_FINISH" in os.environ):
print("Terminating Test suite...")
subprocess.Popen(["/bin/sh", "-c", "kill %s; exit 0" % (os.getpid())])
# Exiting.
exitTestSuite(configuration.failed)
if __name__ == "__main__":
print(
__file__ +
" is for use as a module only. It should not be run as a standalone script.")
sys.exit(-1)
|
py | b406dfe8056357a6763739c374464604941e26d4 | # Copyright 2021 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from .retrieval_metric import RetrievalMetric
from tensorflow_similarity.types import FloatTensor, IntTensor, BoolTensor
class PrecisionAtK(RetrievalMetric):
r"""Precision@K is computed as.
$$
P_i@k = \frac{TP_i}{TP_i+FP_i} = \frac{\sum_{j = 1}^{k} {rel_i_j}}{K}
$$
Where: K is the number of neighbors in the i_th query result set.
rel is the relevance mask (indicator function) for the i_th query.
i represents the i_th query.
j represents the j_th ranked query result.
P@K is unordered and does not take into account the rank of the TP results.
This metric is useful when we are interested in evaluating the embedding
within the context of a kNN classifier or as part of a clustering method.
Args:
name: Name associated with the metric object, e.g., precision@5
canonical_name: The canonical name associated with metric,
e.g., precision@K
k: The number of nearest neighbors over which the metric is computed.
distance_threshold: The max distance below which a nearest neighbor is
considered a valid match.
average: {'micro', 'macro'} Determines the type of averaging performed
on the data.
* 'micro': Calculates metrics globally over all data.
* 'macro': Calculates metrics for each label and takes the unweighted
mean.
"""
def __init__(self, name: str = "precision", k: int = 5, **kwargs) -> None:
if "canonical_name" not in kwargs:
kwargs["canonical_name"] = "precision@k"
super().__init__(name=name, k=k, **kwargs)
def compute(
self,
*, # keyword only arguments see PEP-570
query_labels: IntTensor,
match_mask: BoolTensor,
**kwargs,
) -> FloatTensor:
"""Compute the metric
Args:
query_labels: A 1D array of the labels associated with the
embedding queries.
match_mask: A 2D mask where a 1 indicates a match between the
jth query and the kth neighbor and a 0 indicates a mismatch.
**kwargs: Additional compute args.
Returns:
A rank 0 tensor containing the metric.
"""
self._check_shape(query_labels, match_mask)
k_slice = tf.cast(match_mask[:, : self.k], dtype="float")
tp = tf.math.reduce_sum(k_slice, axis=1)
per_example_p = tf.math.divide(tp, self.k)
if self.average == "micro":
p_at_k = tf.math.reduce_mean(per_example_p)
elif self.average == "macro":
per_class_metrics = 0
class_labels = tf.unique(query_labels)[0]
for label in class_labels:
idxs = tf.where(query_labels == label)
c_slice = tf.gather(per_example_p, indices=idxs)
per_class_metrics += tf.math.reduce_mean(c_slice)
p_at_k = tf.math.divide(per_class_metrics, len(class_labels))
else:
raise ValueError(
f"{self.average} is not a supported average " "option"
)
result: FloatTensor = p_at_k
return result
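# Minimal usage sketch (not part of the library source). The toy tensors below are
# illustrative only, and the metric is assumed to fall back to the RetrievalMetric
# defaults (micro averaging) for everything except the explicit k.
if __name__ == "__main__":
    metric = PrecisionAtK(k=2)
    query_labels = tf.constant([1, 0], dtype=tf.int32)  # label of each query
    match_mask = tf.constant([[True, True, False],      # query 0: top-2 neighbors both match
                              [True, False, False]])    # query 1: one of the top-2 matches
    # per-example precision@2 is [1.0, 0.5], so the micro average should be 0.75
    print(metric.compute(query_labels=query_labels, match_mask=match_mask))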
|
py | b406e036213c04e85db1519756f09151c6c70f73 |
"""YouTube Dataset class
For training a CycleGAN Network.
"""
from data.base_dataset import BaseDataset
from data.audio_folder import make_dataset, default_loader
from util import mkdir
import csv
import os
import librosa
import soundfile as sf
import numpy as np
import torch
class YoutubeDataset(BaseDataset):
"""A template dataset class for you to implement custom datasets."""
@staticmethod
def modify_commandline_options(parser, is_train):
"""Add new dataset-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
"""
parser.add_argument('--stride', type=int, default=15, help='Stride in reading in audio (in secs)')
parser.add_argument('--length', type=int, default=30, help='Length of each audio sample when processing (in secs)')
parser.add_argument('--sample_rate', type=int, default=20480, help='Sample Rate to resample')
parser.add_argument('--nfft', type=int, default=2048, help='Number of Frequency bins for STFT')
parser.add_argument('--mel', type=bool, default=False, help='Use the mel scale')
parser.add_argument('--subdir', type=str, default="splits", help='Subdir of audio data to use')
parser.set_defaults(max_dataset_size=10, new_dataset_option=2.0) # specify dataset-specific default values
return parser
def __init__(self, opt):
"""Initialize this dataset class.
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
A few things can be done here.
- save the options (have been done in BaseDataset)
- get image paths and meta information of the dataset.
- define the image transformation.
"""
# save the option and dataset root
BaseDataset.__init__(self, opt)
# get the audio path
        self.dirpath = os.path.join(self.root, opt.subdir)
        # Cache directory for the per-frame spectrogram files written below
        self.cachedir = os.path.join(self.dirpath, '__cache')
        mkdir(self.cachedir)
        # TODO: reuse cached metadata if it exists
# metadata_file = os.path.join(self.cachedir, 'metadata.csv')
# if os.path.exist(metadata_file):
# with open(metadata_file, 'r') as f:
# metadata = json.load(f)
# else:
# metadata = dict()
self.audio_paths = sorted(make_dataset(self.dirpath, opt.max_dataset_size))
self.frame_paths = list()
self.sample_rate = opt.sample_rate
self.length = opt.length
self.stride = opt.stride
self.nfft = opt.nfft
self.subdir = opt.subdir
self.mel = opt.mel
for p in self.audio_paths:
# Load audio
filename = os.path.splitext(os.path.basename(p))[0]
y, sr = sf.read(p, dtype='float32')
t = librosa.get_duration(y=y, sr=sr)
# Resample Audio
            if sr != self.sample_rate:
                y = librosa.resample(y, sr, self.sample_rate)
                sr = self.sample_rate
# Pad the audio
            l = t % opt.stride
            if l != 0:
                t = t + opt.stride - l
            y = librosa.util.fix_length(y, int(t * self.sample_rate))
# Length Check
if t < self.length:
print('Skipping {} due to short content length'.format(p))
continue
# Compute frames and store
frames = self.frame(y, sr, length=self.length, stride=self.stride)
# cannot store all the frames; store it into files
            for i in range(frames.shape[1]):
                fp = os.path.join(self.cachedir, '{}.{}.npy'.format(filename, i))
                np.save(fp, frames[:, i])
self.frame_paths.append({
'file': filename,
'frame': i,
'path': fp
})
def __getitem__(self, index):
"""Return a data point and its metadata information.
Parameters:
index -- a random integer for data indexing
"""
metadata = self.frame_paths[index]
frame = np.load(metadata['path'])
data = self.transform(frame)
result = {'data': data}
result.update(metadata)
return result
def __len__(self):
"""Return the total number of images."""
return len(self.frame_paths)
def transform(self, frame):
        if self.mel:
            # note: librosa.hz_to_mel converts values to the mel scale; a mel
            # spectrogram may be what is actually intended here
            frame = librosa.hz_to_mel(frame)
# STFT
D = librosa_stft(frame, nfft=self.nfft)
lmag, agl = librosa_calc(D)
# TODO: add normalization
return combine_mag_angle(lmag, agl)
def librosa_stft(y, nfft=2048, sample_rate=20480):
    # window length derived from the original 30 * sample_rate / 1024 expression
    return librosa.stft(y, n_fft=nfft, win_length=int(30 * sample_rate / 1024))
def torch_stft(x, nfft=2048):
    # return_complex=False keeps the (real, imag) last dimension expected by torch_calc
    return torch.stft(x, n_fft=nfft, return_complex=False)
def librosa_calc(D):
log_mag = np.log(np.abs(D))
agl = np.angle(D)
return torch.from_numpy(log_mag), torch.from_numpy(agl)
def torch_calc(D):
    x = torch.from_numpy(D)
    real = x[:, :, :, 0]
    comp = x[:, :, :, 1]
    # log-magnitude: log|D| = 0.5 * log(re^2 + im^2), matching librosa_calc above
    log_mag = 0.5 * torch.log(real ** 2 + comp ** 2)
    # atan2 handles all quadrants (and real == 0) correctly
    agl = torch.atan2(comp, real)
    return log_mag, agl
def combine_mag_angle(mag, agl):
return torch.stack((mag, agl), 2)
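# --- Illustrative sketch (added; not part of the original dataset class) ---
# The helpers above turn a waveform into a (freq_bins, frames, 2) tensor that
# stacks log-magnitude and phase. The function below runs that pipeline on a
# synthetic 440 Hz tone; it assumes the module-level imports used elsewhere in
# this file (numpy as np, librosa, torch).
def _example_frame_transform(sample_rate=20480, nfft=2048):
    t = np.arange(sample_rate) / float(sample_rate)           # one second of audio
    y = np.sin(2 * np.pi * 440.0 * t).astype(np.float32)      # stand-in for a real frame
    D = librosa_stft(y, nfft=nfft, sample_rate=sample_rate)   # complex spectrogram
    lmag, agl = librosa_calc(D)                                # log-magnitude and phase
    return combine_mag_angle(lmag, agl)                        # shape (nfft//2 + 1, n_frames, 2)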
|
py | b406e101ec67991a4046c5dcc63ae8f28604bf9b | import collections
import math
import argparse
import sys
import heapq
import re
import time
import os
import random
class VowelInsertionProblem:
def __init__(self, queryWords, bigramCost, possibleFills, dictionary):
self.queryWords = queryWords
self.bigramCost = bigramCost
self.possibleFills = possibleFills
self.dictionary = dictionary # all known correct spellings
def startState(self):
return wordsegUtil.SENTENCE_BEGIN, 0
def isEnd(self, state):
return state[1] == len(self.queryWords)
def succAndCost(self, state):
cur_word = self.queryWords[state[1]]
fillings = self.possibleFills(cur_word)
# corner case: there are no fillings - just return word as-is as the only successor
if len(fillings) == 0 or self.dictionary[cur_word]:
return [(cur_word, (cur_word, state[1] + 1), self.bigramCost(state[0], cur_word))]
# else give all possible fillings as successors
succ = []
for filling in self.possibleFills(cur_word):
succ.append((filling, (filling, state[1] + 1), self.bigramCost(state[0], filling)))
return succ
class spellCheckUtil:
@staticmethod
def insertVowels(queryWords, bigramCost, possibleFills, dictionary):
if len(queryWords) == 0:
return ''
ucs = UniformCostSearch(verbose=0)
ucs.solve(VowelInsertionProblem(queryWords, bigramCost, possibleFills, dictionary))
return ' '.join(ucs.actions) if ucs.actions else ""
class UniformCostSearch:
def __init__(self, verbose=0):
self.verbose = verbose
def solve(self, problem):
# If a path exists, set |actions| and |totalCost| accordingly.
# Otherwise, leave them as None.
self.actions = None
self.totalCost = None
self.numStatesExplored = 0
# Initialize data structures
frontier = PriorityQueue() # Explored states are maintained by the frontier.
backpointers = {} # map state to (action, previous state)
# Add the start state
startState = problem.startState()
frontier.update(startState, 0)
while True:
# Remove the state from the queue with the lowest pastCost
# (priority).
state, pastCost = frontier.removeMin()
if state == None: break
self.numStatesExplored += 1
if self.verbose >= 2:
print "Exploring %s with pastCost %s" % (state, pastCost)
# Check if we've reached an end state; if so, extract solution.
if problem.isEnd(state):
self.actions = []
while state != startState:
action, prevState = backpointers[state]
self.actions.append(action)
state = prevState
self.actions.reverse()
self.totalCost = pastCost
if self.verbose >= 1:
print "numStatesExplored = %d" % self.numStatesExplored
print "totalCost = %s" % self.totalCost
print "actions = %s" % self.actions
return
# Expand from |state| to new successor states,
# updating the frontier with each newState.
for action, newState, cost in problem.succAndCost(state):
if self.verbose >= 3:
print " Action %s => %s with cost %s + %s" % (action, newState, pastCost, cost)
if frontier.update(newState, pastCost + cost):
# Found better way to go to |newState|, update backpointer.
backpointers[newState] = (action, state)
if self.verbose >= 1:
print "No path found"
# Data structure for supporting uniform cost search.
class PriorityQueue:
def __init__(self):
self.DONE = -100000
self.heap = []
self.priorities = {} # Map from state to priority
# Insert |state| into the heap with priority |newPriority| if
# |state| isn't in the heap or |newPriority| is smaller than the existing
# priority.
# Return whether the priority queue was updated.
def update(self, state, newPriority):
oldPriority = self.priorities.get(state)
if oldPriority == None or newPriority < oldPriority:
self.priorities[state] = newPriority
heapq.heappush(self.heap, (newPriority, state))
return True
return False
# Returns (state with minimum priority, priority)
# or (None, None) if the priority queue is empty.
def removeMin(self):
while len(self.heap) > 0:
priority, state = heapq.heappop(self.heap)
if self.priorities[state] == self.DONE: continue # Outdated priority, skip
self.priorities[state] = self.DONE
return (state, priority)
return (None, None) # Nothing left...
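# --- Illustrative sketch (added; not part of the original assignment code) ---
# UniformCostSearch only needs an object exposing startState(), isEnd(state)
# and succAndCost(state). The toy problem below counts from 0 up to a target,
# paying cost 1 per step, and shows how ucs.actions / ucs.totalCost are read.
class _CountUpProblem:
    def __init__(self, target):
        self.target = target
    def startState(self):
        return 0
    def isEnd(self, state):
        return state == self.target
    def succAndCost(self, state):
        # each successor is a triple (action, newState, cost)
        return [('+1', state + 1, 1)]
def _example_ucs():
    ucs = UniformCostSearch(verbose=0)
    ucs.solve(_CountUpProblem(3))
    return ucs.actions, ucs.totalCost   # (['+1', '+1', '+1'], 3)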
class wordsegUtil:
SENTENCE_BEGIN = '-BEGIN-'
@staticmethod
def sliding(xs, windowSize):
for i in xrange(1, len(xs) + 1):
yield xs[max(0, i - windowSize):i]
@staticmethod
def removeAll(s, chars):
return ''.join(filter(lambda c: c not in chars, s))
@staticmethod
def alphaOnly(s):
s = s.replace('-', ' ')
return filter(lambda c: c.isalpha() or c == ' ', s)
@staticmethod
def cleanLine(l):
return wordsegUtil.alphaOnly(l.strip().lower())
@staticmethod
def words(l):
return l.split()
@staticmethod
def editNeighbors(w):
for i in range(len(w)):
t = w[:]
t = t[:i] + t[i+1:]
yield t
############################################################
# Make an n-gram model of words in text from a corpus.
@staticmethod
def makeLanguageModels(path):
unigramCounts = collections.Counter()
totalCounts = 0
bigramCounts = collections.Counter()
bitotalCounts = collections.Counter()
VOCAB_SIZE = 600000
LONG_WORD_THRESHOLD = 5
LENGTH_DISCOUNT = 0.15
def bigramWindow(win):
assert len(win) in [1, 2]
if len(win) == 1:
return ('-BEGIN-', win[0])
else:
return tuple(win)
with open(path, 'r') as f:
for l in f:
ws = wordsegUtil.words(wordsegUtil.cleanLine(l))
unigrams = [x[0] for x in wordsegUtil.sliding(ws, 1)]
bigrams = [bigramWindow(x) for x in wordsegUtil.sliding(ws, 2)]
totalCounts += len(unigrams)
unigramCounts.update(unigrams)
bigramCounts.update(bigrams)
bitotalCounts.update([x[0] for x in bigrams])
def unigramCost(x):
if x not in unigramCounts:
length = max(LONG_WORD_THRESHOLD, len(x))
return -(length * math.log(LENGTH_DISCOUNT) + math.log(1.0) - math.log(VOCAB_SIZE))
else:
return math.log(totalCounts) - math.log(unigramCounts[x])
def bigramModel(a, b):
return math.log(bitotalCounts[a] + VOCAB_SIZE) - math.log(bigramCounts[(a, b)] + 1)
return unigramCost, bigramModel
@staticmethod
def logSumExp(x, y):
lo = min(x, y)
hi = max(x, y)
return math.log(1.0 + math.exp(lo - hi)) + hi
@staticmethod
def smoothUnigramAndBigram(unigramCost, bigramModel, a):
'''Coefficient `a` is Bernoulli weight favoring unigram'''
# Want: -log( a * exp(-u) + (1-a) * exp(-b) )
# = -log( exp(log(a) - u) + exp(log(1-a) - b) )
# = -logSumExp( log(a) - u, log(1-a) - b )
def smoothModel(w1, w2):
u = unigramCost(w2)
b = bigramModel(w1, w2)
return -wordsegUtil.logSumExp(math.log(a) - u, math.log(1-a) - b)
return smoothModel
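    # Worked example of the identity above (added for illustration): with
    # a = 0.5, u = 2.0 and b = 4.0,
    #   -log(0.5*exp(-2.0) + 0.5*exp(-4.0)) = -logSumExp(log 0.5 - 2.0, log 0.5 - 4.0)
    # which evaluates to about 2.57, a cost lying between the two inputs.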
############################################################
# Make a map for inverse lookup of words missing chars ->
# full words
@staticmethod
def makeInverseRemovalDictionary(path):
wordsRemovedToFull = collections.defaultdict(set)
dictionary = collections.defaultdict(lambda: False)
with open(path, 'r') as f:
for l in f:
for w in wordsegUtil.words(wordsegUtil.cleanLine(l)):
dictionary[w] = True
for wp in wordsegUtil.editNeighbors(w): # all edit distance 1 candidates
wordsRemovedToFull[wp].add(w)
wordsRemovedToFull = dict(wordsRemovedToFull)
empty = set()
def possibleFills(short):
return wordsRemovedToFull.get(short, empty)
return possibleFills, dictionary
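# --- Illustrative sketch (added; uses a tiny in-memory vocabulary instead of a corpus file) ---
# makeInverseRemovalDictionary maps "a word with one character deleted" back to
# the full words it could have come from, which is what possibleFills returns.
def _example_inverse_removal():
    vocab = ['cart', 'care', 'car']
    removed_to_full = collections.defaultdict(set)
    for w in vocab:
        for wp in wordsegUtil.editNeighbors(w):
            removed_to_full[wp].add(w)
    # deleting one letter from 'cart' or 'care' can give 'car'
    return removed_to_full.get('car', set())   # -> set(['cart', 'care'])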
class Reconstructor:
def __init__(self, corpus=None):
corpus = corpus or 'leo-will.txt'
self.unigramCost, self.bigramCost = wordsegUtil.makeLanguageModels(corpus)
self.possibleFills, self.dictionary = wordsegUtil.makeInverseRemovalDictionary(corpus)
def reconstruct(self, s):
""" reconstruct a sentance s
"""
s = wordsegUtil.cleanLine(s)
ws = [w for w in wordsegUtil.words(s)]
return spellCheckUtil.insertVowels(ws, self.bigramCost, self.possibleFills, self.dictionary)
class Shell:
def main(self):
args = self.parseArgs()
if args.model and args.model not in ['seg', 'ins', 'both']:
print 'Unrecognized model:', args.model
sys.exit(1)
corpus = args.text_corpus or 'leo-will.txt'
sys.stdout.write('Training language cost functions [corpus: %s]... ' % corpus)
sys.stdout.flush()
unigramCost, bigramCost = wordsegUtil.makeLanguageModels(corpus)
possibleFills, dictionary = wordsegUtil.makeInverseRemovalDictionary(corpus)
print 'Done!'
print ''
self.repl(unigramCost, bigramCost, possibleFills, dictionary, command=args.model)
def parseArgs(self):
p = argparse.ArgumentParser()
p.add_argument('--text-corpus', help='Text training corpus')
p.add_argument('--model', help='Always use this model')
return p.parse_args()
# REPL and main entry point
def repl(self, unigramCost, bigramCost, possibleFills, dictionary, command=None):
'''REPL: read, evaluate, print, loop'''
while True:
sys.stdout.write('>> ')
line = sys.stdin.readline().strip()
if not line:
break
if command is None:
cmdAndLine = line.split(None, 1)
cmd, line = cmdAndLine[0], ' '.join(cmdAndLine[1:])
else:
cmd = command
line = line
print ''
if cmd == 'ins':
line = wordsegUtil.cleanLine(line)
# ws = [wordsegUtil.removeAll(w, 'aeiou') for w in wordsegUtil.words(line)]
ws = [w for w in wordsegUtil.words(line)]
print ' Query (ins):', ' '.join(ws)
print ''
print ' ' + spellCheckUtil.insertVowels(ws, bigramCost, possibleFills, dictionary)
else:
print 'Unrecognized command:', cmd
print ''
if __name__ == '__main__':
shell = Shell()
shell.main()
|
py | b406e2765d50afaadb9e6e6f4a17052df3567890 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import pandas as pd
import numpy as np
import re
import pickle
from tensorflow.python.keras.preprocessing import sequence
from tensorflow.python.keras.preprocessing import text
from tensorflow.python.keras import models
from tensorflow.python.keras.layers import Dense
from tensorflow.python.keras.layers import Dropout
from tensorflow.python.keras.layers import Embedding
from tensorflow.python.keras.layers import Conv1D
from tensorflow.python.keras.layers import MaxPooling1D
from tensorflow.python.keras.layers import GlobalAveragePooling1D
from tensorflow.python.keras.layers import LSTM, Reshape,SpatialDropout1D
from google.cloud import storage
tf.logging.set_verbosity(tf.logging.INFO)
# CLASSES = {'github': 0, 'nytimes': 1, 'techcrunch': 2} # label-to-int mapping
labels = ['DELETION OF INTEREST', 'RETURNED CHECK', 'BILL', 'POLICY CHANGE',
'CANCELLATION NOTICE', 'DECLARATION', 'CHANGE ENDORSEMENT',
'NON-RENEWAL NOTICE', 'BINDER', 'REINSTATEMENT NOTICE',
'EXPIRATION NOTICE', 'INTENT TO CANCEL NOTICE', 'APPLICATION',
'BILL BINDER']
CLASSES = {k:v for v,k in enumerate(labels)}
TOP_K = 20000 # Limit on the vocabulary size used for tokenization
MAX_SEQUENCE_LENGTH = 400 # Sentences will be truncated/padded to this length
"""
Helper function to download data from Google Cloud Storage
# Arguments:
source: string, the GCS URL to download from (e.g. 'gs://bucket/file.csv')
destination: string, the filename to save as on local disk. MUST be filename
ONLY, doesn't support folders. (e.g. 'file.csv', NOT 'folder/file.csv')
# Returns: nothing, downloads file to local disk
"""
def download_from_gcs(source, destination):
search = re.search('gs://(.*?)/(.*)', source)
bucket_name = search.group(1)
blob_name = search.group(2)
storage_client = storage.Client()
bucket = storage_client.get_bucket(bucket_name)
bucket.blob(blob_name).download_to_filename(destination)
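# --- Illustrative sketch (added; hypothetical URL, no GCS access needed) ---
# Shows how the regex in download_from_gcs splits a GCS URL into bucket and
# object name before the actual download call.
def _example_parse_gcs_url():
    source = 'gs://my-bucket/data/train.csv'
    search = re.search('gs://(.*?)/(.*)', source)
    return search.group(1), search.group(2)   # ('my-bucket', 'data/train.csv')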
"""
Parses a raw csv containing document text and labels and returns (sentence, integer label) pairs
# Arguments:
train_data_path: string, path to csv containing training data.
can be a local path or a GCS url (gs://...)
eval_data_path: string, path to csv containing eval data.
can be a local path or a GCS url (gs://...)
# Returns:
((train_sentences, train_labels), (test_sentences, test_labels)): sentences
are lists of strings, labels are numpy integer arrays
"""
def load_hacker_news_data(train_data_path, eval_data_path):
if train_data_path.startswith('gs://'):
download_from_gcs(train_data_path, destination='train.csv')
train_data_path = 'train.csv'
if eval_data_path.startswith('gs://'):
download_from_gcs(eval_data_path, destination='eval.csv')
eval_data_path = 'eval.csv'
# Parse CSV using pandas
df_train = pd.read_csv(train_data_path)
df_eval = pd.read_csv(eval_data_path)
return ((list(df_train['doc']), np.array(df_train['labels'].map(CLASSES))),
(list(df_eval['doc']), np.array(df_eval['labels'].map(CLASSES))))
# column_names = ('label', 'text')
# df_train = pd.read_csv(train_data_path, names=column_names, sep='\t')
# df_eval = pd.read_csv(eval_data_path, names=column_names, sep='\t')
# return ((list(df_train['text']), np.array(df_train['label'].map(CLASSES))),
# (list(df_eval['text']), np.array(df_eval['label'].map(CLASSES))))
# def keras_estimator(model_dir,
# config,
# learning_rate,
# # filters=64,
# # dropout_rate=0.2,
# embedding_dim=200,
# # kernel_size=3,
# # pool_size=3,
# embedding_path=None,
# word_index=None):
# # Create model instance.
# model = models.Sequential()
# num_features = min(len(word_index) + 1, TOP_K)
# # Add embedding layer. If pre-trained embedding is used add weights to the
# # embeddings layer and set trainable to input is_embedding_trainable flag.
# if embedding_path != None:
# embedding_matrix = get_embedding_matrix(word_index, embedding_path, embedding_dim)
# is_embedding_trainable = True # set to False to freeze embedding weights
# model.add(Embedding(input_dim=num_features,
# output_dim=embedding_dim,
# input_length=MAX_SEQUENCE_LENGTH,
# weights=[embedding_matrix],
# trainable=is_embedding_trainable))
# else:
# model.add(Embedding(input_dim=num_features,
# output_dim=embedding_dim,
# input_length=MAX_SEQUENCE_LENGTH))
# model.add(SpatialDropout1D(0.2))
# # model.add(Reshape(target_shape = [N_INPUTS, 1]))
# # model.add(LSTM(units=200,return_sequences = True))
# model.add(LSTM(units=200,dropout=0.2, recurrent_dropout=0.2))
# model.add(Dense(units = 100, activation = tf.nn.relu))
# model.add(Dense(len(CLASSES), activation='softmax'))
# # Compile model with learning parameters.
# optimizer = tf.keras.optimizers.Adam(lr=learning_rate)
# # model.compile(optimizer=optimizer, loss='sparse_categorical_crossentropy', metrics=['acc'])
# model.compile(optimizer=optimizer, loss='sparse_categorical_crossentropy', metrics=['acc'])
# estimator = tf.keras.estimator.model_to_estimator(keras_model=model, model_dir=model_dir, config=config)
# return estimator
"""
Create tf.estimator compatible input function
# Arguments:
texts: [strings], list of sentences
labels: numpy int vector, integer labels for sentences
tokenizer: tf.python.keras.preprocessing.text.Tokenizer
used to convert sentences to integers
batch_size: int, number of records to use for each train batch
mode: tf.estimator.ModeKeys.TRAIN or tf.estimator.ModeKeys.EVAL
# Returns:
tf.estimator.inputs.numpy_input_fn, produces feature and label
tensors one batch at a time
"""
def input_fn(texts, labels, tokenizer, batch_size, mode):
# Transform text to sequence of integers
x = tokenizer.texts_to_sequences(texts)
# Fix sequence length to max value. Sequences shorter than the length are
# padded in the beginning and sequences longer are truncated
# at the beginning.
x = sequence.pad_sequences(x, maxlen=MAX_SEQUENCE_LENGTH)
# default settings for training
num_epochs = None
shuffle = True
# override if this is eval
if mode == tf.estimator.ModeKeys.EVAL:
num_epochs = 1
shuffle = False
return tf.estimator.inputs.numpy_input_fn(
x,
y=labels,
batch_size=batch_size,
num_epochs=num_epochs,
shuffle=shuffle,
queue_capacity=50000
)
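# --- Illustrative sketch (added; toy sentences, not the real training data) ---
# Shows what the tokenizer + pad_sequences preprocessing inside input_fn does
# before batches are handed to the estimator.
def _example_preprocess():
    tok = text.Tokenizer(num_words=TOP_K)
    tok.fit_on_texts(['cancellation notice for policy', 'returned check notice'])
    seqs = tok.texts_to_sequences(['cancellation notice'])
    padded = sequence.pad_sequences(seqs, maxlen=MAX_SEQUENCE_LENGTH)
    return padded.shape   # (1, MAX_SEQUENCE_LENGTH)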
"""
Builds a CNN model using keras and converts to tf.estimator.Estimator
# Arguments
model_dir: string, file path where training files will be written
config: tf.estimator.RunConfig, specifies properties of tf Estimator
filters: int, output dimension of the layers.
kernel_size: int, length of the convolution window.
embedding_dim: int, dimension of the embedding vectors.
dropout_rate: float, percentage of input to drop at Dropout layers.
pool_size: int, factor by which to downscale input at MaxPooling layer.
embedding_path: string , file location of pre-trained embedding (if used)
defaults to None which will cause the model to train embedding from scratch
word_index: dictionary, mapping of vocabulary to integers. used only if
pre-trained embedding is provided
# Returns
A tf.estimator.Estimator
"""
def keras_estimator(model_dir,
config,
learning_rate,
filters=64,
dropout_rate=0.2,
embedding_dim=200,
kernel_size=3,
pool_size=3,
embedding_path=None,
word_index=None):
# Create model instance.
model = models.Sequential()
num_features = min(len(word_index) + 1, TOP_K)
# Add embedding layer. If pre-trained embedding is used add weights to the
# embeddings layer and set trainable to input is_embedding_trainable flag.
if embedding_path != None:
embedding_matrix = get_embedding_matrix(word_index, embedding_path, embedding_dim)
is_embedding_trainable = True # set to False to freeze embedding weights
model.add(Embedding(input_dim=num_features,
output_dim=embedding_dim,
input_length=MAX_SEQUENCE_LENGTH,
weights=[embedding_matrix],
trainable=is_embedding_trainable))
else:
model.add(Embedding(input_dim=num_features,
output_dim=embedding_dim,
input_length=MAX_SEQUENCE_LENGTH))
model.add(Dropout(rate=dropout_rate))
model.add(Conv1D(filters=filters,
kernel_size=kernel_size,
activation='relu',
bias_initializer='random_uniform',
padding='same'))
model.add(MaxPooling1D(pool_size=pool_size))
model.add(Conv1D(filters=filters * 2,
kernel_size=kernel_size,
activation='relu',
bias_initializer='random_uniform',
padding='same'))
model.add(GlobalAveragePooling1D())
model.add(Dropout(rate=dropout_rate))
model.add(Dense(len(CLASSES), activation='softmax'))
# Compile model with learning parameters.
optimizer = tf.keras.optimizers.Adam(lr=learning_rate)
model.compile(optimizer=optimizer, loss='sparse_categorical_crossentropy', metrics=['acc'])
estimator = tf.keras.estimator.model_to_estimator(keras_model=model, model_dir=model_dir, config=config)
return estimator
"""
Defines the features to be passed to the model during inference
Expects already tokenized and padded representation of sentences
# Arguments: none
# Returns: tf.estimator.export.ServingInputReceiver
"""
def serving_input_fn():
feature_placeholder = tf.placeholder(tf.int16, [None, MAX_SEQUENCE_LENGTH])
features = feature_placeholder # pass as-is
return tf.estimator.export.TensorServingInputReceiver(features, feature_placeholder)
"""
Takes the embedding for a generic vocabulary and extracts the embeddings
matching the current vocabulary
The pre-trained embedding file is obtained from https://nlp.stanford.edu/projects/glove/
# Arguments:
word_index: dict, {key =word in vocabulary: value= integer mapped to that word}
embedding_path: string, location of the pre-trained embedding file on disk
embedding_dim: int, dimension of the embedding space
# Returns: numpy matrix of shape (vocabulary, embedding_dim) that contains the embedded
representation of each word in the vocabulary.
"""
def get_embedding_matrix(word_index, embedding_path, embedding_dim):
# Read the pre-trained embedding file and get word to word vector mappings.
embedding_matrix_all = {}
# Download if embedding file is in GCS
if embedding_path.startswith('gs://'):
download_from_gcs(embedding_path, destination='embedding.csv')
embedding_path = 'embedding.csv'
with open(embedding_path) as f:
for line in f: # Every line contains word followed by the vector value
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embedding_matrix_all[word] = coefs
# Prepare embedding matrix with just the words in our word_index dictionary
num_words = min(len(word_index) + 1, TOP_K)
embedding_matrix = np.zeros((num_words, embedding_dim))
for word, i in word_index.items():
if i >= TOP_K:
continue
embedding_vector = embedding_matrix_all.get(word)
if embedding_vector is not None:
# words not found in embedding index will be all-zeros.
embedding_matrix[i] = embedding_vector
return embedding_matrix
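# --- Illustrative sketch (added; hypothetical 3-dimensional embedding line) ---
# Each line of a GloVe-style embedding file is "<word> <v1> <v2> ...". This is
# the per-line parsing that get_embedding_matrix performs.
def _example_parse_embedding_line():
    line = 'claim 0.1 -0.2 0.3'
    values = line.split()
    word = values[0]
    coefs = np.asarray(values[1:], dtype='float32')
    return word, coefs   # ('claim', array([ 0.1, -0.2,  0.3], dtype=float32))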
"""
Main orchestrator. Responsible for calling all other functions in model.py
# Arguments:
output_dir: string, file path where training files will be written
hparams: dict, command line parameters passed from task.py
# Returns: nothing, kicks off training and evaluation
"""
def train_and_evaluate(output_dir, hparams):
tf.summary.FileWriterCache.clear() # ensure filewriter cache is clear for TensorBoard events file
# Load Data
((train_texts, train_labels), (test_texts, test_labels)) = load_hacker_news_data(
hparams['train_data_path'], hparams['eval_data_path'])
# Create vocabulary from training corpus.
tokenizer = text.Tokenizer(num_words=TOP_K)
tokenizer.fit_on_texts(train_texts)
# Save token dictionary to use during prediction time
pickle.dump(tokenizer, open('tokenizer.pickled', 'wb'))
# Create estimator
run_config = tf.estimator.RunConfig(save_checkpoints_steps=500)
estimator = keras_estimator(
model_dir=output_dir,
config=run_config,
learning_rate=hparams['learning_rate'],
embedding_path=hparams['embedding_path'],
word_index=tokenizer.word_index
)
# Create TrainSpec
train_steps = int(hparams['num_epochs'] * len(train_texts) / hparams['batch_size'])
train_spec = tf.estimator.TrainSpec(
input_fn=input_fn(
train_texts,
train_labels,
tokenizer,
hparams['batch_size'],
mode=tf.estimator.ModeKeys.TRAIN),
max_steps=train_steps
)
# Create EvalSpec
exporter = tf.estimator.LatestExporter('exporter', serving_input_fn)
eval_spec = tf.estimator.EvalSpec(
input_fn=input_fn(
test_texts,
test_labels,
tokenizer,
hparams['batch_size'],
mode=tf.estimator.ModeKeys.EVAL),
steps=None,
exporters=exporter,
start_delay_secs=10,
throttle_secs=10
)
# Start training
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
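# --- Illustrative sketch (added; hypothetical paths and values) ---
# train_and_evaluate expects an hparams dict carrying at least the keys used
# above; a minimal example could look like this.
def _example_hparams():
    return {
        'train_data_path': 'gs://my-bucket/train.csv',   # hypothetical GCS path
        'eval_data_path': 'gs://my-bucket/eval.csv',     # hypothetical GCS path
        'learning_rate': 0.001,
        'embedding_path': None,    # None trains the embedding from scratch
        'num_epochs': 5,
        'batch_size': 128,
    }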
|
py | b406e3f0a39731fefdb439022c786c6554caa80d | '''OpenGL extension VERSION.GL_4_6
This module customises the behaviour of the
OpenGL.raw.GL.VERSION.GL_4_6 to provide a more
Python-friendly API
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/VERSION/GL_4_6.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.VERSION.GL_4_6 import *
from OpenGL.raw.GL.VERSION.GL_4_6 import _EXTENSION_NAME
def glInitGl46VERSION():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION |
py | b406e3f3bec57b6b0c1ea8cdffbec153ac10dd5b | ###############################################################
# SPDX-License-Identifier: BSD-2-Clause-Patent
# SPDX-FileCopyrightText: 2020 the prplMesh contributors (see AUTHORS.md)
# This code is subject to the terms of the BSD+Patent license.
# See LICENSE file for more details.
###############################################################
from .prplmesh_base import PrplMeshBase
from environment import VirtualAPDocker
from opts import err
class PrplMeshStationDummy(PrplMeshBase):
model = "STA_dummy"
__mac_base = 0
def __init__(self, *args, **kwargs):
'''Generate dummy Station.'''
self.args = args
self.kwargs = kwargs
config = kwargs.get("config", kwargs)
self.name = config.get("name", "station")
self.mac = config.get("mac", None)
if self.mac is None:
raise ValueError(err("{} device \"{}\" has no MAC!".format(self.model, self.name)))
def wifi_connect(self, vap: VirtualAPDocker) -> bool:
"""Connect to the Access Point. Return True if successful."""
vap.radio.send_bwl_event("EVENT AP-STA-CONNECTED {}".format(self.mac))
return True
def wifi_disconnect(self, vap: VirtualAPDocker) -> bool:
'''Disassociate "sta" from this VAP.'''
vap.radio.send_bwl_event("EVENT AP-STA-DISCONNECTED {}".format(self.mac))
return True
def wifi_connect_check(self, vap: VirtualAPDocker) -> bool:
"""Connect and verify connection"""
return self.wifi_connect(vap)
def disable_wifi(self) -> bool:
"""Disable wifi connection"""
return True
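# --- Illustrative sketch (added; not part of the prplMesh test harness) ---
# The dummy station only needs a "name" and a "mac" in its config kwargs and
# raises if the MAC is missing.
def _example_station():
    sta = PrplMeshStationDummy(config={"name": "sta1", "mac": "11:22:33:44:55:66"})
    return sta.name, sta.mac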
|
py | b406e441a3debca07bda2c22b850e795adb355ab | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Recipe.image_alt'
db.add_column('recipes_recipe', 'image_alt',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
keep_default=False)
# Adding field 'Recipe.image_title'
db.add_column('recipes_recipe', 'image_title',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Recipe.image_alt'
db.delete_column('recipes_recipe', 'image_alt')
# Deleting field 'Recipe.image_title'
db.delete_column('recipes_recipe', 'image_title')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'categories.category': {
'Meta': {'object_name': 'Category'},
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('filebrowser.fields.FileBrowseField', [], {'max_length': '255'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'visits_num': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'categories.subcategory': {
'Meta': {'object_name': 'SubCategory'},
'category': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['categories.Category']", 'symmetrical': 'False'}),
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('filebrowser.fields.FileBrowseField', [], {'max_length': '255'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'visits_num': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'cities_light.city': {
'Meta': {'unique_together': "(('region', 'name'),)", 'object_name': 'City'},
'alternate_names': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cities_light.Country']"}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'geoname_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '5', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '5', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'name_ascii': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'blank': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cities_light.Region']", 'null': 'True', 'blank': 'True'}),
'search_names': ('cities_light.models.ToSearchTextField', [], {'default': "''", 'max_length': '4000', 'db_index': 'True', 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': 'None'})
},
'cities_light.country': {
'Meta': {'object_name': 'Country'},
'alternate_names': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'code2': ('django.db.models.fields.CharField', [], {'max_length': '2', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'code3': ('django.db.models.fields.CharField', [], {'max_length': '3', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'continent': ('django.db.models.fields.CharField', [], {'max_length': '2', 'db_index': 'True'}),
'geoname_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'name_ascii': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': 'None'}),
'tld': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '5', 'blank': 'True'})
},
'cities_light.region': {
'Meta': {'unique_together': "(('country', 'name'),)", 'object_name': 'Region'},
'alternate_names': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cities_light.Country']"}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'geoname_code': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '50', 'null': 'True', 'blank': 'True'}),
'geoname_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'name_ascii': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': 'None'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'gallery.gallery': {
'Meta': {'object_name': 'Gallery'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'visits_num': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'ingredients.usaingredient': {
'Meta': {'object_name': 'USAIngredient'},
'alpha_carot': ('django.db.models.fields.FloatField', [], {}),
'ash': ('django.db.models.fields.FloatField', [], {}),
'beta_carot': ('django.db.models.fields.FloatField', [], {}),
'beta_crypt': ('django.db.models.fields.FloatField', [], {}),
'calcium': ('django.db.models.fields.FloatField', [], {}),
'carbohydrt': ('django.db.models.fields.FloatField', [], {}),
'cholestrl': ('django.db.models.fields.FloatField', [], {}),
'choline_total': ('django.db.models.fields.FloatField', [], {}),
'copper': ('django.db.models.fields.FloatField', [], {}),
'energy': ('django.db.models.fields.FloatField', [], {}),
'fa_mono': ('django.db.models.fields.FloatField', [], {}),
'fa_poly': ('django.db.models.fields.FloatField', [], {}),
'fa_sat': ('django.db.models.fields.FloatField', [], {}),
'fiber_td': ('django.db.models.fields.FloatField', [], {}),
'folate_dfe': ('django.db.models.fields.FloatField', [], {}),
'folate_total': ('django.db.models.fields.FloatField', [], {}),
'folic_acid': ('django.db.models.fields.FloatField', [], {}),
'food_folate': ('django.db.models.fields.FloatField', [], {}),
'gm_wt1': ('django.db.models.fields.FloatField', [], {}),
'gmwt_2': ('django.db.models.fields.FloatField', [], {}),
'gmwt_desc1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'gmwt_desc2': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iron': ('django.db.models.fields.FloatField', [], {}),
'lipid_total': ('django.db.models.fields.FloatField', [], {}),
'lut_zea': ('django.db.models.fields.FloatField', [], {}),
'lycopene': ('django.db.models.fields.FloatField', [], {}),
'magnesium': ('django.db.models.fields.FloatField', [], {}),
'manganese': ('django.db.models.fields.FloatField', [], {}),
'name_rus': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'ndb_no': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True'}),
'niacin': ('django.db.models.fields.FloatField', [], {}),
'panto_acid': ('django.db.models.fields.FloatField', [], {}),
'phosphorus': ('django.db.models.fields.FloatField', [], {}),
'potassium': ('django.db.models.fields.FloatField', [], {}),
'protein': ('django.db.models.fields.FloatField', [], {}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'refuse_pct': ('django.db.models.fields.FloatField', [], {}),
'retinol': ('django.db.models.fields.FloatField', [], {}),
'riboflavin': ('django.db.models.fields.FloatField', [], {}),
'selenium': ('django.db.models.fields.FloatField', [], {}),
'short_description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'sodium': ('django.db.models.fields.FloatField', [], {}),
'sugar_total': ('django.db.models.fields.FloatField', [], {}),
'thiamin': ('django.db.models.fields.FloatField', [], {}),
'translated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'updatable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'vi_vit_d_ui': ('django.db.models.fields.FloatField', [], {}),
'vitamin_a_rae': ('django.db.models.fields.FloatField', [], {}),
'vitamin_a_ui': ('django.db.models.fields.FloatField', [], {}),
'vitamin_b12': ('django.db.models.fields.FloatField', [], {}),
'vitamin_b6': ('django.db.models.fields.FloatField', [], {}),
'vitamin_c': ('django.db.models.fields.FloatField', [], {}),
'vitamin_d': ('django.db.models.fields.FloatField', [], {}),
'vitamin_e': ('django.db.models.fields.FloatField', [], {}),
'vitamin_k': ('django.db.models.fields.FloatField', [], {}),
'water': ('django.db.models.fields.FloatField', [], {}),
'zinc': ('django.db.models.fields.FloatField', [], {})
},
'profiles.award': {
'Meta': {'object_name': 'Award'},
'icon': ('filebrowser.fields.FileBrowseField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'profiles.profile': {
'Meta': {'object_name': 'Profile'},
'about': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'added_recipes_num': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'avatar': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'awards': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['profiles.Award']", 'null': 'True', 'blank': 'True'}),
'birthday': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'books': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'cake_master': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'city': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cities_light.City']", 'null': 'True', 'blank': 'True'}),
'cook': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'cookery_in_life': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cities_light.Country']", 'null': 'True', 'blank': 'True'}),
'fb_link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'gallery': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['gallery.Gallery']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_login_ip': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'original_avatar': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'rating': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'registration_ip': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'twitter_link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}),
'visits_num': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'vk_link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'recipes.cuisine': {
'Meta': {'object_name': 'Cuisine'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'recipes.holiday': {
'Meta': {'object_name': 'Holiday'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'icon': ('filebrowser.fields.FileBrowseField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'recipes.ingredient': {
'Meta': {'object_name': 'Ingredient'},
'addit_info': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ingredient_group': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'ingredient_info': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ingredients.USAIngredient']"}),
'measure': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'recipe': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ingredients'", 'to': "orm['recipes.Recipe']"}),
'value': ('django.db.models.fields.FloatField', [], {})
},
'recipes.ingredientgroup': {
'Meta': {'object_name': 'IngredientGroup'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'recipes.ingredientinfo': {
'Meta': {'object_name': 'IngredientInfo'},
'allergen': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'calory': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'carbs': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'carbs_measure': ('django.db.models.fields.CharField', [], {'default': "'mgramm'", 'max_length': '255'}),
'fat': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'fat_measure': ('django.db.models.fields.CharField', [], {'default': "'mgramm'", 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('filebrowser.fields.FileBrowseField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'price': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'protein': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'protein_measure': ('django.db.models.fields.CharField', [], {'default': "'mgramm'", 'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'recipes.mineral': {
'Meta': {'object_name': 'Mineral'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ingredient_info': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['recipes.IngredientInfo']"}),
'measure': ('django.db.models.fields.CharField', [], {'default': "'gramm'", 'max_length': '255'}),
'title': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['recipes.MineralTitle']"}),
'value': ('django.db.models.fields.FloatField', [], {})
},
'recipes.mineraltitle': {
'Meta': {'object_name': 'MineralTitle'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'recipes.prepmethod': {
'Meta': {'object_name': 'PrepMethod'},
'icon': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'recipes.recipe': {
'Meta': {'object_name': 'Recipe'},
'add_watermark': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'age_limit': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'body': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'caloric_value': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['categories.Category']", 'null': 'True', 'blank': 'True'}),
'completed': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'complexity': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'cuisine': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['recipes.Cuisine']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'diet': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'eating_time': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'holiday': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['recipes.Holiday']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_alt': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'image_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'images': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['gallery.Gallery']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'is_photorecipe': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'main_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'portion_num': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'preparation_method': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'recipes'", 'null': 'True', 'to': "orm['recipes.PrepMethod']"}),
'prepare_time_from': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'prepare_time_to': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2012, 8, 15, 0, 0)'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'season': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['recipes.Season']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'sub_category': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['categories.SubCategory']", 'null': 'True', 'blank': 'True'}),
'taste': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'visits_num': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'recipes.recipedescstep': {
'Meta': {'ordering': "('recipe',)", 'unique_together': "(('recipe', 'step_num'),)", 'object_name': 'RecipeDescStep'},
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('filebrowser.fields.FileBrowseField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'recipe': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'steps'", 'to': "orm['recipes.Recipe']"}),
'step_num': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'recipes.recipesbox': {
'Meta': {'object_name': 'RecipesBox'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'own_recipesboxes'", 'to': "orm['profiles.Profile']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'followers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'recipesboxes'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['profiles.Profile']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'recipe_list': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['recipes.Recipe']", 'symmetrical': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'recipes.season': {
'Meta': {'object_name': 'Season'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'recipes.vitamin': {
'Meta': {'object_name': 'Vitamin'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ingredient_info': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['recipes.IngredientInfo']"}),
'measure': ('django.db.models.fields.CharField', [], {'default': "'gramm'", 'max_length': '255'}),
'title': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['recipes.VitaminTitle']"}),
'value': ('django.db.models.fields.FloatField', [], {})
},
'recipes.vitamintitle': {
'Meta': {'object_name': 'VitaminTitle'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['recipes'] |
py | b406e4f98e3087bbf810ffac5d496aa4d49c707c | # ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# @author: Li Li ([email protected]) #
# created on 08/19/2014 #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
from __future__ import absolute_import, division, print_function
import six
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_raises)
from nose.tools import assert_equal, assert_not_equal
from skbeam.core.constants.xrf import (XrfElement, emission_line_search,
XrayLibWrap, XrayLibWrap_Energy)
from skbeam.core.utils import NotInstalledError
from skbeam.core.constants.basic import basic
def test_element_data():
"""
smoke test of all elements
"""
data1 = []
data2 = []
name_list = []
for i in range(100):
e = XrfElement(i+1)
data1.append(e.cs(10)['Ka1'])
name_list.append(e.name)
for item in name_list:
e = XrfElement(item)
data2.append(e.cs(10)['Ka1'])
assert_array_equal(data1, data2)
return
def test_element_finder():
true_name = sorted(['Eu', 'Cu'])
out = emission_line_search(8, 0.05, 10)
found_name = sorted(list(six.iterkeys(out)))
assert_equal(true_name, found_name)
return
def test_XrayLibWrap_notpresent():
from skbeam.core.constants import xrf
# stash the original xraylib object
xraylib = xrf.xraylib
# force the not present exception to be raised by setting xraylib to None
xrf.xraylib = None
assert_raises(NotInstalledError, xrf.XrfElement, None)
assert_raises(NotInstalledError, xrf.emission_line_search,
None, None, None)
assert_raises(NotInstalledError, xrf.XrayLibWrap, None, None)
assert_raises(NotInstalledError, xrf.XrayLibWrap_Energy,
None, None, None)
# reset xraylib so nothing else breaks
xrf.xraylib = xraylib
def test_XrayLibWrap():
for Z in range(1, 101):
for infotype in XrayLibWrap.opts_info_type:
xlw = XrayLibWrap(Z, infotype)
assert_not_equal(xlw.all, None)
for key in xlw:
assert_not_equal(xlw[key], None)
assert_equal(xlw.info_type, infotype)
# make sure len doesn't break
len(xlw)
def test_XrayLibWrap_Energy():
for Z in range(1, 101):
for infotype in XrayLibWrap_Energy.opts_info_type:
incident_energy = 10
xlwe = XrayLibWrap_Energy(element=Z,
info_type=infotype,
incident_energy=incident_energy)
incident_energy *= 2
xlwe.incident_energy = incident_energy
assert_equal(xlwe.incident_energy, incident_energy)
assert_equal(xlwe.info_type, infotype)
def test_cs_different_units():
e = XrfElement('Fe')
# test at different energies
for eng in range(10, 20):
cs1 = np.array([v for k, v in e.cs(eng).all]) # unit in cm2/g
cs2 = np.array([v for k, v in e.csb(eng).all]) # unit in barns/atom
cs1 /= cs1[0]
cs2 /= cs2[0]
# ratio should be the same no matter which unit is used
assert_array_almost_equal(cs1, cs2, decimal=10)
def smoke_test_element_creation():
prev_element = None
elements = [elm for abbrev, elm in six.iteritems(basic)
if isinstance(abbrev, int)]
elements.sort()
for element in elements:
Z = element.Z
element.mass
element.density
sym = element.sym
inits = [Z, sym, sym.upper(), sym.lower(), sym.swapcase()]
element = None
for init in inits:
element = XrfElement(init)
# obtain the next four attributes to make sure the XrayLibWrap is
# working
element.bind_energy
element.fluor_yield
element.jump_factor
element.emission_line.all
if prev_element is not None:
# compare prev_element to element
assert_equal(prev_element.__lt__(element), True)
assert_equal(prev_element < element, True)
assert_equal(prev_element.__eq__(element), False)
assert_equal(prev_element == element, False)
assert_equal(prev_element >= element, False)
assert_equal(prev_element > element, False)
# compare element to prev_element
assert_equal(element < prev_element, False)
assert_equal(element.__lt__(prev_element), False)
assert_equal(element <= prev_element, False)
assert_equal(element.__eq__(prev_element), False)
assert_equal(element == prev_element, False)
assert_equal(element >= prev_element, True)
assert_equal(element > prev_element, True)
prev_element = element
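# --- Illustrative sketch (added; requires the optional xraylib dependency) ---
# A minimal use of XrfElement mirroring what the tests above exercise: look up
# the K-alpha1 emission line energy and the cross section at 10 keV for iron.
def _example_fe_lines():
    e = XrfElement('Fe')
    return e.emission_line['Ka1'], e.cs(10)['Ka1']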
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
|
py | b406e601033ca2061be6fb3993c726400d047791 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, cast
import numpy as np
import torch
from ax.core.types import TCandidateMetadata, TConfig, TGenMetadata
from ax.models.torch.botorch_defaults import (
get_and_fit_model,
get_NEI,
recommend_best_observed_point,
scipy_optimizer,
)
from ax.models.torch.utils import (
_get_X_pending_and_observed,
_to_inequality_constraints,
normalize_indices,
predict_from_model,
subset_model,
)
from ax.models.torch_base import TorchModel
from ax.utils.common.constants import Keys
from ax.utils.common.docutils import copy_doc
from ax.utils.common.logger import get_logger
from ax.utils.common.typeutils import checked_cast
from botorch.acquisition.acquisition import AcquisitionFunction
from botorch.models.model import Model
from torch import Tensor
logger = get_logger(__name__)
TModelConstructor = Callable[
[
List[Tensor],
List[Tensor],
List[Tensor],
List[int],
List[int],
List[str],
Optional[Dict[str, Tensor]],
Any,
],
Model,
]
TModelPredictor = Callable[[Model, Tensor], Tuple[Tensor, Tensor]]
TAcqfConstructor = Callable[
[
Model,
Tensor,
Optional[Tuple[Tensor, Tensor]],
Optional[Tensor],
Optional[Tensor],
Any,
],
AcquisitionFunction,
]
TOptimizer = Callable[
[
AcquisitionFunction,
Tensor,
int,
Optional[List[Tuple[Tensor, Tensor, float]]],
Optional[Dict[int, float]],
Optional[Callable[[Tensor], Tensor]],
Any,
],
Tuple[Tensor, Tensor],
]
TBestPointRecommender = Callable[
[
TorchModel,
List[Tuple[float, float]],
Tensor,
Optional[Tuple[Tensor, Tensor]],
Optional[Tuple[Tensor, Tensor]],
Optional[Dict[int, float]],
Optional[TConfig],
Optional[Dict[int, float]],
],
Optional[Tensor],
]
class BotorchModel(TorchModel):
r"""
Customizable botorch model.
By default, this uses a noisy Expected Improvement acquisition function on
top of a model made up of separate GPs, one for each outcome. This behavior
can be modified by providing custom implementations of the following
components:
- a `model_constructor` that instantiates and fits a model on data
- a `model_predictor` that predicts outcomes using the fitted model
    - an `acqf_constructor` that creates an acquisition function from a fitted model
    - an `acqf_optimizer` that optimizes the acquisition function
- a `best_point_recommender` that recommends a current "best" point (i.e.,
what the model recommends if the learning process ended now)
Args:
model_constructor: A callable that instantiates and fits a model on data,
with signature as described below.
model_predictor: A callable that predicts using the fitted model, with
signature as described below.
acqf_constructor: A callable that creates an acquisition function from a
fitted model, with signature as described below.
acqf_optimizer: A callable that optimizes the acquisition function, with
signature as described below.
best_point_recommender: A callable that recommends the best point, with
signature as described below.
refit_on_cv: If True, refit the model for each fold when performing
cross-validation.
refit_on_update: If True, refit the model after updating the training
data using the `update` method.
warm_start_refitting: If True, start model refitting from previous
model parameters in order to speed up the fitting process.
Call signatures:
::
model_constructor(
Xs,
Ys,
Yvars,
task_features,
fidelity_features,
metric_names,
state_dict,
**kwargs,
) -> model
Here `Xs`, `Ys`, `Yvars` are lists of tensors (one element per outcome),
`task_features` identifies columns of Xs that should be modeled as a task,
`fidelity_features` is a list of ints that specify the positions of fidelity
parameters in 'Xs', `metric_names` provides the names of each `Y` in `Ys`,
`state_dict` is a pytorch module state dict, and `model` is a BoTorch `Model`.
    Optional kwargs are passed through from the `BotorchModel` constructor.
This callable is assumed to return a fitted BoTorch model that has the same
dtype and lives on the same device as the input tensors.
::
model_predictor(model, X) -> [mean, cov]
Here `model` is a fitted botorch model, `X` is a tensor of candidate points,
and `mean` and `cov` are the posterior mean and covariance, respectively.
::
acqf_constructor(
model,
objective_weights,
outcome_constraints,
X_observed,
X_pending,
**kwargs,
) -> acq_function
Here `model` is a botorch `Model`, `objective_weights` is a tensor of weights
for the model outputs, `outcome_constraints` is a tuple of tensors describing
the (linear) outcome constraints, `X_observed` are previously observed points,
and `X_pending` are points whose evaluation is pending. `acq_function` is a
BoTorch acquisition function crafted from these inputs. For additional
details on the arguments, see `get_NEI`.
::
acqf_optimizer(
acq_function,
bounds,
n,
inequality_constraints,
fixed_features,
rounding_func,
**kwargs,
) -> candidates
Here `acq_function` is a BoTorch `AcquisitionFunction`, `bounds` is a tensor
containing bounds on the parameters, `n` is the number of candidates to be
generated, `inequality_constraints` are inequality constraints on parameter
values, `fixed_features` specifies features that should be fixed during
generation, and `rounding_func` is a callback that rounds an optimization
result appropriately. `candidates` is a tensor of generated candidates.
For additional details on the arguments, see `scipy_optimizer`.
::
best_point_recommender(
model,
bounds,
objective_weights,
outcome_constraints,
linear_constraints,
fixed_features,
model_gen_options,
target_fidelities,
) -> candidates
Here `model` is a TorchModel, `bounds` is a list of tuples containing bounds
on the parameters, `objective_weights` is a tensor of weights for the model outputs,
`outcome_constraints` is a tuple of tensors describing the (linear) outcome
constraints, `linear_constraints` is a tuple of tensors describing constraints
on the design, `fixed_features` specifies features that should be fixed during
generation, `model_gen_options` is a config dictionary that can contain
model-specific options, and `target_fidelities` is a map from fidelity feature
column indices to their respective target fidelities, used for multi-fidelity
optimization problems. % TODO: refer to an example.
"""
dtype: Optional[torch.dtype]
device: Optional[torch.device]
Xs: List[Tensor]
Ys: List[Tensor]
Yvars: List[Tensor]
def __init__(
self,
model_constructor: TModelConstructor = get_and_fit_model,
model_predictor: TModelPredictor = predict_from_model,
# pyre-fixme[9]: acqf_constructor has type `Callable[[Model, Tensor,
# Optional[Tuple[Tensor, Tensor]], Optional[Tensor], Optional[Tensor], Any],
# AcquisitionFunction]`; used as `Callable[[Model, Tensor,
# Optional[Tuple[Tensor, Tensor]], Optional[Tensor], Optional[Tensor],
# **(Any)], AcquisitionFunction]`.
acqf_constructor: TAcqfConstructor = get_NEI,
# pyre-fixme[9]: acqf_optimizer declared/used type mismatch
acqf_optimizer: TOptimizer = scipy_optimizer,
best_point_recommender: TBestPointRecommender = recommend_best_observed_point,
refit_on_cv: bool = False,
refit_on_update: bool = True,
warm_start_refitting: bool = True,
use_input_warping: bool = False,
use_loocv_pseudo_likelihood: bool = False,
**kwargs: Any,
) -> None:
self.model_constructor = model_constructor
self.model_predictor = model_predictor
self.acqf_constructor = acqf_constructor
self.acqf_optimizer = acqf_optimizer
self.best_point_recommender = best_point_recommender
self._kwargs = kwargs
self.refit_on_cv = refit_on_cv
self.refit_on_update = refit_on_update
self.warm_start_refitting = warm_start_refitting
self.use_input_warping = use_input_warping
self.use_loocv_pseudo_likelihood = use_loocv_pseudo_likelihood
self.model: Optional[Model] = None
self.Xs = []
self.Ys = []
self.Yvars = []
self.dtype = None
self.device = None
self.task_features: List[int] = []
self.fidelity_features: List[int] = []
self.metric_names: List[str] = []
@copy_doc(TorchModel.fit)
def fit(
self,
Xs: List[Tensor],
Ys: List[Tensor],
Yvars: List[Tensor],
bounds: List[Tuple[float, float]],
task_features: List[int],
feature_names: List[str],
metric_names: List[str],
fidelity_features: List[int],
candidate_metadata: Optional[List[List[TCandidateMetadata]]] = None,
) -> None:
self.dtype = Xs[0].dtype
self.device = Xs[0].device
self.Xs = Xs
self.Ys = Ys
self.Yvars = Yvars
# ensure indices are non-negative
self.task_features = normalize_indices(task_features, d=Xs[0].size(-1))
self.fidelity_features = normalize_indices(fidelity_features, d=Xs[0].size(-1))
self.metric_names = metric_names
self.model = self.model_constructor( # pyre-ignore [28]
Xs=Xs,
Ys=Ys,
Yvars=Yvars,
task_features=self.task_features,
fidelity_features=self.fidelity_features,
metric_names=self.metric_names,
use_input_warping=self.use_input_warping,
use_loocv_pseudo_likelihood=self.use_loocv_pseudo_likelihood,
**self._kwargs,
)
@copy_doc(TorchModel.predict)
def predict(self, X: Tensor) -> Tuple[Tensor, Tensor]:
return self.model_predictor(model=self.model, X=X) # pyre-ignore [28]
@copy_doc(TorchModel.gen)
def gen(
self,
n: int,
bounds: List[Tuple[float, float]],
objective_weights: Tensor,
outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
fixed_features: Optional[Dict[int, float]] = None,
pending_observations: Optional[List[Tensor]] = None,
model_gen_options: Optional[TConfig] = None,
rounding_func: Optional[Callable[[Tensor], Tensor]] = None,
target_fidelities: Optional[Dict[int, float]] = None,
) -> Tuple[Tensor, Tensor, TGenMetadata, Optional[List[TCandidateMetadata]]]:
options = model_gen_options or {}
acf_options = options.get(Keys.ACQF_KWARGS, {})
optimizer_options = options.get(Keys.OPTIMIZER_KWARGS, {})
if target_fidelities:
raise NotImplementedError(
"target_fidelities not implemented for base BotorchModel"
)
X_pending, X_observed = _get_X_pending_and_observed(
Xs=self.Xs,
pending_observations=pending_observations,
objective_weights=objective_weights,
outcome_constraints=outcome_constraints,
bounds=bounds,
linear_constraints=linear_constraints,
fixed_features=fixed_features,
)
model = self.model
        # subset model only to the outcomes we need for the optimization
if options.get(Keys.SUBSET_MODEL, True):
model, objective_weights, outcome_constraints, _ = subset_model(
model=model, # pyre-ignore [6]
objective_weights=objective_weights,
outcome_constraints=outcome_constraints,
)
bounds_ = torch.tensor(bounds, dtype=self.dtype, device=self.device)
bounds_ = bounds_.transpose(0, 1)
botorch_rounding_func = get_rounding_func(rounding_func)
# The following logic is to work around the limitation of PyTorch's Sobol
# sampler to <1111 dimensions.
# TODO: Remove once https://github.com/pytorch/pytorch/issues/41489 is resolved.
from botorch.exceptions.errors import UnsupportedError
def make_and_optimize_acqf(override_qmc: bool = False) -> Tuple[Tensor, Tensor]:
add_kwargs = {"qmc": False} if override_qmc else {}
acquisition_function = self.acqf_constructor( # pyre-ignore: [28]
model=model,
objective_weights=objective_weights,
outcome_constraints=outcome_constraints,
X_observed=X_observed,
X_pending=X_pending,
**acf_options,
**add_kwargs,
)
acquisition_function = checked_cast(
AcquisitionFunction, acquisition_function
)
# pyre-ignore: [28]
candidates, expected_acquisition_value = self.acqf_optimizer(
acq_function=checked_cast(AcquisitionFunction, acquisition_function),
bounds=bounds_,
n=n,
inequality_constraints=_to_inequality_constraints(
linear_constraints=linear_constraints
),
fixed_features=fixed_features,
rounding_func=botorch_rounding_func,
**optimizer_options,
)
return candidates, expected_acquisition_value
try:
candidates, expected_acquisition_value = make_and_optimize_acqf()
except UnsupportedError as e:
if "SobolQMCSampler only supports dimensions q * o <= 1111" in str(e):
# dimension too large for Sobol, let's use IID
candidates, expected_acquisition_value = make_and_optimize_acqf(
override_qmc=True
)
else:
raise e
return (
candidates.detach().cpu(),
torch.ones(n, dtype=self.dtype),
{"expected_acquisition_value": expected_acquisition_value.tolist()},
None,
)
@copy_doc(TorchModel.best_point)
def best_point(
self,
bounds: List[Tuple[float, float]],
objective_weights: Tensor,
outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
fixed_features: Optional[Dict[int, float]] = None,
model_gen_options: Optional[TConfig] = None,
target_fidelities: Optional[Dict[int, float]] = None,
) -> Optional[Tensor]:
return self.best_point_recommender( # pyre-ignore [28]
model=self,
bounds=bounds,
objective_weights=objective_weights,
outcome_constraints=outcome_constraints,
linear_constraints=linear_constraints,
fixed_features=fixed_features,
model_gen_options=model_gen_options,
target_fidelities=target_fidelities,
)
@copy_doc(TorchModel.cross_validate)
def cross_validate(
self,
Xs_train: List[Tensor],
Ys_train: List[Tensor],
Yvars_train: List[Tensor],
X_test: Tensor,
) -> Tuple[Tensor, Tensor]:
if self.model is None:
raise RuntimeError("Cannot cross-validate model that has not been fitted")
if self.refit_on_cv:
state_dict = None
else:
state_dict = deepcopy(self.model.state_dict()) # pyre-ignore: [16]
model = self.model_constructor( # pyre-ignore: [28]
Xs=Xs_train,
Ys=Ys_train,
Yvars=Yvars_train,
task_features=self.task_features,
state_dict=state_dict,
fidelity_features=self.fidelity_features,
metric_names=self.metric_names,
refit_model=self.refit_on_cv,
use_input_warping=self.use_input_warping,
use_loocv_pseudo_likelihood=self.use_loocv_pseudo_likelihood,
**self._kwargs,
)
return self.model_predictor(model=model, X=X_test) # pyre-ignore: [28]
@copy_doc(TorchModel.update)
def update(
self,
Xs: List[Tensor],
Ys: List[Tensor],
Yvars: List[Tensor],
candidate_metadata: Optional[List[List[TCandidateMetadata]]] = None,
) -> None:
if self.model is None:
raise RuntimeError("Cannot update model that has not been fitted")
self.Xs = Xs
self.Ys = Ys
self.Yvars = Yvars
if self.refit_on_update and not self.warm_start_refitting:
state_dict = None # pragma: no cover
else:
state_dict = deepcopy(self.model.state_dict()) # pyre-ignore: [16]
self.model = self.model_constructor( # pyre-ignore: [28]
Xs=self.Xs,
Ys=self.Ys,
Yvars=self.Yvars,
task_features=self.task_features,
state_dict=state_dict,
fidelity_features=self.fidelity_features,
metric_names=self.metric_names,
refit_model=self.refit_on_update,
use_input_warping=self.use_input_warping,
use_loocv_pseudo_likelihood=self.use_loocv_pseudo_likelihood,
**self._kwargs,
)
def feature_importances(self) -> np.ndarray:
if self.model is None:
raise RuntimeError(
"Cannot calculate feature_importances without a fitted model"
)
else:
ls = self.model.covar_module.base_kernel.lengthscale # pyre-ignore: [16]
return cast(Tensor, (1 / ls)).detach().cpu().numpy()
def get_rounding_func(
rounding_func: Optional[Callable[[Tensor], Tensor]]
) -> Optional[Callable[[Tensor], Tensor]]:
if rounding_func is None:
botorch_rounding_func = rounding_func
else:
# make sure rounding_func is properly applied to q- and t-batches
def botorch_rounding_func(X: Tensor) -> Tensor:
batch_shape, d = X.shape[:-1], X.shape[-1]
X_round = torch.stack(
[rounding_func(x) for x in X.view(-1, d)] # pyre-ignore: [16]
)
return X_round.view(*batch_shape, d)
return botorch_rounding_func
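# Illustrative usage sketch (not part of the original module): shows how the
# pluggable components fit together with the defaults. The tensor shapes, the
# double-precision dtype, and the single-metric setup are assumptions made for
# this example only.
#
#   X = torch.rand(10, 2, dtype=torch.double)
#   Y = X.sum(dim=-1, keepdim=True)
#   Yvar = torch.full_like(Y, 0.01)
#   model = BotorchModel()  # defaults: NEI acquisition + scipy optimizer
#   model.fit(
#       Xs=[X], Ys=[Y], Yvars=[Yvar],
#       bounds=[(0.0, 1.0), (0.0, 1.0)],
#       task_features=[], feature_names=["x1", "x2"],
#       metric_names=["objective"], fidelity_features=[],
#   )
#   candidates, weights, gen_metadata, _ = model.gen(
#       n=2, bounds=[(0.0, 1.0), (0.0, 1.0)],
#       objective_weights=torch.tensor([1.0], dtype=torch.double),
#   )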
|
py | b406e68c30280d367a5ba0dc3d527f2ac64178d2 | """Convert a Tower Survey Spec to
Data Driven Forms (DDF) format
"""
class SpecToDDF:
DDF_FIELD_TYPES = {
"multiplechoice": {"component": "select-field"},
"multiselect": {"component": "select-field", "multi": True},
"text": {"component": "text-field"},
"integer": {
"component": "text-field",
"type": "number",
"dataType": "integer",
},
"float": {
"component": "text-field",
"type": "number",
"dataType": "float",
},
"password": {"component": "text-field", "type": "password"},
"textarea": {"component": "textarea-field"},
}
def process(self, data):
"""Convert to DDF"""
ddf_fields = []
for field in data["spec"]:
ddf_fields.append(self._convertField(field))
schema = {}
schema["fields"] = ddf_fields
schema["title"] = data["name"]
schema["description"] = data["description"]
result = {"schemaType": "default", "schema": schema}
return result
def _convertField(self, field):
result = {
"label": field["question_name"],
"name": field["variable"],
"helperText": field.get("question_description", ""),
"isRequired": field["required"],
}
result = {**result, **self.DDF_FIELD_TYPES[field["type"]]}
value = self._getOptions(field)
if len(value) > 0:
result["options"] = value
value = self._getValidateArray(field)
if len(value) > 0:
result["validate"] = value
if "multi" in result:
result["initialValue"] = field.get("default", "").split("\n")
else:
result["initialValue"] = field.get("default", "")
return result
def _getOptions(self, field):
values = None
if "choices" in field:
if isinstance(field["choices"], list):
values = field["choices"]
elif isinstance(field["choices"], str) and field["choices"] != "":
values = field["choices"].split("\n")
else:
return []
else:
return []
result = []
for v in values:
result.append({"label": v, "value": v})
return result
def _getValidateArray(self, field):
result = []
if field["required"]:
result.append({"type": "required-validator"})
if "min" in field:
if (
field["type"] == "text"
or field["type"] == "password"
or field["type"] == "textarea"
):
result.append(
{"type": "min-length-validator", "threshold": field["min"]}
)
elif field["type"] == "integer" or field["type"] == "float":
result.append(
{"type": "min-number-value", "value": field["min"]}
)
if "max" in field:
if (
field["type"] == "text"
or field["type"] == "password"
or field["type"] == "textarea"
):
result.append(
{"type": "max-length-validator", "threshold": field["max"]}
)
elif field["type"] == "integer" or field["type"] == "float":
result.append(
{"type": "max-number-value", "value": field["max"]}
)
return result
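# Minimal usage sketch (illustrative only; the sample spec below is invented and
# not taken from any real Tower survey):
if __name__ == "__main__":
    sample_spec = {
        "name": "Demo survey",
        "description": "Example survey spec",
        "spec": [
            {
                "question_name": "Instance size",
                "question_description": "Pick one",
                "variable": "size",
                "required": True,
                "type": "multiplechoice",
                "choices": "small\nmedium\nlarge",
                "default": "small",
            }
        ],
    }
    # Prints a DDF schema containing one select-field with its options and a
    # required-validator entry.
    print(SpecToDDF().process(sample_spec))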
|
py | b406e6fa5c93b7e0ab843aa69e85f015531b9434 | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 9
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_2_2
from isi_sdk_8_2_2.models.ndmp_settings_preferred_ip_create_params import NdmpSettingsPreferredIpCreateParams # noqa: E501
from isi_sdk_8_2_2.rest import ApiException
class TestNdmpSettingsPreferredIpCreateParams(unittest.TestCase):
"""NdmpSettingsPreferredIpCreateParams unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testNdmpSettingsPreferredIpCreateParams(self):
"""Test NdmpSettingsPreferredIpCreateParams"""
# FIXME: construct object with mandatory attributes with example values
# model = isi_sdk_8_2_2.models.ndmp_settings_preferred_ip_create_params.NdmpSettingsPreferredIpCreateParams() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
py | b406e779b10dc7500a64b6dc2e95871f93dfb864 | import sys
import logging
logger = logging.getLogger('osm_no_tunnels_bicycle.tunnels_graphhopper')
import requests
import gpxpy
import geojson
# hack
sys.path.append('../../barnehagefakta_osm')
from conflate_osm import file_util
api = 'http://localhost:8989/route?'
lat_lon_to_str = "{lat}%2C{lon}"
def request_wrap(start_str='60.394183%2C5.328369', end_str='60.628755%2C6.422882', **kwargs):
try:
ret = requests.get(api + 'point=%s&point=%s' % (start_str, end_str), params=kwargs)
except requests.exceptions.ConnectionError:
raise requests.exceptions.ConnectionError('Is graphhopper running?')
return ret
#http://localhost:8989/route?point=60.394183%2C5.328369&point=60.628755%2C6.422882&vehicle=bike2&ch.disable=true&alternative_route.max_paths=8&instructions=false&round_trip.seed=42&alternative_route.max_weight_factor=2&alternative_route.max_share_factor=0.9&type=gpx&gpx.route=false
def request_route(start, end, vehicle='bike', weighting='fastest'):
params = {'vehicle':vehicle,
'weighting':weighting,
'ch.disable':'true',
'alternative_route.max_paths':'8',
'instructions':'false',
'alternative_route.max_weight_factor':'10',
'alternative_route.max_share_factor':'0.99',
'type':'gpx',
'gpx.route':'false'}
start_str = lat_lon_to_str.format(lat=start[0], lon=start[1])
end_str = lat_lon_to_str.format(lat=end[0], lon=end[1])
ret = request_wrap(start_str, end_str, **params)
xml = ret.content
gpx = gpxpy.parse(xml)
return gpx
def track_equality(track1, track2):
try:
for seg_ix in xrange(len(track1.segments)):
seg1 = track1.segments[seg_ix]
seg2 = track2.segments[seg_ix]
for point_ix in xrange(len(seg1.points)):
point1 = seg1.points[point_ix]
point2 = seg2.points[point_ix]
# point2_reverse = seg2.points[-1-point_ix]
if point1.latitude != point2.latitude: #and point1.latitude != point2_reverse.latitude:
return False
elif point1.longitude != point2.longitude: #and point1.longitude != point2_reverse.longitude:
return False
# if str() != str(seg2.points[point_ix]):
#
except IndexError:
return False
return True
def merge_gpx(gpx1, gpx2):
# append tracks from gpx2 into gpx1 if they are unique, inplace!
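    # Python's for/else: the else branch runs only when the inner loop finishes
    # without hitting `break`, i.e. no equal track was found, so track2 is new.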
for track2 in gpx2.tracks:
for track1 in gpx1.tracks:
if track_equality(track1, track2):
break # no need, already present
else:
logger.info('Adding track %s', track2)
gpx1.tracks.append(track2)
def get_all(start, end, gpx=None):
for weighting in ('fastest', 'shortest'):
for vehicle in ('bike', 'bike2', 'mtb', 'racingbike'):
gpx2 = request_route(start, end, vehicle, weighting)
if gpx is None: # first iter
gpx = gpx2
else:
merge_gpx(gpx, gpx2)
return gpx
def write_gpx_to_geojson(filename, gpx):
features = list()
for track in gpx.tracks:
points = list()
for p in track.segments[0].points:
points.append((p.longitude, p.latitude))
line = geojson.LineString(points)
features.append(geojson.Feature(geometry=line))
feature_collection = geojson.FeatureCollection(features)
## Mapbox specific?
#source = dict(data=feature_collection, type='geojson')
#layer = dict(source=source, type='line', id='route') # layout={'line-join':'round', 'line-cap':'round'}, paint={'line-color': '#888', 'line-width':8}
with open(filename, 'w') as f:
geojson.dump(feature_collection, f, sort_keys=True, indent=4, separators=(',', ': '))
return feature_collection
def remove_equal_start_stop(gpx):
"""Removes any sily tracks with either 1 point, or 2 points on the same location.
"""
for track in gpx.tracks:
remove = False
if len(track.segments[0].points) > 1:
remove = True
elif len(track.segments[0].points) > 2:
p0 = track.segments[0].points[0]
p1 = track.segments[0].points[1]
if p0.lat == p1.lat and p0.lon == p1.lon:
remove = True
if remove:
track.remove = True
return gpx
def main(output_filename = 'test.gpx',
start = (59.7155, 10.8171),
end = (59.7249, 10.8178), old_age_days=1, ret_geojson=True):
output_filename_geojson = output_filename.replace('.gpx', '.geojson') # fixme
cached, outdated = file_util.cached_file(output_filename, old_age_days=old_age_days)
if cached is not None and not(outdated):
gpx = gpxpy.parse(cached)
else:
# Writes a number of track alternatives to output_filename, going both from
# start to end and end to start
gpx = get_all(start, end)
gpx = get_all(end, start, gpx)
        gpx = remove_equal_start_stop(gpx) # removes any tracks with 2 points (or fewer) where the start and end points are the same
#if len(gpx.tracks) != 0:
logger.info('writing %s tracks to %s', len(gpx.tracks), output_filename)
with open(output_filename, 'w') as f:
f.write(gpx.to_xml())
logger.info('writing %s tracks to %s', len(gpx.tracks), output_filename_geojson)
write_gpx_to_geojson(output_filename_geojson, gpx)
if ret_geojson:
with open(output_filename_geojson, 'r') as f:
content = f.read()
return gpx.tracks, content
else:
return gpx.tracks
if __name__ == '__main__':
main(old_age_days=0)
# 'type':'gpx',
# 'gpx.route':
# # gamle aasvei to ski
# start_str = '59.7079%2C10.8336'
# end_str = '59.7190%2C10.8386'
# fixme: why am I not getting multiple alternatives?
# start_str = '59.7155%2C10.8171'
# end_str = '59.7249%2C10.8178'
# ret = request_wrap(start_str, end_str, **params)
# print ret
# d = json.loads(ret.content)
# print '='*5 + vehicle + '='*5
# print len(d['paths']), 'alternatives'
# for item in d['paths']:
# for key in item:
# if key not in ['points']:
# print key, item[key]
|
py | b406e7bdb3ba3ce8ecf1b1c09254fbcd0a1f45c8 | #!/usr/bin/env python3
import logging
import requests
import sys
from .base import get
logger = logging.getLogger(__name__)
def get_maintenance(use_v1_maintenance_windows_endpoints, apikey, username):
if use_v1_maintenance_windows_endpoints:
endpoint = "maintenance-windows"
params = {
"state": "active"
}
else:
endpoint = "Maintenance"
params = {
"state": "ACT"
}
try:
response = get(use_v1_maintenance_windows_endpoints, apikey, username, endpoint, params)
except requests.exceptions.HTTPError as e:
if not(use_v1_maintenance_windows_endpoints) and e.response.status_code == 404:
logger.info("Currently no active maintenance.")
response = e.response
else:
logger.error(e)
sys.exit(1)
logger.debug(f"Request response:\n{response.content}")
return response
|
py | b406e7c189bfae03970b06fb4954f5133cc924f8 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
# from torch.autograd import Variable
from torch.nn.parameter import Parameter
import math
import utils
import argparse
import data_loader
from ipdb import set_trace
from sklearn import metrics
SEQ_LEN = 48
def binary_cross_entropy_with_logits(input, target, weight=None, size_average=True, reduce=True):
if not (target.size() == input.size()):
raise ValueError("Target size ({}) must be the same as input size ({})".format(target.size(), input.size()))
max_val = (-input).clamp(min=0)
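    # Numerically stable BCE-with-logits: shifting by max_val is the log-sum-exp
    # trick, so exp() never sees a large positive argument and cannot overflow.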
loss = input - input * target + max_val + ((-max_val).exp() + (-input - max_val).exp()).log()
if weight is not None:
loss = loss * weight
if not reduce:
return loss
elif size_average:
return loss.mean()
else:
return loss.sum()
class FeatureRegression(nn.Module):
def __init__(self, input_size):
super(FeatureRegression, self).__init__()
self.build(input_size)
def build(self, input_size):
self.W = Parameter(torch.Tensor(input_size, input_size))
self.b = Parameter(torch.Tensor(input_size))
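        # Off-diagonal mask: zeroing the diagonal of W in forward() means each
        # feature is regressed only from the other features, never from itself.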
m = torch.ones(input_size, input_size) - torch.eye(input_size, input_size)
self.register_buffer('m', m)
self.reset_parameters()
def reset_parameters(self):
stdv = 1. / math.sqrt(self.W.size(0))
self.W.data.uniform_(-stdv, stdv)
if self.b is not None:
self.b.data.uniform_(-stdv, stdv)
def forward(self, x):
# z_h = F.linear(x, self.W * Variable(self.m), self.b)
z_h = F.linear(x, self.W * self.m, self.b)
return z_h
class TemporalDecay(nn.Module):
def __init__(self, input_size, output_size, diag = False):
super(TemporalDecay, self).__init__()
self.diag = diag
self.build(input_size, output_size)
def build(self, input_size, output_size):
self.W = Parameter(torch.Tensor(output_size, input_size))
self.b = Parameter(torch.Tensor(output_size))
if self.diag == True:
assert(input_size == output_size)
m = torch.eye(input_size, input_size)
self.register_buffer('m', m)
self.reset_parameters()
def reset_parameters(self):
stdv = 1. / math.sqrt(self.W.size(0))
self.W.data.uniform_(-stdv, stdv)
if self.b is not None:
self.b.data.uniform_(-stdv, stdv)
def forward(self, d):
if self.diag == True:
# gamma = F.relu(F.linear(d, self.W * Variable(self.m), self.b))
gamma = F.relu(F.linear(d, self.W * self.m, self.b))
else:
gamma = F.relu(F.linear(d, self.W, self.b))
gamma = torch.exp(-gamma)
return gamma
class Model(nn.Module):
def __init__(self, rnn_hid_size, impute_weight, label_weight):
super(Model, self).__init__()
self.rnn_hid_size = rnn_hid_size
self.impute_weight = impute_weight
self.label_weight = label_weight
self.build()
def build(self):
self.rnn_cell = nn.LSTMCell(35 * 2, self.rnn_hid_size)
self.temp_decay_h = TemporalDecay(input_size = 35, output_size = self.rnn_hid_size, diag = False)
self.temp_decay_x = TemporalDecay(input_size = 35, output_size = 35, diag = True)
self.hist_reg = nn.Linear(self.rnn_hid_size, 35)
self.feat_reg = FeatureRegression(35)
self.weight_combine = nn.Linear(35 * 2, 35)
self.dropout = nn.Dropout(p = 0.25)
self.out = nn.Linear(self.rnn_hid_size, 1)
def forward(self, data, direct):
# Original sequence with 24 time steps
values = data[direct]['values']
masks = data[direct]['masks']
deltas = data[direct]['deltas']
evals = data[direct]['evals']
eval_masks = data[direct]['eval_masks']
labels = data['labels'].view(-1, 1)
is_train = data['is_train'].view(-1, 1)
# h = Variable(torch.zeros((values.size()[0], self.rnn_hid_size)))
# c = Variable(torch.zeros((values.size()[0], self.rnn_hid_size)))
h = torch.zeros((values.size()[0], self.rnn_hid_size))
c = torch.zeros((values.size()[0], self.rnn_hid_size))
if torch.cuda.is_available():
h, c = h.cuda(), c.cuda()
x_loss = 0.0
y_loss = 0.0
imputations = []
for t in range(SEQ_LEN):
x = values[:, t, :]
m = masks[:, t, :]
d = deltas[:, t, :]
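            # Temporal decay (BRITS-style): gamma_h shrinks the hidden state as
            # the gap d since the last observation grows; gamma_x later feeds the
            # weight that blends the history- and feature-based estimates.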
gamma_h = self.temp_decay_h(d)
gamma_x = self.temp_decay_x(d)
h = h * gamma_h
x_h = self.hist_reg(h)
x_loss += torch.sum(torch.abs(x - x_h) * m) / (torch.sum(m) + 1e-5)
x_c = m * x + (1 - m) * x_h
z_h = self.feat_reg(x_c)
x_loss += torch.sum(torch.abs(x - z_h) * m) / (torch.sum(m) + 1e-5)
alpha = self.weight_combine(torch.cat([gamma_x, m], dim = 1))
c_h = alpha * z_h + (1 - alpha) * x_h
x_loss += torch.sum(torch.abs(x - c_h) * m) / (torch.sum(m) + 1e-5)
c_c = m * x + (1 - m) * c_h
inputs = torch.cat([c_c, m], dim = 1)
h, c = self.rnn_cell(inputs, (h, c))
imputations.append(c_c.unsqueeze(dim = 1))
imputations = torch.cat(imputations, dim = 1)
y_h = self.out(h)
y_loss = binary_cross_entropy_with_logits(y_h, labels, reduce = False)
y_loss = torch.sum(y_loss * is_train) / (torch.sum(is_train) + 1e-5)
y_h = F.sigmoid(y_h)
return {'loss': x_loss * self.impute_weight + y_loss * self.label_weight, 'predictions': y_h,\
'imputations': imputations, 'labels': labels, 'is_train': is_train,\
'evals': evals, 'eval_masks': eval_masks}
def run_on_batch(self, data, optimizer, epoch = None):
ret = self(data, direct = 'forward')
if optimizer is not None:
optimizer.zero_grad()
ret['loss'].backward()
optimizer.step()
return ret
|
py | b406e7d6ba79f0574ab27925eb300d140c5c45ef | import os
import zipfile
import tempfile
"""
Init for Spydrnet. The functions below can be called directly.
"""
def parse(filename):
"""
    The parse function is able to parse either an EDIF (.edf) file or a Verilog file (.v).
    This function also supports parsing .zip files, such as the ones in the support_files folder.
Returns
-------
Netlist
The netlist that comes as the result of the parsing of the file if the file was parsed successfully
Examples
--------
>>> import spydrnet as sdn
>>> netlist = sdn.parse('<netlist_filename>.edf')
Or we can parse a verilog file
>>> netlist = sdn.parse('<netlist_filename>.v')
Or a zip file that contains the edif or verilog file
>>> netlist = sdn.parse('4bitadder.edf.zip')
"""
basename_less_final_extension = os.path.splitext(
os.path.basename(filename))[0]
extension = get_lowercase_extension(filename)
if extension == ".zip":
assert zipfile.is_zipfile(filename), \
"Input filename {} with extension .zip is not a zip file.".format(
basename_less_final_extension)
with tempfile.TemporaryDirectory() as tempdirname:
with zipfile.ZipFile(filename) as zip:
files = zip.namelist()
assert len(files) == 1 and files[0] == basename_less_final_extension, \
"Only single file archives allowed with a file whose name matches the name of the archive"
zip.extract(basename_less_final_extension, tempdirname)
filename = os.path.join(
tempdirname, basename_less_final_extension)
return _parse(filename)
return _parse(filename)
def _parse(filename):
extension = get_lowercase_extension(filename)
if extension in [".edf", ".edif", ".edn"]:
from spydrnet.parsers.edif.parser import EdifParser
parser = EdifParser.from_filename(filename)
elif extension in [".v", ".vh"]:
from spydrnet.parsers.verilog.parser import VerilogParser
parser = VerilogParser.from_filename(filename)
else:
raise RuntimeError("Extension {} not recognized.".format(extension))
parser.parse()
return parser.netlist
def get_lowercase_extension(filename):
extension = os.path.splitext(filename)[1]
extension_lower = extension.lower()
return extension_lower
|
py | b406e8070dae0340664e47bd85bf08d18687f809 | #!/usr/bin/env python3
################################################################################
# Copyright (c) 2020, Arm Limited
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
import os
import logging
import code # for going into interactive session
import time
import silicon_libs.testchip as testchip
import silicon_libs.utils as utils
# Paths
LOG_FILEPATH = os.path.join('logs', 'adpdev.log')
TRIM_PATH = "silicon_libs/trims/M0N0S2/trims.yaml"
def main(params, logger):
"""The main function. Sets up a chip and enters an interactive prompt
:param params: Command line arguments and chip options
:type params: dict
:param logger: The logger object for logging messages to the console
and file
:type logger: logging.Logger object
"""
params['load_devram'] = True # always load DEVRAM
params['load_trims'] = TRIM_PATH if params['trim'] else None
logger.info("Started ADPDEV...")
chip = testchip.M0N0S2.setup_chip(
params,
logger,
skip_reload=params['skip_reload'])
# Pass special callbacks for interpreting M0N0 STDOUT
# for general ADPDev testing, these are not required:
audio_reader = utils.AudioReader(logger)
chip.set_adp_tx_callbacks({
'demoboard_audio': audio_reader.demoboard_audio
})
# Custom code can go here
# Go to an interactive python prompt:
code.interact(local=dict(globals(), **locals()))
if chip:
chip.exit()
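# Typical invocation (the chip ID, port, and software path below are examples
# only, not values taken from this repository):
#   python3 adpdev.py --chip-id S2-042 --adp-port /dev/ttyUSB0 \
#       --software path/to/software_dir --trim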
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description="Script for connecting to the M0N0 chip, loading "
"software and debug")
parser.add_argument(
'--chip-id',
required=True,
help="The Chip ID (written on the top of the chip directly under "
"'M0N0-S2')")
parser.add_argument(
'--adp-port',
required=True,
help="Specify the ADP port address")
parser.add_argument(
'-l',
'--log-level',
required=False,
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR'],
default="INFO",
help="Sets the logger level for console output")
parser.add_argument(
'-s',
'--software',
required=True,
help="Software directory containing the bin/hex and/or testcase"
" list")
parser.add_argument(
'--trim',
required=False,
action='store_true',
default=False,
help="If specified, the trims for this chip (based on Chip ID) "
"will be loaded. The chip must have been previously "
"measured and have the default values in the trims file.")
parser.add_argument(
'--auto-release',
required=False,
action='store_true',
default=False,
help="If specified, the reset is released automatically after"
" loading DEVRAM")
parser.add_argument(
'--show-ports',
required=False,
action='store_true',
default=False,
help="Utility for showing all available serial ports")
parser.add_argument(
'--skip-reload',
required=False,
action='store_true',
default=False,
help="Skips the loading of DEVRAM (for quickly testing python "
"script changes)")
args = parser.parse_args()
if args.show_ports:
os.system('python -m serial.tools.list_ports')
exit()
if not os.path.exists('logs'):
os.makedirs('logs')
# Setup logger for filtered log message to console and file
temp_logger = utils.setup_logger(
logging.getLogger(__name__),
args.log_level,
LOG_FILEPATH)
temp_logger.info("Set up logger")
# Parse the command-line arguments (as a dict) and logger to main:
main(vars(args), temp_logger)
|
py | b406e8853d2b5ac6e19ee681c469dbe5f3a4da48 | # Generated by Django 3.1.4 on 2021-01-04 15:26
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.expressions
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('api', '0002_auto_20210102_2009'),
]
operations = [
migrations.AlterField(
model_name='follow',
name='following',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='following', to=settings.AUTH_USER_MODEL, verbose_name='Автор (following)'),
),
migrations.AlterField(
model_name='follow',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='follower', to=settings.AUTH_USER_MODEL, verbose_name='Читатель (user)'),
),
migrations.AddConstraint(
model_name='follow',
constraint=models.CheckConstraint(check=models.Q(_negated=True, following=django.db.models.expressions.F('user')), name='you cant follow yourself'),
),
]
|
py | b406ea1519ec5f38c7d3a1cf977fab938e27f895 | """
GDP connector from Illiad
Long Le <[email protected]>
University of Illinois
"""
import os
mDir = os.path.dirname(__file__)
import sys
sys.path.append(os.path.join(mDir, '../../python/src/'))
from sasclient import *
sys.path.append(os.path.join(mDir, '../../../gdp/lang/python/'))
import gdp
#sys.path.append(os.path.join(dir, '../../../gdp/lang/python/apps/'))
#from KVstore import KVstore
from datetime import datetime, timedelta
import json, pickle
#======================
# Get data from Illiad
servAddr = 'acoustic.ifp.illinois.edu:8080'
DB = 'publicDb'
USER = 'nan'
PWD = 'publicPwd'
DATA = 'data'
EVENT = 'event'
currTime = datetime.utcnow()
t2 = currTime;
# push data in the last few mins if any
t1 = currTime - timedelta(minutes=15)
print(t1)
print(t2)
q = {'t1':t1,'t2':t2}
events = IllQuery(servAddr,DB, USER, PWD, EVENT, q);
if len(events) > 0:
print("Number of events found is "+str(len(events)))
#print(events[0])
else:
print('No event found!')
#======================
# append data to a local pickle file
for event in events:
with open('audio_events.pkl','ab') as f:
pickle.dump(event,f)
#======================
# Put data (if any) into GDP
gdp.gdp_init()
# create a GDP_NAME object from a human readable python string
#gcl_name = gdp.GDP_NAME('edu.illinois.ifp.longle1.log0')
gcl_name = gdp.GDP_NAME('edu.illinois.ifp.acoustic.log0')
# assume that this log already exists.
gcl_handle = gdp.GDP_GCL(gcl_name,gdp.GDP_MODE_RA)
for event in events:
print(event['recordDate'])
gcl_handle.append({'data':json.dumps(event)})
# verify if write successful
datum = gcl_handle.read(-1)
print('The most recent record number is '+ str(datum['recno']))
|
py | b406ed5c395204388602aa9c00b67b5f63ef3536 | ##############################################################################
#
# Copyright (c) 2001 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
from __future__ import print_function
import doctest
import unittest
import glob
import os
from zope.structuredtext import stng
from zope.structuredtext import stdom
from zope.structuredtext.document import DocumentWithImages
from zope.structuredtext.html import HTMLWithImages
from zope.structuredtext.docbook import DocBook
from zope.structuredtext.docbook import DocBookChapterWithFigures
here = os.path.dirname(__file__)
regressions = os.path.join(here, 'regressions')
files = glob.glob(regressions + '/*stx')
def readFile(dirname, fname):
with open(os.path.join(dirname, fname), "r") as myfile:
lines = myfile.readlines()
return ''.join(lines)
def structurizedFile(f):
raw_text = readFile(regressions, f)
text = stng.structurize(raw_text)
return text
def structurizedFiles():
for f in files:
yield structurizedFile(f)
class MockParagraph(object):
co_texts = ()
sub_paragraphs = ()
indent = 0
node_type = stdom.TEXT_NODE
node_value = ''
node_name = None
child_nodes = None
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
def getColorizableTexts(self):
return self.co_texts
def getSubparagraphs(self):
return self.sub_paragraphs
def getNodeType(self):
return self.node_type
def getNodeValue(self):
return self.node_value
def getNodeName(self):
return self.node_name
def getChildNodes(self):
return self.child_nodes
class TestFiles(unittest.TestCase):
maxDiff = None
def _compare(self, filename, output, expected_extension=".ref"):
expected_filename = filename.replace('.stx', expected_extension)
try:
expected = readFile(regressions, expected_filename)
except IOError: # pragma: no cover
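            # If the reference file could not be read (typically because it does
            # not exist yet), write the freshly generated output so it can be
            # reviewed and committed as the new baseline.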
full_expected_fname = os.path.join(regressions, expected_filename)
if not os.path.exists(full_expected_fname):
with open(full_expected_fname, 'w') as f:
f.write(output)
else:
self.assertEqual(expected.strip(), output.strip())
def _check_html(self, f):
# HTML regression test
__traceback_info__ = f
stext = structurizedFile(f)
doc = DocumentWithImages()(stext)
html = HTMLWithImages()(doc)
self._compare(f, html)
# The same thing should work if we feed it the bare text
text = readFile(regressions, f)
doc = DocumentWithImages()(text)
html = HTMLWithImages()(doc)
self._compare(f, html)
def _check_docbook(self, f):
__traceback_info__ = f
fails_to_docbook = {
# Doesn't support StructuredTextTable
'table.stx',
}
requires_images = {
'images.stx'
}
if f in fails_to_docbook:
raise unittest.SkipTest()
text = structurizedFile(f)
doc = DocumentWithImages()(text)
factory = (
DocBook if f not in requires_images else DocBookChapterWithFigures)
docbook = factory()(doc)
self._compare(f, docbook, '.xml')
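    # Dynamically attach one HTML and one DocBook regression test method to this
    # class for every .stx fixture file found in the regressions directory.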
for f in files:
f = os.path.basename(f)
def html(self, f=f): return self._check_html(f)
def xml(self, f=f): return self._check_docbook(f)
bn = os.path.basename(f).replace('.', '_')
locals()['test_html_' + bn] = html
locals()['test_xml_' + bn] = xml
class TestDocument(unittest.TestCase):
def testDocumentClass(self):
# testing Document
# *cough* *cough* this can't be enough...
for text in structurizedFiles():
doc = DocumentWithImages()
self.assertTrue(doc(text))
def reprs(x): # coverage
self.assertTrue(repr(x))
if not hasattr(x, 'getChildren'):
return
stng.print = lambda *args, **kwargs: None
try:
stng.display(x)
stng.display2(x)
finally:
del stng.print
for i in x.getChildren():
self.assertTrue(repr(i))
reprs(i)
reprs(text)
def test_description_newline(self):
doc = DocumentWithImages()
with_newline = MockParagraph(co_texts=['\nD -- '])
result = doc.doc_description(with_newline)
self.assertIsNone(result)
def test_description_nb(self):
doc = DocumentWithImages()
with_nb = MockParagraph(co_texts=[' -- '])
result = doc.doc_description(with_nb)
self.assertIsNone(result)
def test_description_example(self):
doc = DocumentWithImages()
with_example = MockParagraph(co_texts=['Desc:: -- ::'])
result = doc.doc_description(with_example)
self.assertIsInstance(result, stng.StructuredTextDescription)
self.assertIsInstance(
result.getSubparagraphs()[0],
stng.StructuredTextExample)
self.assertEqual(result._src, ':')
def test_parse_returns_string(self):
doc = DocumentWithImages()
returns = ['', ('a string', 0, 0)]
def text_type(arg):
return returns.pop()
result = doc.parse('raw_string', text_type)
self.assertEqual('a stringraw_string', result)
def test_parse_returns_list(self):
doc = DocumentWithImages()
returns = ['', ([1], 0, 0)]
def text_type(arg):
return returns.pop()
result = doc.parse('raw_string', text_type)
self.assertEqual([1, 'raw_string'], result)
def test_header_empty(self):
doc = DocumentWithImages()
header = MockParagraph(sub_paragraphs=self, co_texts=[''])
result = doc.doc_header(header)
self.assertIsNone(result)
def test_header_example(self):
doc = DocumentWithImages()
header = MockParagraph(
sub_paragraphs=[
MockParagraph()],
co_texts=["::"])
result = doc.doc_header(header)
self.assertIsInstance(result, stng.StructuredTextExample)
def test_inner_link_is_not_named_link(self):
doc = DocumentWithImages()
result = doc.doc_inner_link('...[abc]')
self.assertIsInstance(result, tuple)
self.assertIsInstance(result[0], stng.StructuredTextInnerLink)
class HTMLDocumentTests(unittest.TestCase):
def _test(self, stxtxt, expected, html=HTMLWithImages):
doc = stng.structurize(stxtxt)
doc = DocumentWithImages()(doc)
output = html()(doc, level=1)
msg = ("Text: %s\n" % stxtxt
+ "Converted: %s\n" % output
+ "Expected: %s\n" % expected)
self.assertIn(expected, output, msg)
def testUnderline(self):
self._test("xx _this is html_ xx",
"xx <u>this is html</u> xx")
def testUnderlineNonASCII(self):
self._test("xx _D\xc3\xbcsseldorf underlined_ xx",
"xx <u>D\xc3\xbcsseldorf underlined</u> xx")
def testUnderline1(self):
self._test("xx _this is html_",
"<u>this is html</u>")
def testUnderline1NonASCII(self):
self._test("xx _D\xc3\xbcsseldorf underlined_",
"<u>D\xc3\xbcsseldorf underlined</u>")
def testEmphasis(self):
self._test("xx *this is html* xx",
"xx <em>this is html</em> xx")
def testEmphasisNonASCII(self):
self._test("xx *Emphasising D\xc3\xbcsseldorf* xx",
"xx <em>Emphasising D\xc3\xbcsseldorf</em> xx")
def testStrong(self):
self._test("xx **this is html** xx",
"xx <strong>this is html</strong> xx")
def testStrongNonASCII(self):
self._test("xx **Greetings from D\xc3\xbcsseldorf** xx",
"xx <strong>Greetings from D\xc3\xbcsseldorf</strong> xx")
def testUnderlineThroughoutTags(self):
self._test('<a href="index_html">index_html</a>',
'<a href="index_html">index_html</a>')
def testUnderlineThroughoutTagsNonASCII(self):
self._test('<a href="index_html">D\xc3\xbcsseldorf</a>',
'<a href="index_html">D\xc3\xbcsseldorf</a>')
def testUnderscoresInLiteral1(self):
self._test("def __init__(self)",
"def __init__(self)")
def testUnderscoresInLiteral1NonASCII(self):
self._test("def __init__(D\xc3\xbcsself)",
"def __init__(D\xc3\xbcsself)")
def testUnderscoresInLiteral2(self):
self._test("this is '__a_literal__' eh",
"<code>__a_literal__</code>")
def testUnderscoresInLiteral2NonASCII(self):
self._test("this is '__literally_D\xc3\xbcsseldorf__' eh",
"<code>__literally_D\xc3\xbcsseldorf__</code>")
def testUnderlinesWithoutWithspaces(self):
self._test("Zopes structured_text is sometimes a night_mare",
"Zopes structured_text is sometimes a night_mare")
def testUnderlinesWithoutWithspacesNonASCII(self):
self._test("D\xc3\xbcsseldorf by night is sometimes a night_mare",
"D\xc3\xbcsseldorf by night is sometimes a night_mare")
def testAsterisksInLiteral(self):
self._test("this is a '*literal*' eh",
"<code>*literal*</code>")
def testAsterisksInLiteralNonASCII(self):
self._test("this is a '*D\xc3\xbcsseldorf*' eh",
"<code>*D\xc3\xbcsseldorf*</code>")
def testDoubleAsterisksInLiteral(self):
self._test("this is a '**literal**' eh",
"<code>**literal**</code>")
def testDoubleAsterisksInLiteralNonASCII(self):
self._test("this is a '**D\xc3\xbcsseldorf**' eh",
"<code>**D\xc3\xbcsseldorf**</code>")
def testLinkInLiteral(self):
self._test("this is a '\"literal\":http://www.zope.org/.' eh",
'<code>"literal":http://www.zope.org/.</code>')
def testLinkInLiteralNonASCII(self):
self._test(
"this is a '\"D\xc3\xbcsseldorf\":http://www.zope.org/.' eh",
'<code>"D\xc3\xbcsseldorf":http://www.zope.org/.</code>')
def testLink(self):
self._test('"foo":http://www.zope.org/foo/bar',
'<p><a href="http://www.zope.org/foo/bar">foo</a></p>')
self._test('"foo":http://www.zope.org/foo/bar/%20x',
'<p><a href="http://www.zope.org/foo/bar/%20x">foo</a></p>')
self._test(
'"foo":http://www.zope.org/foo/bar?arg1=1&arg2=2',
'<p><a href="http://www.zope.org/foo/bar?arg1=1&arg2=2">'
'foo</a></p>')
self._test('"foo bar":http://www.zope.org/foo/bar',
'<p><a href="http://www.zope.org/foo/bar">foo bar</a></p>')
self._test(
'"[link goes here]":http://www.zope.org/foo/bar',
'<p><a href="http://www.zope.org/foo/bar">[link goes here]</a></p>'
)
self._test(
'"[Dad\'s car]":http://www.zope.org/foo/bar',
'<p><a href="http://www.zope.org/foo/bar">[Dad\'s car]</a></p>')
def testLinkNonASCII(self):
self._test(
'"D\xc3\xbcsseldorf":http://www.zope.org/foo/bar',
'<p><a href="http://www.zope.org/foo/bar">'
'D\xc3\xbcsseldorf</a></p>')
self._test(
'"D\xc3\xbcsseldorf":http://www.zope.org/foo/bar/%20x',
'<p><a href="http://www.zope.org/foo/bar/%20x">'
'D\xc3\xbcsseldorf</a></p>')
self._test(
'"D\xc3\xbcsseldorf":http://www.zope.org/foo/bar?arg1=1&arg2=2',
'<p><a href="http://www.zope.org/foo/bar?arg1=1&arg2=2">'
'D\xc3\xbcsseldorf</a></p>')
self._test(
'"D\xc3\xbcsseldorf am Rhein":http://www.zope.org/foo/bar',
'<p><a href="http://www.zope.org/foo/bar">'
'D\xc3\xbcsseldorf am Rhein</a></p>')
self._test(
'"[D\xc3\xbcsseldorf am Rhein]":http://www.zope.org/foo/bar',
'<p><a href="http://www.zope.org/foo/bar">'
'[D\xc3\xbcsseldorf am Rhein]</a></p>')
self._test(
'"[D\xc3\xbcsseldorf\'s Homepage]":http://www.zope.org/foo/bar',
'<p><a href="http://www.zope.org/foo/bar">'
'[D\xc3\xbcsseldorf\'s Homepage]</a></p>')
def testImgLink(self):
self._test('"foo":img:http://www.zope.org/bar.gif',
'<img src="http://www.zope.org/bar.gif" alt="foo" />')
self._test('"foo":img:http://www.zope.org:8080/bar.gif',
'<img src="http://www.zope.org:8080/bar.gif" alt="foo" />')
self._test(
'"foo":img:http://www.zope.org:8080/foo/bar?arg=1',
'<img src="http://www.zope.org:8080/foo/bar?arg=1" alt="foo" />')
self._test(
'"foo":img:http://www.zope.org:8080/foo/b%20ar?arg=1',
'<img src="http://www.zope.org:8080/foo/b%20ar?arg=1" alt="foo" />'
)
self._test(
'"foo bar":img:http://www.zope.org:8080/foo/bar',
'<img src="http://www.zope.org:8080/foo/bar" alt="foo bar" />')
self._test(
'"[link goes here]":img:http://www.zope.org:8080/foo/bar',
'<img src="http://www.zope.org:8080/foo/bar"'
' alt="[link goes here]" />')
self._test(
'"[Dad\'s new car]":img:http://www.zope.org:8080/foo/bar',
'<img src="http://www.zope.org:8080/foo/bar"'
' alt="[Dad\'s new car]" />')
def testImgLinkNonASCII(self):
self._test(
'"D\xc3\xbcsseldorf":img:http://www.zope.org/bar.gif',
'<img src="http://www.zope.org/bar.gif" alt="D\xc3\xbcsseldorf" />'
)
self._test(
'"D\xc3\xbcsseldorf":img:http://www.zope.org:8080/bar.gif',
'<img src="http://www.zope.org:8080/bar.gif"'
' alt="D\xc3\xbcsseldorf" />')
self._test(
'"D\xc3\xbcsseldorf":img:http://www.zope.org:8080/foo/bar?arg=1',
'<img src="http://www.zope.org:8080/foo/bar?arg=1"'
' alt="D\xc3\xbcsseldorf" />')
self._test(
'"D\xc3\xbcsseldorf"'
':img:http://www.zope.org:8080/foo/b%20ar?arg=1',
'<img src="http://www.zope.org:8080/foo/b%20ar?arg=1"'
' alt="D\xc3\xbcsseldorf" />')
self._test(
'"D\xc3\xbcsseldorf am Rhein"'
':img:http://www.zope.org:8080/foo/bar',
'<img src="http://www.zope.org:8080/foo/bar"'
' alt="D\xc3\xbcsseldorf am Rhein" />')
self._test(
'"[D\xc3\xbcsseldorf am Rhein]"'
':img:http://www.zope.org:8080/foo/bar',
'<img src="http://www.zope.org:8080/foo/bar"'
' alt="[D\xc3\xbcsseldorf am Rhein]" />')
self._test(
'"[D\xc3\xbcsseldorf\'s Homepage]"'
':img:http://www.zope.org:8080/foo/bar',
'<img src="http://www.zope.org:8080/foo/bar"'
' alt="[D\xc3\xbcsseldorf\'s Homepage]" />')
def testBulletList(self):
self._test("* item in a list", "<ul>\n<li>item in a list</li>")
def testOrderedList(self):
self._test("1. First item", "<ol>\n<li> First item</li>")
def testDefinitionList(self):
self._test("Term -- Definition", "<dt>Term</dt>\n<dd>Definition</dd>")
def testHeader1(self):
self._test("Title\n\n following paragraph",
("<h1>Title</h1>\n<p> following paragraph</p>"))
def testHeader1_again(self):
self._test(
"""Title
first paragraph
Subtitle
second paragraph""",
("""<h1>Title</h1>
<p> first paragraph</p>
<h2> Subtitle</h2>
<p> second paragraph</p>"""))
def testUnicodeContent(self):
# This fails because ST uses the default locale to get "letters"
# whereas it should use \w+ and re.U if the string is Unicode.
self._test(u"h\xe9 **y\xe9** xx",
u"h\xe9 <strong>y\xe9</strong> xx")
def test_paragraph_not_nestable(self):
first_child_not_nestable = MockParagraph(
node_name='not nestable or known')
second_child_nestable = MockParagraph(node_name="#text")
third_child_not_nestable = MockParagraph(
node_name='not nestable or known')
doc = MockParagraph(child_nodes=[first_child_not_nestable,
second_child_nestable,
third_child_not_nestable])
html = HTMLWithImages()
html.dispatch = lambda *args: None
l_ = []
html.paragraph(doc, level=1, output=l_.append)
self.assertEqual(l_, ['<p>', '</p>\n', '<p>', '</p>\n'])
def test_image_with_key(self):
doc = MockParagraph(key='abc', href='def', node_value='123')
html = HTMLWithImages()
l_ = []
html.image(doc, 1, output=l_.append)
self.assertEqual(l_,
['<a name="abc"></a>\n',
'<img src="def" alt="123" />\n',
'<p><b>Figure abc</b> 123</p>\n'])
class DocBookOutputTests(unittest.TestCase):
def test_literal_text(self):
doc = MockParagraph(
node_name='StructuredTextLiteral',
node_value=' ')
docbook = DocBook()
l_ = []
docbook._text(doc, 1, output=l_.append)
self.assertEqual(l_, [' '])
class DocBookChapterWithFiguresOutputTests(unittest.TestCase):
def test_image_with_key(self):
doc = MockParagraph(key='abc', href='def', node_value='123')
docbook = DocBookChapterWithFigures()
l_ = []
docbook.image(doc, 1, output=l_.append)
self.assertEqual(l_,
['<figure id="abc"><title>123</title>\n',
'<graphic fileref="def"></graphic>\n</figure>\n'])
class TestDocBookBook(unittest.TestCase):
def test_output(self):
from zope.structuredtext.docbook import DocBookBook
book = DocBookBook('title')
book.addChapter("\nchapter1\n")
book.addChapter("\nchapter2\n")
self.assertEqual(
str(book),
'<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook V4.1//EN">'
'\n<book>'
'\n<title>title</title>'
'\n\nchapter1'
'\n\n\nchapter2'
'\n\n\n</book>\n')
class TestSTNGFunctions(unittest.TestCase):
def test_findlevel_empty(self):
self.assertEqual(0, stng.findlevel({}, 42))
def test_structurize_empty(self):
paragraphs = ''
result = stng.structurize(paragraphs)
self.assertIsInstance(result, stng.StructuredTextDocument)
class TestStructuredTextDocument(unittest.TestCase):
def test_set_texts_noop(self):
doc = stng.StructuredTextDocument()
self.assertEqual((), doc.getColorizableTexts())
doc.setColorizableTexts(self)
self.assertEqual((), doc.getColorizableTexts())
class TestStructuredTextExample(unittest.TestCase):
def test_set_texts_noop(self):
doc = stng.StructuredTextExample(())
self.assertEqual((), doc.getColorizableTexts())
doc.setColorizableTexts(self)
self.assertEqual((), doc.getColorizableTexts())
class TestStructuredTextParagraph(unittest.TestCase):
def test_attributes(self):
p = stng.StructuredTextParagraph(src='', k=42)
self.assertEqual(p.getAttribute("k"), 42)
self.assertIsNone(p.getAttribute('does not exist'))
self.assertIsNone(p.getAttributeNode('does not exist'))
self.assertIsInstance(p.getAttributeNode('k'), stdom.Attr)
nnmap = p.getAttributes()
self.assertIsInstance(nnmap, stdom.NamedNodeMap)
class TestStructuredTextRow(unittest.TestCase):
def test_set_columns(self):
# What we set gets wrapped in another list
row = stng.StructuredTextRow((), {})
row.setColumns(self)
self.assertEqual([self], row.getColumns())
class TestStructuredTextMarkup(unittest.TestCase):
def test_repr(self):
m = stng.StructuredTextMarkup('value')
self.assertEqual("StructuredTextMarkup('value')", repr(m))
class TestStructuredTextTable(unittest.TestCase):
def test_get_columns(self):
row = stng.StructuredTextRow((), {})
table = stng.StructuredTextTable((), '', ())
table._rows = [row]
self.assertEqual([[[]]], table.getColumns())
table.setColumns(table.getColumns())
def test_suite():
suite = unittest.defaultTestLoader.loadTestsFromName(__name__)
suite.addTest(doctest.DocTestSuite(
'zope.structuredtext',
optionflags=doctest.ELLIPSIS,
))
return suite
|
py | b406ee40fd5562555ad80772c5f02f147cd51d2e | import unittest
import sys
import os
sys.path.append(os.getcwd().replace('\\','/') + '/../')
from googleStorage import GoogleStorage
#These tests assume an empty bucket
class googleStorageTest(unittest.TestCase):
@classmethod
def setUp(self):
self.googleStorage = GoogleStorage('nothing')
def test_shouldListAllFiles(self):
results = list(self.googleStorage.listFiles(''))
self.assertEqual(len(results), 0)
for i in range(3):
self.googleStorage.addFile(os.getcwd()+'/testfiles/test1.txt', 'listTest'+str(i)+'.txt')
results = list(self.googleStorage.listFiles(''))
self.assertEqual(len(results), 3)
for i in range(3):
self.googleStorage.deleteFile('listTest'+str(i)+'.txt')
def test_shouldAddFile(self):
self.googleStorage.addFile(os.getcwd()+'/testfiles/test1.txt', 'addTest.txt')
self.assertTrue(self.googleStorage.isFile('addTest.txt'))
self.googleStorage.deleteFile('addTest.txt')
def test_shouldDeleteFile(self):
self.googleStorage.addFile(os.getcwd()+'/testfiles/test1.txt', 'deleteTest.txt')
self.assertTrue(self.googleStorage.isFile('deleteTest.txt'))
self.googleStorage.deleteFile('deleteTest.txt')
self.assertFalse(self.googleStorage.isFile('deleteTest.txt'))
def test_shouldGetFile(self):
self.googleStorage.addFile(os.getcwd()+'/testfiles/test1.txt', 'downloadTest.txt')
self.googleStorage.getFile('downloadTest.txt', os.getcwd()+'/downloadedFile.txt')
self.assertTrue(os.path.isfile(os.getcwd()+'/downloadedFile.txt'))
os.remove(os.getcwd()+'/downloadedFile.txt')
self.googleStorage.deleteFile('downloadTest.txt')
def test_shouldGetFileUrl(self):
self.googleStorage.addFile(os.getcwd()+'/testfiles/test1.txt', 'fileUrlTest.txt')
result = self.googleStorage.getFileUrl('fileUrlTest.txt')
self.assertEqual(result, 'https://www.googleapis.com/storage/'\
+ self.googleStorage.bucket.id + '/fileUrlTest.txt' )
self.googleStorage.deleteFile('fileUrlTest.txt')
def test_shouldGetNonExistentFileUrl(self):
self.assertRaises(OSError, self.googleStorage.getFileUrl, 'nonExistentFile.txt')
def test_isFileTrue(self):
self.googleStorage.addFile(os.getcwd()+'/testfiles/test1.txt', 'addTest.txt')
self.assertTrue(self.googleStorage.isFile('addTest.txt'))
self.googleStorage.deleteFile('addTest.txt')
def test_isFileFalse(self):
self.assertFalse(self.googleStorage.isFile('nonExistentFile.txt'))
def test_shouldUpdatefile(self):
self.googleStorage.addFile(os.getcwd()+'/testfiles/test1.txt', 'fileToUpdate.txt')
        self.googleStorage.getFile('fileToUpdate.txt', os.getcwd()+'/fileUpdating.txt')
        fd = os.open(os.getcwd()+'/fileUpdating.txt', os.O_RDWR)
        self.assertEqual(os.read(fd, 13), 'Test file One')
        os.close(fd)
        self.googleStorage.addFile(os.getcwd()+'/testfiles/test3.txt', 'fileToUpdate.txt')
        self.googleStorage.getFile('fileToUpdate.txt', os.getcwd()+'/fileUpdating.txt')
        fd = os.open(os.getcwd()+'/fileUpdating.txt', os.O_RDWR)
        self.assertEqual(os.read(fd, 15), 'Third test file')
        os.close(fd)
        os.remove(os.getcwd()+'/fileUpdating.txt')
self.googleStorage.deleteFile('fileToUpdate.txt')
if __name__ == '__main__':
unittest.main() |
py | b406eea38feae16a6ff605210c8206e9a1ccf333 | import inspect
class InfoDictMetaClass(type):
def __new__(cls, name, bases, attrs):
key='__default_dict__'
dic={}
for k,v in attrs.items():
if not inspect.isfunction(v):
if not k.startswith('__'):
dic[k]=v
attrs[key]=dic
return type.__new__(cls,name,bases,attrs)
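# The metaclass above gathers every class attribute that is neither a function
# nor a dunder into __default_dict__. InfoDict below merges that mapping into
# each instance at construction time; note that, as written, the class-level
# defaults overwrite any colliding keys passed to the constructor.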
class InfoDict(dict,metaclass=InfoDictMetaClass):
def __init__(self,*args,**kwargs):
dic=dict(*args,**kwargs)
dic.update(**self.__default_dict__)
super().__init__(**dic)
def demo():
class Info(InfoDict):
name='张三'
age=21
gender='男'
a=Info()
print(a)
if __name__ == '__main__':
demo() |
py | b406ef8dced14dbaf9b0ac20c11c14e7449fa903 | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the tests for the OEF protocol."""
from unittest import mock
from aea.helpers.search.models import (
Attribute,
DataModel,
Description,
)
from packages.fetchai.protocols.oef.message import OEFMessage
from packages.fetchai.protocols.oef.serialization import OEFSerializer
def test_oef_type_string_value():
"""Test the string value of the type."""
assert (
str(OEFMessage.Type.REGISTER_SERVICE) == "register_service"
), "The string representation must be register_service"
assert (
str(OEFMessage.Type.UNREGISTER_SERVICE) == "unregister_service"
), "The string representation must be unregister_service"
assert (
str(OEFMessage.Type.REGISTER_AGENT) == "register_agent"
), "The string representation must be register_agent"
assert (
str(OEFMessage.Type.UNREGISTER_AGENT) == "unregister_agent"
), "The string representation must be unregister_agent"
assert (
str(OEFMessage.Type.SEARCH_SERVICES) == "search_services"
), "The string representation must be search_services"
assert (
str(OEFMessage.Type.SEARCH_AGENTS) == "search_agents"
), "The string representation must be search_agents"
assert (
str(OEFMessage.Type.OEF_ERROR) == "oef_error"
), "The string representation must be oef_error"
assert (
str(OEFMessage.Type.DIALOGUE_ERROR) == "dialogue_error"
), "The string representation must be dialogue_error"
assert (
str(OEFMessage.Type.SEARCH_RESULT) == "search_result"
), "The string representation must be search_result"
def test_oef_error_operation():
"""Test the string value of the error operation."""
assert (
str(OEFMessage.OEFErrorOperation.REGISTER_SERVICE) == "0"
), "The string representation must be 0"
assert (
str(OEFMessage.OEFErrorOperation.UNREGISTER_SERVICE) == "1"
), "The string representation must be 1"
assert (
str(OEFMessage.OEFErrorOperation.SEARCH_SERVICES) == "2"
), "The string representation must be 2"
assert (
str(OEFMessage.OEFErrorOperation.SEARCH_SERVICES_WIDE) == "3"
), "The string representation must be 3"
assert (
str(OEFMessage.OEFErrorOperation.SEARCH_AGENTS) == "4"
), "The string representation must be 4"
assert (
str(OEFMessage.OEFErrorOperation.SEND_MESSAGE) == "5"
), "The string representation must be 5"
assert (
str(OEFMessage.OEFErrorOperation.REGISTER_AGENT) == "6"
), "The string representation must be 6"
assert (
str(OEFMessage.OEFErrorOperation.UNREGISTER_AGENT) == "7"
), "The string representation must be 7"
assert (
str(OEFMessage.OEFErrorOperation.OTHER) == "10000"
), "The string representation must be 10000"
def test_oef_message_consistency():
"""Tests the consistency of an OEFMessage."""
attribute_foo = Attribute("foo", int, True, "a foo attribute.")
attribute_bar = Attribute("bar", str, True, "a bar attribute.")
data_model_foobar = DataModel(
"foobar", [attribute_foo, attribute_bar], "A foobar data model."
)
description_foobar = Description(
{"foo": 1, "bar": "baz"}, data_model=data_model_foobar
)
msg = OEFMessage(
type=OEFMessage.Type.REGISTER_AGENT,
id=0,
agent_description=description_foobar,
agent_id="address",
)
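    # Patching Type.__eq__ to always return False should make the enum
    # comparisons inside _is_consistent() fail, so this otherwise valid
    # message is expected to be reported as inconsistent.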
with mock.patch.object(OEFMessage.Type, "__eq__", return_value=False):
assert not msg._is_consistent()
def test_oef_message_oef_error():
"""Tests the OEF_ERROR type of message."""
msg = OEFMessage(
type=OEFMessage.Type.OEF_ERROR,
id=0,
operation=OEFMessage.OEFErrorOperation.SEARCH_AGENTS,
)
assert OEFMessage(
type=OEFMessage.Type.OEF_ERROR,
id=0,
operation=OEFMessage.OEFErrorOperation.SEARCH_AGENTS,
), "Expects an oef message Error!"
msg_bytes = OEFSerializer().encode(msg)
    assert len(msg_bytes) > 0, "Expects the encoded message not to be empty"
deserialized_msg = OEFSerializer().decode(msg_bytes)
    assert msg == deserialized_msg, "Expected the deserialized_msg to be equal to msg"
def test_oef_message_dialoge_error():
"""Tests the OEFMEssage of type DialogueError."""
assert OEFMessage(
type=OEFMessage.Type.DIALOGUE_ERROR, id=0, dialogue_id=1, origin="myKey"
), "Could not create the message of type DialogueError"
|
py | b406f10adfdbbfc2cef0b86f9824da9553f15a1a | #!/usr/bin/env python
'''
Project: Geothon (https://github.com/MBoustani/Geothon)
File: Vector/netcdf_to_shp.py
Description: This code converts a netCDF file to a Shapefile.
Author: Maziyar Boustani (github.com/MBoustani)
'''
import os
from netCDF4 import Dataset
try:
import ogr
except ImportError:
from osgeo import ogr
try:
import osr
except ImportError:
from osgeo import osr
#an example netCDF file
nc_file = "../static_files/netcdf/airs_h2o_128x256_miroc5_sep04.nc"
#open the netCDF file
nc_dataset = Dataset(nc_file, 'r')
#netCDF variables
latitude = 'lat'
longitude = 'lon'
time = 'time'
value = 'H2OMMRLevStd_average'
#get number of time (time dimension)
num_time = len(nc_dataset.dimensions[time])
#get netCDF variable objects
latitudes = nc_dataset.variables[latitude]
longitudes = nc_dataset.variables[longitude]
values = nc_dataset.variables[value]
#get netCDF variable values
lats = latitudes[:]
lons = longitudes[:]
vals = values[:, :, :, :]
#make a list of latitudes and longitudes
latitudes = [int(i) for i in lats]
longitudes = [int(i) for i in lons]
#define multipoint geometry (datapoints)
multipoint = ogr.Geometry(ogr.wkbMultiPoint)
#an output shapefile name
shapefile = 'multipoints.shp'
#an output shapefile layer
layer_name = 'multipoint_layer'
#create ESRI Shapefile driver
driver = ogr.GetDriverByName('ESRI Shapefile')
#create shapefile data_source(file)
if os.path.exists(shapefile):
driver.DeleteDataSource(shapefile)
data_source = driver.CreateDataSource(shapefile)
#create spatial reference
srs = osr.SpatialReference()
#in this case wgs84
srs.ImportFromEPSG(4326)
#create a shapefile layer
layer = data_source.CreateLayer(layer_name, srs, ogr.wkbPoint)
#make all columns(fields) in layer
for time in range(num_time):
field_name = ogr.FieldDefn("time_{0}".format(time), ogr.OFTString)
field_name.SetWidth(50)
layer.CreateField(field_name)
for lat in range(len(latitudes)):
for lon in range(len(longitudes)):
#define a point geometry
point = ogr.Geometry(ogr.wkbPoint)
#add point to the geometry
point.AddPoint(longitudes[lon], latitudes[lat])
#create a feature
feature = ogr.Feature(layer.GetLayerDefn())
#set point geometry to feature
feature.SetGeometry(point)
for time in range(num_time):
#fill the attribute table with netCDF values for each time
#putting '0' for 'alt' variable to pick first alt
feature.SetField("time_{0}".format(time), str(vals[lon, lat, 0, time]))
#create feature in layer
layer.CreateFeature(feature)
#destroy feature
feature.Destroy()
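#note: depending on the GDAL/OGR version, the shapefile may only be fully
#flushed to disk once data_source is released (e.g. via data_source.Destroy()
#or letting it go out of scope), so an explicit release here is a common
#follow-up step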
|
py | b406f110196efeed53115bbc6609554daea1b31e | from django.urls import path
from . import views
app_name = 'feedbacks'
urlpatterns = [
path('', views.index, name='index'),
path('submitted/', views.feedback_submitted, name='feedback_submitted'),
path('contact/', views.contact, name='contact'),
]
|
py | b406f132a42057d78ece3f552bd2193669b1dbc6 | import sys
import os
#pylint: disable=E0602
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
class PushButtonDemo(QDialog):
def __init__(self):
super().__init__()
vlayout = QVBoxLayout()
self.btn1 = QPushButton("Button 1")
self.btn1.setCheckable(True)
self.btn1.toggle()
self.btn1.clicked.connect(lambda: self.whichbtn(self.btn1))
self.btn1.clicked.connect(lambda: self.btnState(self.btn1))
vlayout.addWidget(self.btn1)
self.btn2 = QPushButton("image")
dirName, fileName = os.path.split(os.path.abspath(__file__))
relativePixmap = "../images/python.png"
self.btn2.setIcon(QIcon(QPixmap(os.path.join(dirName, relativePixmap))))
self.btn2.clicked.connect(lambda: self.whichbtn(self.btn2))
self.btn2.clicked.connect(lambda: self.btnState(self.btn2))
vlayout.addWidget(self.btn2)
self.btn3 = QPushButton("Disabled")
self.btn3.setEnabled(False)
vlayout.addWidget(self.btn3)
self.btn4 = QPushButton("&Download")
self.btn4.setDefault(True)
self.btn4.clicked.connect(lambda: self.whichbtn(self.btn4))
self.btn4.clicked.connect(lambda: self.btnState(self.btn4))
vlayout.addWidget(self.btn4)
self.setLayout(vlayout)
self.setWindowTitle("PushButton Demo")
def btnState(self, btn):
if btn.isChecked():
print("button {0} pressed".format(btn.text()))
else:
print("button {0} released".format(btn.text()))
def whichbtn(self, btn):
print("cliced button is " + btn.text())
if __name__ == '__main__':
app = QApplication(sys.argv)
demo = PushButtonDemo()
demo.show()
sys.exit(app.exec()) |
py | b406f1ee63afddb885114a592e6e983dd1f4be73 | # -*- coding: utf-8 -*-
from .basededatos import BaseDeDatos
class PerInstitucionMedica(BaseDeDatos):
def obtener_uno(self, id_):
"""
        Gets and returns an object for the given id.
:param id_: int >= 0
:return: object
"""
if id_ >= 0:
id_ = (id_,)
sql = 'SELECT * FROM instituciones_medicas WHERE id=?'
return self.obtener(sql, id_)
else:
            print 'The parameter must be greater than or equal to 0.'
return None
def obtener_listado(self, **kwargs):
"""
        Gets and returns a list of objects according to the given filters.
:param kwargs: dict
:return: dict
"""
if 'pagina' in kwargs:
            total_filas = self.contar_filas('instituciones_medicas')
            offset = kwargs['pagina'] * 10  # results per page
dataset = None
            if offset < total_filas: # TODO: review pagination here
                sql = 'SELECT * FROM instituciones_medicas WHERE baja=0 ' \
                      'LIMIT 10 OFFSET ?'
data = (offset,)
dataset = self.obtener(sql, data, True)
else:
sql = 'SELECT * FROM instituciones_medicas WHERE baja=0'
dataset = self.obtener(sql, lista=True)
return dataset
else:
return []
def agregar_objeto(self, obj):
"""
        Prepares an object's data to be inserted into the database.
:param obj: object
:return: object
"""
sql = 'INSERT INTO instituciones_medicas VALUES (null, ?, ?)'
id_ = self.salvar(sql, (obj.nombre, obj.baja,))
obj.id_ = id_
return obj
def actualizar_objeto(self, obj):
"""
        Prepares an object's data to update its corresponding record in the
        database.
:param obj: object
:return: bool
"""
sql = 'UPDATE instituciones_medicas SET nombre = ?, baja = ? WHERE \
id = ?'
        return self.actualizar(sql, (obj.nombre, obj.baja, obj.id_))
def baja_objeto(self, obj):
"""
        Uses the object's id to apply a logical (soft) delete on the
        corresponding record in the database.
:param obj: object
:return: bool
"""
sql = 'UPDATE instituciones_medicas SET baja = ? WHERE id = ?'
return self.actualizar(sql, (1, obj.id_))
|
py | b406f2c45379cdd0156efb2bcbe4d561eabeab71 | import manta_client.base as manta_base
Settings = manta_base.Settings
Config = manta_base.Config
from manta_client import env, sdk, util
__version__ = "0.1.0.dev1"
init = sdk.init
setup = sdk.setup
login = sdk.login
# global vars
experiment = None
config = None
meta = None
# global functions
log = None
save = None
alarm = None
use_artifact = None
log_artifact = None
__all__ = [
"__version__",
"init",
"setup",
"login",
"Settings",
"Config",
"experiment",
"config",
"meta",
"log",
"save",
"alarm",
"use_artifact",
"log_artifact",
]
|
py | b406f2e5213e8560e30c17ded4c50f25726d8d49 | default_config_url = "https://raw.githubusercontent.com/PeachyPrinter/peachyinstaller/master/config.json"
supported_configuration_versions = [0, ] |
py | b406f387ffed6baf54bd06fa7f4b2e8e89b92583 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VirtualMachineCaptureParameters(Model):
"""Capture Virtual Machine parameters.
:param vhd_prefix: The captured virtual hard disk's name prefix.
:type vhd_prefix: str
:param destination_container_name: The destination container name.
:type destination_container_name: str
:param overwrite_vhds: Specifies whether to overwrite the destination
virtual hard disk, in case of conflict.
:type overwrite_vhds: bool
"""
_validation = {
'vhd_prefix': {'required': True},
'destination_container_name': {'required': True},
'overwrite_vhds': {'required': True},
}
_attribute_map = {
'vhd_prefix': {'key': 'vhdPrefix', 'type': 'str'},
'destination_container_name': {'key': 'destinationContainerName', 'type': 'str'},
'overwrite_vhds': {'key': 'overwriteVhds', 'type': 'bool'},
}
def __init__(self, vhd_prefix, destination_container_name, overwrite_vhds):
super(VirtualMachineCaptureParameters, self).__init__()
self.vhd_prefix = vhd_prefix
self.destination_container_name = destination_container_name
self.overwrite_vhds = overwrite_vhds
|
py | b406f38e9b6ce559963a55e59841867f73a7033c | #!/usr/bin/env python3
import pandas as pd
import phik
# Import dataframes
df_merged = pd.read_pickle("data/merged.pkl")
df_media = pd.read_pickle("data/media.pkl")
df_civsoc = pd.read_pickle("data/civsoc.pkl")
def save_corr_matrix(df, name):
corr = df.phik_matrix()
corr.to_pickle(f"./data/corr_sig/{name}.pkl")
corr.to_excel(f"./data/corr_sig/{name}.xlsx")
    corr.to_csv(f"./data/corr_sig/{name}.csv")
    return corr
def save_sig_matrix(df, name):
sig = df.significance_matrix(significance_method="asymptotic")
sig.to_pickle(f"./data/corr_sig/{name}.pkl")
sig.to_excel(f"./data/corr_sig/{name}.xlsx")
    sig.to_csv(f"./data/corr_sig/{name}.csv")
    return sig
df_corr_merged = save_corr_matrix(df_merged, "merged_corr")
df_corr_media = save_corr_matrix(df_media, "media_corr")
df_corr_civsoc = save_corr_matrix(df_civsoc, "civsoc_corr")
df_sig_merged = save_sig_matrix(df_merged, "merged_sig")
df_sig_media = save_sig_matrix(df_media, "media_sig")
df_sig_civsoc = save_sig_matrix(df_civsoc, "civsoc_sig")
|
py | b406f4b8952c67741291a14cfaa080c0bc5642b7 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Building()
result.template = "object/building/poi/shared_naboo_abbots_camp_large1.iff"
result.attribute_template_id = -1
result.stfName("poi_n","base_poi_building")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result |
py | b406f4cd86ae25ae3d409f71f35f997b232445b4 | """
Distributed under the MIT License. See LICENSE.txt for more info.
"""
from django.db import migrations
def insert(apps, schema_editor):
SearchPage = apps.get_model('mwasurveyweb', 'SearchPage')
initial_search_page_info = [
('observation', 'Observation'),
('processing', 'Processing'),
]
display_order = 0
for search_page in initial_search_page_info:
SearchPage.objects.create(
name=search_page[0],
display_name=search_page[1],
display_order=display_order,
)
# update display order
display_order += 1
def revert(apps, schema_editor):
SearchPage = apps.get_model('mwasurveyweb', 'SearchPage')
SearchPage.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
('mwasurveyweb', '0001_initial'),
]
operations = [
migrations.RunPython(code=insert, reverse_code=revert)
]
|
py | b406f504566f2fd42be55b4b1ad6abd1d1f3afbe | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['SnapshotPolicy']
class SnapshotPolicy(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
daily_schedule: Optional[pulumi.Input[pulumi.InputType['DailyScheduleArgs']]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
hourly_schedule: Optional[pulumi.Input[pulumi.InputType['HourlyScheduleArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
monthly_schedule: Optional[pulumi.Input[pulumi.InputType['MonthlyScheduleArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
snapshot_policy_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
weekly_schedule: Optional[pulumi.Input[pulumi.InputType['WeeklyScheduleArgs']]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Snapshot policy information
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_name: The name of the NetApp account
:param pulumi.Input[pulumi.InputType['DailyScheduleArgs']] daily_schedule: Schedule for daily snapshots
:param pulumi.Input[bool] enabled: The property to decide policy is enabled or not
:param pulumi.Input[pulumi.InputType['HourlyScheduleArgs']] hourly_schedule: Schedule for hourly snapshots
:param pulumi.Input[str] location: Resource location
:param pulumi.Input[pulumi.InputType['MonthlyScheduleArgs']] monthly_schedule: Schedule for monthly snapshots
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] snapshot_policy_name: The name of the snapshot policy target
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags
:param pulumi.Input[pulumi.InputType['WeeklyScheduleArgs']] weekly_schedule: Schedule for weekly snapshots
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if account_name is None and not opts.urn:
raise TypeError("Missing required property 'account_name'")
__props__['account_name'] = account_name
__props__['daily_schedule'] = daily_schedule
__props__['enabled'] = enabled
__props__['hourly_schedule'] = hourly_schedule
__props__['location'] = location
__props__['monthly_schedule'] = monthly_schedule
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['snapshot_policy_name'] = snapshot_policy_name
__props__['tags'] = tags
__props__['weekly_schedule'] = weekly_schedule
__props__['name'] = None
__props__['provisioning_state'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:netapp/v20200801:SnapshotPolicy"), pulumi.Alias(type_="azure-native:netapp:SnapshotPolicy"), pulumi.Alias(type_="azure-nextgen:netapp:SnapshotPolicy"), pulumi.Alias(type_="azure-native:netapp/latest:SnapshotPolicy"), pulumi.Alias(type_="azure-nextgen:netapp/latest:SnapshotPolicy"), pulumi.Alias(type_="azure-native:netapp/v20200501:SnapshotPolicy"), pulumi.Alias(type_="azure-nextgen:netapp/v20200501:SnapshotPolicy"), pulumi.Alias(type_="azure-native:netapp/v20200601:SnapshotPolicy"), pulumi.Alias(type_="azure-nextgen:netapp/v20200601:SnapshotPolicy"), pulumi.Alias(type_="azure-native:netapp/v20200701:SnapshotPolicy"), pulumi.Alias(type_="azure-nextgen:netapp/v20200701:SnapshotPolicy"), pulumi.Alias(type_="azure-native:netapp/v20200901:SnapshotPolicy"), pulumi.Alias(type_="azure-nextgen:netapp/v20200901:SnapshotPolicy"), pulumi.Alias(type_="azure-native:netapp/v20201101:SnapshotPolicy"), pulumi.Alias(type_="azure-nextgen:netapp/v20201101:SnapshotPolicy")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(SnapshotPolicy, __self__).__init__(
'azure-native:netapp/v20200801:SnapshotPolicy',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'SnapshotPolicy':
"""
Get an existing SnapshotPolicy resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["daily_schedule"] = None
__props__["enabled"] = None
__props__["hourly_schedule"] = None
__props__["location"] = None
__props__["monthly_schedule"] = None
__props__["name"] = None
__props__["provisioning_state"] = None
__props__["tags"] = None
__props__["type"] = None
__props__["weekly_schedule"] = None
return SnapshotPolicy(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="dailySchedule")
def daily_schedule(self) -> pulumi.Output[Optional['outputs.DailyScheduleResponse']]:
"""
Schedule for daily snapshots
"""
return pulumi.get(self, "daily_schedule")
@property
@pulumi.getter
def enabled(self) -> pulumi.Output[Optional[bool]]:
"""
The property to decide policy is enabled or not
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter(name="hourlySchedule")
def hourly_schedule(self) -> pulumi.Output[Optional['outputs.HourlyScheduleResponse']]:
"""
Schedule for hourly snapshots
"""
return pulumi.get(self, "hourly_schedule")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="monthlySchedule")
def monthly_schedule(self) -> pulumi.Output[Optional['outputs.MonthlyScheduleResponse']]:
"""
Schedule for monthly snapshots
"""
return pulumi.get(self, "monthly_schedule")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Snapshot policy name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
Azure lifecycle management
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="weeklySchedule")
def weekly_schedule(self) -> pulumi.Output[Optional['outputs.WeeklyScheduleResponse']]:
"""
Schedule for weekly snapshots
"""
return pulumi.get(self, "weekly_schedule")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
py | b406f520c5f6fa67ffb38803ddc3a9b7bc92a5ba | # -*- coding: utf-8 -*-
from datetime import datetime
def u(string):
return unicode(string,
encoding='UTF-8',
errors="replace").encode('UTF-8',
errors="replace")
header_start_tpl = u("""
///
/// Generated by iod-symbolizer at %s
/// iod-symbolizer by Alexander Bohn -- http://github.com/fish2000/iod-symbolizer
/// IOD library by Matthieu Garrigues -- https://github.com/matt-42/iod
///
#ifndef IOD_SYMBOLYZER_OUTPUT_HEADER_TEMPLATE_
#define IOD_SYMBOLYZER_OUTPUT_HEADER_TEMPLATE_
#include <iod/symbol.hh>
""")
header_end_tpl = u("""
#endif /// IOD_SYMBOLYZER_OUTPUT_HEADER_TEMPLATE_
""")
header_num_symbol_tpl = u("""
#ifndef IOD_SYMBOL_%(symbol)s
#define IOD_SYMBOL_%(symbol)s
iod_define_number_symbol(%(symbol)s)
#endif
""")
header_std_symbol_tpl = u("""
#ifndef IOD_SYMBOL_%(symbol)s
#define IOD_SYMBOL_%(symbol)s
iod_define_symbol(%(symbol)s)
#endif
""")
def generate_header(symbol_set, when=None):
if when is None:
when = datetime.now().isoformat()
header_out = header_start_tpl % when
for symbol in sorted(symbol_set):
try:
int(symbol)
except ValueError:
# it's not a number
header_out += header_std_symbol_tpl % dict(symbol=u(symbol))
else:
# ok, it's a number
header_out += header_num_symbol_tpl % dict(symbol=u(symbol))
header_out += header_end_tpl
return header_out |
py | b406f557bca237d17641cc36d6f71c9a154624b7 | """Named binary tags."""
from typing import Any, Union
from mcipc.rcon.enumerations import Item, TargetSelector
from mcipc.rcon.functions import stringify
__all__ = ['NBT']
def tags_to_str(tags: dict[str, Any]) -> str:
"""Returns the tags as a string."""
return ', '.join(f'{key}={stringify(val)}' for key, val in tags.items())
class NBT:
"""Represents a named binary tag."""
__slots__ = ('target', 'tags')
def __init__(self, target: Union[Item, TargetSelector, str], **tags):
"""Stores the object name and optional tags."""
self.target = target
self.tags = tags
def __repr__(self):
"""Returns a str representation for eval()."""
return f'{type(self).__name__}({self.target!r}, {self.tags!r})'
def __str__(self):
"""Returns a str representation for RCON."""
return f'{stringify(self.target)}[{tags_to_str(self.tags)}]'
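# Usage sketch (illustrative only, assuming stringify() passes plain strings
# and ints through unchanged): str(NBT('@e', type='creeper', limit=1)) would
# yield '@e[type=creeper, limit=1]', the selector form expected by RCON
# commands.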
|
py | b406f5713a83bded6e0542db8f98c463e916a988 | # 13TeV workflow added my Ian M. Nugent ([email protected])
#
# import the definition of the steps and input files:
from Configuration.PyReleaseValidation.relval_steps import *
# here only define the workflows as a combination of the steps defined above:
workflows = Matrix()
# each workflow defines a name and a list of steps to be done.
# if no explicit name/label given for the workflow (first arg),
# the name of step1 will be used
# 'generator' the base set of relval for generators
# 'extendedgen' extends the base set to a more thorough assessment of GEN
# the two sets are exclusive
# LO Generators
workflows[507]=['',['SoftQCDDiffractive_13TeV_pythia8','HARVESTGEN']]
workflows[508]=['',['SoftQCDnonDiffractive_13TeV_pythia8','HARVESTGEN']]
workflows[509]=['',['SoftQCDelastic_13TeV_pythia8','HARVESTGEN']]
workflows[510]=['',['SoftQCDinelastic_13TeV_pythia8','HARVESTGEN']]
# Matrix Element Generations (sherpa)
#workflows[533]=['',['sherpa_ZtoEE_0j_BlackHat_13TeV_MASTER','HARVESTGEN']]
workflows[534]=['',['sherpa_ZtoEE_0j_OpenLoops_13TeV_MASTER','HARVESTGEN']]
workflows[535] = ['', ['TTbar_13TeV_Pow_herwig7','HARVESTGEN']]
# Hadronization (LHE Generation + Hadronization)
workflows[555]=['DYTollJets_NLO_Mad_13TeV_py8',['DYToll012Jets_5f_NLO_FXFX_Madgraph_LHE_13TeV','Hadronizer_TuneCP5_13TeV_aMCatNLO_FXFX_5f_max2j_max0p_LHE_pythia8','HARVESTGEN2']] # ALWAYS RUN
workflows[513]=['WTolNuJets_LO_Mad_13TeV_py8',['WTolNu01234Jets_5f_LO_MLM_Madgraph_LHE_13TeV','Hadronizer_TuneCP5_13TeV_MLM_5f_max4j_LHE_pythia8','HARVESTGEN2']] # ALWAYS RUN
workflows[551]=['TTbar012Jets_NLO_Mad_13TeV_py8',['TTbar012Jets_5f_NLO_FXFX_Madgraph_LHE_13TeV','Hadronizer_TuneCP5_13TeV_aMCatNLO_FXFX_5f_max2j_max1p_LHE_pythia8','HARVESTGEN2']] # ALWAYS RUN
workflows[556]=['TTbar_NLO_Pow_13TeV_py8',['TTbar_Pow_LHE_13TeV','Hadronizer_TuneCP5_13TeV_powhegEmissionVeto2p_pythia8','HARVESTGEN2']] # ALWAYS RUN
workflows[514]=['GGToHgg_NLO_Pow_13TeV_py8',['GGToH_Pow_LHE_13TeV','Hadronizer_TuneCP5_13TeV_Hgg_powhegEmissionVeto_pythia8','HARVESTGEN2']] # ALWAYS RUN
workflows[552]=['VHToHtt_NLO_Pow_13TeV_py8',['VHToH_Pow_LHE_13TeV','Hadronizer_TuneCP5_13TeV_Htt_powhegEmissionVeto_pythia8','HARVESTGEN2']] # ALWAYS RUN
workflows[554]=['VBFToH4l_NLO_Pow_JHU_13TeV_py8',['VBFToH_Pow_JHU4l_LHE_13TeV','Hadronizer_TuneCP5_13TeV_powhegEmissionVeto_pythia8','HARVESTGEN2']] # ALWAYS RUN
workflows[515]=['DYTollJets_LO_Mad_13TeV_py8_taupinu',['DYToll01234Jets_5f_LO_MLM_Madgraph_LHE_13TeV','Hadronizer_TuneCP5_13TeV_MLM_5f_max4j_LHE_pythia8_taupinu','HARVESTGEN2']]
workflows[516]=['WTolNuJets_LO_Mad_13TeV_py8_taupinu',['WTolNu01234Jets_5f_LO_MLM_Madgraph_LHE_13TeV','Hadronizer_TuneCP5_13TeV_MLM_5f_max4j_LHE_pythia8_taupinu','HARVESTGEN2']]
workflows[517]=['VHToHtt_NLO_Pow_13TeV_py8_taupinu',['VHToH_Pow_LHE_13TeV','Hadronizer_TuneCP5_13TeV_Httpinu_powhegEmissionVeto_pythia8','HARVESTGEN2']]
workflows[518]=['DYTollJets_LO_Mad_13TeV_py8_taurhonu',['DYToll01234Jets_5f_LO_MLM_Madgraph_LHE_13TeV','Hadronizer_TuneCP5_13TeV_MLM_5f_max4j_LHE_pythia8_taurhonu','HARVESTGEN2']]
workflows[519]=['WTolNuJets_LO_Mad_13TeV_py8_taurhonu',['WTolNu01234Jets_5f_LO_MLM_Madgraph_LHE_13TeV','Hadronizer_TuneCP5_13TeV_MLM_5f_max4j_LHE_pythia8_taurhonu','HARVESTGEN2']]
workflows[520]=['VHToHtt_NLO_Pow_13TeV_py8_taurhonu',['VHToH_Pow_LHE_13TeV','Hadronizer_TuneCP5_13TeV_Httrhonu_powhegEmissionVeto_pythia8','HARVESTGEN2']]
# External Decays
workflows[521]=['WTolNuJets_LO_Mad_13TeV_py8_Ta',['WTolNu01234Jets_5f_LO_MLM_Madgraph_LHE_13TeV','Hadronizer_TuneCP5_13TeV_MLM_5f_max4j_LHE_pythia8_Tauola','HARVESTGEN2']] # ALWAYS RUN
workflows[522]=['DYTollJets_LO_Mad_13TeV_py8_Ta',['DYToll01234Jets_5f_LO_MLM_Madgraph_LHE_13TeV','Hadronizer_TuneCP5_13TeV_MLM_5f_max4j_LHE_pythia8_Tauola','HARVESTGEN2']] # ALWAYS RUN
workflows[523]=['TTbar012Jets_NLO_Mad_13TeV_py8_Evt',['TTbar012Jets_5f_NLO_FXFX_Madgraph_LHE_13TeV','Hadronizer_TuneCP5_13TeV_aMCatNLO_FXFX_5f_max2j_max1p_LHE_pythia8_evtgen','HARVESTGEN2']] # ALWAYS RUN
workflows[524]=['VHToHtt_NLO_Pow_13TeV_py8_Ta',['VHToH_Pow_LHE_13TeV','Hadronizer_TuneCP5_13TeV_Htt_powhegEmissionVeto_pythia8_tauola','HARVESTGEN2']] # ALWAYS RUN
workflows[527]=['VHToHtt_NLO_Pow_13TeV_py8_Ta_taupinu',['VHToH_Pow_LHE_13TeV','Hadronizer_TuneCP5_13TeV_Httpinu_powhegEmissionVeto_pythia8_tauola','HARVESTGEN2']]
workflows[529]=['DYTollJets_LO_Mad_13TeV_py8_Ta_taurhonu',['DYToll01234Jets_5f_LO_MLM_Madgraph_LHE_13TeV','Hadronizer_TuneCP5_13TeV_MLM_5f_max4j_LHE_pythia8_Tauola_taurhonu','HARVESTGEN2']]
workflows[530]=['VHToHtt_NLO_Pow_13TeV_py8_Ta_taurhonu',['VHToH_Pow_LHE_13TeV','Hadronizer_TuneCP5_13TeV_Httrhonu_powhegEmissionVeto_pythia8_tauola','HARVESTGEN2']]
workflows[526]=['DYTollJets_LO_Mad_13TeV_py8_Ta_taupinu',['DYToll01234Jets_5f_LO_MLM_Madgraph_LHE_13TeV','Hadronizer_TuneCP5_13TeV_MLM_5f_max4j_LHE_pythia8_Tauola_taupinu','HARVESTGEN2']]
workflows[525]=['WTolNuJets_LO_Mad_13TeV_py8_Ta_taupinu',['WTolNu01234Jets_5f_LO_MLM_Madgraph_LHE_13TeV','Hadronizer_TuneCP5_13TeV_MLM_5f_max4j_LHE_pythia8_Tauola_taupinu','HARVESTGEN2']]
workflows[528]=['WTolNuJets_LO_Mad_13TeV_py8_Ta_taurhonu',['WTolNu01234Jets_5f_LO_MLM_Madgraph_LHE_13TeV','Hadronizer_TuneCP5_13TeV_MLM_5f_max4j_LHE_pythia8_Tauola_taurhonu','HARVESTGEN2']]
# Heavy Ion
#workflows[532]=['',['Hijing_PPb_MinimumBias','HARVESTGEN']]
# Miscellaneous
workflows[560]=['',['ZprimeToll_M3000_13TeV_pythia8','HARVESTGEN']]
workflows[561]=['',['WprimeTolNu_M3000_13TeV_pythia8','HARVESTGEN']]
workflows[562]=['BulkG_ZZ_2L2Q_M1200_narrow_13TeV_pythia8',['BulkG_M1200_narrow_2L2Q_LHE_13TeV','Hadronizer_TuneCUETP8M1_Mad_pythia8','HARVESTGEN2']]
|
py | b406f62ee149f1dd222bf4efa4319aab343a27c4 | import esphome.codegen as cg
import esphome.config_validation as cv
from esphome import automation
from esphome.automation import maybe_simple_id
from esphome.components import power_supply
from esphome.const import CONF_ID, CONF_INVERTED, CONF_LEVEL, CONF_MAX_POWER, \
CONF_MIN_POWER, CONF_POWER_SUPPLY
from esphome.core import CORE, coroutine
IS_PLATFORM_COMPONENT = True
BINARY_OUTPUT_SCHEMA = cv.Schema({
cv.Optional(CONF_POWER_SUPPLY): cv.use_id(power_supply.PowerSupply),
cv.Optional(CONF_INVERTED): cv.boolean,
})
FLOAT_OUTPUT_SCHEMA = BINARY_OUTPUT_SCHEMA.extend({
cv.Optional(CONF_MAX_POWER): cv.percentage,
cv.Optional(CONF_MIN_POWER): cv.percentage,
})
output_ns = cg.esphome_ns.namespace('output')
BinaryOutput = output_ns.class_('BinaryOutput')
BinaryOutputPtr = BinaryOutput.operator('ptr')
FloatOutput = output_ns.class_('FloatOutput', BinaryOutput)
FloatOutputPtr = FloatOutput.operator('ptr')
# Actions
TurnOffAction = output_ns.class_('TurnOffAction', automation.Action)
TurnOnAction = output_ns.class_('TurnOnAction', automation.Action)
SetLevelAction = output_ns.class_('SetLevelAction', automation.Action)
@coroutine
def setup_output_platform_(obj, config):
if CONF_INVERTED in config:
cg.add(obj.set_inverted(config[CONF_INVERTED]))
if CONF_POWER_SUPPLY in config:
power_supply_ = yield cg.get_variable(config[CONF_POWER_SUPPLY])
cg.add(obj.set_power_supply(power_supply_))
if CONF_MAX_POWER in config:
cg.add(obj.set_max_power(config[CONF_MAX_POWER]))
if CONF_MIN_POWER in config:
cg.add(obj.set_min_power(config[CONF_MIN_POWER]))
@coroutine
def register_output(var, config):
if not CORE.has_id(config[CONF_ID]):
var = cg.Pvariable(config[CONF_ID], var)
yield setup_output_platform_(var, config)
BINARY_OUTPUT_ACTION_SCHEMA = maybe_simple_id({
cv.Required(CONF_ID): cv.use_id(BinaryOutput),
})
@automation.register_action('output.turn_on', TurnOnAction, BINARY_OUTPUT_ACTION_SCHEMA)
def output_turn_on_to_code(config, action_id, template_arg, args):
paren = yield cg.get_variable(config[CONF_ID])
yield cg.new_Pvariable(action_id, template_arg, paren)
@automation.register_action('output.turn_off', TurnOffAction, BINARY_OUTPUT_ACTION_SCHEMA)
def output_turn_off_to_code(config, action_id, template_arg, args):
paren = yield cg.get_variable(config[CONF_ID])
yield cg.new_Pvariable(action_id, template_arg, paren)
@automation.register_action('output.set_level', SetLevelAction, cv.Schema({
cv.Required(CONF_ID): cv.use_id(FloatOutput),
cv.Required(CONF_LEVEL): cv.templatable(cv.percentage),
}))
def output_set_level_to_code(config, action_id, template_arg, args):
paren = yield cg.get_variable(config[CONF_ID])
var = cg.new_Pvariable(action_id, template_arg, paren)
template_ = yield cg.templatable(config[CONF_LEVEL], args, float)
cg.add(var.set_level(template_))
yield var
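# Usage sketch (assumption, not part of this file): the action registered
# above is what backs YAML automations along the lines of
#
#   then:
#     - output.set_level:
#         id: my_float_output
#         level: 50%
#
# where 'my_float_output' is a placeholder id and the accepted keys mirror
# the schema passed to register_action above.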
def to_code(config):
cg.add_global(output_ns.using)
|
py | b406f6724aa6d452528d4a946f1677ac6911a05f | """
The config module holds package-wide configurables and provides
a uniform API for working with them.
Overview
========
This module supports the following requirements:
- options are referenced using keys in dot.notation, e.g. "x.y.option - z".
- keys are case-insensitive.
- functions should accept partial/regex keys, when unambiguous.
- options can be registered by modules at import time.
- options can be registered at init-time (via core.config_init)
- options have a default value, and (optionally) a description and
validation function associated with them.
- options can be deprecated, in which case referencing them
should produce a warning.
- deprecated options can optionally be rerouted to a replacement
so that accessing a deprecated option reroutes to a differently
named option.
- options can be reset to their default value.
- all option can be reset to their default value at once.
- all options in a certain sub - namespace can be reset at once.
- the user can set / get / reset or ask for the description of an option.
- a developer can register and mark an option as deprecated.
- you can register a callback to be invoked when the option value
is set or reset. Changing the stored value is considered misuse, but
is not verboten.
Implementation
==============
- Data is stored using nested dictionaries, and should be accessed
through the provided API.
- "Registered options" and "Deprecated options" have metadata associcated
with them, which are stored in auxilary dictionaries keyed on the
fully-qualified key, e.g. "x.y.z.option".
- the config_init module is imported by the package's __init__.py file.
placing any register_option() calls there will ensure those options
are available as soon as pandas is loaded. If you use register_option
in a module, it will only be available after that module is imported,
which you should be aware of.
- `config_prefix` is a context_manager (for use with the `with` keyword)
which can save developers some typing, see the docstring.
"""
import re
from collections import namedtuple
from contextlib import contextmanager
import warnings
from pandas.compat import map, lmap, u
import pandas.compat as compat
DeprecatedOption = namedtuple('DeprecatedOption', 'key msg rkey removal_ver')
RegisteredOption = namedtuple('RegisteredOption',
'key defval doc validator cb')
_deprecated_options = {}  # holds deprecated option metadata
_registered_options = {}  # holds registered option metadata
_global_config = {} # holds the current values for registered options
_reserved_keys = ['all'] # keys which have a special meaning
class OptionError(AttributeError, KeyError):
"""Exception for pandas.options, backwards compatible with KeyError
checks
"""
#
# User API
def _get_single_key(pat, silent):
keys = _select_options(pat)
if len(keys) == 0:
if not silent:
_warn_if_deprecated(pat)
raise OptionError('No such keys(s): %r' % pat)
if len(keys) > 1:
raise OptionError('Pattern matched multiple keys')
key = keys[0]
if not silent:
_warn_if_deprecated(key)
key = _translate_key(key)
return key
def _get_option(pat, silent=False):
key = _get_single_key(pat, silent)
# walk the nested dict
root, k = _get_root(key)
return root[k]
def _set_option(*args, **kwargs):
    # must have at least 1 arg; deal with constraints later
nargs = len(args)
if not nargs or nargs % 2 != 0:
raise ValueError("Must provide an even number of non-keyword "
"arguments")
# default to false
silent = kwargs.pop('silent', False)
if kwargs:
raise TypeError('_set_option() got an unexpected keyword '
'argument "{0}"'.format(list(kwargs.keys())[0]))
for k, v in zip(args[::2], args[1::2]):
key = _get_single_key(k, silent)
o = _get_registered_option(key)
if o and o.validator:
o.validator(v)
# walk the nested dict
root, k = _get_root(key)
root[k] = v
if o.cb:
if silent:
with warnings.catch_warnings(record=True):
o.cb(key)
else:
o.cb(key)
def _describe_option(pat='', _print_desc=True):
keys = _select_options(pat)
if len(keys) == 0:
raise OptionError('No such keys(s)')
s = u('')
for k in keys: # filter by pat
s += _build_option_description(k)
if _print_desc:
print(s)
else:
return s
def _reset_option(pat, silent=False):
keys = _select_options(pat)
if len(keys) == 0:
raise OptionError('No such keys(s)')
if len(keys) > 1 and len(pat) < 4 and pat != 'all':
raise ValueError('You must specify at least 4 characters when '
'resetting multiple keys, use the special keyword '
'"all" to reset all the options to their default '
'value')
for k in keys:
_set_option(k, _registered_options[k].defval, silent=silent)
def get_default_val(pat):
key = _get_single_key(pat, silent=True)
return _get_registered_option(key).defval
class DictWrapper(object):
""" provide attribute-style access to a nested dict"""
def __init__(self, d, prefix=""):
object.__setattr__(self, "d", d)
object.__setattr__(self, "prefix", prefix)
def __setattr__(self, key, val):
prefix = object.__getattribute__(self, "prefix")
if prefix:
prefix += "."
prefix += key
# you can't set new keys
        # nor can you overwrite subtrees
if key in self.d and not isinstance(self.d[key], dict):
_set_option(prefix, val)
else:
raise OptionError("You can only set the value of existing options")
def __getattr__(self, key):
prefix = object.__getattribute__(self, "prefix")
if prefix:
prefix += "."
prefix += key
v = object.__getattribute__(self, "d")[key]
if isinstance(v, dict):
return DictWrapper(v, prefix)
else:
return _get_option(prefix)
def __dir__(self):
return list(self.d.keys())
# For user convenience, we'd like to have the available options described
# in the docstring. For dev convenience we'd like to generate the docstrings
# dynamically instead of maintaining them by hand. To this end, we use the
# class below which wraps functions inside a callable, and converts
# __doc__ into a property function. The docstrings below are templates
# using the py2.6+ advanced formatting syntax to plug in a concise list
# of options, and option descriptions.
class CallableDynamicDoc(object):
def __init__(self, func, doc_tmpl):
self.__doc_tmpl__ = doc_tmpl
self.__func__ = func
def __call__(self, *args, **kwds):
return self.__func__(*args, **kwds)
@property
def __doc__(self):
opts_desc = _describe_option('all', _print_desc=False)
opts_list = pp_options_list(list(_registered_options.keys()))
return self.__doc_tmpl__.format(opts_desc=opts_desc,
opts_list=opts_list)
_get_option_tmpl = """
get_option(pat)
Retrieves the value of the specified option.
Available options:
{opts_list}
Parameters
----------
pat : str
Regexp which should match a single option.
Note: partial matches are supported for convenience, but unless you use the
full option name (e.g. x.y.z.option_name), your code may break in future
versions if new options with similar names are introduced.
Returns
-------
result : the value of the option
Raises
------
OptionError : if no such option exists
Notes
-----
The available options with its descriptions:
{opts_desc}
"""
_set_option_tmpl = """
set_option(pat, value)
Sets the value of the specified option.
Available options:
{opts_list}
Parameters
----------
pat : str
Regexp which should match a single option.
Note: partial matches are supported for convenience, but unless you use the
full option name (e.g. x.y.z.option_name), your code may break in future
versions if new options with similar names are introduced.
value :
new value of option.
Returns
-------
None
Raises
------
OptionError if no such option exists
Notes
-----
The available options with its descriptions:
{opts_desc}
"""
_describe_option_tmpl = """
describe_option(pat, _print_desc=False)
Prints the description for one or more registered options.
Call with no arguments to get a listing of all registered options.
Available options:
{opts_list}
Parameters
----------
pat : str
Regexp pattern. All matching keys will have their description displayed.
_print_desc : bool, default True
If True (default) the description(s) will be printed to stdout.
Otherwise, the description(s) will be returned as a unicode string
(for testing).
Returns
-------
None by default, the description(s) as a unicode string if _print_desc
is False
Notes
-----
The available options with its descriptions:
{opts_desc}
"""
_reset_option_tmpl = """
reset_option(pat)
Reset one or more options to their default value.
Pass "all" as argument to reset all options.
Available options:
{opts_list}
Parameters
----------
pat : str/regex
If specified only options matching `prefix*` will be reset.
Note: partial matches are supported for convenience, but unless you
use the full option name (e.g. x.y.z.option_name), your code may break
in future versions if new options with similar names are introduced.
Returns
-------
None
Notes
-----
The available options with its descriptions:
{opts_desc}
"""
# bind the functions with their docstrings into a Callable
# and use that as the functions exposed in pd.api
get_option = CallableDynamicDoc(_get_option, _get_option_tmpl)
set_option = CallableDynamicDoc(_set_option, _set_option_tmpl)
reset_option = CallableDynamicDoc(_reset_option, _reset_option_tmpl)
describe_option = CallableDynamicDoc(_describe_option, _describe_option_tmpl)
options = DictWrapper(_global_config)
#
# Functions for use by pandas developers, in addition to User - api
class option_context(object):
"""
Context manager to temporarily set options in the `with` statement context.
You need to invoke as ``option_context(pat, val, [(pat, val), ...])``.
Examples
--------
>>> with option_context('display.max_rows', 10, 'display.max_columns', 5):
...
"""
def __init__(self, *args):
if not (len(args) % 2 == 0 and len(args) >= 2):
raise ValueError('Need to invoke as'
'option_context(pat, val, [(pat, val), ...)).')
self.ops = list(zip(args[::2], args[1::2]))
def __enter__(self):
undo = []
for pat, val in self.ops:
undo.append((pat, _get_option(pat, silent=True)))
self.undo = undo
for pat, val in self.ops:
_set_option(pat, val, silent=True)
def __exit__(self, *args):
if self.undo:
for pat, val in self.undo:
_set_option(pat, val, silent=True)
def register_option(key, defval, doc='', validator=None, cb=None):
"""Register an option in the package-wide pandas config object
Parameters
----------
key - a fully-qualified key, e.g. "x.y.option - z".
defval - the default value of the option
doc - a string description of the option
validator - a function of a single argument, should raise `ValueError` if
called with a value which is not a legal value for the option.
cb - a function of a single argument "key", which is called
immediately after an option value is set/reset. key is
the full name of the option.
Returns
-------
Nothing.
Raises
------
ValueError if `validator` is specified and `defval` is not a valid value.
"""
import tokenize
import keyword
key = key.lower()
if key in _registered_options:
raise OptionError("Option '%s' has already been registered" % key)
if key in _reserved_keys:
raise OptionError("Option '%s' is a reserved key" % key)
# the default value should be legal
if validator:
validator(defval)
# walk the nested dict, creating dicts as needed along the path
path = key.split('.')
for k in path:
if not bool(re.match('^' + tokenize.Name + '$', k)):
raise ValueError("%s is not a valid identifier" % k)
if keyword.iskeyword(k):
raise ValueError("%s is a python keyword" % k)
cursor = _global_config
for i, p in enumerate(path[:-1]):
if not isinstance(cursor, dict):
raise OptionError("Path prefix to option '%s' is already an option"
% '.'.join(path[:i]))
if p not in cursor:
cursor[p] = {}
cursor = cursor[p]
if not isinstance(cursor, dict):
raise OptionError("Path prefix to option '%s' is already an option" %
'.'.join(path[:-1]))
cursor[path[-1]] = defval # initialize
# save the option metadata
_registered_options[key] = RegisteredOption(key=key, defval=defval,
doc=doc, validator=validator,
cb=cb)
def deprecate_option(key, msg=None, rkey=None, removal_ver=None):
"""
Mark option `key` as deprecated, if code attempts to access this option,
a warning will be produced, using `msg` if given, or a default message
if not.
if `rkey` is given, any access to the key will be re-routed to `rkey`.
Neither the existence of `key` nor that if `rkey` is checked. If they
do not exist, any subsequence access will fail as usual, after the
deprecation warning is given.
Parameters
----------
key - the name of the option to be deprecated. must be a fully-qualified
option name (e.g "x.y.z.rkey").
msg - (Optional) a warning message to output when the key is referenced.
if no message is given a default message will be emitted.
rkey - (Optional) the name of an option to reroute access to.
If specified, any referenced `key` will be re-routed to `rkey`
including set/get/reset.
rkey must be a fully-qualified option name (e.g "x.y.z.rkey").
used by the default message if no `msg` is specified.
removal_ver - (Optional) specifies the version in which this option will
be removed. used by the default message if no `msg`
is specified.
Returns
-------
Nothing
Raises
------
OptionError - if key has already been deprecated.
"""
key = key.lower()
if key in _deprecated_options:
raise OptionError("Option '%s' has already been defined as deprecated."
% key)
_deprecated_options[key] = DeprecatedOption(key, msg, rkey, removal_ver)
#
# functions internal to the module
def _select_options(pat):
"""returns a list of keys matching `pat`
if pat=="all", returns all registered options
"""
# short-circuit for exact key
if pat in _registered_options:
return [pat]
# else look through all of them
keys = sorted(_registered_options.keys())
if pat == 'all': # reserved key
return keys
return [k for k in keys if re.search(pat, k, re.I)]
def _get_root(key):
path = key.split('.')
cursor = _global_config
for p in path[:-1]:
cursor = cursor[p]
return cursor, path[-1]
def _is_deprecated(key):
""" Returns True if the given option has been deprecated """
key = key.lower()
return key in _deprecated_options
def _get_deprecated_option(key):
"""
Retrieves the metadata for a deprecated option, if `key` is deprecated.
Returns
-------
DeprecatedOption (namedtuple) if key is deprecated, None otherwise
"""
try:
d = _deprecated_options[key]
except KeyError:
return None
else:
return d
def _get_registered_option(key):
"""
Retrieves the option metadata if `key` is a registered option.
Returns
-------
    RegisteredOption (namedtuple) if key is a registered option, None otherwise
"""
return _registered_options.get(key)
def _translate_key(key):
"""
    if key is deprecated and a replacement key is defined, will return the
    replacement key, otherwise returns `key` as-is
"""
d = _get_deprecated_option(key)
if d:
return d.rkey or key
else:
return key
def _warn_if_deprecated(key):
"""
Checks if `key` is a deprecated option and if so, prints a warning.
Returns
-------
bool - True if `key` is deprecated, False otherwise.
"""
d = _get_deprecated_option(key)
if d:
if d.msg:
print(d.msg)
warnings.warn(d.msg, DeprecationWarning)
else:
msg = "'%s' is deprecated" % key
if d.removal_ver:
msg += ' and will be removed in %s' % d.removal_ver
if d.rkey:
msg += ", please use '%s' instead." % d.rkey
else:
msg += ', please refrain from using it.'
warnings.warn(msg, DeprecationWarning)
return True
return False
def _build_option_description(k):
""" Builds a formatted description of a registered option and prints it """
o = _get_registered_option(k)
d = _get_deprecated_option(k)
s = u('%s ') % k
if o.doc:
s += '\n'.join(o.doc.strip().split('\n'))
else:
s += 'No description available.'
if o:
s += u('\n [default: %s] [currently: %s]') % (o.defval,
_get_option(k, True))
if d:
s += u('\n (Deprecated')
s += (u(', use `%s` instead.') % d.rkey if d.rkey else '')
s += u(')')
s += '\n\n'
return s
def pp_options_list(keys, width=80, _print=False):
""" Builds a concise listing of available options, grouped by prefix """
from textwrap import wrap
from itertools import groupby
def pp(name, ks):
pfx = ('- ' + name + '.[' if name else '')
ls = wrap(', '.join(ks), width, initial_indent=pfx,
subsequent_indent=' ', break_long_words=False)
if ls and ls[-1] and name:
ls[-1] = ls[-1] + ']'
return ls
ls = []
singles = [x for x in sorted(keys) if x.find('.') < 0]
if singles:
ls += pp('', singles)
keys = [x for x in keys if x.find('.') >= 0]
for k, g in groupby(sorted(keys), lambda x: x[:x.rfind('.')]):
ks = [x[len(k) + 1:] for x in list(g)]
ls += pp(k, ks)
s = '\n'.join(ls)
if _print:
print(s)
else:
return s
#
# helpers
@contextmanager
def config_prefix(prefix):
"""contextmanager for multiple invocations of API with a common prefix
supported API functions: (register / get / set )__option
    Warning: This is not thread-safe, and won't work properly if you import
the API functions into your module using the "from x import y" construct.
Example:
import pandas.core.config as cf
with cf.config_prefix("display.font"):
cf.register_option("color", "red")
cf.register_option("size", " 5 pt")
cf.set_option(size, " 6 pt")
cf.get_option(size)
...
etc'
will register options "display.font.color", "display.font.size", set the
value of "display.font.size"... and so on.
"""
# Note: reset_option relies on set_option, and on key directly
# it does not fit in to this monkey-patching scheme
global register_option, get_option, set_option, reset_option
def wrap(func):
def inner(key, *args, **kwds):
pkey = '%s.%s' % (prefix, key)
return func(pkey, *args, **kwds)
return inner
_register_option = register_option
_get_option = get_option
_set_option = set_option
set_option = wrap(set_option)
get_option = wrap(get_option)
register_option = wrap(register_option)
yield None
set_option = _set_option
get_option = _get_option
register_option = _register_option
# These factories and methods are handy for use as the validator
# arg in register_option
def is_type_factory(_type):
"""
Parameters
----------
`_type` - a type to be compared against (e.g. type(x) == `_type`)
Returns
-------
validator - a function of a single argument x , which raises
ValueError if type(x) is not equal to `_type`
"""
def inner(x):
if type(x) != _type:
raise ValueError("Value must have type '%s'" % str(_type))
return inner
def is_instance_factory(_type):
"""
Parameters
----------
`_type` - the type to be checked against
Returns
-------
validator - a function of a single argument x , which raises
ValueError if x is not an instance of `_type`
"""
if isinstance(_type, (tuple, list)):
_type = tuple(_type)
from pandas.io.formats.printing import pprint_thing
type_repr = "|".join(map(pprint_thing, _type))
else:
type_repr = "'%s'" % _type
def inner(x):
if not isinstance(x, _type):
raise ValueError("Value must be an instance of %s" % type_repr)
return inner
def is_one_of_factory(legal_values):
callables = [c for c in legal_values if callable(c)]
legal_values = [c for c in legal_values if not callable(c)]
def inner(x):
from pandas.io.formats.printing import pprint_thing as pp
if x not in legal_values:
if not any([c(x) for c in callables]):
pp_values = pp("|".join(lmap(pp, legal_values)))
msg = "Value must be one of {0}".format(pp_values)
if len(callables):
msg += " or a callable"
raise ValueError(msg)
return inner
# common type validators, for convenience
# usage: register_option(... , validator = is_int)
is_int = is_type_factory(int)
is_bool = is_type_factory(bool)
is_float = is_type_factory(float)
is_str = is_type_factory(str)
is_unicode = is_type_factory(compat.text_type)
is_text = is_instance_factory((str, bytes))
def is_callable(obj):
"""
Parameters
----------
`obj` - the object to be checked
Returns
-------
validator - returns True if object is callable
raises ValueError otherwise.
"""
if not callable(obj):
raise ValueError("Value must be a callable")
return True
|
py | b406f98dcea66a5e710698cdf32a4af93b010eaa | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-03 14:44
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('comments', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='content_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='content_type_set_for_comment', to='contenttypes.ContentType'),
),
migrations.AlterField(
model_name='commentflag',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comment_flags', to=settings.AUTH_USER_MODEL),
),
migrations.AlterModelOptions(
name='rootheader',
options={'ordering': ('-pk',), 'verbose_name': 'diskusia', 'verbose_name_plural': 'diskusie'},
),
]
|
py | b406f99ea494c5a937d1defeb7b8c97e4a0a9dd8 | from layer import *
class ReplicatedSoftmaxLayer(Layer):
def __init__(self, *args, **kwargs):
super(ReplicatedSoftmaxLayer, self).__init__(*args, **kwargs)
@classmethod
def IsLayerType(cls, proto):
return proto.hyperparams.activation == \
deepnet_pb2.Hyperparams.REPLICATED_SOFTMAX
def ApplyActivation(self):
state = self.state
temp = self.batchsize_temp
state.max(axis=0, target=temp)
state.add_row_mult(temp, -1)
cm.exp(state)
state.sum(axis=0, target=temp)
self.NN.divide(temp, target=temp)
state.mult_by_row(temp)
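# Equivalent NumPy sketch of the cudamat calls above (illustrative only; the
# array names are assumptions, not part of this module). Each column j of
# `state` holds the logits of one document, and NN[j] is its word count:
#
#   import numpy as np
#   z = state - state.max(axis=0, keepdims=True)  # subtract column max for stability
#   p = np.exp(z)
#   p /= p.sum(axis=0, keepdims=True)             # per-column softmax
#   state = p * NN                                # scale column j by word count NN[j]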
def Sample(self):
sample = self.sample
state = self.state
use_lightspeed = False
if use_lightspeed: # Do sampling on cpu.
temp = self.expanded_batch
state.sum(axis=0, target=self.temp)
state.div_by_row(self.temp, target=temp)
probs_cpu = temp.asarray().astype(np.float64)
numsamples = self.NN.asarray()
samples_cpu = lightspeed.SampleSoftmax(probs_cpu, numsamples)
sample.overwrite(samples_cpu.astype(np.float32))
else:
if self.proto.hyperparams.adaptive_prior > 0:
sample.assign(0)
temp_sample = self.expanded_batch
numsamples = int(self.proto.hyperparams.adaptive_prior)
for i in range(numsamples):
state.perturb_prob_for_softmax_sampling(target=temp_sample)
temp_sample.choose_max_and_accumulate(sample)
else:
NN = self.NN.asarray().reshape(-1)
numdims, batchsize = self.state.shape
max_samples = self.big_sample_matrix.shape[1]
for i in range(batchsize):
nn = NN[i]
factor = 1
if nn > max_samples:
nn = max_samples
factor = float(nn) / max_samples
samples = self.big_sample_matrix.slice(0, nn)
samples.assign(0)
samples.add_col_vec(self.state.slice(i, i+1))
samples.perturb_prob_for_softmax_sampling()
samples.choose_max(axis=0)
samples.sum(axis=1, target=sample.slice(i, i+1))
if factor > 1:
sample.slice(i, i+1).mult(factor)
def ComputeDeriv(self):
"""Compute derivative w.r.t input given derivative w.r.t output."""
raise Exception('Back prop through replicated softmax not implemented.')
def AllocateMemory(self, batchsize):
super(ReplicatedSoftmaxLayer, self).AllocateMemory(batchsize)
self.expansion_matrix = cm.CUDAMatrix(np.eye(self.numlabels))
self.big_sample_matrix = cm.empty((self.numlabels * self.dimensions, 1000))
def AllocateBatchsizeDependentMemory(self, batchsize):
super(ReplicatedSoftmaxLayer, self).AllocateBatchsizeDependentMemory(batchsize)
dimensions = self.dimensions
numlabels = self.numlabels
self.expanded_batch = cm.CUDAMatrix(np.zeros((numlabels * dimensions, batchsize)))
self.batchsize_temp = cm.CUDAMatrix(np.zeros((dimensions, batchsize)))
if self.is_input or self.is_initialized or self.is_output:
self.data = cm.CUDAMatrix(np.zeros((numlabels * dimensions, batchsize)))
self.NN = cm.CUDAMatrix(np.ones((1, batchsize)))
self.counter = cm.empty(self.NN.shape)
self.count_filter = cm.empty(self.NN.shape)
def ResetState(self, rand=False):
if self.hyperparams.normalize:
self.NN.assign(self.hyperparams.normalize_to)
else:
self.NN.assign(1)
super(ReplicatedSoftmaxLayer, self).ResetState(rand=rand)
def GetData(self):
self.state.assign(self.data)
h = self.hyperparams
self.state.sum(axis=0, target=self.NN)
self.NN.add(self.tiny) # To deal with documents of 0 words.
if h.multiplicative_prior > 0:
self.NN.mult(1 + h.multiplicative_prior)
self.state.mult(1 + h.multiplicative_prior)
if h.additive_prior > 0:
self.state.div_by_row(self.NN)
self.NN.add(h.additive_prior)
self.state.mult_by_row(self.NN)
if h.adaptive_prior > 0:
self.state.div_by_row(self.NN)
self.state.mult(h.adaptive_prior)
self.NN.assign(h.adaptive_prior)
def GetLoss(self, get_deriv=False):
"""Compute loss and also deriv w.r.t to it if asked for.
Compute the loss function. Targets should be in self.data, predictions
should be in self.state.
Args:
get_deriv: If True, compute the derivative w.r.t the loss function and put
it in self.deriv.
"""
perf = deepnet_pb2.Metrics()
perf.MergeFrom(self.proto.performance_stats)
perf.count = self.batchsize
tiny = self.tiny
temp = self.batchsize_temp
if self.loss_function == deepnet_pb2.Layer.SQUARED_LOSS:
if get_deriv:
target = self.deriv
else:
target = self.statesize
if self.hyperparams.normalize_error:
self.data.sum(axis=0, target=temp)
temp.add(self.tiny)
self.data.div_by_row(temp, target=target)
self.state.div_by_row(self.NN, target=self.expanded_batch)
target.subtract(self.expanded_batch)
else:
self.data.sum(axis=0, target=temp)
temp.add(self.tiny)
self.state.div_by_row(temp, target=target)
target.subtract(self.data)
error = target.euclid_norm()**2
perf.error = error
else:
raise Exception('Unknown loss function for Replicated Softmax units.')
return perf
def GetSparsityDivisor(self):
raise Exception('Sparsity not implemented for replicated softmax units.')
def CollectSufficientStatistics(self, neg=False):
"""Collect sufficient statistics for this layer."""
h = self.hyperparams
self.state.div_by_row(self.NN)
if not neg:
self.state.sum(axis=1, target=self.suff_stats)
else:
self.suff_stats.add_sums(self.state, axis=1, mult=-1.0)
self.state.mult_by_row(self.NN)
|
py | b406fbefd6f12135533874a5cf8980ed24ffcfc9 | # A linked list is given such that each node contains an additional random pointer which could point to any node in the list or null.
#
# Return a deep copy of the list.
# Definition for singly-linked list with a random pointer.
class RandomListNode(object):
def __init__(self, x):
self.label = x
self.next = None
self.random = None
class Solution(object):
# recursive
# def __init__(self):
# self.visited = {}
#
# def copyRandomList(self, head):
# """
# :type head: RandomListNode
# :rtype: RandomListNode
# """
# if not head:
# return None
# if head in self.visited:
# return self.visited[head]
# node = RandomListNode(head.label)
# self.visited[head] = node
# node.next = self.copyRandomList(head.next)
# node.random = self.copyRandomList(head.random)
# return node
def copyRandomList(self, head):
"""
:type head: RandomListNode
:rtype: RandomListNode
"""
# iterate
if not head:
return None
visited = {}
p = head
while head:
if head in visited:
node = visited[head]
else:
node = RandomListNode(head.label)
visited[head] = node
if head.random:
if head.random in visited:
node.random = visited[head.random]
else:
node.random = RandomListNode(head.random.label)
visited[head.random] = node.random
if head.next:
if head.next in visited:
node.next = visited[head.next]
else:
node.next = RandomListNode(head.next.label)
visited[head.next] = node.next
head = head.next
return visited[p]
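# Illustrative self-check (not part of the original solution): exercises
# copyRandomList() on a tiny hand-built list when this file is run directly.
if __name__ == "__main__":
    # Two nodes whose random pointers reference each other.
    a = RandomListNode(1)
    b = RandomListNode(2)
    a.next = b
    a.random = b
    b.random = a
    copy_a = Solution().copyRandomList(a)
    # The copy must be a fully separate graph with the same structure.
    assert copy_a is not a and copy_a.label == 1
    assert copy_a.next is not b and copy_a.next.label == 2
    assert copy_a.random is copy_a.next
    assert copy_a.next.random is copy_a
    print("deep copy verified")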
|
py | b406fc8bc6b7c2b1f833ca70371934e14f11a7ff | #
# PySNMP MIB module NT-ENTERPRISE-DATA-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/NT-ENTERPRISE-DATA-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:24:49 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsUnion, ValueRangeConstraint, SingleValueConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsUnion", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsIntersection")
nortel, = mibBuilder.importSymbols("NORTEL-MIB", "nortel")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Bits, MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, IpAddress, Integer32, NotificationType, ObjectIdentity, TimeTicks, iso, Unsigned32, MibIdentifier, ModuleIdentity, Counter64, Counter32 = mibBuilder.importSymbols("SNMPv2-SMI", "Bits", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "IpAddress", "Integer32", "NotificationType", "ObjectIdentity", "TimeTicks", "iso", "Unsigned32", "MibIdentifier", "ModuleIdentity", "Counter64", "Counter32")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
ntEnterpriseData = ModuleIdentity((1, 3, 6, 1, 4, 1, 562, 73))
ntEnterpriseData.setRevisions(('2008-10-13 00:00', '2008-02-25 00:00', '2007-06-01 00:00', '2007-03-15 00:00', '2006-02-07 00:00', '2006-02-01 00:00', '2006-01-25 00:00', '2006-01-18 00:00', '2005-11-21 00:00',))
if mibBuilder.loadTexts: ntEnterpriseData.setLastUpdated('200802250000Z')
if mibBuilder.loadTexts: ntEnterpriseData.setOrganization('Nortel Networks')
ntEnterpriseDataMibs = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 73, 1))
ntEnterpriseDataTasmanMibs = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 73, 1, 1))
ntEnterpriseDataTasmanMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 73, 1, 1, 1))
ntEnterpriseDataTasmanInterfaces = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 73, 1, 1, 2))
ntEnterpriseDataTasmanModules = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 73, 1, 1, 3))
ntEnterpriseDataNewburyMibs = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 73, 1, 2))
ntEnterpriseDataRegistration = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 73, 2))
ntEnterpriseSwitches = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 73, 2, 1))
ntEthernetSwitch = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 73, 2, 1, 1))
ntEthernetRoutingSwitch = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 73, 2, 1, 2))
ntEnterpriseRouters = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 73, 2, 2))
ntSecureRouter6200Series = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 73, 2, 2, 1))
ntSecureRouter6230 = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 73, 2, 2, 1, 1))
ntSecureRouter6280 = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 73, 2, 2, 1, 2))
ntSecureRouter3000Series = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 73, 2, 2, 2))
ntSecureRouter3120 = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 73, 2, 2, 2, 1))
ntSecureRouter1000Series = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 73, 2, 2, 3))
ntSecureRouter1001 = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 73, 2, 2, 3, 1))
ntSecureRouter1002 = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 73, 2, 2, 3, 2))
ntSecureRouter1004 = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 73, 2, 2, 3, 3))
ntSecureRouter1002E = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 73, 2, 2, 3, 4))
ntSecureRouter1004E = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 73, 2, 2, 3, 5))
ntSecureRouter1001S = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 73, 2, 2, 3, 6))
ntSecureRouterNESeries = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 73, 2, 2, 4))
ntSecureRouterNE05 = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 73, 2, 2, 4, 1))
ntSecureRouterNE08 = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 73, 2, 2, 4, 2))
ntSecureRouterNE16 = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 73, 2, 2, 4, 3))
ntSecureRouterNE20 = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 73, 2, 2, 4, 4))
ntSecureRouter4000Series = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 73, 2, 2, 5))
ntSecureRouter4134 = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 73, 2, 2, 5, 1))
ntSecureRouter2000Series = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 73, 2, 2, 6))
ntSecureRouter2330 = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 73, 2, 2, 6, 1))
ntEnterpriseServiceGateways = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 73, 2, 3))
enterpriseGateway = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 73, 2, 4))
advancedGateway2000Series = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 73, 2, 4, 1))
advancedGateway2330 = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 73, 2, 4, 1, 1))
mibBuilder.exportSymbols("NT-ENTERPRISE-DATA-MIB", ntSecureRouterNE16=ntSecureRouterNE16, ntEnterpriseDataRegistration=ntEnterpriseDataRegistration, ntSecureRouter1002E=ntSecureRouter1002E, ntSecureRouterNE05=ntSecureRouterNE05, ntEthernetRoutingSwitch=ntEthernetRoutingSwitch, ntSecureRouterNE08=ntSecureRouterNE08, ntSecureRouter1001S=ntSecureRouter1001S, ntSecureRouter4000Series=ntSecureRouter4000Series, ntEnterpriseData=ntEnterpriseData, ntEnterpriseDataMibs=ntEnterpriseDataMibs, ntSecureRouter3000Series=ntSecureRouter3000Series, ntSecureRouter6200Series=ntSecureRouter6200Series, ntEnterpriseSwitches=ntEnterpriseSwitches, ntSecureRouterNE20=ntSecureRouterNE20, ntEnterpriseDataTasmanMgmt=ntEnterpriseDataTasmanMgmt, ntSecureRouter4134=ntSecureRouter4134, ntSecureRouter3120=ntSecureRouter3120, ntSecureRouter6280=ntSecureRouter6280, ntEnterpriseDataNewburyMibs=ntEnterpriseDataNewburyMibs, ntSecureRouter1004E=ntSecureRouter1004E, ntEnterpriseRouters=ntEnterpriseRouters, advancedGateway2330=advancedGateway2330, ntSecureRouter1002=ntSecureRouter1002, ntSecureRouterNESeries=ntSecureRouterNESeries, PYSNMP_MODULE_ID=ntEnterpriseData, ntSecureRouter1004=ntSecureRouter1004, ntSecureRouter1001=ntSecureRouter1001, ntEnterpriseDataTasmanInterfaces=ntEnterpriseDataTasmanInterfaces, enterpriseGateway=enterpriseGateway, ntSecureRouter2330=ntSecureRouter2330, ntSecureRouter2000Series=ntSecureRouter2000Series, ntSecureRouter1000Series=ntSecureRouter1000Series, ntEnterpriseDataTasmanModules=ntEnterpriseDataTasmanModules, ntEnterpriseDataTasmanMibs=ntEnterpriseDataTasmanMibs, advancedGateway2000Series=advancedGateway2000Series, ntEnterpriseServiceGateways=ntEnterpriseServiceGateways, ntEthernetSwitch=ntEthernetSwitch, ntSecureRouter6230=ntSecureRouter6230)
|
py | b406fca800454ff28e42cc6be7df7a0b07004fc3 | """Membership serializers."""
# Django REST Framework
from rest_framework import serializers
# Serializers
from cride.users.serializers.users import UserModelSerializer
# Models
from cride.circles.models import Membership, Invitation
from cride.users.models import User
# Django Utilities
from django.utils import timezone
class MembershipModelSerializer(serializers.ModelSerializer):
"""Member model serializer"""
user = UserModelSerializer(read_only=True)
invited_by = serializers.StringRelatedField()
joined_at = serializers.DateTimeField(source='created', read_only=True)
class Meta:
"""Meta class."""
model = Membership
fields = (
'user',
'is_admin',
'is_active',
'used_invitations',
'remaining_invitation',
'rides_taken',
'rides_offered',
'joined_at',
'invited_by'
)
read_only_fields = (
'user',
'used_invitations',
'invited_by'
)
class AddMemberSerializer(serializers.Serializer):
"""Add member serializer
Handles the process of adding a member to a circle."""
invitation_code = serializers.CharField(min_length=8)
user = serializers.HiddenField(default=serializers.CurrentUserDefault())
def validate_user(self, data):
"""Verify user is not a member"""
circle = self.context['circle']
user = data
q = Membership.objects.filter(
circle=circle,
user=user
)
if q.exists():
raise serializers.ValidationError('User is already member of this circle.')
return data
def validate_invitation_code(self, data):
"""Verify if code exists and it is related to the circle."""
try:
invitation = Invitation.objects.get(
code=data,
circle=self.context['circle'],
used=False
)
except Invitation.DoesNotExist:
raise serializers.ValidationError('Invalid Invitation')
self.context['invitation'] = invitation
return data
def validate(self, data):
"""Verify circle could accept a new member"""
circle = self.context['circle']
if circle.is_limited and circle.members.count() >= circle.members_limit:
raise serializers.ValidationError('Circle has reached a member limit')
return data
def create(self, data):
"""Add member to circle"""
circle = self.context['circle']
invitation = self.context['invitation']
user = User.objects.filter(username=data['user'])[0]
now = timezone.now()
member = Membership.objects.create(
user=user,
profile=user.profile,
circle=circle,
invited_by=invitation.issued_by
)
# Update Invitation
invitation.used_by = user
invitation.used = True
invitation.used_at = now
invitation.save()
# Update issuer data
issuer = Membership.objects.get(
user=invitation.issued_by,
circle=circle
)
issuer.used_invitations += 1
issuer.remaining_invitation -= 1
issuer.save()
return member
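# Illustrative sketch (not part of this module): how AddMemberSerializer is
# typically driven from a view. The `circle` and `request` objects here are
# hypothetical; the validators above only require 'circle' (plus the request,
# via CurrentUserDefault) to be present in the serializer context.
#
#   serializer = AddMemberSerializer(
#       data={'invitation_code': code},
#       context={'circle': circle, 'request': request}
#   )
#   serializer.is_valid(raise_exception=True)
#   member = serializer.save()  # runs create() above and returns a Membership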
|
py | b406fe0fa296734650fa5dd35497b942c4de9cda | # Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Main Python API for analyzing binary size."""
import argparse
import bisect
import calendar
import collections
import copy
import datetime
import functools
import gzip
import itertools
import logging
import os
import posixpath
import re
import shlex
import string
import subprocess
import sys
import tempfile
import time
import zipfile
import zlib
import apkanalyzer
import ar
import data_quality
import demangle
import describe
import dir_metadata
import dwarfdump
import file_format
import function_signature
import linker_map_parser
import models
import ninja_parser
import nm
import obj_analyzer
import parallel
import path_util
import readelf
import string_extract
import zip_util
sys.path.insert(1, os.path.join(path_util.TOOLS_SRC_ROOT, 'tools', 'grit'))
from grit.format import data_pack
_UNCOMPRESSED_COMPRESSION_RATIO_THRESHOLD = 0.9
# Holds computation state that is live only when an output directory exists.
_OutputDirectoryContext = collections.namedtuple('_OutputDirectoryContext', [
'elf_object_paths', # Only when elf_path is also provided.
'known_inputs', # Only when elf_path is also provided.
'output_directory',
'thin_archives',
])
# When ensuring matching section sizes between .elf and .map files, these
# sections should be ignored. When lld creates a combined library with
# partitions, some sections (like .text) exist in each partition, but the ones
# below are common. At library splitting time, llvm-objcopy pulls what's needed
# from these sections into the new libraries. Hence, the ELF sections will end
# up smaller than the combined .map file sections.
_SECTION_SIZE_BLOCKLIST = ['.symtab', '.shstrtab', '.strtab']
# Tunable constant "knobs" for CreateContainerAndSymbols().
class SectionSizeKnobs:
def __init__(self):
# A limit on the number of symbols an address can have, before these symbols
# are compacted into shared symbols. Increasing this value causes more data
# to be stored in .size files, but is also more expensive.
# Effect of max_same_name_alias_count (as of Oct 2017, with min_pss = max):
# 1: shared .text syms = 1772874 bytes, file size = 9.43MiB (645476 syms).
# 2: shared .text syms = 1065654 bytes, file size = 9.58MiB (669952 syms).
# 6: shared .text syms = 464058 bytes, file size = 10.11MiB (782693 syms).
# 10: shared .text syms = 365648 bytes, file size = 10.24MiB (813758 syms).
# 20: shared .text syms = 86202 bytes, file size = 10.38MiB (854548 syms).
# 40: shared .text syms = 48424 bytes, file size = 10.50MiB (890396 syms).
# 50: shared .text syms = 41860 bytes, file size = 10.54MiB (902304 syms).
# max: shared .text syms = 0 bytes, file size = 11.10MiB (1235449 syms).
self.max_same_name_alias_count = 40 # 50kb is basically negligable.
# File name: Source file.
self.apk_other_files = {
'assets/icudtl.dat':
'../../third_party/icu/android/icudtl.dat',
'assets/snapshot_blob_32.bin':
'../../v8/snapshot_blob_32.bin',
'assets/snapshot_blob_64.bin':
'../../v8/snapshot_blob_64.bin',
'assets/unwind_cfi_32':
'../../base/trace_event/cfi_backtrace_android.cc',
'assets/webapk_dex_version.txt':
'../../chrome/android/webapk/libs/runtime_library_version.gni',
'lib/armeabi-v7a/libarcore_sdk_c_minimal.so':
'../../third_party/arcore-android-sdk/BUILD.gn',
'lib/armeabi-v7a/libarcore_sdk_c.so':
'../../third_party/arcore-android-sdk/BUILD.gn',
'lib/armeabi-v7a/libcrashpad_handler_trampoline.so':
'../../third_party/crashpad/BUILD.gn',
'lib/armeabi-v7a/libyoga.so':
'../../chrome/android/feed/BUILD.gn',
'lib/armeabi-v7a/libelements.so':
'../../chrome/android/feed/BUILD.gn',
'lib/arm64-v8a/libarcore_sdk_c_minimal.so':
'../../third_party/arcore-android-sdk/BUILD.gn',
'lib/arm64-v8a/libarcore_sdk_c.so':
'../../third_party/arcore-android-sdk/BUILD.gn',
'lib/arm64-v8a/libcrashpad_handler_trampoline.so':
'../../third_party/crashpad/BUILD.gn',
'lib/arm64-v8a/libyoga.so':
'../../chrome/android/feed/BUILD.gn',
'lib/arm64-v8a/libelements.so':
'../../chrome/android/feed/BUILD.gn',
}
# Parameters and states for archiving a container.
class ContainerArchiveOptions:
def __init__(self, top_args, sub_args):
# An estimate of pak translation compression ratio to make comparisons
# between .size files reasonable. Otherwise this can differ with every pak
# change.
self.pak_compression_ratio = 0.38 if sub_args.minimal_apks_file else 0.33
# Whether to count number of relative relocations instead of binary size.
self.relocations_mode = top_args.relocations
self.analyze_java = not (sub_args.native_only or sub_args.no_java
or top_args.native_only or top_args.no_java
or self.relocations_mode)
# This may be further disabled downstream, e.g., for the case where an APK
# is specified, but it contains no .so files.
self.analyze_native = not (sub_args.java_only or sub_args.no_native
or top_args.java_only or top_args.no_native)
self.track_string_literals = sub_args.track_string_literals
def _OpenMaybeGzAsText(path):
"""Calls `gzip.open()` if |path| ends in ".gz", otherwise calls `open()`."""
if path.endswith('.gz'):
return gzip.open(path, 'rt')
return open(path, 'rt')
def _NormalizeNames(raw_symbols):
"""Ensures that all names are formatted in a useful way.
This includes:
- Deriving |name| and |template_name| from |full_name|.
- Stripping of return types (for functions).
- Moving "vtable for" and the like to be suffixes rather than prefixes.
"""
found_prefixes = set()
for symbol in raw_symbols:
full_name = symbol.full_name
# See comment in _CalculatePadding() about when this can happen. Don't
# process names for non-native sections.
if symbol.IsPak():
# full_name: "about_ui_resources.grdp: IDR_ABOUT_UI_CREDITS_HTML".
space_idx = full_name.rindex(' ')
name = full_name[space_idx + 1:]
symbol.template_name = name
symbol.name = name
elif (full_name.startswith('**') or symbol.IsOverhead()
or symbol.IsOther()):
symbol.template_name = full_name
symbol.name = full_name
elif symbol.IsDex():
symbol.full_name, symbol.template_name, symbol.name = (
function_signature.ParseJava(full_name))
elif symbol.IsStringLiteral():
symbol.full_name = full_name
symbol.template_name = full_name
symbol.name = full_name
elif symbol.IsNative():
# Remove [clone] suffix, and set flag accordingly.
# Search from left-to-right, as multiple [clone]s can exist.
# Example name suffixes:
# [clone .part.322] # GCC
# [clone .isra.322] # GCC
# [clone .constprop.1064] # GCC
# [clone .11064] # clang
# http://unix.stackexchange.com/questions/223013/function-symbol-gets-part-suffix-after-compilation
idx = full_name.find(' [clone ')
if idx != -1:
full_name = full_name[:idx]
symbol.flags |= models.FLAG_CLONE
# Clones for C symbols.
if symbol.section == 't':
idx = full_name.rfind('.')
if idx != -1 and full_name[idx + 1:].isdigit():
new_name = full_name[:idx]
# Generated symbols that end with .123 but are not clones.
# Find these via:
# size_info.symbols.WhereInSection('t').WhereIsGroup().SortedByCount()
if new_name not in ('__tcf_0', 'startup'):
full_name = new_name
symbol.flags |= models.FLAG_CLONE
# Remove .part / .isra / .constprop.
idx = full_name.rfind('.', 0, idx)
if idx != -1:
full_name = full_name[:idx]
# E.g.: vtable for FOO
idx = full_name.find(' for ', 0, 30)
if idx != -1:
found_prefixes.add(full_name[:idx + 4])
full_name = '{} [{}]'.format(full_name[idx + 5:], full_name[:idx])
# E.g.: virtual thunk to FOO
idx = full_name.find(' to ', 0, 30)
if idx != -1:
found_prefixes.add(full_name[:idx + 3])
full_name = '{} [{}]'.format(full_name[idx + 4:], full_name[:idx])
# Strip out return type, and split out name, template_name.
# Function parsing also applies to non-text symbols.
# E.g. Function statics.
symbol.full_name, symbol.template_name, symbol.name = (
function_signature.Parse(full_name))
# Remove anonymous namespaces (they just harm clustering).
symbol.template_name = symbol.template_name.replace(
'(anonymous namespace)::', '')
symbol.full_name = symbol.full_name.replace(
'(anonymous namespace)::', '')
non_anonymous_name = symbol.name.replace('(anonymous namespace)::', '')
if symbol.name != non_anonymous_name:
symbol.flags |= models.FLAG_ANONYMOUS
symbol.name = non_anonymous_name
# Allow using "is" to compare names (and should help with RAM). This applies
# to all symbols.
function_signature.InternSameNames(symbol)
logging.debug('Found name prefixes of: %r', found_prefixes)
def _NormalizeObjectPath(path):
"""Normalizes object paths.
Prefixes are removed: obj/, ../../
Archive names made more pathy: foo/bar.a(baz.o) -> foo/bar.a/baz.o
"""
if path.startswith('obj/'):
# Convert obj/third_party/... -> third_party/...
path = path[4:]
elif path.startswith('../../'):
# Convert ../../third_party/... -> third_party/...
path = path[6:]
if path.endswith(')'):
# Convert foo/bar.a(baz.o) -> foo/bar.a/baz.o so that hierarchical
# breakdowns consider the .o part to be a separate node.
start_idx = path.rindex('(')
path = os.path.join(path[:start_idx], path[start_idx + 1:-1])
return path
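# Example of the combined transformations above (hypothetical path):
#   _NormalizeObjectPath('obj/third_party/foo/libfoo.a(bar.o)')
#       -> 'third_party/foo/libfoo.a/bar.o'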
def _NormalizeSourcePath(path):
"""Returns (is_generated, normalized_path)"""
if path.startswith('gen/'):
# Convert gen/third_party/... -> third_party/...
return True, path[4:]
if path.startswith('../../'):
# Convert ../../third_party/... -> third_party/...
return False, path[6:]
return True, path
def _ExtractSourcePathsAndNormalizeObjectPaths(raw_symbols,
object_source_mapper,
address_source_mapper):
"""Fills in the |source_path| attribute and normalizes |object_path|."""
if object_source_mapper:
logging.info('Looking up source paths from ninja files')
for symbol in raw_symbols:
if symbol.IsDex() or symbol.IsOther():
continue
# Native symbols and pak symbols use object paths.
object_path = symbol.object_path
if not object_path:
continue
# We don't have source info for prebuilt .a files.
if not os.path.isabs(object_path) and not object_path.startswith('..'):
symbol.source_path = object_source_mapper.FindSourceForPath(object_path)
assert object_source_mapper.unmatched_paths_count == 0, (
'One or more source file paths could not be found. Likely caused by '
'.ninja files being generated at a different time than the .map file.')
if address_source_mapper:
logging.info('Looking up source paths from dwarfdump')
for symbol in raw_symbols:
if symbol.section_name != models.SECTION_TEXT:
continue
source_path = address_source_mapper.FindSourceForTextAddress(
symbol.address)
if source_path and not os.path.isabs(source_path):
symbol.source_path = source_path
# Majority of unmatched queries are for assembly source files (ex libav1d)
# and v8 builtins.
assert address_source_mapper.unmatched_queries_ratio < 0.03, (
'Percentage of failing |address_source_mapper| queries ' +
'({}%) >= 3% '.format(
address_source_mapper.unmatched_queries_ratio * 100) +
'FindSourceForTextAddress() likely has a bug.')
logging.info('Normalizing source and object paths')
for symbol in raw_symbols:
if symbol.object_path:
symbol.object_path = _NormalizeObjectPath(symbol.object_path)
if symbol.source_path:
symbol.generated_source, symbol.source_path = _NormalizeSourcePath(
symbol.source_path)
def _ComputeAncestorPath(path_list, symbol_count):
"""Returns the common ancestor of the given paths."""
if not path_list:
return ''
prefix = os.path.commonprefix(path_list)
# Check if all paths were the same.
if prefix == path_list[0]:
return prefix
# Put in buckets to cut down on the number of unique paths.
if symbol_count >= 100:
symbol_count_str = '100+'
elif symbol_count >= 50:
symbol_count_str = '50-99'
elif symbol_count >= 20:
symbol_count_str = '20-49'
elif symbol_count >= 10:
symbol_count_str = '10-19'
else:
symbol_count_str = str(symbol_count)
# Put the path count as a subdirectory so that grouping by path will show
# "{shared}" as a bucket, and the symbol counts as leafs.
if not prefix:
return os.path.join('{shared}', symbol_count_str)
return os.path.join(os.path.dirname(prefix), '{shared}', symbol_count_str)
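# Example of the bucketing above (hypothetical paths):
#   _ComputeAncestorPath(['third_party/foo/a.cc', 'third_party/foo/b.cc'], 12)
#       -> 'third_party/foo/{shared}/10-19'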
def _CompactLargeAliasesIntoSharedSymbols(raw_symbols, knobs):
"""Converts symbols with large number of aliases into single symbols.
The merged symbol's path fields are changed to common-ancestor paths in
the form: common/dir/{shared}/$SYMBOL_COUNT
Assumes aliases differ only by path (not by name).
"""
num_raw_symbols = len(raw_symbols)
num_shared_symbols = 0
src_cursor = 0
dst_cursor = 0
while src_cursor < num_raw_symbols:
symbol = raw_symbols[src_cursor]
raw_symbols[dst_cursor] = symbol
dst_cursor += 1
aliases = symbol.aliases
if aliases and len(aliases) > knobs.max_same_name_alias_count:
symbol.source_path = _ComputeAncestorPath(
[s.source_path for s in aliases if s.source_path], len(aliases))
symbol.object_path = _ComputeAncestorPath(
[s.object_path for s in aliases if s.object_path], len(aliases))
symbol.generated_source = all(s.generated_source for s in aliases)
symbol.aliases = None
num_shared_symbols += 1
src_cursor += len(aliases)
else:
src_cursor += 1
raw_symbols[dst_cursor:] = []
num_removed = src_cursor - dst_cursor
logging.debug('Converted %d aliases into %d shared-path symbols',
num_removed, num_shared_symbols)
def _ConnectNmAliases(raw_symbols):
"""Ensures |aliases| is set correctly for all symbols."""
prev_sym = raw_symbols[0]
for sym in raw_symbols[1:]:
# Don't merge bss symbols.
if sym.address > 0 and prev_sym.address == sym.address:
# Don't merge padding-only symbols (** symbol gaps).
if prev_sym.size > 0:
# Don't merge if already merged.
if prev_sym.aliases is None or prev_sym.aliases is not sym.aliases:
if prev_sym.aliases:
prev_sym.aliases.append(sym)
else:
prev_sym.aliases = [prev_sym, sym]
sym.aliases = prev_sym.aliases
prev_sym = sym
def _AssignNmAliasPathsAndCreatePathAliases(raw_symbols, object_paths_by_name):
num_found_paths = 0
num_unknown_names = 0
num_path_mismatches = 0
num_aliases_created = 0
ret = []
for symbol in raw_symbols:
ret.append(symbol)
full_name = symbol.full_name
# '__typeid_' symbols appear in the linker .map only, and not in nm output.
if full_name.startswith('__typeid_'):
if object_paths_by_name.get(full_name):
logging.warning('Found unexpected __typeid_ symbol in nm output: %s',
full_name)
continue
# Don't skip if symbol.IsBss(). This is needed for LLD-LTO to work, since
# .bss object_path data are unavailable for linker_map_parser, and need to
# be extracted here. For regular LLD flow, incorrect aliased symbols can
# arise. But that's a lesser evil compared to having LLD-LTO .bss missing
# object_path and source_path.
# TODO(huangs): Fix aliased symbols for the LLD case.
if (symbol.IsStringLiteral() or
not full_name or
full_name[0] in '*.' or # e.g. ** merge symbols, .Lswitch.table
full_name == 'startup'):
continue
object_paths = object_paths_by_name.get(full_name)
if object_paths:
num_found_paths += 1
else:
# Happens a lot with code that has LTO enabled (linker creates symbols).
num_unknown_names += 1
continue
if symbol.object_path and symbol.object_path not in object_paths:
if num_path_mismatches < 10:
logging.warning('Symbol path reported by .map not found by nm.')
logging.warning('sym=%r', symbol)
logging.warning('paths=%r', object_paths)
object_paths.append(symbol.object_path)
object_paths.sort()
num_path_mismatches += 1
symbol.object_path = object_paths[0]
if len(object_paths) > 1:
# Create one symbol for each object_path.
aliases = symbol.aliases or [symbol]
symbol.aliases = aliases
num_aliases_created += len(object_paths) - 1
for object_path in object_paths[1:]:
new_sym = models.Symbol(
symbol.section_name, symbol.size, address=symbol.address,
full_name=full_name, object_path=object_path, aliases=aliases)
aliases.append(new_sym)
ret.append(new_sym)
logging.debug('Cross-referenced %d symbols with nm output. '
'num_unknown_names=%d num_path_mismatches=%d '
'num_aliases_created=%d',
num_found_paths, num_unknown_names, num_path_mismatches,
num_aliases_created)
# Currently: num_unknown_names=1246 out of 591206 (0.2%).
if num_unknown_names > len(raw_symbols) * 0.01:
logging.warning('Abnormal number of symbols not found in .o files (%d)',
num_unknown_names)
return ret
def _DiscoverMissedObjectPaths(raw_symbols, known_inputs):
# Missing object paths are caused by .a files added by -l flags, which are not
# listed as explicit inputs within .ninja rules.
missed_inputs = set()
for symbol in raw_symbols:
path = symbol.object_path
if path.endswith(')'):
# Convert foo/bar.a(baz.o) -> foo/bar.a
path = path[:path.rindex('(')]
if path and path not in known_inputs:
missed_inputs.add(path)
return missed_inputs
def _CreateMergeStringsReplacements(merge_string_syms,
list_of_positions_by_object_path):
"""Creates replacement symbols for |merge_syms|."""
ret = []
STRING_LITERAL_NAME = models.STRING_LITERAL_NAME
assert len(merge_string_syms) == len(list_of_positions_by_object_path)
tups = zip(merge_string_syms, list_of_positions_by_object_path)
for merge_sym, positions_by_object_path in tups:
merge_sym_address = merge_sym.address
new_symbols = []
ret.append(new_symbols)
for object_path, positions in positions_by_object_path.items():
for offset, size in positions:
address = merge_sym_address + offset
symbol = models.Symbol(
models.SECTION_RODATA,
size,
address=address,
full_name=STRING_LITERAL_NAME,
object_path=object_path)
new_symbols.append(symbol)
logging.debug('Created %d string literal symbols', sum(len(x) for x in ret))
logging.debug('Sorting string literals')
for symbols in ret:
# For de-duping & alias creation, order by address & size.
# For alias symbol ordering, sort by object_path.
symbols.sort(key=lambda x: (x.address, -x.size, x.object_path))
logging.debug('Deduping string literals')
num_removed = 0
size_removed = 0
num_aliases = 0
for i, symbols in enumerate(ret):
if not symbols:
continue
prev_symbol = symbols[0]
new_symbols = [prev_symbol]
for symbol in symbols[1:]:
padding = symbol.address - prev_symbol.end_address
if (prev_symbol.address == symbol.address and
prev_symbol.size == symbol.size):
# String is an alias.
num_aliases += 1
aliases = prev_symbol.aliases
if aliases:
aliases.append(symbol)
symbol.aliases = aliases
else:
aliases = [prev_symbol, symbol]
prev_symbol.aliases = aliases
symbol.aliases = aliases
elif padding + symbol.size <= 0:
# String is a substring of prior one.
num_removed += 1
size_removed += symbol.size
continue
elif padding < 0:
# String overlaps previous one. Adjust to not overlap.
symbol.address -= padding
symbol.size += padding
new_symbols.append(symbol)
prev_symbol = symbol
ret[i] = new_symbols
logging.debug(
'Removed %d overlapping string literals (%d bytes) & created %d aliases',
num_removed, size_removed, num_aliases)
return ret
def _UpdateSymbolNamesFromNm(raw_symbols, names_by_address):
"""Updates raw_symbols names with extra information from nm."""
logging.debug('Update symbol names')
# linker_map_parser extracts '** outlined function' without knowing how many
# such symbols exist at each address. nm has this information, and stores the
# value as, e.g., '** outlined function * 5'. Copy the information over.
for s in raw_symbols:
if s.full_name.startswith('** outlined function'):
name_list = names_by_address.get(s.address)
if name_list:
for name in name_list:
if name.startswith('** outlined function'):
s.full_name = name
break
def _AddNmAliases(raw_symbols, names_by_address):
"""Adds symbols that were removed by identical code folding."""
# Step 1: Create list of (index_of_symbol, name_list).
logging.debug('Creating alias list')
replacements = []
num_new_symbols = 0
num_missing = 0
missing_names = collections.defaultdict(list)
for i, s in enumerate(raw_symbols):
# Don't alias padding-only symbols (e.g. ** symbol gap)
if s.size_without_padding == 0:
continue
# Also skip artificial symbols that won't appear in nm output.
if s.full_name.startswith('** CFI jump table'):
continue
name_list = names_by_address.get(s.address)
if name_list:
if s.full_name not in name_list:
num_missing += 1
missing_names[s.full_name].append(s.address)
# Sometimes happens for symbols from assembly files.
if num_missing < 10:
logging.debug('Name missing from aliases: %s %s (addr=%x)',
s.full_name, name_list, s.address)
continue
replacements.append((i, name_list))
num_new_symbols += len(name_list) - 1
if missing_names and logging.getLogger().isEnabledFor(logging.INFO):
for address, names in names_by_address.items():
for name in names:
if name in missing_names:
logging.info('Missing name %s is at address %x instead of [%s]' %
(name, address, ','.join('%x' % a for a in missing_names[name])))
if float(num_new_symbols) / len(raw_symbols) < .05:
logging.warning('Number of aliases is oddly low (%.0f%%). It should '
'usually be around 25%%. Ensure --tool-prefix is correct. ',
float(num_new_symbols) / len(raw_symbols) * 100)
# Step 2: Create new symbols as siblings to each existing one.
logging.debug('Creating %d new symbols from nm output', num_new_symbols)
expected_num_symbols = len(raw_symbols) + num_new_symbols
ret = []
prev_src = 0
for cur_src, name_list in replacements:
ret += raw_symbols[prev_src:cur_src]
prev_src = cur_src + 1
sym = raw_symbols[cur_src]
# Create symbols (|sym| gets recreated and discarded).
new_syms = []
for full_name in name_list:
# Do not set |aliases| in order to avoid being pruned by
# _CompactLargeAliasesIntoSharedSymbols(), which assumes aliases differ
# only by path. The field will be set afterwards by _ConnectNmAliases().
new_syms.append(models.Symbol(
sym.section_name, sym.size, address=sym.address, full_name=full_name))
ret += new_syms
ret += raw_symbols[prev_src:]
assert expected_num_symbols == len(ret)
return ret
def LoadAndPostProcessSizeInfo(path, file_obj=None):
"""Returns a SizeInfo for the given |path|."""
logging.debug('Loading results from: %s', path)
size_info = file_format.LoadSizeInfo(path, file_obj=file_obj)
logging.info('Normalizing symbol names')
_NormalizeNames(size_info.raw_symbols)
logging.info('Loaded %d symbols', len(size_info.raw_symbols))
return size_info
def LoadAndPostProcessDeltaSizeInfo(path, file_obj=None):
"""Returns a tuple of SizeInfos for the given |path|."""
logging.debug('Loading results from: %s', path)
before_size_info, after_size_info = file_format.LoadDeltaSizeInfo(
path, file_obj=file_obj)
logging.info('Normalizing symbol names')
_NormalizeNames(before_size_info.raw_symbols)
_NormalizeNames(after_size_info.raw_symbols)
logging.info('Loaded %d + %d symbols', len(before_size_info.raw_symbols),
len(after_size_info.raw_symbols))
return before_size_info, after_size_info
def _GetModuleInfoList(minimal_apks_path):
module_info_list = []
with zipfile.ZipFile(minimal_apks_path) as z:
for info in z.infolist():
# E.g.:
# splits/base-master.apk
# splits/base-en.apk
# splits/vr-master.apk
# splits/vr-en.apk
# TODO(agrieve): Might be worth measuring a non-en locale as well.
m = re.match(r'splits/(.*)-master\.apk', info.filename)
if m:
module_info_list.append((m.group(1), info.file_size))
return sorted(module_info_list)
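# Example of the regex above (hypothetical file names): 'splits/vr-master.apk'
# yields module name 'vr', while locale splits like 'splits/vr-en.apk' do not
# match and are ignored:
#   re.match(r'splits/(.*)-master\.apk', 'splits/vr-master.apk').group(1)
#       -> 'vr'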
def _CollectModuleSizes(minimal_apks_path):
sizes_by_module = collections.defaultdict(int)
for module_name, file_size in _GetModuleInfoList(minimal_apks_path):
sizes_by_module[module_name] += file_size
return sizes_by_module
def _ExtendSectionRange(section_range_by_name, section_name, delta_size):
(prev_address, prev_size) = section_range_by_name.get(section_name, (0, 0))
section_range_by_name[section_name] = (prev_address, prev_size + delta_size)
def CreateMetadata(args, linker_name, build_config):
"""Creates metadata dict while updating |build_config|.
Args:
args: Resolved command-line args.
linker_name: A coded linker name (see linker_map_parser.py).
build_config: Common build configurations to update or to undergo
consistency checks.
Returns:
A dict of models.METADATA_* -> values. Performs "best effort" extraction
using available data.
"""
logging.debug('Constructing metadata')
def update_build_config(key, value):
if key in build_config:
old_value = build_config[key]
if value != old_value:
raise ValueError('Inconsistent {}: {} (was {})'.format(
key, value, old_value))
else:
build_config[key] = value
metadata = {}
# Ensure all paths are relative to output directory to make them hermetic.
if args.output_directory:
shorten_path = lambda path: os.path.relpath(path, args.output_directory)
gn_args = _ParseGnArgs(os.path.join(args.output_directory, 'args.gn'))
update_build_config(models.BUILD_CONFIG_GN_ARGS, gn_args)
else:
# If output directory is unavailable, just store basenames.
shorten_path = os.path.basename
if args.tool_prefix:
relative_tool_prefix = path_util.ToToolsSrcRootRelative(args.tool_prefix)
update_build_config(models.BUILD_CONFIG_TOOL_PREFIX, relative_tool_prefix)
if linker_name:
update_build_config(models.BUILD_CONFIG_LINKER_NAME, linker_name)
# Deduce GIT revision (cached via @lru_cache).
git_rev = _DetectGitRevision(args.source_directory)
if git_rev:
update_build_config(models.BUILD_CONFIG_GIT_REVISION, git_rev)
if args.elf_file:
metadata[models.METADATA_ELF_FILENAME] = shorten_path(args.elf_file)
architecture = readelf.ArchFromElf(args.elf_file, args.tool_prefix)
metadata[models.METADATA_ELF_ARCHITECTURE] = architecture
timestamp_obj = datetime.datetime.utcfromtimestamp(
os.path.getmtime(args.elf_file))
timestamp = calendar.timegm(timestamp_obj.timetuple())
metadata[models.METADATA_ELF_MTIME] = timestamp
build_id = readelf.BuildIdFromElf(args.elf_file, args.tool_prefix)
metadata[models.METADATA_ELF_BUILD_ID] = build_id
relocations_count = _CountRelocationsFromElf(args.elf_file,
args.tool_prefix)
metadata[models.METADATA_ELF_RELOCATIONS_COUNT] = relocations_count
if args.map_file:
metadata[models.METADATA_MAP_FILENAME] = shorten_path(args.map_file)
if args.minimal_apks_file:
metadata[models.METADATA_APK_FILENAME] = shorten_path(
args.minimal_apks_file)
if args.split_name and args.split_name != 'base':
metadata[models.METADATA_APK_SIZE] = os.path.getsize(args.apk_file)
metadata[models.METADATA_APK_SPLIT_NAME] = args.split_name
else:
sizes_by_module = _CollectModuleSizes(args.minimal_apks_file)
for name, size in sizes_by_module.items():
key = models.METADATA_APK_SIZE
if name != 'base':
key += '-' + name
metadata[key] = size
elif args.apk_file:
metadata[models.METADATA_APK_FILENAME] = shorten_path(args.apk_file)
metadata[models.METADATA_APK_SIZE] = os.path.getsize(args.apk_file)
return metadata
def _ResolveThinArchivePaths(raw_symbols, thin_archives):
"""Converts object_paths for thin archives to external .o paths."""
for symbol in raw_symbols:
object_path = symbol.object_path
if object_path.endswith(')'):
start_idx = object_path.rindex('(')
archive_path = object_path[:start_idx]
if archive_path in thin_archives:
subpath = object_path[start_idx + 1:-1]
symbol.object_path = ar.CreateThinObjectPath(archive_path, subpath)
def _DeduceObjectPathForSwitchTables(raw_symbols, object_paths_by_name):
strip_num_suffix_regexp = re.compile(r'\s+\(\.\d+\)$')
num_switch_tables = 0
num_unassigned = 0
num_deduced = 0
num_arbitrations = 0
for s in raw_symbols:
if s.full_name.startswith('Switch table for '):
num_switch_tables += 1
# Strip 'Switch table for ' prefix.
name = s.full_name[17:]
# Strip, e.g., ' (.123)' suffix.
name = re.sub(strip_num_suffix_regexp, '', name)
object_paths = object_paths_by_name.get(name, None)
if not s.object_path:
if object_paths is None:
num_unassigned += 1
else:
num_deduced += 1
# If ambiguity arises, arbitrate by taking the first.
s.object_path = object_paths[0]
if len(object_paths) > 1:
num_arbitrations += 1
else:
assert object_paths and s.object_path in object_paths
if num_switch_tables > 0:
logging.info(
'Found %d switch tables: Deduced %d object paths with ' +
'%d arbitrations. %d remain unassigned.', num_switch_tables,
num_deduced, num_arbitrations, num_unassigned)
def _NameStringLiterals(raw_symbols, elf_path, tool_prefix):
# Assign ASCII-readable string literals names like "string contents".
STRING_LENGTH_CUTOFF = 30
PRINTABLE_TBL = [False] * 256
for ch in string.printable:
PRINTABLE_TBL[ord(ch)] = True
for sym, name in string_extract.ReadStringLiterals(raw_symbols, elf_path,
tool_prefix):
# Newlines and tabs are used as delimiters in file_format.py
# At this point, names still have a terminating null byte.
name = name.replace(b'\n', b'').replace(b'\t', b'').strip(b'\00')
is_printable = all(PRINTABLE_TBL[c] for c in name)
if is_printable:
name = name.decode('ascii')
if len(name) > STRING_LENGTH_CUTOFF:
sym.full_name = '"{}[...]"'.format(name[:STRING_LENGTH_CUTOFF])
else:
sym.full_name = '"{}"'.format(name)
else:
sym.full_name = models.STRING_LITERAL_NAME
def _ParseElfInfo(map_path, elf_path, tool_prefix, track_string_literals,
outdir_context=None, linker_name=None):
"""Adds ELF section ranges and symbols."""
assert map_path or elf_path, 'Need a linker map or an ELF file.'
assert map_path or not track_string_literals, (
'track_string_literals not yet implemented without map file')
if elf_path:
elf_section_ranges = readelf.SectionInfoFromElf(elf_path, tool_prefix)
# Run nm on the elf file to retrieve the list of symbol names per-address.
# This list is required because the .map file contains only a single name
# for each address, yet multiple symbols are often coalesced when they are
# identical. This coalescing happens mainly for small symbols and for C++
# templates. Such symbols make up ~500kb of libchrome.so on Android.
elf_nm_result = nm.CollectAliasesByAddressAsync(elf_path, tool_prefix)
# Run nm on all .o/.a files to retrieve the symbol names within them.
# The list is used to detect when multiple .o files contain the same symbol
# (e.g. inline functions), and to update the object_path / source_path
# fields accordingly.
# Looking in object files is required because the .map file chooses a
# single path for these symbols.
# Rather than record all paths for each symbol, set the paths to be the
# common ancestor of all paths.
if outdir_context and map_path:
bulk_analyzer = obj_analyzer.BulkObjectFileAnalyzer(
tool_prefix, outdir_context.output_directory,
track_string_literals=track_string_literals)
bulk_analyzer.AnalyzePaths(outdir_context.elf_object_paths)
if map_path:
logging.info('Parsing Linker Map')
with _OpenMaybeGzAsText(map_path) as f:
map_section_ranges, raw_symbols, linker_map_extras = (
linker_map_parser.MapFileParser().Parse(linker_name, f))
if outdir_context and outdir_context.thin_archives:
_ResolveThinArchivePaths(raw_symbols, outdir_context.thin_archives)
else:
logging.info('Collecting symbols from nm')
raw_symbols = nm.CreateUniqueSymbols(elf_path, tool_prefix,
elf_section_ranges)
if elf_path and map_path:
logging.debug('Validating section sizes')
differing_elf_section_sizes = {}
differing_map_section_sizes = {}
for k, (_, elf_size) in elf_section_ranges.items():
if k in _SECTION_SIZE_BLOCKLIST:
continue
(_, map_size) = map_section_ranges.get(k)
if map_size != elf_size:
differing_map_section_sizes[k] = map_size
differing_elf_section_sizes[k] = elf_size
if differing_map_section_sizes:
logging.error('ELF file and .map file do not agree on section sizes.')
logging.error('readelf: %r', differing_elf_section_sizes)
logging.error('.map file: %r', differing_map_section_sizes)
sys.exit(1)
if elf_path and map_path and outdir_context:
missed_object_paths = _DiscoverMissedObjectPaths(
raw_symbols, outdir_context.known_inputs)
missed_object_paths = ar.ExpandThinArchives(
missed_object_paths, outdir_context.output_directory)[0]
bulk_analyzer.AnalyzePaths(missed_object_paths)
bulk_analyzer.SortPaths()
if track_string_literals:
merge_string_syms = [s for s in raw_symbols if
s.full_name == '** merge strings' or
s.full_name == '** lld merge strings']
# More likely for there to be a bug in supersize than an ELF to not have a
# single string literal.
assert merge_string_syms
string_ranges = [(s.address, s.size) for s in merge_string_syms]
bulk_analyzer.AnalyzeStringLiterals(elf_path, string_ranges)
# Map file for some reason doesn't demangle all names.
# Demangle prints its own log statement.
demangle.DemangleRemainingSymbols(raw_symbols, tool_prefix)
object_paths_by_name = {}
if elf_path:
logging.info(
'Adding symbols removed by identical code folding (as reported by nm)')
# This normally does not block (it's finished by this time).
names_by_address = elf_nm_result.get()
_UpdateSymbolNamesFromNm(raw_symbols, names_by_address)
raw_symbols = _AddNmAliases(raw_symbols, names_by_address)
if map_path and outdir_context:
object_paths_by_name = bulk_analyzer.GetSymbolNames()
logging.debug(
'Fetched path information for %d symbols from %d files',
len(object_paths_by_name),
len(outdir_context.elf_object_paths) + len(missed_object_paths))
_DeduceObjectPathForSwitchTables(raw_symbols, object_paths_by_name)
# For aliases, this provides path information where there wasn't any.
logging.info('Creating aliases for symbols shared by multiple paths')
raw_symbols = _AssignNmAliasPathsAndCreatePathAliases(
raw_symbols, object_paths_by_name)
if track_string_literals:
logging.info('Waiting for string literal extraction to complete.')
list_of_positions_by_object_path = bulk_analyzer.GetStringPositions()
bulk_analyzer.Close()
if track_string_literals:
logging.info('Deconstructing ** merge strings into literals')
replacements = _CreateMergeStringsReplacements(merge_string_syms,
list_of_positions_by_object_path)
for merge_sym, literal_syms in zip(merge_string_syms, replacements):
# Don't replace if no literals were found.
if literal_syms:
# Re-find the symbols since aliases cause their indices to change.
idx = raw_symbols.index(merge_sym)
# This assignment is a bit slow (causes array to be shifted), but
# is fast enough since len(merge_string_syms) < 10.
raw_symbols[idx:idx + 1] = literal_syms
if map_path:
linker_map_parser.DeduceObjectPathsFromThinMap(raw_symbols,
linker_map_extras)
if elf_path and track_string_literals:
_NameStringLiterals(raw_symbols, elf_path, tool_prefix)
# If we have an ELF file, use its ranges as the source of truth, since some
# sections can differ from the .map.
return (elf_section_ranges if elf_path else map_section_ranges, raw_symbols,
object_paths_by_name)
def _ComputePakFileSymbols(
file_name, contents, res_info, symbols_by_id, compression_ratio=1):
id_map = {
id(v): k
for k, v in sorted(list(contents.resources.items()), reverse=True)
}
alias_map = {
k: id_map[id(v)]
for k, v in contents.resources.items() if id_map[id(v)] != k
}
name = posixpath.basename(file_name)
# Hyphens used for language regions. E.g.: en-GB.pak, sr-Latn.pak, ...
# Longest translated .pak file without hyphen: fil.pak
if '-' in name or len(name) <= 7:
section_name = models.SECTION_PAK_TRANSLATIONS
else:
# E.g.: resources.pak, chrome_100_percent.pak.
section_name = models.SECTION_PAK_NONTRANSLATED
overhead = (12 + 6) * compression_ratio # Header size plus extra offset
# Key just needs to be unique from other IDs and pak overhead symbols.
symbols_by_id[-len(symbols_by_id) - 1] = models.Symbol(
section_name, overhead, full_name='Overhead: {}'.format(file_name))
for resource_id in sorted(contents.resources):
if resource_id in alias_map:
# 4 extra bytes of metadata (2 16-bit ints)
size = 4
resource_id = alias_map[resource_id]
else:
resource_data = contents.resources[resource_id]
# 6 extra bytes of metadata (1 32-bit int, 1 16-bit int)
size = len(resource_data) + 6
name, source_path = res_info[resource_id]
if resource_id not in symbols_by_id:
full_name = '{}: {}'.format(source_path, name)
new_symbol = models.Symbol(
section_name, 0, address=resource_id, full_name=full_name)
if (section_name == models.SECTION_PAK_NONTRANSLATED and
_IsPakContentUncompressed(resource_data)):
new_symbol.flags |= models.FLAG_UNCOMPRESSED
symbols_by_id[resource_id] = new_symbol
size *= compression_ratio
symbols_by_id[resource_id].size += size
return section_name
def _IsPakContentUncompressed(content):
raw_size = len(content)
# Assume anything less than 100 bytes cannot be compressed.
if raw_size < 100:
return False
compressed_size = len(zlib.compress(content, 1))
compression_ratio = compressed_size / float(raw_size)
return compression_ratio < _UNCOMPRESSED_COMPRESSION_RATIO_THRESHOLD
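# Illustrative behaviour (hypothetical inputs): highly repetitive data
# compresses far below the 0.9 threshold, so it is reported as uncompressed
# content, while high-entropy (already compressed) data is not:
#   _IsPakContentUncompressed(b'a' * 1000)       # -> True  (ratio << 0.9)
#   _IsPakContentUncompressed(os.urandom(1000))  # -> False (ratio ~= 1.0)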
class _ResourceSourceMapper:
def __init__(self, size_info_prefix, knobs):
self._knobs = knobs
self._res_info = self._LoadResInfo(size_info_prefix)
self._pattern_dollar_underscore = re.compile(r'\$+(.*?)(?:__\d)+')
self._pattern_version_suffix = re.compile(r'-v\d+/')
@staticmethod
def _ParseResInfoFile(res_info_path):
with open(res_info_path, 'r') as info_file:
return dict(l.rstrip().split('\t') for l in info_file)
def _LoadResInfo(self, size_info_prefix):
apk_res_info_path = size_info_prefix + '.res.info'
res_info_without_root = self._ParseResInfoFile(apk_res_info_path)
# We package resources in the res/ folder only in the apk.
res_info = {
os.path.join('res', dest): source
for dest, source in res_info_without_root.items()
}
res_info.update(self._knobs.apk_other_files)
return res_info
def FindSourceForPath(self, path):
# Sometimes android adds $ in front and __# before extension.
path = self._pattern_dollar_underscore.sub(r'\1', path)
ret = self._res_info.get(path)
if ret:
return ret
# Android build tools may append extra -v flags for the root dir.
path = self._pattern_version_suffix.sub('/', path)
ret = self._res_info.get(path)
if ret:
return ret
return None
def _ParsePakInfoFile(pak_info_path):
with open(pak_info_path, 'r') as info_file:
res_info = {}
for line in info_file.readlines():
name, res_id, path = line.split(',')
res_info[int(res_id)] = (name, path.strip())
return res_info
def _ParsePakSymbols(symbols_by_id, object_paths_by_pak_id):
raw_symbols = []
for resource_id, symbol in symbols_by_id.items():
raw_symbols.append(symbol)
paths = object_paths_by_pak_id.get(resource_id)
if not paths:
continue
symbol.object_path = paths[0]
if len(paths) == 1:
continue
aliases = symbol.aliases or [symbol]
symbol.aliases = aliases
for path in paths[1:]:
new_sym = models.Symbol(
symbol.section_name, symbol.size, address=symbol.address,
full_name=symbol.full_name, object_path=path, aliases=aliases)
aliases.append(new_sym)
raw_symbols.append(new_sym)
raw_total = 0.0
int_total = 0
for symbol in raw_symbols:
raw_total += symbol.size
# We truncate rather than round to ensure that we do not over attribute. It
# is easier to add another symbol to make up the difference.
symbol.size = int(symbol.size)
int_total += symbol.size
# Attribute excess to translations since only those are compressed.
overhead_size = round(raw_total - int_total)
if overhead_size:
raw_symbols.append(
models.Symbol(models.SECTION_PAK_TRANSLATIONS,
overhead_size,
address=raw_symbols[-1].end_address,
full_name='Overhead: Pak compression artifacts'))
# Pre-sort to make final sort faster.
# Note: _SECTION_SORT_ORDER[] for pak symbols matches section_name ordering.
raw_symbols.sort(
key=lambda s: (s.section_name, s.IsOverhead(), s.address, s.object_path))
return raw_symbols
def _ParseApkElfSectionRanges(section_ranges, metadata, apk_elf_result):
if metadata:
logging.debug('Extracting section sizes from .so within .apk')
apk_build_id, apk_section_ranges, elf_overhead_size = apk_elf_result.get()
assert apk_build_id == metadata[models.METADATA_ELF_BUILD_ID], (
'BuildID from apk_elf_result did not match')
packed_section_name = None
architecture = metadata[models.METADATA_ELF_ARCHITECTURE]
# Packing is enabled only for arm32 & arm64.
if architecture == 'arm':
packed_section_name = '.rel.dyn'
elif architecture == 'arm64':
packed_section_name = '.rela.dyn'
if packed_section_name:
unpacked_range = section_ranges.get(packed_section_name)
if unpacked_range is None:
logging.warning('Packed section not present: %s', packed_section_name)
elif unpacked_range != apk_section_ranges.get(packed_section_name):
# These ranges are different only when using relocation_packer, which
# hasn't been used since switching from gold -> lld.
apk_section_ranges['%s (unpacked)' %
packed_section_name] = unpacked_range
else:
_, apk_section_ranges, elf_overhead_size = apk_elf_result.get()
return apk_section_ranges, elf_overhead_size
class _ResourcePathDeobfuscator:
def __init__(self, pathmap_path):
self._pathmap = self._LoadResourcesPathmap(pathmap_path)
def _LoadResourcesPathmap(self, pathmap_path):
"""Load the pathmap of obfuscated resource paths.
Returns: A dict mapping from obfuscated paths to original paths or an
empty dict if passed a None |pathmap_path|.
"""
if pathmap_path is None:
return {}
pathmap = {}
with open(pathmap_path, 'r') as f:
for line in f:
line = line.strip()
if line.startswith('--') or line == '':
continue
original, renamed = line.split(' -> ')
pathmap[renamed] = original
return pathmap
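# Illustrative pathmap entry (hypothetical): per the parsing above, each line
# maps the original resource path to the obfuscated path stored in the .apk:
#   res/drawable-hdpi/ic_launcher.png -> res/x3.png
# so MaybeRemapPath('res/x3.png') below would return the original path.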
def MaybeRemapPath(self, path):
long_path = self._pathmap.get(path)
if long_path:
return long_path
# if processing a .minimal.apks, we are actually just processing the base
# module.
long_path = self._pathmap.get('base/{}'.format(path))
if long_path:
# The first 5 chars are 'base/', which we don't need because we are
# looking directly inside the base module apk.
return long_path[5:]
return path
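# Illustrative sketch for _ResourcePathDeobfuscator (hypothetical pathmap
# line): given the mapping
#   res/drawable/icon.png -> res/a1.png
# MaybeRemapPath('res/a1.png') returns 'res/drawable/icon.png'. For
# .minimal.apks the lookup is retried with a 'base/' prefix and that prefix
# is stripped from the result.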
def _ParseApkOtherSymbols(section_ranges, apk_path, apk_so_path,
resources_pathmap_path, size_info_prefix, metadata,
knobs):
res_source_mapper = _ResourceSourceMapper(size_info_prefix, knobs)
resource_deobfuscator = _ResourcePathDeobfuscator(resources_pathmap_path)
apk_symbols = []
dex_size = 0
zip_info_total = 0
zipalign_total = 0
with zipfile.ZipFile(apk_path) as z:
signing_block_size = zip_util.MeasureApkSignatureBlock(z)
for zip_info in z.infolist():
zip_info_total += zip_info.compress_size
# Account for zipalign overhead that exists in local file header.
zipalign_total += zip_util.ReadZipInfoExtraFieldLength(z, zip_info)
# Account for zipalign overhead that exists in central directory header.
# Happens when python aligns entries in apkbuilder.py, but does not
# exist when using Android's zipalign. E.g. for bundle .apks files.
zipalign_total += len(zip_info.extra)
# Skip main shared library, pak, and dex files as they are accounted for.
if (zip_info.filename == apk_so_path
or zip_info.filename.endswith('.pak')):
continue
if zip_info.filename.endswith('.dex'):
dex_size += zip_info.file_size
continue
resource_filename = resource_deobfuscator.MaybeRemapPath(
zip_info.filename)
source_path = res_source_mapper.FindSourceForPath(resource_filename)
if source_path is None:
source_path = os.path.join(models.APK_PREFIX_PATH, resource_filename)
apk_symbols.append(
models.Symbol(
models.SECTION_OTHER,
zip_info.compress_size,
source_path=source_path,
full_name=resource_filename)) # Full name must disambiguate
# Store zipalign overhead and signing block size as metadata rather than an
# "Overhead:" symbol because they fluctuate in size, and would be a source of
# noise in symbol diffs if included as symbols (http://crbug.com/1130754).
# Might be even better if we had an option in Tiger Viewer to ignore certain
# symbols, but taking this as a short-cut for now.
metadata[models.METADATA_ZIPALIGN_OVERHEAD] = zipalign_total
metadata[models.METADATA_SIGNING_BLOCK_SIZE] = signing_block_size
# Overhead includes:
# * Size of all local zip headers (minus zipalign padding).
# * Size of central directory & end of central directory.
overhead_size = (os.path.getsize(apk_path) - zip_info_total - zipalign_total -
signing_block_size)
assert overhead_size >= 0, 'Apk overhead must be non-negative'
zip_overhead_symbol = models.Symbol(
models.SECTION_OTHER, overhead_size, full_name='Overhead: APK file')
apk_symbols.append(zip_overhead_symbol)
_ExtendSectionRange(section_ranges, models.SECTION_OTHER,
sum(s.size for s in apk_symbols))
return dex_size, apk_symbols
def _CreatePakObjectMap(object_paths_by_name):
# IDS_ macro usages result in templated function calls that contain the
# resource ID in them. These names are collected along with all other symbols
# by running "nm" on them. We just need to extract the values from them.
object_paths_by_pak_id = {}
PREFIX = 'void ui::AllowlistedResource<'
id_start_idx = len(PREFIX)
id_end_idx = -len('>()')
for name in object_paths_by_name:
if name.startswith(PREFIX):
pak_id = int(name[id_start_idx:id_end_idx])
object_paths_by_pak_id[pak_id] = object_paths_by_name[name]
return object_paths_by_pak_id
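# Illustrative sketch for _CreatePakObjectMap (hypothetical symbol name): for
#   'void ui::AllowlistedResource<5300>()'
# the slicing above yields pak_id 5300, which is mapped to the object paths
# that instantiated the template.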
def _FindPakSymbolsFromApk(opts, section_ranges, apk_path, size_info_prefix):
with zipfile.ZipFile(apk_path) as z:
pak_zip_infos = (f for f in z.infolist() if f.filename.endswith('.pak'))
pak_info_path = size_info_prefix + '.pak.info'
res_info = _ParsePakInfoFile(pak_info_path)
symbols_by_id = {}
total_compressed_size = 0
total_uncompressed_size = 0
for zip_info in pak_zip_infos:
contents = data_pack.ReadDataPackFromString(z.read(zip_info))
compression_ratio = 1.0
if zip_info.compress_size < zip_info.file_size:
total_compressed_size += zip_info.compress_size
total_uncompressed_size += zip_info.file_size
compression_ratio = opts.pak_compression_ratio
section_name = _ComputePakFileSymbols(
zip_info.filename, contents,
res_info, symbols_by_id, compression_ratio=compression_ratio)
_ExtendSectionRange(section_ranges, section_name, zip_info.compress_size)
if total_uncompressed_size > 0:
actual_ratio = (
float(total_compressed_size) / total_uncompressed_size)
logging.info(
'Pak Compression Ratio: %f Actual: %f Diff: %.0f',
opts.pak_compression_ratio, actual_ratio,
(opts.pak_compression_ratio - actual_ratio) * total_uncompressed_size)
return symbols_by_id
def _FindPakSymbolsFromFiles(section_ranges, pak_files, pak_info_path,
output_directory):
"""Uses files from args to find and add pak symbols."""
res_info = _ParsePakInfoFile(pak_info_path)
symbols_by_id = {}
for pak_file_path in pak_files:
with open(pak_file_path, 'rb') as f:
contents = data_pack.ReadDataPackFromString(f.read())
section_name = _ComputePakFileSymbols(
os.path.relpath(pak_file_path, output_directory), contents, res_info,
symbols_by_id)
_ExtendSectionRange(section_ranges, section_name,
os.path.getsize(pak_file_path))
return symbols_by_id
def _CalculateElfOverhead(section_ranges, elf_path):
if elf_path:
section_sizes_total_without_bss = sum(
size for k, (address, size) in section_ranges.items()
if k not in models.BSS_SECTIONS)
elf_overhead_size = (
os.path.getsize(elf_path) - section_sizes_total_without_bss)
assert elf_overhead_size >= 0, (
'Negative ELF overhead {}'.format(elf_overhead_size))
return elf_overhead_size
return 0
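# Illustrative sketch for _CalculateElfOverhead (made-up numbers): for a
# 10.0 MiB unstripped ELF whose non-.bss sections sum to 9.8 MiB, the
# remaining ~0.2 MiB of headers, section table and padding is reported as
# ELF overhead.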
def _OverwriteSymbolSizesWithRelocationCount(raw_symbols, tool_prefix,
elf_path):
logging.info('Removing non-native symbols')
raw_symbols = [sym for sym in raw_symbols if sym.IsNative()]
logging.info('Overwriting symbol sizes with relocation count')
# Last symbol address is the end of the last symbol, so we don't misattribute
# all relros after the last symbol to that symbol.
symbol_addresses = [s.address for s in raw_symbols]
symbol_addresses.append(raw_symbols[-1].end_address)
for symbol in raw_symbols:
symbol.address = 0
symbol.size = 0
symbol.padding = 0
relro_addresses = readelf.CollectRelocationAddresses(elf_path, tool_prefix)
  # An empty list here more likely indicates a bug in supersize than an ELF
  # that truly has no relative relocations.
assert relro_addresses
logging.info('Adding %d relocations', len(relro_addresses))
for addr in relro_addresses:
    # Attribute relros to the largest symbol start address that precedes them.
idx = bisect.bisect_right(symbol_addresses, addr) - 1
if 0 <= idx < len(raw_symbols):
symbol = raw_symbols[idx]
for alias in symbol.aliases or [symbol]:
alias.size += 1
raw_symbols = [sym for sym in raw_symbols if sym.size]
return raw_symbols
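# Illustrative sketch for _OverwriteSymbolSizesWithRelocationCount
# (hypothetical addresses): with symbols starting at 0x1000 and 0x2000, a
# relocation at 0x1800 bisects to index 0, so the symbol at 0x1000 (and each
# of its aliases) has its size incremented by 1.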
def _AddUnattributedSectionSymbols(raw_symbols, section_ranges):
# Create symbols for ELF sections not covered by existing symbols.
logging.info('Searching for symbol gaps...')
new_syms_by_section = collections.defaultdict(list)
for section_name, group in itertools.groupby(
raw_symbols, lambda s: s.section_name):
    # Get the last symbol in the group.
for sym in group:
pass
end_address = sym.end_address # pylint: disable=undefined-loop-variable
size_from_syms = end_address - section_ranges[section_name][0]
overhead = section_ranges[section_name][1] - size_from_syms
assert overhead >= 0, (
('End of last symbol (%x) in section %s is %d bytes after the end of '
'section from readelf (%x).') % (end_address, section_name, -overhead,
sum(section_ranges[section_name])))
if overhead > 0 and section_name not in models.BSS_SECTIONS:
new_syms_by_section[section_name].append(
models.Symbol(section_name,
overhead,
address=end_address,
full_name='** {} (unattributed)'.format(section_name)))
logging.info('Last symbol in %s does not reach end of section, gap=%d',
section_name, overhead)
# Sections that should not bundle into ".other".
unsummed_sections, summed_sections = models.ClassifySections(
section_ranges.keys())
other_elf_symbols = []
# Sort keys to ensure consistent order (> 1 sections may have address = 0).
for section_name, (_, section_size) in list(section_ranges.items()):
# Handle sections that don't appear in |raw_symbols|.
if (section_name not in unsummed_sections
and section_name not in summed_sections):
other_elf_symbols.append(
models.Symbol(models.SECTION_OTHER,
section_size,
full_name='** ELF Section: {}'.format(section_name)))
_ExtendSectionRange(section_ranges, models.SECTION_OTHER, section_size)
other_elf_symbols.sort(key=lambda s: (s.address, s.full_name))
# TODO(agrieve): It would probably simplify things to use a dict of
# section_name->raw_symbols while creating symbols.
# Merge |new_syms_by_section| into |raw_symbols| while maintaining ordering.
ret = []
for section_name, group in itertools.groupby(
raw_symbols, lambda s: s.section_name):
ret.extend(group)
ret.extend(new_syms_by_section[section_name])
return ret, other_elf_symbols
def _ParseNinjaFiles(output_directory, elf_path=None):
linker_elf_path = elf_path
if elf_path:
# For partitioned libraries, the actual link command outputs __combined.so.
partitioned_elf_path = elf_path.replace('.so', '__combined.so')
if os.path.exists(partitioned_elf_path):
linker_elf_path = partitioned_elf_path
logging.info('Parsing ninja files, looking for %s.',
(linker_elf_path or 'source mapping only (elf_path=None)'))
source_mapper, ninja_elf_object_paths = ninja_parser.Parse(
output_directory, linker_elf_path)
logging.debug('Parsed %d .ninja files.', source_mapper.parsed_file_count)
if elf_path:
assert ninja_elf_object_paths, (
'Failed to find link command in ninja files for ' +
os.path.relpath(linker_elf_path, output_directory))
return source_mapper, ninja_elf_object_paths
def CreateContainerAndSymbols(knobs=None,
opts=None,
container_name=None,
metadata=None,
map_path=None,
tool_prefix=None,
output_directory=None,
source_directory=None,
elf_path=None,
apk_path=None,
mapping_path=None,
resources_pathmap_path=None,
apk_so_path=None,
pak_files=None,
pak_info_file=None,
linker_name=None,
size_info_prefix=None):
"""Creates a Container (with sections sizes) and symbols for a SizeInfo.
Args:
knobs: Instance of SectionSizeKnobs.
opts: Instance of ContainerArchiveOptions.
container_name: Name for the created Container. May be '' if only one
Container exists.
metadata: Metadata dict from CreateMetadata().
map_path: Path to the linker .map(.gz) file to parse.
tool_prefix: Prefix for c++filt & nm (required).
output_directory: Build output directory. If None, source_paths and symbol
alias information will not be recorded.
source_directory: Path to source root.
elf_path: Path to the corresponding unstripped ELF file. Used to find symbol
aliases and inlined functions. Can be None.
apk_path: Path to the .apk file to measure.
mapping_path: Path to the .mapping file for DEX symbol processing.
resources_pathmap_path: Path to the pathmap file that maps original
resource paths to shortened resource paths.
apk_so_path: Path to an .so file within an APK file.
pak_files: List of paths to .pak files.
pak_info_file: Path to a .pak.info file.
linker_name: A coded linker name (see linker_map_parser.py).
size_info_prefix: Path to $out/size-info/$ApkName.
Returns:
A tuple of (container, raw_symbols).
    container is a Container instance that stores metadata and section_sizes
(section_sizes maps section names to respective sizes).
raw_symbols is a list of Symbol objects.
"""
assert elf_path or not opts.relocations_mode, (
      '--relocations-mode requires an ELF file')
knobs = knobs or SectionSizeKnobs()
if apk_path and apk_so_path:
# Extraction takes around 1 second, so do it in parallel.
apk_elf_result = parallel.ForkAndCall(_ElfInfoFromApk,
(apk_path, apk_so_path, tool_prefix))
else:
apk_elf_result = None
outdir_context = None
object_source_mapper = None
address_source_mapper = None
section_ranges = {}
raw_symbols = []
if opts.analyze_native and output_directory:
if map_path:
# Finds all objects passed to the linker and creates a map of .o -> .cc.
object_source_mapper, ninja_elf_object_paths = _ParseNinjaFiles(
output_directory, elf_path)
else:
ninja_elf_object_paths = None
logging.info('Parsing source path info via dwarfdump')
address_source_mapper = dwarfdump.CreateAddressSourceMapper(
elf_path, tool_prefix)
# Start by finding elf_object_paths so that nm can run on them while the
# linker .map is being parsed.
if ninja_elf_object_paths:
elf_object_paths, thin_archives = ar.ExpandThinArchives(
ninja_elf_object_paths, output_directory)
known_inputs = set(elf_object_paths)
known_inputs.update(ninja_elf_object_paths)
else:
elf_object_paths = []
known_inputs = None
# When we don't know which elf file is used, just search all paths.
if opts.analyze_native and object_source_mapper:
thin_archives = set(
p for p in object_source_mapper.IterAllPaths() if p.endswith('.a')
and ar.IsThinArchive(os.path.join(output_directory, p)))
else:
thin_archives = None
outdir_context = _OutputDirectoryContext(
elf_object_paths=elf_object_paths,
known_inputs=known_inputs,
output_directory=output_directory,
thin_archives=thin_archives)
if opts.analyze_native:
section_ranges, raw_symbols, object_paths_by_name = _ParseElfInfo(
map_path,
elf_path,
tool_prefix,
opts.track_string_literals,
outdir_context=outdir_context,
linker_name=linker_name)
if apk_elf_result:
section_ranges, elf_overhead_size = _ParseApkElfSectionRanges(
section_ranges, metadata, apk_elf_result)
elif elf_path:
# Strip ELF before capturing section information to avoid recording
# debug sections.
with tempfile.NamedTemporaryFile(suffix=os.path.basename(elf_path)) as f:
strip_path = path_util.GetStripPath(tool_prefix)
subprocess.run([strip_path, '-o', f.name, elf_path], check=True)
section_ranges = readelf.SectionInfoFromElf(f.name, tool_prefix)
elf_overhead_size = _CalculateElfOverhead(section_ranges, f.name)
if elf_path:
raw_symbols, other_elf_symbols = _AddUnattributedSectionSymbols(
raw_symbols, section_ranges)
pak_symbols_by_id = None
other_symbols = []
if apk_path and size_info_prefix and not opts.relocations_mode:
# Can modify |section_ranges|.
pak_symbols_by_id = _FindPakSymbolsFromApk(opts, section_ranges, apk_path,
size_info_prefix)
# Can modify |section_ranges|.
dex_size, other_symbols = _ParseApkOtherSymbols(section_ranges, apk_path,
apk_so_path,
resources_pathmap_path,
size_info_prefix, metadata,
knobs)
if opts.analyze_java:
dex_symbols = apkanalyzer.CreateDexSymbols(apk_path, mapping_path,
size_info_prefix)
# We can't meaningfully track section size of dex methods vs other, so
# just fake the size of dex methods as the sum of symbols, and make
# "dex other" responsible for any unattributed bytes.
dex_method_size = int(
round(
sum(s.pss for s in dex_symbols
if s.section_name == models.SECTION_DEX_METHOD)))
section_ranges[models.SECTION_DEX_METHOD] = (0, dex_method_size)
section_ranges[models.SECTION_DEX] = (0, dex_size - dex_method_size)
dex_other_size = int(
round(
sum(s.pss for s in dex_symbols
if s.section_name == models.SECTION_DEX)))
unattributed_dex = section_ranges[models.SECTION_DEX][1] - dex_other_size
# Compare against -5 instead of 0 to guard against round-off errors.
assert unattributed_dex >= -5, ('Dex symbols take up more space than '
'the dex sections have available')
if unattributed_dex > 0:
dex_symbols.append(
models.Symbol(
models.SECTION_DEX,
unattributed_dex,
full_name='** .dex (unattributed - includes string literals)'))
raw_symbols.extend(dex_symbols)
elif pak_files and pak_info_file:
# Can modify |section_ranges|.
pak_symbols_by_id = _FindPakSymbolsFromFiles(
section_ranges, pak_files, pak_info_file, output_directory)
if elf_path:
elf_overhead_symbol = models.Symbol(
models.SECTION_OTHER, elf_overhead_size, full_name='Overhead: ELF file')
_ExtendSectionRange(section_ranges, models.SECTION_OTHER, elf_overhead_size)
other_symbols.append(elf_overhead_symbol)
other_symbols.extend(other_elf_symbols)
if pak_symbols_by_id:
logging.debug('Extracting pak IDs from symbol names, and creating symbols')
object_paths_by_pak_id = {}
if opts.analyze_native:
object_paths_by_pak_id = _CreatePakObjectMap(object_paths_by_name)
pak_raw_symbols = _ParsePakSymbols(
pak_symbols_by_id, object_paths_by_pak_id)
raw_symbols.extend(pak_raw_symbols)
# Always have .other come last.
other_symbols.sort(key=lambda s: (s.IsOverhead(), s.full_name.startswith(
'**'), s.address, s.full_name))
raw_symbols.extend(other_symbols)
_ExtractSourcePathsAndNormalizeObjectPaths(raw_symbols, object_source_mapper,
address_source_mapper)
dir_metadata.PopulateComponents(raw_symbols, source_directory)
logging.info('Converting excessive aliases into shared-path symbols')
_CompactLargeAliasesIntoSharedSymbols(raw_symbols, knobs)
logging.debug('Connecting nm aliases')
_ConnectNmAliases(raw_symbols)
if opts.relocations_mode:
raw_symbols = _OverwriteSymbolSizesWithRelocationCount(
raw_symbols, tool_prefix, elf_path)
section_sizes = {k: size for k, (address, size) in section_ranges.items()}
container = models.Container(name=container_name,
metadata=metadata,
section_sizes=section_sizes)
for symbol in raw_symbols:
symbol.container = container
# Sorting for relocations mode causes .data and .data.rel.ro to be interleaved
# due to setting all addresses to 0.
if not opts.relocations_mode:
file_format.SortSymbols(raw_symbols, check_already_mostly_sorted=True)
return container, raw_symbols
def CreateSizeInfo(build_config,
container_list,
raw_symbols_list,
normalize_names=True):
"""Performs operations on all symbols and creates a SizeInfo object."""
assert len(container_list) == len(raw_symbols_list)
all_raw_symbols = []
for raw_symbols in raw_symbols_list:
file_format.CalculatePadding(raw_symbols)
# Do not call _NormalizeNames() during archive since that method tends to
# need tweaks over time. Calling it only when loading .size files allows for
# more flexibility.
if normalize_names:
_NormalizeNames(raw_symbols)
all_raw_symbols += raw_symbols
return models.SizeInfo(build_config, container_list, all_raw_symbols)
@functools.lru_cache
def _DetectGitRevision(directory):
"""Runs git rev-parse to get the SHA1 hash of the current revision.
Args:
directory: Path to directory where rev-parse command will be run.
Returns:
    A string with the SHA1 hash, or None if an error occurred.
"""
try:
git_rev = subprocess.check_output(
['git', '-C', directory, 'rev-parse', 'HEAD']).decode('ascii')
return git_rev.rstrip()
except Exception:
logging.warning('Failed to detect git revision for file metadata.')
return None
def _ElfIsMainPartition(elf_path, tool_prefix):
section_ranges = readelf.SectionInfoFromElf(elf_path, tool_prefix)
return models.SECTION_PART_END in section_ranges.keys()
def _CountRelocationsFromElf(elf_path, tool_prefix):
args = [path_util.GetObjDumpPath(tool_prefix), '--private-headers', elf_path]
stdout = subprocess.check_output(args).decode('ascii')
  relocations = re.search(r'REL[AR]?COUNT\s*(.+)', stdout).group(1)
return int(relocations, 16)
@functools.lru_cache
def _ParseGnArgs(args_path):
"""Returns a list of normalized "key=value" strings."""
args = {}
with open(args_path) as f:
for l in f:
# Strips #s even if within string literal. Not a problem in practice.
parts = l.split('#')[0].split('=')
if len(parts) != 2:
continue
args[parts[0].strip()] = parts[1].strip()
return ["%s=%s" % x for x in sorted(args.items())]
def _DetectLinkerName(map_path):
with _OpenMaybeGzAsText(map_path) as f:
return linker_map_parser.DetectLinkerNameFromMapFile(f)
def _ElfInfoFromApk(apk_path, apk_so_path, tool_prefix):
"""Returns a tuple of (build_id, section_ranges, elf_overhead_size)."""
with zip_util.UnzipToTemp(apk_path, apk_so_path) as temp:
build_id = readelf.BuildIdFromElf(temp, tool_prefix)
section_ranges = readelf.SectionInfoFromElf(temp, tool_prefix)
elf_overhead_size = _CalculateElfOverhead(section_ranges, temp)
return build_id, section_ranges, elf_overhead_size
def _AddContainerArguments(parser):
"""Add arguments applicable to a single container."""
# Special: Use _IdentifyInputFile() to detect main file argument.
parser.add_argument('-f', metavar='FILE',
help='Auto-identify input file type.')
# Main file argument: Exactly one should be specified (perhaps via -f), with
# the exception that --map-file can be specified in addition.
# _IdentifyInputFile() should be kept updated.
parser.add_argument('--apk-file',
help='.apk file to measure. Other flags can generally be '
'derived when this is used.')
parser.add_argument('--minimal-apks-file',
help='.minimal.apks file to measure. Other flags can '
'generally be derived when this is used.')
parser.add_argument('--elf-file', help='Path to input ELF file.')
parser.add_argument('--map-file',
help='Path to input .map(.gz) file. Defaults to '
'{{elf_file}}.map(.gz)?. If given without '
'--elf-file, no size metadata will be recorded.')
# Auxiliary file arguments.
parser.add_argument('--mapping-file',
help='Proguard .mapping file for deobfuscation.')
parser.add_argument('--resources-pathmap-file',
                      help='.pathmap.txt file that contains a mapping from '
'original resource paths to shortened resource paths.')
parser.add_argument('--pak-file', action='append',
help='Paths to pak files.')
parser.add_argument('--pak-info-file',
help='This file should contain all ids found in the pak '
'files that have been passed in.')
parser.add_argument('--aux-elf-file',
help='Path to auxiliary ELF if the main file is APK, '
'useful for capturing metadata.')
# Non-file argument.
parser.add_argument('--no-string-literals', dest='track_string_literals',
default=True, action='store_false',
help='Disable breaking down "** merge strings" into more '
'granular symbols.')
parser.add_argument('--no-map-file',
dest='ignore_linker_map',
action='store_true',
help='Use debug information to capture symbol sizes '
'instead of linker map file.')
parser.add_argument(
'--relocations',
action='store_true',
help='Instead of counting binary size, count number of relative '
'relocation instructions in ELF code.')
parser.add_argument(
'--java-only', action='store_true', help='Run on only Java symbols')
parser.add_argument(
'--native-only', action='store_true', help='Run on only native symbols')
parser.add_argument(
'--no-java', action='store_true', help='Do not run on Java symbols')
parser.add_argument(
'--no-native', action='store_true', help='Do not run on native symbols')
parser.add_argument(
'--include-padding',
action='store_true',
help='Include a padding field for each symbol, instead of rederiving '
'from consecutive symbols on file load.')
parser.add_argument(
'--check-data-quality',
action='store_true',
help='Perform sanity checks to ensure there is no missing data.')
# The split_name arg is used for bundles to identify DFMs.
parser.set_defaults(split_name=None)
def AddArguments(parser):
parser.add_argument('size_file', help='Path to output .size file.')
parser.add_argument('--source-directory',
help='Custom path to the root source directory.')
parser.add_argument('--output-directory',
help='Path to the root build directory.')
parser.add_argument('--tool-prefix',
help='Path prefix for c++filt, nm, readelf.')
parser.add_argument(
'--no-output-directory',
action='store_true',
help='Skips all data collection that requires build intermediates.')
parser.add_argument('--ssargs-file',
help='Path to SuperSize multi-container arguments file.')
_AddContainerArguments(parser)
def _IdentifyInputFile(args, on_config_error):
"""Identifies main input file type from |args.f|, and updates |args|.
Identification is performed on filename alone, i.e., the file need not exist.
The result is written to a field in |args|. If the field exists then it
simply gets overwritten.
If '.' is missing from |args.f| then --elf-file is assumed.
Returns:
The primary input file.
"""
if args.f:
if args.f.endswith('.minimal.apks'):
args.minimal_apks_file = args.f
elif args.f.endswith('.apk'):
args.apk_file = args.f
elif args.f.endswith('.so') or '.' not in os.path.basename(args.f):
args.elf_file = args.f
elif args.f.endswith('.map') or args.f.endswith('.map.gz'):
args.map_file = args.f
elif args.f.endswith('.ssargs'):
# Fails if trying to nest them, which should never happen.
args.ssargs_file = args.f
else:
on_config_error('Cannot identify file ' + args.f)
args.f = None
ret = [
args.apk_file, args.elf_file, args.minimal_apks_file,
args.__dict__.get('ssargs_file')
]
ret = [v for v in ret if v]
# --map-file can be a main file, or used with another main file.
if not ret and args.map_file:
ret.append(args.map_file)
elif not ret:
on_config_error(
'Must pass at least one of --apk-file, --minimal-apks-file, '
'--elf-file, --map-file, --ssargs-file')
elif len(ret) > 1:
on_config_error(
'Found colliding --apk-file, --minimal-apk-file, --elf-file, '
'--ssargs-file')
return ret[0]
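# Illustrative sketch for _IdentifyInputFile (hypothetical values): passing
# -f out/Release/Foo.apk sets args.apk_file and returns that path as the
# main input, while -f libfoo.so (or any extension-less name) is treated as
# --elf-file.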
def ParseSsargs(lines):
"""Parses .ssargs data.
An .ssargs file is a text file to specify multiple containers as input to
  SuperSize-archive. After '#'-based comments, leading/trailing whitespace, and
  empty lines are stripped, each line specifies a distinct container. Format:
* Positional argument: |name| for the container.
* Main input file specified by -f, --apk-file, --elf-file, etc.:
* Can be an absolute path.
* Can be a relative path. In this case, it's up to the caller to supply the
base directory.
* -f switch must not specify another .ssargs file.
* For supported switches: See _AddContainerArguments().
Args:
lines: An iterator containing lines of .ssargs data.
Returns:
A list of arguments, one for each container.
Raises:
ValueError: Parse error, including input line number.
"""
sub_args_list = []
parser = argparse.ArgumentParser(add_help=False)
parser.error = lambda msg: (_ for _ in ()).throw(ValueError(msg))
parser.add_argument('name')
_AddContainerArguments(parser)
try:
for lineno, line in enumerate(lines, 1):
toks = shlex.split(line, comments=True)
if not toks: # Skip if line is empty after stripping comments.
continue
sub_args_list.append(parser.parse_args(toks))
except ValueError as e:
e.args = ('Line %d: %s' % (lineno, e.args[0]), )
raise e
return sub_args_list
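# Illustrative sketch of .ssargs content for ParseSsargs (container names
# and paths are made up):
#   # One container per line.
#   Base    -f apks/Base.apk
#   Chrome  --apk-file apks/Chrome.apk --no-string-literals
# Each non-comment line is parsed with the container parser above, yielding
# one argparse.Namespace per container (e.g. name='Base', f='apks/Base.apk').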
def _DeduceNativeInfo(tentative_output_dir, apk_path, elf_path, map_path,
ignore_linker_map, tool_prefix, on_config_error):
apk_so_path = None
if apk_path:
with zipfile.ZipFile(apk_path) as z:
lib_infos = [
f for f in z.infolist()
if f.filename.endswith('.so') and f.file_size > 0
]
if not lib_infos:
return None, map_path, None
# TODO(agrieve): Add support for multiple .so files, and take into account
# secondary architectures.
apk_so_path = max(lib_infos, key=lambda x: x.file_size).filename
logging.debug('Sub-apk path=%s', apk_so_path)
if not elf_path and tentative_output_dir:
elf_path = os.path.join(
tentative_output_dir, 'lib.unstripped',
os.path.basename(apk_so_path.replace('crazy.', '')))
logging.debug('Detected --elf-file=%s', elf_path)
if map_path:
if not map_path.endswith('.map') and not map_path.endswith('.map.gz'):
on_config_error('Expected --map-file to end with .map or .map.gz')
elif elf_path:
# TODO(agrieve): Support breaking down partitions.
is_partition = elf_path.endswith('_partition.so')
if is_partition:
on_config_error('Found unexpected _partition.so: ' + elf_path)
if not ignore_linker_map:
if _ElfIsMainPartition(elf_path, tool_prefix):
map_path = elf_path.replace('.so', '__combined.so') + '.map'
else:
map_path = elf_path + '.map'
if not os.path.exists(map_path):
map_path += '.gz'
if not ignore_linker_map and not os.path.exists(map_path):
# Consider a missing linker map fatal only for the base module. For .so
# files in feature modules, allow skipping breakdowns.
on_config_error(
'Could not find .map(.gz)? file. Ensure you have built with '
'is_official_build=true and generate_linker_map=true, or use '
        '--map-file to point me to a linker map file, or use --no-map-file.')
return elf_path, map_path, apk_so_path
def _DeduceAuxPaths(args, apk_prefix):
mapping_path = args.mapping_file
resources_pathmap_path = args.resources_pathmap_file
if apk_prefix:
if not mapping_path:
mapping_path = apk_prefix + '.mapping'
logging.debug('Detected --mapping-file=%s', mapping_path)
if not resources_pathmap_path:
possible_pathmap_path = apk_prefix + '.pathmap.txt'
# This could be pointing to a stale pathmap file if path shortening was
# previously enabled but is disabled for the current build. However, since
# current apk/aab will have unshortened paths, looking those paths up in
# the stale pathmap which is keyed by shortened paths would not find any
# mapping and thus should not cause any issues.
if os.path.exists(possible_pathmap_path):
resources_pathmap_path = possible_pathmap_path
logging.debug('Detected --resources-pathmap-file=%s',
resources_pathmap_path)
return mapping_path, resources_pathmap_path
def _ReadMultipleArgsFromStream(lines, base_dir, err_prefix, on_config_error):
try:
ret = ParseSsargs(lines)
except ValueError as e:
on_config_error('%s: %s' % (err_prefix, e.args[0]))
for sub_args in ret:
for k, v in sub_args.__dict__.items():
# Translate file arguments to be relative to |sub_dir|.
if (k.endswith('_file') or k == 'f') and isinstance(v, str):
sub_args.__dict__[k] = os.path.join(base_dir, v)
return ret
def _ReadMultipleArgsFromFile(ssargs_file, on_config_error):
with open(ssargs_file, 'r') as fh:
lines = list(fh)
err_prefix = 'In file ' + ssargs_file
# Supply |base_dir| as the directory containing the .ssargs file, to ensure
# consistent behavior wherever SuperSize-archive runs.
base_dir = os.path.dirname(os.path.abspath(ssargs_file))
return _ReadMultipleArgsFromStream(lines, base_dir, err_prefix,
on_config_error)
def _ProcessContainerArgs(top_args, sub_args, container_name, on_config_error):
# Copy output_directory, tool_prefix, etc. into sub_args.
for k, v in top_args.__dict__.items():
sub_args.__dict__.setdefault(k, v)
opts = ContainerArchiveOptions(top_args, sub_args)
apk_prefix = sub_args.minimal_apks_file or sub_args.apk_file
if apk_prefix:
# Allow either .minimal.apks or just .apks.
apk_prefix = apk_prefix.replace('.minimal.apks', '.aab')
apk_prefix = apk_prefix.replace('.apks', '.aab')
sub_args.mapping_path, resources_pathmap_path = _DeduceAuxPaths(
sub_args, apk_prefix)
linker_name = None
if opts.analyze_native:
is_base_module = sub_args.split_name in (None, 'base')
# We don't yet support analyzing .so files outside of base modules.
if not is_base_module:
opts.analyze_native = False
else:
tool_prefix_finder = path_util.ToolPrefixFinder(
value=sub_args.tool_prefix,
output_directory=top_args.output_directory,
linker_name='lld')
sub_args.elf_file, sub_args.map_file, apk_so_path = _DeduceNativeInfo(
tentative_output_dir=top_args.output_directory,
apk_path=sub_args.apk_file,
elf_path=sub_args.elf_file or sub_args.aux_elf_file,
map_path=sub_args.map_file,
ignore_linker_map=sub_args.ignore_linker_map,
tool_prefix=tool_prefix_finder.Finalized(),
on_config_error=on_config_error)
if sub_args.ignore_linker_map:
sub_args.map_file = None
if opts.analyze_native:
if sub_args.map_file:
linker_name = _DetectLinkerName(sub_args.map_file)
logging.info('Linker name: %s', linker_name)
else:
# TODO(crbug.com/1193507): Remove when we implement string literal
# tracking without map files.
# nm emits some string literal symbols, but most exist in symbol gaps.
opts.track_string_literals = False
tool_prefix_finder = path_util.ToolPrefixFinder(
value=sub_args.tool_prefix,
output_directory=top_args.output_directory,
linker_name=linker_name)
sub_args.tool_prefix = tool_prefix_finder.Finalized()
else:
# Trust that these values will not be used, and set to None.
sub_args.elf_file = None
sub_args.map_file = None
apk_so_path = None
size_info_prefix = None
if top_args.output_directory and apk_prefix:
size_info_prefix = os.path.join(top_args.output_directory, 'size-info',
os.path.basename(apk_prefix))
# Need one or the other to have native symbols.
if not sub_args.elf_file and not sub_args.map_file:
opts.analyze_native = False
container_args = sub_args.__dict__.copy()
container_args.update(opts.__dict__)
logging.info('Container Params: %r', container_args)
return (sub_args, opts, container_name, apk_so_path, resources_pathmap_path,
linker_name, size_info_prefix)
def _IsOnDemand(apk_path):
# Check if the manifest specifies whether or not to extract native libs.
output = subprocess.check_output([
path_util.GetAapt2Path(), 'dump', 'xmltree', '--file',
'AndroidManifest.xml', apk_path
]).decode('ascii')
def parse_attr(name):
# http://schemas.android.com/apk/res/android:isFeatureSplit(0x0101055b)=true
# http://schemas.android.com/apk/distribution:onDemand=true
m = re.search(name + r'(?:\(.*?\))?=(\w+)', output)
return m and m.group(1) == 'true'
is_feature_split = parse_attr('android:isFeatureSplit')
# Can use <dist:on-demand>, or <module dist:onDemand="true">.
on_demand = parse_attr(
'distribution:onDemand') or 'distribution:on-demand' in output
on_demand = bool(on_demand and is_feature_split)
return on_demand
def _IterSubArgs(top_args, on_config_error):
"""Generates main paths (may be deduced) for each containers given by input.
Yields:
For each container, main paths and other info needed to create size_info.
"""
main_file = _IdentifyInputFile(top_args, on_config_error)
if top_args.no_output_directory:
top_args.output_directory = None
else:
output_directory_finder = path_util.OutputDirectoryFinder(
value=top_args.output_directory,
any_path_within_output_directory=main_file)
top_args.output_directory = output_directory_finder.Finalized()
if not top_args.source_directory:
top_args.source_directory = path_util.GetSrcRootFromOutputDirectory(
top_args.output_directory)
assert top_args.source_directory
if top_args.ssargs_file:
sub_args_list = _ReadMultipleArgsFromFile(top_args.ssargs_file,
on_config_error)
else:
sub_args_list = [top_args]
# Do a quick first pass to ensure inputs have been built.
for sub_args in sub_args_list:
main_file = _IdentifyInputFile(sub_args, on_config_error)
if not os.path.exists(main_file):
raise Exception('Input does not exist: ' + main_file)
# Each element in |sub_args_list| specifies a container.
for sub_args in sub_args_list:
main_file = _IdentifyInputFile(sub_args, on_config_error)
if hasattr(sub_args, 'name'):
container_name = sub_args.name
else:
container_name = os.path.basename(main_file)
if set(container_name) & set('<>?'):
      on_config_error('Container name cannot have characters in "<>?"')
# If needed, extract .apk file to a temp file and process that instead.
if sub_args.minimal_apks_file:
for module_name, _ in _GetModuleInfoList(sub_args.minimal_apks_file):
with zip_util.UnzipToTemp(
sub_args.minimal_apks_file,
'splits/{}-master.apk'.format(module_name)) as temp:
module_sub_args = copy.copy(sub_args)
module_sub_args.apk_file = temp
module_sub_args.split_name = module_name
module_sub_args.name = '{}/{}.apk'.format(container_name, module_name)
# Make on-demand a part of the name so that:
# * It's obvious from the name which DFMs are on-demand.
# * Diffs that change an on-demand status show as adds/removes.
if _IsOnDemand(temp):
module_sub_args.name += '?'
if module_name != 'base':
# TODO(crbug.com/1143690): Fix native analysis for split APKs.
module_sub_args.map_file = None
yield _ProcessContainerArgs(top_args, module_sub_args,
module_sub_args.name, on_config_error)
else:
yield _ProcessContainerArgs(top_args, sub_args, container_name,
on_config_error)
def Run(top_args, on_config_error):
if not top_args.size_file.endswith('.size'):
on_config_error('size_file must end with .size')
if top_args.check_data_quality:
start_time = time.time()
knobs = SectionSizeKnobs()
build_config = {}
seen_container_names = set()
container_list = []
raw_symbols_list = []
# Iterate over each container.
for (sub_args, opts, container_name, apk_so_path, resources_pathmap_path,
linker_name, size_info_prefix) in _IterSubArgs(top_args,
on_config_error):
if container_name in seen_container_names:
raise ValueError('Duplicate container name: {}'.format(container_name))
seen_container_names.add(container_name)
metadata = CreateMetadata(sub_args, linker_name, build_config)
container, raw_symbols = CreateContainerAndSymbols(
knobs=knobs,
opts=opts,
container_name=container_name,
metadata=metadata,
map_path=sub_args.map_file,
tool_prefix=sub_args.tool_prefix,
elf_path=sub_args.elf_file,
apk_path=sub_args.apk_file,
mapping_path=sub_args.mapping_path,
output_directory=sub_args.output_directory,
source_directory=sub_args.source_directory,
resources_pathmap_path=resources_pathmap_path,
apk_so_path=apk_so_path,
pak_files=sub_args.pak_file,
pak_info_file=sub_args.pak_info_file,
linker_name=linker_name,
size_info_prefix=size_info_prefix)
container_list.append(container)
raw_symbols_list.append(raw_symbols)
size_info = CreateSizeInfo(build_config,
container_list,
raw_symbols_list,
normalize_names=False)
if logging.getLogger().isEnabledFor(logging.DEBUG):
for line in data_quality.DescribeSizeInfoCoverage(size_info):
logging.debug(line)
logging.info('Recorded info for %d symbols', len(size_info.raw_symbols))
for container in size_info.containers:
logging.info('Recording metadata: \n %s',
'\n '.join(describe.DescribeDict(container.metadata)))
logging.info('Saving result to %s', top_args.size_file)
file_format.SaveSizeInfo(size_info,
top_args.size_file,
include_padding=top_args.include_padding)
size_in_mb = os.path.getsize(top_args.size_file) / 1024.0 / 1024.0
logging.info('Done. File size is %.2fMiB.', size_in_mb)
if top_args.check_data_quality:
logging.info('Checking data quality')
data_quality.CheckDataQuality(size_info, top_args.track_string_literals)
duration = (time.time() - start_time) / 60
if duration > 10:
raise data_quality.QualityCheckError(
'Command should not take longer than 10 minutes.'
' Took {:.1f} minutes.'.format(duration))
|
py | b406fe504f94c97c8933d04f49f2c7a59195f538 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: st9_8
"""
Utility command line script used to convert a TSV-annotated file to the txt lines file format.
Usage:
python convert_tsv_line.py [filename]
"""
from pathlib import Path
import sys
import csv
import json
if __name__ == '__main__':
if len(sys.argv) <= 1:
print('Error: Please provide the file to be converted')
sys.exit(1)
file_path = Path(sys.argv[1])
output_file = file_path.parent / 'questions_annoted_ner_data.txt'
if len(sys.argv) == 4:
if sys.argv[2] == '-o':
output_file = sys.argv[3]
else:
            print(f'Error: Unrecognized option "{sys.argv[2]}"')
sys.exit(1)
with open(file_path) as tsv_file:
with open(output_file, 'w') as lines:
tsv_reader = csv.reader(tsv_file, delimiter='\t')
line_text = []
ner_text = []
for row in tsv_reader:
if row:
line_text.append(row[0].strip())
if len(row[1]) > 1:
for word in row[0].split():
ner_text.append(row[1])
else:
ner_text.append(row[1].strip())
else:
if 'question' in file_path.name:
lines.write(' '.join(line_text) + ' ?')
lines.write('\n')
lines.write(' '.join(ner_text))
lines.write('\n')
lines.write('\n')
else:
lines.write(' '.join(line_text))
lines.write('\n')
lines.write(' '.join(ner_text))
lines.write('\n')
line_text = []
ner_text = []
    print('Successfully converted tsv data to the lines file format')
print(f'New file saved at: {output_file}')
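# Illustrative sketch (hypothetical tab-separated token/tag rows): the rows
#   Where O / is O / Douala B-LOC / <blank row>
# become, for a questions file, the two output lines
#   Where is Douala ?
#   O O B-LOC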
# python convert_tsv_to_lines.py ../ner/sentences_ner_training_data.tsv -o ../data/CRF_training/sentences_annotated_ner_data.txt
# python convert_tsv_to_lines.py ../ner/questions_ner_data.tsv -o ../data/CRF_training/questions_annotated_ner_data.txt |
py | b406fec944f302dd55f6d80b51bc4e0ea0c65406 | """Unit tests for PyGraphviz interface."""
import os
import tempfile
from nose import SkipTest
from nose.tools import assert_true, assert_equal, assert_raises
from networkx.testing import assert_edges_equal, assert_nodes_equal
import networkx as nx
class TestAGraph(object):
@classmethod
def setupClass(cls):
global pygraphviz
try:
import pygraphviz
except ImportError:
raise SkipTest('PyGraphviz not available.')
def build_graph(self, G):
edges = [('A', 'B'), ('A', 'C'), ('A', 'C'), ('B', 'C'), ('A', 'D')]
G.add_edges_from(edges)
G.add_node('E')
G.graph['metal'] = 'bronze'
return G
def assert_equal(self, G1, G2):
assert_nodes_equal(G1.nodes(), G2.nodes())
assert_edges_equal(G1.edges(), G2.edges())
assert_equal(G1.graph['metal'], G2.graph['metal'])
def agraph_checks(self, G):
G = self.build_graph(G)
A = nx.nx_agraph.to_agraph(G)
H = nx.nx_agraph.from_agraph(A)
self.assert_equal(G, H)
fname = tempfile.mktemp()
nx.drawing.nx_agraph.write_dot(H, fname)
Hin = nx.nx_agraph.read_dot(fname)
os.unlink(fname)
self.assert_equal(H, Hin)
(fd, fname) = tempfile.mkstemp()
with open(fname, 'w') as fh:
nx.drawing.nx_agraph.write_dot(H, fh)
with open(fname, 'r') as fh:
Hin = nx.nx_agraph.read_dot(fh)
os.unlink(fname)
self.assert_equal(H, Hin)
def test_from_agraph_name(self):
G = nx.Graph(name='test')
A = nx.nx_agraph.to_agraph(G)
H = nx.nx_agraph.from_agraph(A)
assert_equal(G.name, 'test')
def test_undirected(self):
self.agraph_checks(nx.Graph())
def test_directed(self):
self.agraph_checks(nx.DiGraph())
def test_multi_undirected(self):
self.agraph_checks(nx.MultiGraph())
def test_multi_directed(self):
self.agraph_checks(nx.MultiDiGraph())
def test_view_pygraphviz(self):
G = nx.Graph() # "An empty graph cannot be drawn."
assert_raises(nx.NetworkXException, nx.nx_agraph.view_pygraphviz, G)
G = nx.barbell_graph(4, 6)
nx.nx_agraph.view_pygraphviz(G)
    def test_view_pygraphviz_edgelabel(self):
G = nx.Graph()
G.add_edge(1, 2, weight=7)
G.add_edge(2, 3, weight=8)
nx.nx_agraph.view_pygraphviz(G, edgelabel='weight')
def test_graph_with_reserved_keywords(self):
# test attribute/keyword clash case for #1582
# node: n
# edges: u,v
G = nx.Graph()
G = self.build_graph(G)
G.node['E']['n'] = 'keyword'
G.edges[('A', 'B')]['u'] = 'keyword'
G.edges[('A', 'B')]['v'] = 'keyword'
A = nx.nx_agraph.to_agraph(G)
|
py | b407001dfa15cbb427481e49f7a9d91b62c15e76 | """
Given an unsorted array of integers and a number k < size(array),
find the kth largest integer in the array
"""
import sys
from typing import List
import heapq
def left_child_index(index: int) -> int:
return 1 + (index << 1)
def right_child_index(index: int) -> int:
return 2 + (index << 1)
class MinHeap:
def __init__(self, array: List[int]) -> None:
self.heap = array
self.size = len(self.heap)
self.temp_val = sys.maxsize
def heapify(self, index) -> None:
while index < self.size:
left_index = left_child_index(index)
right_index = right_child_index(index)
min_index = index
if left_index < self.size and self.heap[min_index] > self.heap[left_index]:
min_index = left_index
if right_index < self.size and self.heap[min_index] > self.heap[right_index]:
min_index = right_index
if min_index == index:
break
else:
self.heap[min_index], self.heap[index] = self.heap[index], self.heap[min_index]
index = min_index
def remove_element(self) -> int:
return_value = self.heap[0]
self.heap[0] = self.temp_val
self.heapify(0)
self.size -= 1
return return_value
def kth_largest_element_min_heap(array: List[int], k: int) -> int:
# create a min heap of first k elements
heap = MinHeap(array[:k])
for index in range(k//2, -1, -1):
heap.heapify(index)
# for each of the remaining elements, if found greater than min,
# replace and heapify
for element in array[k:]:
if element > heap.heap[0]:
heap.heap[0] = element
heap.heapify(0)
# return the top element. it's the kth largest element
return heap.remove_element()
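# Worked example for kth_largest_element_min_heap (illustrative):
# array=[3, 1, 5, 2, 4], k=2. The min-heap of the first two elements is
# [1, 3]; 5 > 1 replaces the root -> [3, 5]; 2 is skipped; 4 > 3 -> [4, 5];
# the root, 4, is the 2nd largest element.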
class MaxHeap(MinHeap):
def __init__(self, array: List[int]) -> None:
super().__init__(array)
self.temp_val = -sys.maxsize
def heapify(self, index) -> None:
while index < self.size:
left_index = left_child_index(index)
right_index = right_child_index(index)
max_index = index
if left_index < self.size and self.heap[max_index] < self.heap[left_index]:
max_index = left_index
if right_index < self.size and self.heap[max_index] < self.heap[right_index]:
max_index = right_index
if max_index == index:
break
else:
self.heap[max_index], self.heap[index] = self.heap[index], self.heap[max_index]
index = max_index
def kth_largest_element_max_heap(array: List[int], k: int) -> int:
heap = MaxHeap(array)
for index in range(len(array) // 2, -1, -1):
heap.heapify(index)
for _ in range(k-1):
heap.remove_element()
return heap.remove_element()
def kth_largest_element_inbuilt_heap(array: List[int], k: int) -> int:
heapq.heapify(array)
for _ in range(len(array) - k):
heapq.heappop(array)
return heapq.heappop(array)
if __name__ == "__main__":
assert kth_largest_element_min_heap([11, 3, 2, 1, 15, 5, 4, 45, 88, 96, 50, 45], 3) == 50
assert kth_largest_element_max_heap([11, 3, 2, 1, 15, 5, 4, 45, 88, 96, 50, 45], 3) == 50
assert kth_largest_element_inbuilt_heap([11, 3, 2, 1, 15, 5, 4, 45, 88, 96, 50, 45], 3) == 50
|
py | b407003949163aa02e83eed8a57414719389fb00 | from types import SimpleNamespace as NS
from .coord_cartesian import coord_cartesian
class coord_flip(coord_cartesian):
"""
Flipped cartesian coordinates
The horizontal becomes vertical, and vertical becomes
horizontal. This is primarily useful for converting
geoms and statistics which display y conditional
on x, to x conditional on y.
Parameters
----------
xlim : None | (float, float)
Limits for x axis. If None, then they are
automatically computed.
ylim : None | (float, float)
Limits for y axis. If None, then they are
automatically computed.
expand : bool
If `True`, expand the coordinate axes by
some factor. If `False`, use the limits
from the data.
"""
def labels(self, label_lookup):
return flip_labels(coord_cartesian.labels(
self, label_lookup))
def transform(self, data, panel_params, munch=False):
data = flip_labels(data)
return coord_cartesian.transform(self, data,
panel_params,
munch=munch)
def setup_panel_params(self, scale_x, scale_y):
panel_params = coord_cartesian.setup_panel_params(
self, scale_x, scale_y)
return flip_labels(panel_params)
def setup_layout(self, layout):
# switch the scales
x, y = 'SCALE_X', 'SCALE_Y'
layout[x], layout[y] = layout[y].copy(), layout[x].copy()
return layout
def range(self, panel_params):
"""
Return the range along the dimensions of the coordinate system
"""
# Defaults to providing the 2D x-y ranges
return NS(x=panel_params.y.range,
y=panel_params.x.range)
def flip_labels(obj):
"""
Rename fields x to y and y to x
Parameters
----------
obj : dict_like | types.SimpleNamespace
Object with labels to rename
"""
def sub(a, b):
"""
Substitute all keys that start with a to b
"""
for label in list(obj.keys()):
if label.startswith(a):
new_label = b+label[1:]
obj[new_label] = obj.pop(label)
if hasattr(obj, 'keys'): # dict or dataframe
sub('x', 'z')
sub('y', 'x')
sub('z', 'y')
elif hasattr(obj, 'x') and hasattr(obj, 'y'):
obj.x, obj.y = obj.y, obj.x
return obj
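# Illustrative sketch: flip_labels({'x_range': (0, 1), 'y_range': (5, 9)})
# returns {'y_range': (0, 1), 'x_range': (5, 9)}; the temporary 'z' prefix
# above keeps the x and y keys from colliding mid-rename.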
|
py | b407009afb74cec3d9c519cc6fca86cf643db343 | import nexmo
class Voice():
#application_id and private_key are needed for the calling methods
#Passing a Nexmo Client is also possible
def __init__(
self,
client=None,
application_id=None,
private_key=None,
):
try:
# Client is protected
self._client = client
if self._client is None:
self._client = nexmo.Client(application_id=application_id, private_key=private_key)
except Exception as e:
print('Error: {error_message}'.format(error_message=str(e)))
# Creates a new call session
def create_call(self, params=None, **kwargs):
return self._jwt_signed_post("/v1/calls", params or kwargs)
# Get call history paginated. Pass start and end dates to filter the retrieved information
def get_calls(self, params=None, **kwargs):
return self._jwt_signed_get("/v1/calls", params or kwargs)
# Get a single call record by identifier
def get_call(self, uuid):
return self._jwt_signed_get("/v1/calls/{uuid}".format(uuid=uuid))
# Update call data using custom ncco
def update_call(self, uuid, params=None, **kwargs):
return self._jwt_signed_put(
"/v1/calls/{uuid}".format(uuid=uuid), params or kwargs
)
# Plays audio streaming into call in progress - stream_url parameter is required
def send_audio(self, uuid, params=None, **kwargs):
return self._jwt_signed_put(
"/v1/calls/{uuid}/stream".format(uuid=uuid), params or kwargs
)
    # Plays speech into the specified call - text parameter (text to speech) is required
def send_speech(self, uuid, params=None, **kwargs):
return self._jwt_signed_put(
"/v1/calls/{uuid}/talk".format(uuid=uuid), params or kwargs
)
# plays DTMF tones into the specified call
def send_dtmf(self, uuid, params=None, **kwargs):
return self._jwt_signed_put(
"/v1/calls/{uuid}/dtmf".format(uuid=uuid), params or kwargs
)
# Stops audio recently played into specified call
def stop_audio(self, uuid):
return self._jwt_signed_delete("/v1/calls/{uuid}/stream".format(uuid=uuid))
# Stop a speech recently played into specified call
def stop_speech(self, uuid):
return self._jwt_signed_delete("/v1/calls/{uuid}/talk".format(uuid=uuid))
# Deprecated section
    # These methods are deprecated; to use them, the client must be constructed with key and secret parameters
def initiate_call(self, params=None, **kwargs):
return self._client.post(self._client.host(), "/call/json", params or kwargs)
def initiate_tts_call(self, params=None, **kwargs):
return self._client.post(self._client.api_host(), "/tts/json", params or kwargs)
def initiate_tts_prompt_call(self, params=None, **kwargs):
return self._client.post(self._client.api_host(), "/tts-prompt/json", params or kwargs)
# End deprecated section
# Utils methods
    # _jwt_signed_post: private method that allows the developer to perform a signed POST request
def _jwt_signed_post(self, request_uri, params):
uri = "https://{api_host}{request_uri}".format(
api_host=self._client.api_host(), request_uri=request_uri
)
# Uses the client session to perform the call action with api
return self._client.parse(
self._client.api_host(), self._client.session.post(uri, json=params, headers=self._client._headers())
)
    # _jwt_signed_get: private method that allows the developer to perform a signed GET request
def _jwt_signed_get(self, request_uri, params=None):
uri = "https://{api_host}{request_uri}".format(
api_host=self._client.api_host(), request_uri=request_uri
)
return self._client.parse(
self._client.api_host(),
self._client.session.get(uri, params=params or {}, headers=self._client._headers()),
)
    # _jwt_signed_put: private method that allows the developer to perform a signed PUT request
def _jwt_signed_put(self, request_uri, params):
uri = "https://{api_host}{request_uri}".format(
api_host=self._client.api_host(), request_uri=request_uri
)
return self._client.parse(
self._client.api_host(), self._client.session.put(uri, json=params, headers=self._client._headers())
)
    # _jwt_signed_delete: private method that allows the developer to perform a signed DELETE request
def _jwt_signed_delete(self, request_uri):
uri = "https://{api_host}{request_uri}".format(
api_host=self._client.api_host(), request_uri=request_uri
)
return self._client.parse(
self._client.api_host(), self._client.session.delete(uri, headers=self._client._headers())
)
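# Illustrative usage sketch (application id, key path, numbers and URL are
# placeholders; the payload mirrors a typical Nexmo Voice API call request):
#   voice = Voice(application_id='my-app-id', private_key='private.key')
#   voice.create_call({'to': [{'type': 'phone', 'number': '447700900000'}],
#                      'from': {'type': 'phone', 'number': '447700900001'},
#                      'answer_url': ['https://example.com/answer']})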
|
py | b4070315564b3b80bf9cce6119d3c04ee2fabcfa | from .adapters import *
from .bounds import *
from .ensemble import *
from .extractors import *
from .regridder import *
from .sources import *
__all__ = ['adapters', 'bounds', 'ensemble',
'extractors', 'regridder', 'sources']
|
py | b4070344a76fa37094ff20e0fcc0d63a04a3f562 | from object_detection.protos.string_int_label_map_pb2 import StringIntLabelMap
from google.protobuf import text_format
label_map_path = './imagenet_label_map.pbtxt'
x = StringIntLabelMap()
fid = open(label_map_path, 'r')
text_format.Merge(fid.read(), x)
fid.close() |
py | b407042d5cf133325d885b3dfa2817a99f1ef04b | # Inspired from OpenAI Baselines
import gym
import numpy as np
import random
def set_global_seeds(i):
np.random.seed(i)
random.seed(i)
|
py | b407044e91412bef8cd03bec20124f70101a1759 | from layer import BaseLayer, FileWatch
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.arrays import *
from OpenGL.GL import shaders
import OpenGL
import time
import math
import numpy as np
import traceback, sys
import random
class Layer(BaseLayer):
def init(self):
self.level = 100
self.vao = None
self.vbo = None
self.fragShader = None
self.vertShader = None
self.shaderProg = None
self.fragCode = None
self.timeAcc = 0
self.fw = FileWatch("glsl/wedges.glsl")
self.beatpt1 = 0
self.beatpt1accum = 0
self.beataccumpt1 = 0
self.rand1 = 1
self.rand1pt = 1
self.rand2 = 1
self.rand2pt = 1
self.rand3 = 1
self.rand3pt = 1
self.rand4 = 1
self.rand4pt = 1
self.rand5 = 0
def render(self, runtimeData):
if runtimeData.bpm:
self.beatpt1 += (-self.beatpt1 / (runtimeData.bpm/60 / 3)) * runtimeData.dt
self.beataccumpt1 += ((runtimeData.beataccum-self.beataccumpt1) / (runtimeData.bpm / 60 / 3)) * runtimeData.dt
self.rand1pt += ((self.rand1 - self.rand1pt) / (runtimeData.bpm/60 / 3)) * runtimeData.dt
self.rand2pt += ((self.rand2 - self.rand2pt) / (runtimeData.bpm / 60 / 3)) * runtimeData.dt
self.rand3pt += ((self.rand3 - self.rand3pt) / (runtimeData.bpm / 60 / 3)) * runtimeData.dt
self.rand4pt += ((self.rand4 - self.rand4pt) / (runtimeData.bpm / 60 / 3)) * runtimeData.dt
self.beatpt1accum += self.beatpt1*runtimeData.dt
if runtimeData.beataccum-self.beataccumpt1 > 10:
self.beataccumpt1 = runtimeData.beataccum
if runtimeData.beat:
self.beatpt1 = 1
self.rand1 = random.uniform(0, 1)
self.rand2 = random.uniform(0, 1)
self.rand3 = random.uniform(0, 1)
self.rand4 = random.uniform(0, 1)
self.rand5 = random.uniform(-1, 1)
if self.checkUniform(runtimeData, "wedges", "master", 1) <= 0:
return
if self.vao is None or self.vbo is None:
self.genVAOVBO()
if self.fragShader is None or self.vertShader is None or self.shaderProg is None:
self.genShaders(self.fw.content)
if self.fw.check():
self.genShaders(self.fw.content)
glBindFramebuffer(GL_FRAMEBUFFER, runtimeData.fbo)
glUseProgram(self.shaderProg)
glActiveTexture(GL_TEXTURE0)
glBindTexture(GL_TEXTURE_2D, runtimeData.fbotexture)
glUniform1i(glGetUniformLocation(self.shaderProg, "texIn"), 0)
self.u1("time", runtimeData.time)
self.u1("dt", runtimeData.dt)
self.u2("res", runtimeData.res)
self.u1("beatpt", self.beatpt1)
self.u1("rand1", self.rand1)
self.u1("rand1pt", self.rand2pt)
self.u1("rand2", self.rand2)
self.u1("rand2pt", self.rand3pt)
self.u1("rand3", self.rand3)
self.u1("rand3pt", self.rand4pt)
self.u1("rand4", self.rand4)
self.u1("rand4pt", self.rand1pt)
self.u1("beatptaccum", self.beatpt1accum)
self.u1("beataccumpt", self.beataccumpt1)
self.u1("rand5", self.rand5)
glBindVertexArray(self.vao)
glDrawArrays(GL_TRIANGLES, 0, 6)
glBindVertexArray(0)
def getData(self):
data = {
"vao": self.vao,
"vbo": self.vbo,
"vs": self.vertShader,
"fs": self.fragShader,
"sp": self.shaderProg,
"fc": self.fragCode
}
return data
def setData(self, data):
self.vao = data["vao"]
self.vbo = data["vbo"]
self.vertShader = data["vs"]
self.fragShader = data["fs"]
self.shaderProg = data["sp"]
self.fragCode = data["fc"]
def genVAOVBO(self):
vertices = np.array([
# X Y Z R G B U V
[1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0], # Top right
[-1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0, 1.0], # Top Left
[1.0, -1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0], # Bottom Right
[-1.0, -1.0, 0.0, 1.0, 1.0, 0.0, 0, 0], # Bottom Left
[1.0, -1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0], # Bottom Right
[-1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0, 1.0] # Top Left
], 'f')
# Generate vertex buffer object and fill it with vertex data from above
self.vbo = glGenBuffers(1)
glBindBuffer(GL_ARRAY_BUFFER, self.vbo)
glBufferData(GL_ARRAY_BUFFER, vertices.nbytes, vertices, GL_STATIC_DRAW)
# Generate vertex array object and pass vertex data into it
self.vao = glGenVertexArrays(1)
glBindVertexArray(self.vao)
# XYZ
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 8 * vertices.itemsize, None)
glEnableVertexAttribArray(0)
# RGB
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 8 * vertices.itemsize, ctypes.c_void_p(3 * vertices.itemsize))
glEnableVertexAttribArray(1)
# UV
glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, 8 * vertices.itemsize, ctypes.c_void_p(6 * vertices.itemsize))
glEnableVertexAttribArray(2)
glBindBuffer(GL_ARRAY_BUFFER, 0)
glBindVertexArray(0)
def genShaders(self, fragCode):
defaultVertexShaderCode = """
#version 130
in vec3 position;
in vec3 color;
in vec2 texcoord;
out vec3 ourColor;
out vec2 ourTexcoord;
void main()
{
gl_Position = vec4(position.x, position.y, position.z, 1.0);
ourColor = color;
ourTexcoord = texcoord;
}"""
defaultFragmentShaderCode = """
#version 130
in vec3 ourColor;
in vec2 ourTexcoord;
out vec4 outColor;
void main()
{
outColor = vec4(ourColor.r, ourColor.g, ourColor.b, 1.0);
}"""
if self.fragCode is None:
self.fragCode = defaultFragmentShaderCode
try:
self.fragShader = shaders.compileShader(fragCode, GL_FRAGMENT_SHADER)
self.fragCode = fragCode
except:
            traceback.print_exc(file=sys.stderr)
# recompile OLD shadercode, but throw error aswell
self.fragShader = shaders.compileShader(self.fragCode, GL_FRAGMENT_SHADER)
self.vertShader = shaders.compileShader(defaultVertexShaderCode, GL_VERTEX_SHADER)
self.shaderProg = shaders.compileProgram(self.fragShader, self.vertShader)
def u1(self, name, v):
uniformLoc = glGetUniformLocation(self.shaderProg, name)
if not uniformLoc == -1:
glUniform1f(uniformLoc, v)
def u2(self, name, v):
uniformLoc = glGetUniformLocation(self.shaderProg, name)
if not uniformLoc == -1:
glUniform2f(uniformLoc, v[0], v[1])
def u3(self, name, v):
uniformLoc = glGetUniformLocation(self.shaderProg, name)
if not uniformLoc == -1:
glUniform3f(uniformLoc, v[0], v[1], v[2])
def u4(self, name, v):
uniformLoc = glGetUniformLocation(self.shaderProg, name)
if not uniformLoc == -1:
glUniform4f(uniformLoc, v[0], v[1], v[2], v[3])
|