# Source repo: evilhero/mylar | path: mylar/mb.py | license: gpl-3.0
# This file is part of Mylar.
#
# Mylar is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mylar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mylar. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import re
import time
import threading
import platform
import urllib, urllib2
from xml.dom.minidom import parseString, Element
from xml.parsers.expat import ExpatError
import requests
import mylar
from mylar import logger, db, cv
from mylar.helpers import multikeysort, replace_all, cleanName, listLibrary, listStoryArcs
import httplib
mb_lock = threading.Lock()
def patch_http_response_read(func):
def inner(*args):
try:
return func(*args)
except httplib.IncompleteRead, e:
return e.partial
return inner
httplib.HTTPResponse.read = patch_http_response_read(httplib.HTTPResponse.read)
if platform.python_version() == '2.7.6':
httplib.HTTPConnection._http_vsn = 10
httplib.HTTPConnection._http_vsn_str = 'HTTP/1.0'
def pullsearch(comicapi, comicquery, offset, type):
cnt = 1
for x in comicquery:
if cnt == 1:
filterline = '%s' % x
else:
filterline+= ',name:%s' % x
cnt+=1
PULLURL = mylar.CVURL + str(type) + 's?api_key=' + str(comicapi) + '&filter=name:' + filterline + '&field_list=id,name,start_year,site_detail_url,count_of_issues,image,publisher,deck,description,first_issue,last_issue&format=xml&sort=date_last_updated:desc&offset=' + str(offset) # 2012/22/02 - CVAPI flipped back to offset instead of page
#all these imports are standard on most modern python implementations
#logger.info('MB.PULLURL:' + PULLURL)
#new CV API restriction - one api request / second.
if mylar.CONFIG.CVAPI_RATE is None or mylar.CONFIG.CVAPI_RATE < 2:
time.sleep(2)
else:
time.sleep(mylar.CONFIG.CVAPI_RATE)
#download the file:
payload = None
try:
r = requests.get(PULLURL, params=payload, verify=mylar.CONFIG.CV_VERIFY, headers=mylar.CV_HEADERS)
except Exception as e:
logger.warn('Error fetching data from ComicVine: %s' % e)
return
try:
dom = parseString(r.content) #(data)
except ExpatError:
if u'<title>Abnormal Traffic Detected' in r.content:
logger.error('ComicVine has banned this server\'s IP address because it exceeded the API rate limit.')
else:
logger.warn('[WARNING] ComicVine is not responding correctly at the moment. This is usually due to some problems on their end. If you re-try things again in a few moments, it might work properly.')
return
except Exception as e:
logger.warn('[ERROR] Error returned from CV: %s' % e)
return
else:
return dom
def findComic(name, mode, issue, limityear=None, type=None):
#with mb_lock:
comicResults = None
comicLibrary = listLibrary()
comiclist = []
arcinfolist = []
commons = ['and', 'the', '&', '-']
for x in commons:
cnt = 0
for m in re.finditer(x, name.lower()):
cnt +=1
tehstart = m.start()
tehend = m.end()
if any([x == 'the', x == 'and']):
if len(name) == tehend:
tehend =-1
if not all([tehstart == 0, name[tehend] == ' ']) or not all([tehstart != 0, name[tehstart-1] == ' ', name[tehend] == ' ']):
continue
else:
name = name.replace(x, ' ', cnt)
originalname = name
if '+' in name:
name = re.sub('\+', 'PLUS', name)
pattern = re.compile(ur'\w+', re.UNICODE)
name = pattern.findall(name)
if '+' in originalname:
y = []
for x in name:
y.append(re.sub("PLUS", "%2B", x))
name = y
if limityear is None: limityear = 'None'
comicquery = name
if mylar.CONFIG.COMICVINE_API == 'None' or mylar.CONFIG.COMICVINE_API is None:
logger.warn('You have not specified your own ComicVine API key - this is a requirement. Get your own @ http://api.comicvine.com.')
return
else:
comicapi = mylar.CONFIG.COMICVINE_API
if type is None:
type = 'volume'
#let's find out how many results we get from the query...
searched = pullsearch(comicapi, comicquery, 0, type)
if searched is None:
return False
totalResults = searched.getElementsByTagName('number_of_total_results')[0].firstChild.wholeText
logger.fdebug("there are " + str(totalResults) + " search results...")
if not totalResults:
return False
if int(totalResults) > 1000:
logger.warn('Search returned more than 1000 hits [' + str(totalResults) + ']. Only displaying first 1000 results - use more specifics or the exact ComicID if required.')
totalResults = 1000
countResults = 0
while (countResults < int(totalResults)):
#logger.fdebug("querying " + str(countResults))
if countResults > 0:
offsetcount = countResults
searched = pullsearch(comicapi, comicquery, offsetcount, type)
comicResults = searched.getElementsByTagName(type)
body = ''
n = 0
if not comicResults:
break
for result in comicResults:
#retrieve the first xml tag (<tag>data</tag>)
#that the parser finds with name tagName:
arclist = []
if type == 'story_arc':
#call cv.py here to find out issue count in story arc
try:
logger.fdebug('story_arc ascension')
names = len(result.getElementsByTagName('name'))
n = 0
logger.fdebug('length: ' + str(names))
xmlpub = None #set this in case the publisher field isn't populated in the xml
while (n < names):
logger.fdebug(result.getElementsByTagName('name')[n].parentNode.nodeName)
if result.getElementsByTagName('name')[n].parentNode.nodeName == 'story_arc':
logger.fdebug('yes')
try:
xmlTag = result.getElementsByTagName('name')[n].firstChild.wholeText
xmlTag = xmlTag.rstrip()
logger.fdebug('name: ' + xmlTag)
except:
logger.error('There was a problem retrieving the given data from ComicVine. Ensure that www.comicvine.com is accessible.')
return
elif result.getElementsByTagName('name')[n].parentNode.nodeName == 'publisher':
logger.fdebug('publisher check.')
xmlpub = result.getElementsByTagName('name')[n].firstChild.wholeText
n+=1
except:
logger.warn('error retrieving story arc search results.')
return
siteurl = len(result.getElementsByTagName('site_detail_url'))
s = 0
logger.fdebug('length: ' + str(siteurl))
xmlurl = None
while (s < siteurl):
logger.fdebug(result.getElementsByTagName('site_detail_url')[s].parentNode.nodeName)
if result.getElementsByTagName('site_detail_url')[s].parentNode.nodeName == 'story_arc':
try:
xmlurl = result.getElementsByTagName('site_detail_url')[s].firstChild.wholeText
except:
logger.error('There was a problem retrieving the given data from ComicVine. Ensure that www.comicvine.com is accessible.')
return
s+=1
xmlid = result.getElementsByTagName('id')[0].firstChild.wholeText
if xmlid is not None:
arcinfolist = storyarcinfo(xmlid)
logger.info('[IMAGE] : ' + arcinfolist['comicimage'])
comiclist.append({
'name': xmlTag,
'comicyear': arcinfolist['comicyear'],
'comicid': xmlid,
'cvarcid': xmlid,
'url': xmlurl,
'issues': arcinfolist['issues'],
'comicimage': arcinfolist['comicimage'],
'publisher': xmlpub,
'description': arcinfolist['description'],
'deck': arcinfolist['deck'],
'arclist': arcinfolist['arclist'],
'haveit': arcinfolist['haveit']
})
else:
comiclist.append({
'name': xmlTag,
'comicyear': arcyear,
'comicid': xmlid,
'url': xmlurl,
'issues': issuecount,
'comicimage': xmlimage,
'publisher': xmlpub,
'description': xmldesc,
'deck': xmldeck,
'arclist': arclist,
'haveit': haveit
})
logger.fdebug('IssueID\'s that are a part of ' + xmlTag + ' : ' + str(arclist))
else:
xmlcnt = result.getElementsByTagName('count_of_issues')[0].firstChild.wholeText
#here we can determine what called us, and either start gathering all issues or just limited ones.
if issue is not None and str(issue).isdigit():
#this gets buggered up with NEW/ONGOING series because the db hasn't been updated
#to reflect the proper count. Drop it by 1 to make sure.
limiter = int(issue) - 1
else: limiter = 0
#get the first issue # (for auto-magick calcs)
iss_len = len(result.getElementsByTagName('name'))
i=0
xmlfirst = '1'
xmllast = None
try:
while (i < iss_len):
if result.getElementsByTagName('name')[i].parentNode.nodeName == 'first_issue':
xmlfirst = result.getElementsByTagName('issue_number')[i].firstChild.wholeText
if '\xbd' in xmlfirst:
xmlfirst = '1' #if the first issue is 1/2, just assume 1 for logistics
elif result.getElementsByTagName('name')[i].parentNode.nodeName == 'last_issue':
xmllast = result.getElementsByTagName('issue_number')[i].firstChild.wholeText
if all([xmllast is not None, xmlfirst is not None]):
break
i+=1
except:
xmlfirst = '1'
if all([xmlfirst == xmllast, xmlfirst.isdigit(), xmlcnt == '0']):
xmlcnt = '1'
#logger.info('There are : ' + str(xmlcnt) + ' issues in this series.')
#logger.info('The first issue started at # ' + str(xmlfirst))
cnt_numerical = int(xmlcnt) + int(xmlfirst) # (of issues + start of first issue = numerical range)
#logger.info('The maximum issue number should be roughly # ' + str(cnt_numerical))
#logger.info('The limiter (issue max that we know of) is # ' + str(limiter))
if cnt_numerical >= limiter:
cnl = len (result.getElementsByTagName('name'))
cl = 0
xmlTag = 'None'
xmlimage = "cache/blankcover.jpg"
xml_lastissueid = 'None'
while (cl < cnl):
if result.getElementsByTagName('name')[cl].parentNode.nodeName == 'volume':
xmlTag = result.getElementsByTagName('name')[cl].firstChild.wholeText
#break
if result.getElementsByTagName('name')[cl].parentNode.nodeName == 'image':
xmlimage = result.getElementsByTagName('super_url')[0].firstChild.wholeText
if result.getElementsByTagName('name')[cl].parentNode.nodeName == 'last_issue':
xml_lastissueid = result.getElementsByTagName('id')[cl].firstChild.wholeText
cl+=1
if (result.getElementsByTagName('start_year')[0].firstChild) is not None:
xmlYr = result.getElementsByTagName('start_year')[0].firstChild.wholeText
else: xmlYr = "0000"
yearRange = []
tmpYr = re.sub('\?', '', xmlYr)
if tmpYr.isdigit():
yearRange.append(tmpYr)
tmpyearRange = int(xmlcnt) / 12
if float(tmpyearRange): tmpyearRange += 1
possible_years = int(tmpYr) + tmpyearRange
for i in range(int(tmpYr), int(possible_years),1):
if not any(int(x) == int(i) for x in yearRange):
yearRange.append(str(i))
logger.fdebug('[RESULT][' + str(limityear) + '] ComicName:' + xmlTag + ' -- ' + str(xmlYr) + ' [Series years: ' + str(yearRange) + ']')
if tmpYr != xmlYr:
xmlYr = tmpYr
if any(map(lambda v: v in limityear, yearRange)) or limityear == 'None':
xmlurl = result.getElementsByTagName('site_detail_url')[0].firstChild.wholeText
idl = len (result.getElementsByTagName('id'))
idt = 0
xmlid = None
while (idt < idl):
if result.getElementsByTagName('id')[idt].parentNode.nodeName == 'volume':
xmlid = result.getElementsByTagName('id')[idt].firstChild.wholeText
break
idt+=1
if xmlid is None:
logger.error('Unable to figure out the comicid - skipping this : ' + str(xmlurl))
continue
publishers = result.getElementsByTagName('publisher')
if len(publishers) > 0:
pubnames = publishers[0].getElementsByTagName('name')
if len(pubnames) >0:
xmlpub = pubnames[0].firstChild.wholeText
else:
xmlpub = "Unknown"
else:
xmlpub = "Unknown"
#ignore specific publishers on a global scale here.
if mylar.CONFIG.BLACKLISTED_PUBLISHERS is not None and any([x for x in mylar.CONFIG.BLACKLISTED_PUBLISHERS if x.lower() == xmlpub.lower()]):
logger.fdebug('Blacklisted publisher [' + xmlpub + ']. Ignoring this result.')
continue
try:
xmldesc = result.getElementsByTagName('description')[0].firstChild.wholeText
except:
xmldesc = "None"
#this is needed to display brief synopsis for each series on search results page.
try:
xmldeck = result.getElementsByTagName('deck')[0].firstChild.wholeText
except:
xmldeck = "None"
xmltype = None
if xmldeck != 'None':
if any(['print' in xmldeck.lower(), 'digital' in xmldeck.lower(), 'paperback' in xmldeck.lower(), 'one shot' in re.sub('-', '', xmldeck.lower()).strip(), 'hardcover' in xmldeck.lower()]):
if all(['print' in xmldeck.lower(), 'reprint' not in xmldeck.lower()]):
xmltype = 'Print'
elif 'digital' in xmldeck.lower():
xmltype = 'Digital'
elif 'paperback' in xmldeck.lower():
xmltype = 'TPB'
elif 'hardcover' in xmldeck.lower():
xmltype = 'HC'
elif 'oneshot' in re.sub('-', '', xmldeck.lower()).strip():
xmltype = 'One-Shot'
else:
xmltype = 'Print'
if xmldesc != 'None' and xmltype is None:
if 'print' in xmldesc[:60].lower() and all(['print edition can be found' not in xmldesc.lower(), 'reprints' not in xmldesc.lower()]):
xmltype = 'Print'
elif 'digital' in xmldesc[:60].lower() and 'digital edition can be found' not in xmldesc.lower():
xmltype = 'Digital'
elif all(['paperback' in xmldesc[:60].lower(), 'paperback can be found' not in xmldesc.lower()]) or 'collects' in xmldesc[:60].lower():
xmltype = 'TPB'
elif 'hardcover' in xmldesc[:60].lower() and 'hardcover can be found' not in xmldesc.lower():
xmltype = 'HC'
elif any(['one-shot' in xmldesc[:60].lower(), 'one shot' in xmldesc[:60].lower()]) and any(['can be found' not in xmldesc.lower(), 'following the' not in xmldesc.lower()]):
i = 0
xmltype = 'One-Shot'
avoidwords = ['preceding', 'after the special', 'following the']
while i < 2:
if i == 0:
cbd = 'one-shot'
elif i == 1:
cbd = 'one shot'
tmp1 = xmldesc[:60].lower().find(cbd)
if tmp1 != -1:
for x in avoidwords:
tmp2 = xmldesc[:tmp1].lower().find(x)
if tmp2 != -1:
xmltype = 'Print'
i = 3
break
i+=1
else:
xmltype = 'Print'
if xmlid in comicLibrary:
haveit = comicLibrary[xmlid]
else:
haveit = "No"
comiclist.append({
'name': xmlTag,
'comicyear': xmlYr,
'comicid': xmlid,
'url': xmlurl,
'issues': xmlcnt,
'comicimage': xmlimage,
'publisher': xmlpub,
'description': xmldesc,
'deck': xmldeck,
'type': xmltype,
'haveit': haveit,
'lastissueid': xml_lastissueid,
'seriesrange': yearRange # returning additional information about series run polled from CV
})
#logger.fdebug('year: %s - constraint met: %s [%s] --- 4050-%s' % (xmlYr,xmlTag,xmlYr,xmlid))
else:
#logger.fdebug('year: ' + str(xmlYr) + ' - constraint not met. Has to be within ' + str(limityear))
pass
n+=1
#search results are limited to 100 and by pagination now...let's account for this.
countResults = countResults + 100
return comiclist
def storyarcinfo(xmlid):
comicLibrary = listStoryArcs()
arcinfo = {}
if mylar.CONFIG.COMICVINE_API == 'None' or mylar.CONFIG.COMICVINE_API is None:
logger.warn('You have not specified your own ComicVine API key - this is a requirement. Get your own @ http://api.comicvine.com.')
return
else:
comicapi = mylar.CONFIG.COMICVINE_API
#respawn to the exact id for the story arc and count the # of issues present.
ARCPULL_URL = mylar.CVURL + 'story_arc/4045-' + str(xmlid) + '/?api_key=' + str(comicapi) + '&field_list=issues,publisher,name,first_appeared_in_issue,deck,image&format=xml&offset=0'
#logger.fdebug('arcpull_url:' + str(ARCPULL_URL))
#new CV API restriction - one api request / second.
if mylar.CONFIG.CVAPI_RATE is None or mylar.CONFIG.CVAPI_RATE < 2:
time.sleep(2)
else:
time.sleep(mylar.CONFIG.CVAPI_RATE)
#download the file:
payload = None
try:
r = requests.get(ARCPULL_URL, params=payload, verify=mylar.CONFIG.CV_VERIFY, headers=mylar.CV_HEADERS)
except Exception as e:
logger.warn('While parsing data from ComicVine, got exception: %s' % e)
return
try:
arcdom = parseString(r.content)
except ExpatError as e:
if u'<title>Abnormal Traffic Detected' in r.content:
logger.error('ComicVine has banned this server\'s IP address because it exceeded the API rate limit.')
else:
logger.warn('While parsing data from ComicVine, got exception: %s for data: %s' % (e, r.content))
return
except Exception as e:
logger.warn('While parsing data from ComicVine, got exception: %s for data: %s' % (e, r.content))
return
try:
logger.fdebug('story_arc ascension')
issuedom = arcdom.getElementsByTagName('issue')
issuecount = len( issuedom ) #arcdom.getElementsByTagName('issue') )
isc = 0
arclist = ''
ordernum = 1
for isd in issuedom:
zeline = isd.getElementsByTagName('id')
isdlen = len( zeline )
isb = 0
while ( isb < isdlen):
if isc == 0:
arclist = str(zeline[isb].firstChild.wholeText).strip() + ',' + str(ordernum)
else:
arclist += '|' + str(zeline[isb].firstChild.wholeText).strip() + ',' + str(ordernum)
ordernum+=1
isb+=1
isc+=1
except:
logger.fdebug('unable to retrieve issue count - nullifying value.')
issuecount = 0
try:
firstid = None
arcyear = None
fid = len ( arcdom.getElementsByTagName('id') )
fi = 0
while (fi < fid):
if arcdom.getElementsByTagName('id')[fi].parentNode.nodeName == 'first_appeared_in_issue':
if not arcdom.getElementsByTagName('id')[fi].firstChild.wholeText == xmlid:
logger.fdebug('hit it.')
firstid = arcdom.getElementsByTagName('id')[fi].firstChild.wholeText
break # - don't break out here as we want to gather ALL the issue ID's since it's here
fi+=1
logger.fdebug('firstid: ' + str(firstid))
if firstid is not None:
firstdom = cv.pulldetails(comicid=None, type='firstissue', issueid=firstid)
logger.fdebug('success')
arcyear = cv.Getissue(firstid,firstdom,'firstissue')
except:
logger.fdebug('Unable to retrieve first issue details. Not calculating at this time.')
try:
xmlimage = arcdom.getElementsByTagName('super_url')[0].firstChild.wholeText
except:
xmlimage = "cache/blankcover.jpg"
try:
xmldesc = arcdom.getElementsByTagName('desc')[0].firstChild.wholeText
except:
xmldesc = "None"
try:
xmlpub = arcdom.getElementsByTagName('publisher')[0].firstChild.wholeText
except:
xmlpub = "None"
try:
xmldeck = arcdom.getElementsByTagName('deck')[0].firstChild.wholeText
except:
xmldeck = "None"
if xmlid in comicLibrary:
haveit = comicLibrary[xmlid]
else:
haveit = "No"
arcinfo = {
#'name': xmlTag, #theese four are passed into it only when it's a new add
#'url': xmlurl, #needs to be modified for refreshing to work completely.
#'publisher': xmlpub,
'comicyear': arcyear,
'comicid': xmlid,
'issues': issuecount,
'comicimage': xmlimage,
'description': xmldesc,
'deck': xmldeck,
'arclist': arclist,
'haveit': haveit,
'publisher': xmlpub
}
return arcinfo
# Source repo: glenn-edgar/local_controller_3 | path: irrigation_control_py3/ref/irrigation_ctrl_startup.py
import datetime
import time
import string
import urllib2
import math
import redis
import json
import eto
import py_cf
import os
import base64
import load_files
import watch_dog
class SprinklerQueueElementControl( ):
def __init__(self,redis,io_control,alarm_queue,counter_devices):
self.redis = redis
self.alarm_queue = alarm_queue
self.io_control = io_control
self.counter_devices = counter_devices
self.app_files = load_files.APP_FILES(redis)
self.redis.hset("CONTROL_VARIBALES","MAX_FLOW_TIME",0)
def check_for_excessive_flow_rate( self,*args ):
flow_value = float( check_redis_value( "global_flow_sensor_corrected" ) )
max_flow = float( check_redis_value( "FLOW_CUT_OFF"))
if max_flow == 0:
return # feature is not turned on
compact_data = self.redis.lindex( "QUEUES:SPRINKLER:IRRIGATION_CELL_QUEUE",0 )
json_string = base64.b64decode(compact_data)
json_object = json.loads(json_string)
run_time = int( json_object["run_time"])
elasped_time = int(json_object["elasped_time"])
schedule_step = int(json_object["step"])
step_number = json_object["step"]
schedule_name = json_object["schedule_name"]
if elasped_time < 3 :
return # let flow stabilize
if flow_value > max_flow:
over_load_time = int(self.redis.hget("CONTROL_VARIBALES","MAX_FLOW_TIME")) +1
if over_load_time > 2:
self.redis.hset("CONTROL_VARIABLES","SKIP_STATION","ON")
self.alarm_queue.store_past_action_queue("IRRIGATION:FLOW_ABORT","RED", { "schedule_name":json_object["schedule_name"],"step_number":json_object["step"],
"flow_value":flow_value,"max_flow":max_flow } )
self.redis.hset("CONTROL_VARIBALES","MAX_FLOW_TIME",0)
else:
self.redis.hset("CONTROL_VARIBALES","MAX_FLOW_TIME",over_load_time)
else:
self.redis.hset("CONTROL_VARIBALES","MAX_FLOW_TIME",0)
def check_redis_value( self,key):
value = redis.hget( "CONTROL_VARIABLES",key )
if value == None:
value = 0
return value
def check_current(self,*args):
compact_data = self.redis.lindex( "QUEUES:SPRINKLER:IRRIGATION_CELL_QUEUE",0 )
json_string = base64.b64decode(compact_data)
#print "json_string",json_string
json_object = json.loads(json_string)
temp = float( self.redis.hget( "CONTROL_VARIABLES","coil_current" ))
print "check_current temp",temp
if temp > 24:
self.redis.hset("CONTROL_VARIABLES","SKIP_STATION","ON")
self.clean_up_irrigation_cell( json_object )
self.alarm_queue.store_past_action_queue("IRRIGATION:CURRENT_ABORT","RED", { "schedule_name":json_object["schedule_name"],"step_number":json_object["step"] } )
return "RESET"
else:
return "DISABLE"
def start(self, *args ):
#print "start ------------------------------------------------->"
self.redis.hset("CONTROL_VARIBALES","MAX_FLOW_TIME",0)
compact_data = self.redis.lindex( "QUEUES:SPRINKLER:IRRIGATION_CELL_QUEUE",0 )
json_string = base64.b64decode(compact_data)
json_object = json.loads(json_string)
if self.check_redis_value("SUSPEND") == "ON":
#self.log_start_step( schedule_name, json_object["step"])
#self.io_control.turn_off_io(json_object["io_setup"])
#self.io_control.disable_all_sprinklers()
return # System is not processing commands right now
#print "start --- #1"
self.redis.hset("CONTROL_VARIABLES","SKIP_STATION","OFF")
#print "made it here"
run_time = int( json_object["run_time"])
elasped_time = int(json_object["elasped_time"])
schedule_step = int(json_object["step"])
step_number = json_object["step"]
schedule_name = json_object["schedule_name"]
#print "run_time",run_time
if json_object["eto_enable"] == True:
run_time = self.eto_update( run_time , json_object["io_setup"] )
#print "start ---#2 runtime",run_time
if run_time == 0:
self.clean_up_irrigation_cell(json_object)
json_object["run_time"] = 0
self.alarm_queue.store_past_action_queue("IRRIGATION:START:ETO_RESTRICTION","YELLOW", json_object )
return "RESET"
self.io_control.load_duration_counters( run_time )
#print "made it here"
self.io_control.turn_on_master_valves()
self.io_control.turn_on_io( json_object["io_setup"] )
station_by_pass = 0
elasped_time = 1
self.redis.hset( "CONTROL_VARIABLES","sprinkler_ctrl_mode","AUTO")
self.redis.hset( "CONTROL_VARIABLES","schedule_name", schedule_name )
self.redis.hset( "CONTROL_VARIABLES","schedule_step_number", step_number )
self.redis.hset( "CONTROL_VARIABLES","schedule_step", schedule_step )
self.redis.hset( "CONTROL_VARIABLES","schedule_time_count", elasped_time )
self.redis.hset( "CONTROL_VARIABLES","schedule_time_max", run_time )
self.log_start_step( schedule_name, json_object["step"])
#print "current_log",self.current_log_object
#print "flow_log", self.flow_log_object
json_object["elasped_time"] = elasped_time
json_object["run_time"] = run_time
json_string = json.dumps( json_object )
compact_data = base64.b64encode(json_string)
#print "start #end json string ",json_string
self.redis.lset( "QUEUES:SPRINKLER:IRRIGATION_CELL_QUEUE", 0, compact_data )
return "DISABLE"
def monitor( self, *args ):
#print "monitor --------------->"
# check to see if something is in the queue
length = self.redis.llen( "QUEUES:SPRINKLER:IRRIGATION_CELL_QUEUE" )
#print "length",length
if length == 0 :
return "CONTINUE"
compact_data = self.redis.lindex( "QUEUES:SPRINKLER:IRRIGATION_CELL_QUEUE",0 )
json_string = base64.b64decode(compact_data)
json_object = json.loads(json_string)
run_time = int( json_object["run_time"])
elasped_time = int(json_object["elasped_time"])
schedule_step = int(json_object["step"])
step_number = json_object["step"]
schedule_name = json_object["schedule_name"]
if (self.check_redis_value("SUSPEND") == "ON") :
#self.io_control.turn_off_io(json_object["io_setup"])
#self.io_control.disable_all_sprinklers()
return "HALT" # System is not processing commands right now
elasped_time = elasped_time +1
self.log_sensors( schedule_name, schedule_step)
if json_object["eto_enable"] == True:
self.update_eto_queue_a( 1, json_object["io_setup"] )
if (elasped_time <= run_time ) and ( self.check_redis_value("SKIP_STATION") != "ON" ):
self.io_control.turn_on_io( json_object["io_setup"] )
self.io_control.turn_on_master_valves()
self.redis.hset( "CONTROL_VARIABLES","schedule_time_count", elasped_time )
json_object["elasped_time"] = elasped_time
json_string = json.dumps( json_object )
compact_data = base64.b64encode(json_string)
self.redis.lset( "QUEUES:SPRINKLER:IRRIGATION_CELL_QUEUE", 0, compact_data )
return_value = "RESET"
else:
#print "normal end"
self.log_step_stop()
self.clean_up_irrigation_cell(json_object)
return_value = "DISABLE"
#print "cell returnValue is ",return_value
return return_value
def clean_up_irrigation_cell( self ,json_object ):
#print "made it to cleanup"
self.redis.delete("QUEUES:SPRINKLER:IRRIGATION_CELL_QUEUE")
self.redis.hset("CONTROL_VARIABLES", "schedule_name","offline" )
self.redis.hset("CONTROL_VARIABLES", "schedule_step_number",0 )
self.redis.hset("CONTROL_VARIABLES", "schedule_step",0 )
self.redis.hset("CONTROL_VARIABLES", "schedule_time_count",0 )
self.redis.hset( "CONTROL_VARIABLES","schedule_time_max",0 )
self.redis.hset( "CONTROL_VARIABLES","sprinkler_ctrl_mode","AUTO")
self.redis.hset( "CONTROL_VARIABLES","SKIP_STATION","OFF")
self.io_control.turn_off_io(json_object["io_setup"])
self.io_control.disable_all_sprinklers()
self.io_control.clear_duration_counters()
self.io_control.turn_off_master_valves()
def log_sensors(self, schedule_name,step):
if hasattr(self, 'current_log_object') == False:
self.current_log_object = self.initialize_object( "current_log",schedule_name,step)
if hasattr(self, 'flow_log_object') == False:
self.flow_log_object = self.initialize_object( "flow_log",schedule_name,step )
coil_current = float( self.redis.hget( "CONTROL_VARIABLES","coil_current" ))
self.log_coil_current ( coil_current )
for i in self.counter_devices.keys():
sensor_name = i
flow_value = self.redis.lindex("QUEUES:SPRINKLER:FLOW:"+str(i),0)
self.log_flow_rate( sensor_name, flow_value )
def log_flow_rate( self, sensor_name, flow_value ):
if self.flow_log_object["fields"].has_key( sensor_name ) == False:
self.initialize_field( self.flow_log_object, sensor_name )
temp = self.flow_log_object["fields"][ sensor_name ]
temp["count"] = temp["count"]+1
temp["data"].append( flow_value)
if flow_value > temp["max"] :
temp["max"] = flow_value
if flow_value < temp["min"] :
temp["min"] = flow_value
def log_coil_current ( self,coil_current ):
if self.current_log_object["fields"].has_key( "coil_current" ) == False:
self.initialize_field( self.current_log_object, "coil_current")
temp = self.current_log_object["fields"]["coil_current"]
temp["count"] = temp["count"]+1
temp["data"].append( coil_current )
if coil_current > temp["max"] :
temp["max"] = coil_current
if coil_current < temp["min"] :
temp["min"] = coil_current
def log_start_step( self, schedule_name, step):
#print "made it log start step"
self.current_log_object = self.initialize_object( "current_log",schedule_name,step)
self.flow_log_object = self.initialize_object( "flow_log",schedule_name,step )
self.alarm_queue.store_event_queue( "start_step", { "schedule":schedule_name, "step":step } )
def log_step_stop( self ):
if hasattr(self, 'flow_log_object') == False:
return # case when eto abort
obj = self.flow_log_object
self.alarm_queue.store_past_action_queue("IRRIGATION:END","GREEN", { "schedule_name":obj["schedule_name"],"step_name":obj["step"] } )
self.store_object( self.current_log_object, "coil" )
self.store_object( self.flow_log_object, "flow" )
obj = {}
obj["coil"] = self.current_log_object
obj["flow"] = self.flow_log_object
self.alarm_queue.store_event_queue( "irrigatation_store_object", obj )
self.current_log_object = None
self.flow_log_object = None
def store_object( self, obj ,queue_type ):
if obj == None:
return
#self.add_limits(obj, queue_type )
self.compute_object_statistics( obj )
queue = "log_data:"+queue_type+":"+obj["schedule_name"]+":"+str(obj["step"])
json_string = json.dumps(obj)
compact_data = base64.b64encode(json_string)
self.redis.lpush( queue, json_string )
self.redis.ltrim( queue,0,100)
def initialize_object( self, name,schedule_name,step ):
obj = {}
obj["name"] = name
obj["time"] = time.time()
obj["schedule_name"] = schedule_name
obj["step"] = step
obj["fields"] = {}
return obj
def initialize_field( self, obj ,field):
if obj["fields"].has_key(field) == False:
obj["fields"][field] = {}
obj["fields"][field]["max"] = -1000000
obj["fields"][field]["min"] = 1000000
obj["fields"][field]["count"] = 0
obj["fields"][field]["data"] = []
def compute_object_statistics( self, obj ):
#print "compute object statistics", obj
for j in obj["fields"] :
temp = obj["fields"][j]
temp["total"] = 0
count = 0
for m in temp["data"]:
m = float(m)
count = count +1
if count > 5:
temp["total"] = temp["total"] + m
#print "count ",count
if count > 5:
temp["average"] = temp["total"]/(count -5)
else:
temp["average"] = 0
temp["std"] = 0
count = 0
for m in temp["data"]:
m = float(m)
count = count +1
if count > 5 :
temp["std"] = temp["std"] + (m -temp["average"])*(m-temp["average"])
temp["std"] = math.sqrt(temp["std"]/(count-5))
else:
temp["std"] = 0
## 1 gallon is 0.133681 ft3
## assuming a 5 foot radius
## a 12 gallon/hour head 0.2450996343 inch/hour
## a 14 gallon/hour head 0.2859495733 inch/hour
## a 16 gallon/hour head 0.3267995123 inch/hour
##
##
##
##
## capacity of soil
## for silt 2 feet recharge rate 30 % recharge inches -- .13 * 24 *.3 = .936 inch
## for sand 1 feet recharge rate 30 % recharge inches -- .06 * 12 *.3 = .216 inch
##
## recharge rate for is as follows for 12 gallon/hour head:
## sand 1 feet .216/.245 which is 52 minutes
## silt 2 feet recharge rate is 3.820 hours or 229 minutes
##
## {"controller":"satellite_1", "pin": 9, "recharge_eto": 0.216, "recharge_rate":0.245 },
## eto_site_data
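## Worked example of the figures above (a sketch only; the head size, 5 ft radius
## and soil numbers are the assumed values from this comment block, not read from
## configuration):
##   12 gal/hr * 0.133681 ft^3/gal = 1.6042 ft^3/hr spread over a 5 ft radius
##   circle (pi * 5^2 = 78.54 ft^2) -> 1.6042 / 78.54 = 0.0204 ft/hr = 0.245 in/hr,
##   i.e. the 0.245 recharge_rate shown in the eto_site_data sample line above.
##   Silt capacity: 0.13 * 24 in * 0.3 = 0.936 in, so the refill time is
##   0.936 / 0.245 * 60 ~= 229 minutes, which matches what find_largest_runtime()
##   computes below via (deficient / recharge_rate) * 60.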
def eto_update( self, schedule_run_time, io_list ):
self.eto_site_data = self.app_files.load_file( "eto_site_setup.json" )
manage_eto = self.redis.hget( "CONTROL_VARIABLES","ETO_MANAGE_FLAG" )
if manage_eto == None:
manage_eto = 1
self.redis.hset("CONTROL_VARIABLES", "ETO_MANAGE_FLAG",manage_eto)
manage_eto = int( manage_eto )
if manage_eto == 1:
sensor_list = self.find_queue_names( io_list )
if len(sensor_list) != 0:
run_time = self.find_largest_runtime( schedule_run_time, sensor_list )
if run_time < schedule_run_time :
schedule_run_time = run_time
return schedule_run_time
def find_queue_names( self, io_list ):
eto_values = []
for j in io_list:
controller = j["remote"]
bits = j["bits"]
bit = bits[0]
index = 0
for m in self.eto_site_data:
if (m["controller"] == controller) and (m["pin"] == bit):
queue_name = controller+"|"+str(bit)
data = self.redis.hget( "ETO_RESOURCE", queue_name )
eto_values.append( [index, data, queue_name ] )
index = index +1
#print "eto values ",eto_values
return eto_values
def find_largest_runtime( self, run_time, sensor_list ):
runtime = 0
for j in sensor_list:
index = j[0]
deficient = float(j[1])
eto_temp = self.eto_site_data[index]
recharge_eto = float( eto_temp["recharge_eto"] )
recharge_rate = float(eto_temp["recharge_rate"])
if float(deficient) > recharge_eto :
runtime_temp = (deficient /recharge_rate)*60
if runtime_temp > runtime :
runtime = runtime_temp
#print "run time",runtime
return runtime
def update_eto_queue_a( self, run_time, io_list ):
self.eto_site_data = self.app_files.load_file( "eto_site_setup.json" )
manage_eto = self.redis.hget( "CONTROL_VARIABLES","ETO_MANAGE_FLAG" )
if manage_eto == None:
manage_eto = 1
self.redis.hset( "CONTROL_VARIABLES","ETO_MANAGE_FLAG",manage_eto)
manage_eto = int( manage_eto )
if manage_eto == 1:
sensor_list = self.find_queue_names( io_list )
if len(sensor_list) != 0:
self.update_eto_queue(run_time,sensor_list)
def update_eto_queue( self, run_time, sensor_list ):
for l in sensor_list:
j_index = l[0]
queue_name = l[2]
j = self.eto_site_data[ j_index ]
deficient = self.redis.hget("ETO_RESOURCE", queue_name )
if deficient == None:
deficient = 0
else:
deficient = float(deficient)
recharge_rate = float(j["recharge_rate"])
deficient = deficient - (recharge_rate/60)*run_time
if deficient < 0 :
deficient = 0
self.redis.hset( "ETO_RESOURCE", queue_name, deficient )
class SprinklerQueueControl():
def __init__(self,alarm_queue,redis):
self.alarm_queue = alarm_queue
self.redis = redis
#
# This function takes data from the IRRIGATION QUEUE And Transferrs it to the IRRIGATION_CELL_QUEUE
# IRRIGATION_CELL_QUEUE only has one element in it
#
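#
# Each queue element is a base64-encoded JSON blob. Based on load_step_data()
# in SprinklerControl below, an IRRIGATION_STEP element looks roughly like this
# (illustrative values only):
#   { "type": "IRRIGATION_STEP", "schedule_name": "front_yard", "step": 1,
#     "io_setup": [ { "remote": "satellite_1", "bits": [9] } ],
#     "run_time": 20, "elasped_time": 0, "eto_enable": true }
# ("elasped_time" is the spelling used for that key throughout this module.)
#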
def load_irrigation_cell(self,chainFlowHandle, chainObj, parameters,event ):
#print "load irrigation cell ######################################################################"
## if queue is empty the return
## this is for resuming an operation
length = self.redis.llen("QUEUES:SPRINKLER:IRRIGATION_CELL_QUEUE" )
#print "made it here cell ", length
if length > 0:
return "RESET"
length = self.redis.llen("QUEUES:SPRINKLER:IRRIGATION_QUEUE")
#print "length queue ",length
if length == 0:
return "RESET"
if self.redis.hget("CONTROL_VARIABLES","SUSPEND") == "ON":
return "RESET"
compact_data = self.redis.rpop( "QUEUES:SPRINKLER:IRRIGATION_QUEUE" )
json_string = base64.b64decode(compact_data)
json_object = json.loads(json_string)
if json_object["type"] == "RESISTANCE_CHECK":
chainFlowHandle.enable_chain_base( ["resistance_check"])
self.redis.hset("CONTROL_VARIABLES","SUSPEND","ON")
return "RESET"
if json_object["type"] == "CHECK_OFF":
chainFlowHandle.enable_chain_base( ["check_off_chain"])
self.redis.hset("CONTROL_VARIABLES","SUSPEND","ON")
return "RESET"
if json_object["type"] == "CLEAN_FILTER":
chainFlowHandle.enable_chain_base( ["clean_filter_action_chain"])
self.redis.hset("CONTROL_VARIABLES","SUSPEND","ON")
return "RESET"
if json_object["type"] == "IRRIGATION_STEP":
#print "irrigation step"
self.redis.lpush( "QUEUES:SPRINKLER:IRRIGATION_CELL_QUEUE", compact_data )
'''
if json_object["type"] == "START_SCHEDULE" :
self.redis.set( "schedule_step_number", json_object["step_number"] )
self.store_event_queue( "irrigation_schedule_start", json_object )
if json_object["type"] == "END_SCHEDULE" :
self.store_event_queue( "irrigation_schedule_stop", json_object )
'''
#print "load irrigation cell CONTINUE"
return "DISABLE"
class SprinklerControl():
def __init__(self, irrigation_control,alarm_queue,redis):
self.irrigation_control = irrigation_control
self.alarm_queue = alarm_queue
self.redis = redis
self.commands = {}
self.commands["OFFLINE"] = self.go_offline
self.commands["QUEUE_SCHEDULE"] = self.queue_schedule
self.commands["QUEUE_SCHEDULE_STEP"] = self.queue_schedule_step
self.commands["QUEUE_SCHEDULE_STEP_TIME"] = self.queue_schedule_step_time
self.commands["RESTART_PROGRAM"] = self.restart_program #tested
self.commands["NATIVE_SCHEDULE"] = self.queue_schedule_step_time
self.commands["NATIVE_SPRINKLER"] = self.direct_valve_control
self.commands["CLEAN_FILTER"] = self.clean_filter #tested
self.commands["OPEN_MASTER_VALVE"] = self.open_master_valve #tested
self.commands["CLOSE_MASTER_VALVE"] = self.close_master_valve #tested
self.commands["RESET_SYSTEM"] = self.reset_system #tested
self.commands["CHECK_OFF"] = self.check_off #tested
self.commands["SUSPEND"] = self.suspend #tested
self.commands["RESUME" ] = self.resume #tested
self.commands["SKIP_STATION"] = self.skip_station
self.commands["RESISTANCE_CHECK"] = self.resistance_check
self.app_files = load_files.APP_FILES(redis)
def dispatch_sprinkler_mode(self,chainFlowHandle, chainObj, parameters,event):
#try:
length = self.redis.llen( "QUEUES:SPRINKLER:CTRL")
#print length
if length > 0:
data = self.redis.rpop("QUEUES:SPRINKLER:CTRL")
data = base64.b64decode(data)
object_data = json.loads(data )
#print object_data["command"]
print "object_data",object_data
if self.commands.has_key( object_data["command"] ) :
self.commands[object_data["command"]]( object_data,chainFlowHandle, chainObj, parameters,event )
else:
self.alarm_queue.store_past_action_queue("Bad Irrigation Command","RED",object_data )
raise
#except:
#print "exception in dispatch mode"
#quit()
def suspend( self, *args ):
self.alarm_queue.store_past_action_queue("SUSPEND_OPERATION","YELLOW" )
self.irrigation_control.turn_off_master_valves()
self.irrigation_control.disable_all_sprinklers()
self.redis.hset("CONTROL_VARIABLES","SUSPEND","ON")
def resume( self, *args ):
self.alarm_queue.store_past_action_queue("RESUME_OPERATION","GREEN" )
self.redis.hset("CONTROL_VARIABLES","SUSPEND","OFF")
def skip_station( self, *args ):
self.alarm_queue.store_past_action_queue("SKIP_STATION","YELLOW" ,{"skip: on"} )
self.redis.hset("CONTROL_VARIABLES","SKIP_STATION","ON" )
def resistance_check( self, object_data, chainFlowHandle, chainObj, parameters, event ):
json_object = {}
json_object["type"] = "RESISTANCE_CHECK"
json_string = json.dumps( json_object)
compact_data = base64.b64encode(json_string)
self.redis.lpush( "QUEUES:SPRINKLER:IRRIGATION_QUEUE", compact_data )
alarm_queue.store_past_action_queue( "RESISTANCE_CHECK", "GREEN", { "action":"start" } )
def check_off( self,object_data,chainFlowHandle, chainObj, parameters,event ):
json_object = {}
json_object["type"] = "CHECK_OFF"
json_string = json.dumps( json_object)
compact_data = base64.b64encode(json_string)
self.redis.lpush( "QUEUES:SPRINKLER:IRRIGATION_QUEUE", compact_data )
alarm_queue.store_past_action_queue( "CHECK_OFF", "GREEN", { "action":"start" } )
def clean_filter( self, object_data,chainFlowHandle, chainObj, parameters,event ):
json_object = {}
json_object["type"] = "CLEAN_FILTER"
json_string = json.dumps( json_object)
compact_data = base64.b64encode(json_string)
self.redis.lpush( "QUEUES:SPRINKLER:IRRIGATION_QUEUE", compact_data )
alarm_queue.store_past_action_queue( "CLEAN_FILTER", "GREEN", { "action":"start" } )
def go_offline( self, object_data,chainFlowHandle, chainObj, parameters,event ):
self.alarm_queue.store_past_action_queue("OFFLINE","RED" )
self.redis.hset("CONTROL_VARIABLES","sprinkler_ctrl_mode","OFFLINE")
self.irrigation_control.turn_off_master_valves()
self.irrigation_control.disable_all_sprinklers()
self.clear_redis_sprinkler_data()
self.clear_redis_irrigate_queue()
self.redis.hset( "CONTROL_VARIABLES","schedule_name","OFFLINE")
self.redis.hset( "CONTROL_VARIABLES","current_log_object", None )
self.redis.hset( "CONTROL_VARIABLES","flow_log_object", None ) ### not sure of
self.redis.hset( "CONTROL_VARIABLES","SUSPEND","ON")
chainFlowHandle.disable_chain_base( ["monitor_irrigation_job_queue","monitor_irrigation_cell"])
chainFlowHandle.enable_chain_base( ["monitor_irrigation_job_queue"])
def queue_schedule( self, object_data,chainFlowHandle, chainObj, parameters,event ):
self.schedule_name = object_data["schedule_name"]
self.load_auto_schedule(self.schedule_name)
#self.redis.hset("CONTROL_VARIABLES","SUSPEND","OFF")
self.redis.hset("CONTROL_VARIABLES","SKIP_STATION","OFF")
self.alarm_queue.store_past_action_queue("QUEUE_SCHEDULE","GREEN",{ "schedule":self.schedule_name } )
def queue_schedule_step( self, object_data,chainFlowHandle, chainObj, parameters,event ):
self.schedule_name = object_data["schedule_name"]
self.schedule_step = object_data["step"]
self.schedule_step = int(self.schedule_step)
self.alarm_queue.store_past_action_queue("QUEUE_SCHEDULE_STEP","GREEN",{ "schedule":self.schedule_name,"step":self.schedule_step } )
#print "queue_schedule",self.schedule_name,self.schedule_step
self.load_step_data( self.schedule_name, self.schedule_step ,None,True )
#self.redis.hset("CONTROL_VARIABLES","SUSPEND","OFF")
self.redis.hset("CONTROL_VARIABLES","SKIP_STATION","OFF")
def queue_schedule_step_time( self, object_data,chainFlowHandle, chainObj, parameters,event ):
self.schedule_name = object_data["schedule_name"]
self.schedule_step = object_data["step"]
self.schedule_step_time = object_data["run_time"]
self.alarm_queue.store_past_action_queue("DIAGNOSTICS_SCHEDULE_STEP_TIME","YELLOW" , {"schedule_name":self.schedule_name, "schedule_step":self.schedule_step,"schedule_time":self.schedule_step_time})
self.schedule_step = int(self.schedule_step)
self.schedule_step_time = int(self.schedule_step_time)
self.irrigation_control.turn_off_master_valves()
self.irrigation_control.disable_all_sprinklers()
self.clear_redis_sprinkler_data()
self.clear_redis_irrigate_queue()
self.load_step_data( self.schedule_name, self.schedule_step, self.schedule_step_time,False )
self.redis.hset("CONTROL_VARIABLES","SUSPEND","OFF")
self.redis.hset("CONTROL_VARIABLES","SKIP_STATION","OFF")
def direct_valve_control( self, object_data,chainFlowHandle, chainObj, parameters,event ):
remote = object_data["controller"]
pin = object_data["pin"]
schedule_step_time = object_data["run_time"]
pin = int(pin)
schedule_step_time = int(schedule_step_time)
self.alarm_queue.store_past_action_queue("DIRECT_VALVE_CONTROL","YELLOW" ,{"remote":remote,"pin":pin,"time":schedule_step_time })
#print "made it here",object_data
self.irrigation_control.turn_off_master_valves()
self.irrigation_control.disable_all_sprinklers()
self.clear_redis_sprinkler_data()
self.clear_redis_irrigate_queue()
#print "direct_valve_control",remote,pin,schedule_step_time
self.load_native_data( remote,pin,schedule_step_time)
self.redis.hset("CONTROL_VARIABLES","SUSPEND","OFF")
self.redis.hset("CONTROL_VARIABLES","SKIP_STATION","OFF")
def open_master_valve( self, object_data,chainFlowHandle, chainObj, parameters,event ):
self.alarm_queue.store_past_action_queue("OPEN_MASTER_VALVE","YELLOW" )
self.irrigation_control.turn_on_master_valves()
chainFlowHandle.enable_chain_base([ "monitor_master_on_web"])
def close_master_valve( self, object_data,chainFlowHandle, chainObj, parameters,event ):
self.alarm_queue.store_past_action_queue("CLOSE_MASTER_VALVE","GREEN" )
chainFlowHandle.disable_chain_base( ["manual_master_valve_on_chain"])
chainFlowHandle.disable_chain_base( ["monitor_master_on_web"])
self.irrigation_control.turn_off_master_valves()
def reset_system( self, *args ):
self.alarm_queue.store_past_action_queue("REBOOT","RED" )
self.redis.hset( "CONTROL_VARIABLES","sprinkler_ctrl_mode","RESET_SYSTEM")
os.system("reboot")
def restart_program( self, *args ):
self.alarm_queue.store_past_action_queue("RESTART","RED" )
self.redis.hset( "CONTROL_VARIABLES","sprinkler_ctrl_mode","RESTART_PROGRAM")
quit()
def clear_redis_irrigate_queue( self,*args ):
#print "clearing irrigate queue"
self.redis.delete( "QUEUES:SPRINKLER:IRRIGATION_QUEUE" )
self.redis.delete( "QUEUES:SPRINKLER:IRRIGATION_CELL_QUEUE")
def clear_redis_sprinkler_data(self):
self.redis.hset("CONTROL_VARIABLES", "sprinkler_ctrl_mode","OFFLINE")
self.redis.hset( "CONTROL_VARIABLES","schedule_name","offline" )
self.redis.hset("CONTROL_VARIABLES", "schedule_step_number",0 )
self.redis.hset("CONTROL_VARIABLES", "schedule_step",0 )
self.redis.hset("CONTROL_VARIABLES", "schedule_time_count",0 )
self.redis.hset( "CONTROL_VARIABLES","schedule_time_max",0 )
def load_auto_schedule( self, schedule_name):
schedule_control = self.get_json_data( schedule_name )
step_number = len( schedule_control["schedule"] )
###
### load schedule start
###
###
#json_object = {}
#json_object["type"] = "START_SCHEDULE"
#json_object["schedule_name"] = schedule_name
#json_object["step_number"] = step_number
#json_string = json.dumps( json_object)
#self.redis.lpush( "QUEUES:SPRINKLER:IRRIGATION_QUEUE", json_string )
###
### load step data
###
###
for i in range(1,step_number+1):
self.load_step_data( schedule_name, i ,None,True )
###
### load schedule end
###
###
#json_object = {}
#json_object["type"] = "END_SCHEDULE"
#json_object["schedule_name"] = schedule_name
#json_object["step_number"] = step_number
#json_string = json.dumps( json_object)
#self.redis.lpush( "QUEUES:SPRINKLER:IRRIGATION_QUEUE", json_string )
# note schedule_step_time can be None then use what is in the schedule
def load_step_data( self, schedule_name, schedule_step, schedule_step_time ,eto_flag ):
#print "load step data schedule name ----------------->",schedule_name, schedule_step, schedule_step_time
temp = self.get_schedule_data( schedule_name, schedule_step)
if temp != None :
schedule_io = temp[0]
schedule_time = temp[1]
if schedule_step_time == None:
schedule_step_time = schedule_time
json_object = {}
json_object["type"] = "IRRIGATION_STEP"
json_object["schedule_name"] = schedule_name
json_object["step"] = schedule_step
json_object["io_setup"] = schedule_io
json_object["run_time"] = schedule_step_time
json_object["elasped_time"] = 0
json_object["eto_enable"] = eto_flag
json_string = json.dumps( json_object)
compact_data = base64.b64encode(json_string)
#print "load step data ===== step data is queued"
self.redis.lpush( "QUEUES:SPRINKLER:IRRIGATION_QUEUE", compact_data )
else:
self.store_event_queue( "non_existant_schedule", json_object )
raise # non schedule
# this is for loading user specified data
def load_native_data( self, remote,bit,time ):
json_object = {}
json_object["type"] = "IRRIGATION_STEP"
json_object["schedule_name"] = "MANUAL"
json_object["step"] = 1
json_object["io_setup"] = [{ "remote":remote, "bits":[bit] }]
json_object["run_time"] = time
json_object["elasped_time"] = 0
json_object["eto_enable"] = False
json_string = json.dumps( json_object)
compact_data = base64.b64encode(json_string)
#print "native load",json_string
self.redis.lpush( "QUEUES:SPRINKLER:IRRIGATION_QUEUE", compact_data)
#print self.redis.llen("QUEUES:SPRINKLER:IRRIGATION_QUEUE")
def get_schedule_data( self, schedule_name, schedule_step):
schedule_control = self.get_json_data( schedule_name )
if schedule_control != None:
io_control = schedule_control["schedule"][schedule_step -1]
m = io_control[0]
schedule_time = m[2]
# format io_control
new_io_control = []
for i in io_control:
temp = { }
temp["remote"] = i[0]
temp["bits"] = i[1]
new_io_control.append(temp)
return [ new_io_control, schedule_time ]
return None
def get_json_data( self, schedule_name ):
#print("get json data ",schedule_name)
sprinkler_ctrl = self.app_files.load_file("sprinkler_ctrl.json")
for j in sprinkler_ctrl :
if j["name"] == schedule_name:
json_data=open("app_data_files/"+j["link"])
json_data = json.load(json_data)
#print "json data",json_data
return json_data
return None
class Monitor():
#
# Measures current and flow rate every minute
# Up
#
def __init__(self, redis, basic_io_control,counter_devices,analog_devices, gpio_bit_input_devices, alarm_queue, udp_servers ):
self.redis = redis
self.basic_io_control = basic_io_control
self.counter_devices = counter_devices
self.analog_devices = analog_devices
self.gpio_inputs = gpio_bit_input_devices
self.alarm_queue = alarm_queue
self.counter_time_ref = time.time()
self.udp_servers = udp_servers
def log_clean_filter( self,*args):
self.alarm_queue.store_past_action_queue("CLEAN_FILTER","GREEN" )
self.redis.hset("CONTROLLER_STATUS","clean_filter",time.time() )
def set_suspend( self, *args):
self.redis.hset("CONTROL_VARIABLES","SUSPEND","ON")
def set_resume( self,*args):
self.redis.hset("CONTROL_VARIABLES","SUSPEND","OFF")
def verify_resume( self, *args):
if self.redis.hget("CONTROL_VARIABLES","SUSPEND") == "OFF":
return "DISABLE"
else:
return "HALT"
def clear_cleaning_sum(self, *args):
redis.hset("CONTROL_VARIABLES","cleaning_sum",0)
def check_to_clean_filter( self, chainFlowHandle, chainObj, parameters,event ):
cleaning_interval = redis.hget("CONTROL_VARIABLES","CLEANING_INTERVAL")
flow_value = float( check_redis_value( "global_flow_sensor_corrected" ) )
cleaning_sum = float( check_redis_value( "cleaning_sum") )
cleaning_sum = cleaning_sum + flow_value
redis.hset("CONTROL_VARIABLES","cleaning_sum",cleaning_sum)
if cleaning_interval == 0 :
return # no cleaning interval active
if cleaning_sum > cleaning_interval :
chainFlowHandle.enable_chain_base(["clean_filter_action_chain"])
def update_modbus_statistics( self, *args ):
servers = []
for i in self.udp_servers:
temp = modbus_control.get_all_counters(i)
if temp[0] == True:
servers.append(i)
data = json.loads(temp[1])
for j in data.keys():
if redis.hexists("MODBUS_STATISTICS:"+i,j) == False:
self.redis.hset("MODBUS_STATISTICS:"+i,j,json.dumps(data[j]))
else:
temp_json = redis.hget("MODBUS_STATISTICS:"+i,j)
temp_value = json.loads(temp_json)
temp_value["address"] = j
temp_value["failures"] = int(temp_value["failures"]) +int(data[j]["failures"])
temp_value["counts"] = int(temp_value["counts"]) + int(data[j]["counts"])
temp_value["total_failures"] = int(temp_value["total_failures"]) +int(data[j]["total_failures"])
temp_json = json.dumps(temp_value)
self.redis.hset("MODBUS_STATISTICS:"+i,j,temp_json)
modbus_control.clear_all_counters(i)
self.redis.set("MODBUS_INTERFACES",json.dumps(servers))
def clear_modbus_statistics( self,*args):
interfaces_json = self.redis.get("MODBUS_INTERFACES")
interfaces_value = json.loads(interfaces_json)
for i in interfaces_value:
self.redis.delete("MODBUS_STATISTICS:"+i)
def update_time_stamp( self, *args):
self.alarm_queue.update_time_stamp()
def measure_input_gpio( self, *args ):
for i in self.gpio_inputs:
self.basic_io_control.get_gpio_bit(i) # need to store values
def measure_flow_rate ( self, *args ):
deltat = time.time()-self.counter_time_ref
self.counter_time_ref = time.time()
for i in counter_devices.keys():
flow_value = self.basic_io_control.measure_counter(deltat,i)
self.redis.lpush("QUEUES:SPRINKLER:FLOW:"+str(i),flow_value )
self.redis.ltrim("QUEUES:SPRINKLER:FLOW:"+str(i),0,800)
if i == "main_sensor":
self.redis.hset("CONTROL_VARIABLES","global_flow_sensor",flow_value )
conversion_rate = counter_devices[i]["conversion_factor"]
self.redis.hset("CONTROL_VARIABLES","global_flow_sensor_corrected",flow_value*conversion_rate )
def measure_current( self, *args ):
for i in analog_devices.keys():
current = self.basic_io_control.get_analog( i )
self.redis.lpush( "QUEUES:SPRINKLER:CURRENT:"+i,current )
self.redis.ltrim( "QUEUES:SPRINKLER:CURRENT:"+i,0,800)
self.redis.hset( "CONTROL_VARIABLES",i, current )
def measure_current_a( self, *args ):
for i in analog_devices.keys():
current = self.basic_io_control.get_analog( i )
self.redis.hset( "CONTROL_VARIABLES",i, current )
class PLC_WATCH_DOG():
def __init__(self, redis, alarm_queue,watch_dog_interface ):
self.redis = redis
self.alarm_queue = alarm_queue
self.watch_dog_interface = watch_dog_interface
def read_wd_flag( self,*arg ):
try:
return_value = self.watch_dog_interface.read_wd_flag()
#print "read_wd_flag",return_value
except:
pass
return "DISABLE"
def write_wd_flag( self,value,*arg ):
try:
self.watch_dog_interface.write_wd_flag(1)
except:
pass
return "DISABLE"
def read_mode_switch( self,value,*arg ):
return_value = self.watch_dog_interface.read_mode_switch()
#print "read_mode_switch",return_value
return "DISABLE"
def read_mode( self,value,*arg ):
return_value = self.watch_dog_interface.read_mode()
#print "read_mode_switch",return_value
return "DISABLE"
if __name__ == "__main__":
import datetime
import time
import string
import urllib2
import math
import redis
import json
import eto
import py_cf
import os
import base64
import io_control_backup.alarm_queue
import io_control_backup.modbus_UDP_device
import io_control_backup.click
import io_control_backup.basic_io_control
import io_control_backup.irrigation_ctl
import io_control_backup.new_instrument
import watch_dog
#ir_ctl = Irrigation_Control("/media/mmc1/app_data_files","/media/mmc1/system_data_files")
from data_management.configuration import *
redis = redis.StrictRedis( host = '192.168.1.84', port=6379, db = 0 )
app_files = load_files.APP_FILES(redis)
sys_files = load_files.SYS_FILES(redis)
redis_dict = {}
redis_dict["GPIO_BITS"] = "GPIO_BITS"
redis_dict["GPIO_REGS"] = "GPIO_REGS"
redis_dict["GPIO_ADC"] = "GPIO_ADC"
redis_dict["COUNTER"] = "COUNTER"
redis.hset("CONTROL_VARIABLES","SUSPEND","OFF")
redis.hincrby("CONTROLLER_STATUS", "irrigation_resets")
alarm_queue = io_control_backup.alarm_queue.AlarmQueue( redis,"cloud_alarm_queue" )
io_server = io_control_backup.modbus_UDP_device.ModbusUDPDeviceClient(remote_devices , "192.168.1.84")
plc_click = io_control_backup.click.PLC_Click( alarm_queue, io_server, redis, redis_dict )
modbus_control = io_control_backup.modbus_UDP_device.ModbusUDPDeviceClient( [], "192.168.1.84")
plc_map = { "CLICK":plc_click }
basic_io_control = io_control_backup.basic_io_control.BasicIo( redis_dict = redis_dict, redis_server=redis, plc_interface=plc_map ,
gpio_bit_input_devices=gpio_bit_input_devices, gpio_bit_output_devices= None,
gpio_reg_input_devices=None, gpio_reg_output_devices= None,
analog_devices=analog_devices, counter_devices=counter_devices )
irrigation_io_control = io_control_backup.irrigation_ctl.IrrigationControl( irrigation_io, master_valve_list, plc_map, redis )
plc_watch_dog_interface = io_control_backup.irrigation_ctl.WatchDogControl( remote_devices, plc_map )
plc_watch_dog = PLC_WATCH_DOG( redis, alarm_queue,plc_watch_dog_interface )
monitor = Monitor(redis, basic_io_control, counter_devices, analog_devices, gpio_bit_input_devices,alarm_queue, ["192.168.1.84"] )
monitor.update_modbus_statistics()
wd_client = watch_dog.Watch_Dog_Client(redis, "irrigation_ctrl","irrigation control")
sprinkler_control = SprinklerControl(irrigation_io_control,alarm_queue,redis)
sprinkler_element = SprinklerQueueElementControl(redis,irrigation_io_control,alarm_queue,counter_devices )
sprinkler_queue = SprinklerQueueControl( alarm_queue, redis )
def check_redis_value( key):
value = redis.hget( "CONTROL_VARIABLES",key )
if value == None:
value = 0
return value
def clear_counters(*args):
for i,j in remote_devices.items():
ip = j["UDP"]
io_server.clear_all_counters(ip)
def check_off ( *args ):
temp = float(redis.hget( "CONTROL_VARIABLES","global_flow_sensor_corrected" ))
redis.hset("CONTROLLER_STATUS", "check_off",temp )
if temp > 1.:
redis.hset("ALARM","check_off",True)
redis.hset("CONTROL_VARIABLES","SUSPEND","ON")
alarm_queue.store_past_action_queue( "CHECK_OFF", "RED", { "action":"bad","flow_rate":temp } )
return_value = "DISABLE"
else:
redis.hset("CONTROL_VARIABLES","SUSPEND","OFF")
redis.hset("ALARMS","check_off",False)
alarm_queue.store_past_action_queue( "CHECK_OFF", "GREEN", { "action":"good","flow_rate":temp } )
return_value = "DISABLE"
return return_value
def detect_on_switch_on( self,*args):
for i in master_switch_keys:
try:
value = int(redis.hget("GPIO_BITS",i))
except:
value = 0
if value != 0:
print "++++++++++",value
return "DISABLE"
return "RESET"
def detect_off_switches(*args):
#print "detect off", master_reset_keys
for i in master_reset_keys:
try:
value = int(redis.hget("GPIO_BITS",i))
except:
value = 0
if value != 0:
print "-------",value
return True
return False
def clear_redis_set_keys( *args):
for i in master_switch_keys:
redis.hset("GPIO_BITS",i,0)
def clear_redis_clear_keys( *args):
for i in master_reset_keys:
redis.hset("GPIO_BITS",i,0)
def detect_switch_off( chainFlowHandle, chainObj, parameters, event ):
returnValue = "RESET"
if detect_off_switches() == True:
clear_redis_clear_keys()
returnValue = "DISABLE"
return returnValue
def check_for_uncompleted_sprinkler_element( chainFlowHandle,chainObj,parameters,event ):
#alarm_queue.store_past_action_queue("START_UP","RED" )
length = redis.llen("QUEUES:SPRINKLER:IRRIGATION_CELL_QUEUE" )
if length > 0:
#print "enabling chain"
chainFlowHandle.enable_chain_base( ["monitor_irrigation_cell"])
def check_irrigation_queue( chainFlowHandle,chainObj,parameters,event ):
#alarm_queue.store_past_action_queue("START_UP","RED" )
length = redis.llen("QUEUES:SPRINKLER:IRRIGATION_CELL_QUEUE" )
if length > 0:
print "Jobs in Queue"
return "TERMINATE"
else:
return "DISABLE"
def add_resistance_entry( remote_dictionary, pin_dictionary, remote, pin ):
if ( remote not in remote_dictionary ) or ( pin not in pin_dictionary ):
remote_dictionary.add( remote )
pin_dictionary.add( pin )
json_object = [ remote,pin]
json_string = json.dumps(json_object)
print "json_string",json_string
queue_object = base64.b64encode(json_string)
redis.lpush( "QUEUES:SPRINKLER:RESISTANCE_CHECK_QUEUE",queue_object )
def update_entry( remote_dictionary, pin_dictionary, remote,pin, schedule , dictionary ):
if dictionary.has_key( remote ) == False:
dictionary[remote] = {}
if dictionary[remote].has_key( pin ) == False:
dictionary[remote][pin] = list(set())
dictionary[remote][pin] = set( dictionary[remote][pin])
dictionary[remote][pin].add( schedule )
dictionary[remote][pin] = list( dictionary[remote][pin])
add_resistance_entry( remote_dictionary, pin_dictionary, remote, pin )
def assemble_relevant_valves( *args):
remote_dictionary = set()
pin_dictionary = set()
dictionary = {}
print "assemble relevant valves"
redis.delete( "QUEUES:SPRINKLER:RESISTANCE_CHECK_QUEUE" )
sprinkler_ctrl = app_files.load_file("sprinkler_ctrl.json")
for j in sprinkler_ctrl:
schedule = j["name"]
json_data =app_files.load_file(j["link"])
for i in json_data["schedule"]:
for k in i:
remote = k[0]
pin = str(k[1][0])
update_entry( remote_dictionary, pin_dictionary, remote,pin, schedule , dictionary )
master_valve = sys_files.load_file("master_valve_setup.json")
for j in master_valve:
remote = j[0]
pin = str(j[1][0])
update_entry( remote_dictionary, pin_dictionary, remote,pin, schedule , dictionary )
remote = j[2]
pin = str(j[3][0])
update_entry( remote_dictionary, pin_dictionary, remote,pin, schedule , dictionary )
json_string = json.dumps(dictionary)
queue_object = base64.b64encode(json_string)
redis.set( "SPRINKLER_RESISTANCE_DICTIONARY",queue_object)
def test_individual_valves( chainFlowHandle,chainObj,parameters,event ):
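       # two-state machine driven by TIME_TICK events: state 0 pops a remote/pin from the resistance
       # check queue and energizes that valve for a minute; state 1 logs the measured coil current,
       # shuts the valve off and returns to state 0 (parameters[1] is the state, [2]/[3] hold remote/pin)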
returnValue = "HALT"
if event["name"] == "INIT" :
parameters[1] = 0 # state variable
else:
if event["name"] == "TIME_TICK":
if parameters[1] == 0:
if redis.llen( "QUEUES:SPRINKLER:RESISTANCE_CHECK_QUEUE" ) == 0:
returnValue = "DISABLE"
else:
compact_data = redis.rpop( "QUEUES:SPRINKLER:RESISTANCE_CHECK_QUEUE" )
json_string = base64.b64decode(compact_data)
json_object = json.loads(json_string)
print "json object",json_object
irrigation_io_control.disable_all_sprinklers()
irrigation_io_control.load_duration_counters( 1 ) # 1 minute
irrigation_io_control.turn_on_valve( [{"remote": json_object[0], "bits":[int(json_object[1])]}] ) # {"remote":xxxx,"bits":[] }
parameters[1] = 1
parameters[2] = json_object[0]
parameters[3] = json_object[1]
else:
monitor.measure_current()
try:
coil_current = float( redis.hget( "CONTROL_VARIABLES","coil_current" ))
print "coil current",coil_current
queue = "log_data:resistance_log:"+parameters[2]+":"+parameters[3]
redis.lpush(queue, coil_current ) # necessary for web server
redis.ltrim(queue,0,10)
queue = "log_data:resistance_log_cloud:"+parameters[2]+":"+parameters[3]
redis.lpush(queue, json.dumps( { "current": coil_current, "time":time.time()} )) #necessary for cloud
redis.ltrim(queue,0,10)
except:
raise #should not happen
irrigation_io_control.disable_all_sprinklers()
parameters[1] = 0
return returnValue
#
# Adding chains
#
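   # note: the second argument to define_chain appears to select whether the chain starts enabled;
   # chains defined with False are started later via Enable_Chain links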
cf = py_cf.CF_Interpreter()
cf.define_chain("reboot_message", True) #tested
cf.insert_link( "link_1", "One_Step", [ clear_redis_set_keys ] )
cf.insert_link( "link_2", "One_Step", [ clear_redis_clear_keys ] )
cf.insert_link( "link_2", "One_Step", [ plc_watch_dog.read_mode ] )
cf.insert_link( "link_3", "One_Step", [ plc_watch_dog.read_mode_switch ] )
cf.insert_link( "link_3", "One_Step", [ irrigation_io_control.disable_all_sprinklers ] )
cf.insert_link( "link_4", "One_Step" ,[ check_for_uncompleted_sprinkler_element ] )
cf.insert_link( "link_5", "Terminate", [] )
cf.define_chain( "monitor_flow_rate", True) #tested
cf.insert_link( "link_1", "WaitEvent", [ "MINUTE_TICK" ] )
cf.insert_link( "link_2", "One_Step", [ monitor.measure_flow_rate ] )
cf.insert_link( "link_3", "One_Step", [ monitor.measure_current ] )
cf.insert_link( "link_5", "Reset", [] )
cf.define_chain("measure_input_gpio", False )# TBD
cf.insert_link( "link_1", "WaitTime", [30,0,0,0] )
cf.insert_link( "link_2", "One_Step", [ monitor.measure_input_gpio ] )
cf.insert_link( "link_3", "Reset", [] )
cf.define_chain("update_time_stamp", True) #tested
cf.insert_link( "link_1", "WaitTime", [10,0,0,0] )
cf.insert_link( "link_3", "One_Step", [ monitor.update_time_stamp ] )
cf.insert_link( "link_4", "Reset", [] )
cf.define_chain("watch_dog_thread",True) #tested
cf.insert_link( "link_0", "Log", ["Watch dog thread"] )
cf.insert_link( "link_1", "WaitEvent", [ "MINUTE_TICK" ] )
cf.insert_link( "link_3", "One_Step", [ wd_client.pat_wd ])
cf.insert_link( "link_5", "Reset", [])
cf.define_chain("clean_filter_chain", False) #tested
cf.insert_link( "link_1", "WaitTod", ["*",17,"*","*"] )
#cf.insert_link( "link_2", "Enable_Chain", [["clean_filter_action_chain"]] )
cf.insert_link( "link_3", "WaitTod", ["*",18,"*","*" ] )
cf.insert_link( "link_4", "Reset", [] )
cf.define_chain("clean_filter_action_chain", False) #tested
cf.insert_link( "link_0", "Code", [ check_irrigation_queue ] )
cf.insert_link( "link_1", "Log", ["Clean Step 1"] )
cf.insert_link( "link_2", "One_Step", [ monitor.set_suspend ])
cf.insert_link( "link_3", "One_Step", [ irrigation_io_control.disable_all_sprinklers ] )
cf.insert_link( "link_4", "One_Step", [ irrigation_io_control.turn_off_cleaning_valves ] )# turn off cleaning valve
cf.insert_link( "link_5", "One_Step", [ irrigation_io_control.turn_on_master_valves ] )# turn turn on master valve
cf.insert_link( "link_6", "WaitTime", [120,0,0,0] )
cf.insert_link( "link_1", "Log", ["Clean Step 3"] )
cf.insert_link( "link_7", "One_Step", [ irrigation_io_control.turn_on_cleaning_valves ] )# turn on cleaning valve
cf.insert_link( "link_8", "One_Step", [ irrigation_io_control.turn_off_master_valves ] )# turn turn off master valve
cf.insert_link( "link_9", "WaitTime", [30,0,0,0] )
cf.insert_link( "link_1", "Log", ["Clean Step 4"] )
cf.insert_link( "link_10", "One_Step", [ irrigation_io_control.turn_on_master_valves ] )# turn turn on master valve
cf.insert_link( "link_11", "WaitTime", [10,0,0,0] )
cf.insert_link( "link_1", "Log", ["Clean Step 5"] )
cf.insert_link( "link_12", "One_Step", [ irrigation_io_control.turn_off_cleaning_valves ] )# turn turn off master valve
cf.insert_link( "link_13", "One_Step", [ irrigation_io_control.turn_off_master_valves ] )# turn turn off cleaning valve
cf.insert_link( "link_14", "One_Step", [ irrigation_io_control.disable_all_sprinklers ] )
cf.insert_link( "link_15", "One_Step", [ monitor.clear_cleaning_sum ] )
cf.insert_link( "link_16", "One_Step", [ monitor.set_resume ])
cf.insert_link( "link_17", "One_Step", [ monitor.log_clean_filter ] )
cf.insert_link( "link_17", "Terminate", [] )
cf.define_chain("check_off", False ) # tested
cf.insert_link( "link_1", "WaitTod", ["*",16,"*","*"] )
#cf.insert_link( "link_2", "Enable_Chain", [["check_off_chain"]] )
cf.insert_link( "link_3", "WaitTod", ["*",17,"*","*" ] )
cf.insert_link( "link_4", "Reset", [] )
cf.define_chain("check_off_chain", False ) #tested
#cf.insert_link( "link_1", "Log", ["check off is active"] )
cf.insert_link( "link_16", "One_Step", [ monitor.set_suspend ] )
cf.insert_link( "link_2", "One_Step", [ irrigation_io_control.disable_all_sprinklers ] )
cf.insert_link( "link_3", "WaitTime", [15,0,0,0] )
cf.insert_link( "link_4", "One_Step", [ irrigation_io_control.turn_on_master_valves ] )# turn turn on master valve
cf.insert_link( "link_5", "One_Step", [ irrigation_io_control.turn_off_cleaning_valves ] )# turn turn off master valve
cf.insert_link( "link_6", "WaitTime", [300,0,0,0] )
cf.insert_link( "link_7", "Code", [ check_off ] )
cf.insert_link( "link_16", "One_Step", [ monitor.set_resume ])
cf.insert_link( "link_8", "One_Step", [ irrigation_io_control.turn_off_master_valves ] )# turn turn on master valve
cf.insert_link( "link_9", "Terminate", [] )
cf.define_chain("manual_master_valve_on_chain",False) #tested
#cf.insert_link( "link_1", "Log", ["manual master"] )
cf.insert_link( "link_2", "Code", [ monitor.verify_resume ])
cf.insert_link( "link_3", "One_Step", [ irrigation_io_control.turn_on_master_valves ] )
cf.insert_link( "link_4", "One_Step", [ irrigation_io_control.turn_off_cleaning_valves ] )# turn turn off master valve
cf.insert_link( "link_5", "WaitTime", [ 5,0,0,0] ) # wait 1 seconds
cf.insert_link( "link_6", "Reset", [] )
cf.define_chain("monitor_master_on_switch",False) #TBD
#cf.insert_link("link_1", "WaitTime", [5,0,0,0] )
#cf.insert_link("link_2", "Code", [ detect_on_switch_on ] )
#cf.insert_link("link_3", "One_Step", [ clear_redis_set_keys ] )
#cf.insert_link("link_4", "Enable_Chain", [["manual_master_valve_on_chain"]] )
#cf.insert_link("link_5", "Enable_Chain", [["manual_master_valve_off_chain"]] )
#cf.insert_link("link_6", "WaitTime", [3600*8,0,0,0] ) # wait 8 hours
#cf.insert_link("link_7", "Disable_Chain", [["manual_master_valve_on_chain"]] )
#cf.insert_link("link_8", "One_Step", [ irrigation_io_control.turn_off_master_valves ])
#cf.insert_link("link_9", "Reset", [])
cf.insert_link("link_9", "Halt", [])
cf.define_chain("monitor_master_on_web",False) #TBD
cf.insert_link( "link_0", "Log", ["monitor master on web"] )
cf.insert_link("link_1", "Enable_Chain", [["manual_master_valve_on_chain"]] )
cf.insert_link("link_2", "WaitTime", [ 3600*8,0,0,0] ) # wait 8 hour
cf.insert_link("link_3", "Enable_Chain", [["manual_master_valve_on_chain"]] )
cf.insert_link("link_4", "Disable_Chain", [["manual_master_valve_off_chain"]] )
cf.insert_link("link_5", "One_Step", [ irrigation_io_control.turn_off_master_valves ])
cf.insert_link("link_6", "Disable_Chain", [["monitor_master_on_web"]] )
cf.define_chain("manual_master_valve_off_chain",False ) #TBD
cf.insert_link("link_1", "WaitTime", [5,0,0,0] )
#cf.insert_link("link_1", "Code", [ detect_switch_off ] )
#cf.insert_link("link_2", "One_Step", [ clear_redis_clear_keys ] )
#cf.insert_link("link_3", "One_Step", [ clear_redis_set_keys ] )
#cf.insert_link("link_4", "Enable_Chain", [["monitor_master_on_switch"]] )
#cf.insert_link("link_5", "Disable_Chain", [["manual_master_valve_on_chain"]] )
#cf.insert_link("link_6", "Disable_Chain", [["monitor_master_on_web"]] )
#cf.insert_link("link_7", "One_Step", [ irrigation_io_control.turn_off_master_valves ] )# turn turn on master valve
#cf.insert_link("link_8", "One_Step", [ irrigation_io_control.turn_off_cleaning_valves ] )# turn turn off master valve
cf.insert_link("link_6", "Disable_Chain", [["manual_master_valve_off_chain"]] )
cf.define_chain("gpm_triggering_clean_filter",True) #TBDf
cf.insert_link( "link_1", "WaitEvent", [ "MINUTE_TICK" ] )
#cf.insert_link( "link_1", "Log", ["check to clean filter"] )
cf.insert_link( "link_2", "One_Step", [ monitor.check_to_clean_filter ] )
cf.insert_link( "link_3", "Reset", [] )
cf.define_chain("update_modbus_statistics",True) #tested
#cf.insert_link( "link_1", "Log", ["update modbus statistics"] )
cf.insert_link( "link_2", "One_Step", [ monitor.update_modbus_statistics ] )
cf.insert_link( "link_3", "WaitTime", [ 15,25,0,0] ) # wait 15 minutes
cf.insert_link( "link_4", "Reset", [] )
cf.define_chain("clear_modbus_statistics",True) #tested
cf.insert_link( "link_1", "WaitTod", ["*",1,"*","*"] )
#cf.insert_link( "link_2", "Log", ["clear modbus statistics"] )
cf.insert_link( "link_3", "One_Step", [ monitor.clear_modbus_statistics ] )
cf.insert_link( "link_4", "WaitTod", ["*",2,"*","*"] )
cf.insert_link( "link_5", "Reset", [] )
cf.define_chain("resistance_check",False) #not tested
cf.insert_link( "link_1", "Log", ["resistance check"] )
cf.insert_link( "link_2", "One_Step", [ monitor.set_suspend ])
cf.insert_link( "link_3", "One_Step", [ assemble_relevant_valves ] )
cf.insert_link( "link_4", "Code", [ test_individual_valves,0,0,0 ] )
cf.insert_link( "link_5", "One_Step", [ monitor.set_resume ])
cf.insert_link( "link_6", "Disable_Chain", [["resistance_check"]] )
cf.define_chain("plc_watch_dog", True ) #TBD
#cf.insert_link( "link_1", "Log", ["plc watch dog thread"] )
#cf.insert_link( "link_2", "One_Step", [ plc_watch_dog.read_mode ] )
#cf.insert_link( "link_3", "One_Step", [ plc_watch_dog.read_mode_switch ] )
cf.insert_link( "link_4", "One_Step", [ plc_watch_dog.read_wd_flag ] )
cf.insert_link( "link_5", "One_Step", [ plc_watch_dog.write_wd_flag ] )
cf.insert_link( "link_1", "WaitTime", [ 30,0,0,0] ) # wait 1 seconds
cf.insert_link( "link_7", "Reset", [] )
cf.define_chain( "plc_monitor_control_queue", True ) #tested
cf.insert_link( "link_1", "WaitTime", [ 1,0,0,0] ) # wait 1 seconds
cf.insert_link( "link_2", "One_Step", [ sprinkler_control.dispatch_sprinkler_mode ] )
cf.insert_link( "link_3", "Reset", [] )
cf.define_chain("monitor_irrigation_job_queue", True ) # tested
cf.insert_link( "link_1", "WaitTime", [ 5,0,0,0] ) # wait 5 seconds
cf.insert_link( "link_2", "Code", [ sprinkler_queue.load_irrigation_cell ] )
cf.insert_link( "link_3", "Code", [ sprinkler_element.start] )
cf.insert_link( "link_4", "WaitTime", [ 1,0,0,0] ) # wait 1 seconds
cf.insert_link( "link_5", "One_Step", [ monitor.measure_current ] )
cf.insert_link( "link_6", "Code", [ sprinkler_element.check_current ] )
cf.insert_link( "link_7", "Enable_Chain", [["monitor_irrigation_cell","monitor_current_sub" ]])
cf.insert_link( "link_8", "WaitEvent", ["CELL_DONE" ] )
cf.insert_link( "link_9", "Reset", [] )
cf.define_chain("monitor_current_sub", False )
cf.insert_link( "link_0", "Log" , [["monitor_current_sub chain is working"]])
cf.insert_link( "link_1", "WaitTime", [ 15,0,0,0] ) # wait 15 second
cf.insert_link( "link_2", "One_Step", [ monitor.measure_current_a ] )
cf.insert_link( "link_3", "One_Step", [ sprinkler_element.check_current ] )
cf.insert_link( "link_4", "Reset", [] )
cf.define_chain("monitor_irrigation_cell", False ) #Tested
cf.insert_link( "link_1", "WaitEvent", [ "MINUTE_TICK" ] )
cf.insert_link( "link_2", "One_Step", [ sprinkler_element.check_current ] )
cf.insert_link( "link_3", "One_Step", [ sprinkler_element.check_for_excessive_flow_rate ] )
cf.insert_link( "link_3", "Code", [ sprinkler_element.monitor ] )
cf.insert_link( "link_4", "SendEvent", ["CELL_DONE"] )
cf.insert_link( "link_5", "Disable_Chain", [["monitor_irrigation_cell","monitor_current_sub" ]])
length = redis.llen("QUEUES:SPRINKLER:IRRIGATION_CELL_QUEUE" )
cf_environ = py_cf.Execute_Cf_Environment( cf )
cf_environ.execute()
| mit | 4,540,945,076,651,094,500 | 42.301799 | 205 | 0.571051 | false | 3.385385 | false | false | false |
HeadCow/ARPS | report_crawler/crawler.py | 1 | 1533 | # -*- coding:utf-8 -*-
from __future__ import print_function
import os
import time
import shutil
import traceback
from report_crawler.spiders.__Global_function import get_localtime
from report_crawler.spiders.__Global_variable import REPORT_SAVEDIR
now_time = get_localtime(time.strftime("%Y-%m-%d", time.localtime()))
DATADIR = REPORT_SAVEDIR + '/' + str(now_time)
class Spider_starter(object):
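    # Launches every configured report spider with 'scrapy crawl', wiping any output already saved for today first.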
def crawl(self):
self.X001()
def run_spider(self, spider_name):
dirname = REPORT_SAVEDIR + '/' + str(now_time) + '/' + spider_name[len(spider_name)-3:] + '/' + spider_name[0:len(spider_name)-3]
        # If the directory already exists, clear out today's data for this spider
if os.path.exists(dirname):
shutil.rmtree(dirname, True)
        # If one of the spiders raises an error, print_exc() will tell us which one failed
try:
if not os.path.exists(DATADIR):
os.makedirs(DATADIR)
os.system('scrapy crawl ' + spider_name)
except:
traceback.print_exc()
def X001(self):
spider_list = {
'B': ['BNU001', 'BUAA001'],
'C': ['CSU001', 'CUMT001'],
'E': ['ECNU001'],
'H': ['HNU001'],
'J': ['JLU001'],
'N': ['NCU001', 'NKU001', 'NWSUAF001'],
'P': ['PKU001'],
'S': ['SCU001', 'SDU001', 'SEU001', 'SHU001', 'SUDA001', 'SWJTU001', 'SWU001', 'SYSU001'],
'T': ['THU001', 'TYUT001'],
'U': ['UESTC001'],
'W': ['WHU001'],
'Z': ['ZZU001']
}
for key in spider_list.keys():
for spider in spider_list[key]:
self.run_spider(spider)
if __name__ == '__main__':
starter = Spider_starter()
starter.crawl()
| mit | -6,852,349,848,664,271,000 | 25.894737 | 131 | 0.626223 | false | 2.647668 | false | false | false |
simpleton/eclipse2buck | build_secondary_dex_list.py | 1 | 1471 | #!/usr/bin/python
import sys
import os
from eclipse2buck import decorator
def check_force_flag(lines):
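    # return True when the plugin.properties lines contain plugin.build.force=true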
for line in lines:
tag = "plugin.build.force="
if line.startswith(tag):
line = line.strip('\r\n')
if (line[len(tag):] == "true"):
return True
return False
def extract_include(lines):
if check_force_flag(lines):
for line in lines:
tag = "plugin.build.include="
if line.startswith(tag):
line = line.strip('\r\n')
for item in line[len(tag):].split(" "):
if len(item) > 0:
print "\'" + item + "',"
@decorator.var("SECONDARY_DEX_PATTERN_LIST")
def print_secondary_pattern(folder):
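    # print one quoted entry per plugin.build.include pattern, for every plugin directory under folder whose build is forced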
for dirname in os.listdir(folder):
if os.path.isdir(folder+dirname) and (not dirname.startswith('.')):
filename = folder+dirname + "/plugin.properties"
if os.path.isfile(filename):
with open(filename) as fd:
extract_include(fd.readlines())
def dump_secondary_pattern(folder, outfile='./SECONDARY_DEX_PATTERN_LIST'):
with open(outfile, 'w') as out:
terminal = sys.stdout
sys.stdout = out
print_secondary_pattern(folder)
sys.stdout = terminal
if __name__ == "__main__":
if len(sys.argv) > 1:
root = sys.argv[1]
else:
root = "./"
print_secondary_pattern(root)
| mit | -734,623,793,647,208,100 | 27.843137 | 75 | 0.548606 | false | 3.860892 | false | false | false |
jamesmfriedman/django-primer | primer/comments/templatetags/primer_comment_tags.py | 1 | 4584 | from django import template
from django.template.loader import render_to_string
from primer.utils import get_request
from primer.comments.forms import CommentForm, StatusForm, TimelineForm, get_comment_form
from primer.comments.utils import get_content_types_hash, get_content_types_list
register = template.Library()
@register.simple_tag(takes_context=True)
def comments(context, target, **kwargs):
"""
This renders a comments list
Arguments
see setup_comment_data
"""
comment_data = setup_comment_data('comments', target, **kwargs)
return render_to_string('comments/base_comments.html', comment_data, context)
@register.simple_tag(takes_context=True)
def wall(context, target, **kwargs):
comment_data = setup_comment_data('wall', target, **kwargs)
return render_to_string('comments/base_comments.html', comment_data, context)
@register.simple_tag(takes_context=True)
def timeline(context, target, position='center', **kwargs):
kwargs['css_class_name'] = 'comments-timeline-%s' % position
comment_data = setup_comment_data('timeline', target, **kwargs)
return render_to_string('comments/base_comments.html', comment_data, context)
def setup_comment_data(comments_type, target, placeholder = None, stream = [], limit = 10, reversed = 0, read_only = 0, forms = None, tab_class = 'nav-pills', login_required = True, css_class_name=''):
"""
Sets up comment data for walls, comment lists, timelines, etc
Arguments
comments_type: comments, wall, or timeline
target: a single db object that the comments belong to
placeholder : the placeholder text for the comment input box
limit: the number of comments per page
reversed: 0 or 1 as boolean. Reverses the direction of the list and renders the form at the top or bottom
read_only: 0 or 1 as boolean. Whether or not the wall is read only
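        stream: extra objects whose content types are hashed into the session along with the target
        forms: optional comma separated list of comment form names to render instead of the default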
"""
if comments_type == 'comments':
css_class_name = 'comments-list %s' % css_class_name
else:
css_class_name = ' comments-%s %s' % (comments_type, css_class_name)
if not forms:
if comments_type == 'wall':
comment_forms = [StatusForm(target, comments_type = comments_type)]
elif comments_type == 'timeline':
comment_forms = [TimelineForm(target, comments_type = comments_type)]
else:
comment_forms = [CommentForm(target, comments_type = comments_type)]
else:
comment_forms = [get_comment_form(form)(target, comments_type = comments_type) for form in forms.replace(' ', '').split(',')]
# setup reversed properly, we only allow reversed for comments
if comments_type != 'comments':
reversed = False
# optionally overwrite the placeholder text that gets passed in
if placeholder:
        for comment_form in comment_forms:
            comment_form.fields['comment'].widget.attrs['placeholder'] = placeholder
# add this set of data to the session and get
# the comment hash
stream = list(stream)
stream.extend([target])
comment_hash = add_to_session(stream, read_only)
return {
'target' : target,
'comment_forms' : comment_forms,
'comment_hash' : comment_hash,
'limit' : limit,
'comments_type' : comments_type,
'read_only' : read_only,
'css_class_name' : css_class_name,
'tab_class' : tab_class,
'login_required' : login_required,
'is_reversed' : reversed
}
def add_to_session(target, read_only):
"""
This adds a hash that identifies the contents of a wall of comments_list in the session
This hash will get checked against when loading more comments, to make sure
They are allowed to load the content they are asking for
The hashing algorithm is sha224
Arguments
target: the target(s) that are getting hashed
"""
# for security, store a hash of this comments conetents in the users session
request = get_request()
# create our list if nonexistant
if not 'primer_comment_hashes' in request.session:
request.session['primer_comment_hashes'] = {}
# convert the stream to a serialized list of content_types and pks
target_list = get_content_types_list(target)
comment_hash = get_content_types_hash(target_list)
# add it to the session
request.session['primer_comment_hashes'][comment_hash] = {
'content_types' : target_list,
        'read_only' : bool(read_only),
    }
    request.session.save()
return comment_hash
| apache-2.0 | -1,496,575,086,178,685,700 | 34.534884 | 201 | 0.665794 | false | 3.9213 | false | false | false |
ndexbio/ndex-enrich | similarity_map_utils.py | 1 | 9365 | from __future__ import division
from ndex.networkn import NdexGraph
import data_model as dm
import operator
from math import sqrt
# TODO: only one edge between each pair of nodes. Take the best one.
def create_similarity_map_from_enrichment_files(map_name, directory, e_set_name, min_subsumption, max_edges=3, ext_link_paths=None):
e_data = dm.EnrichmentData(directory)
e_data.load_one_eset(e_set_name)
e_set = e_data.get_e_set(e_set_name)
return create_similarity_map(map_name, e_set, min_subsumption, max_edges=max_edges, ext_link_paths=ext_link_paths)
def create_similarity_map(name, e_set, min_subsumption, id_attribute="genes", max_edges=5, ext_link_paths=None):
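    # Build a similarity graph with one node per network in the e_set (annotated with its gene set,
    # gene count and download links) and 'shares genes with' edges between networks whose gene sets
    # overlap, keeping at most max_edges edges per source node.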
similarity_graph = NdexGraph()
similarity_graph.set_name(name)
if ext_link_paths is None:
ext_link_paths = {
'BiopaxFile': 'NCI_PID_BIOPAX_2016-06-08-PC2v8-API',
'GSEAFile': 'NCI_PID_GSEA_2017-04-06',
'serverSubDomain': 'public'
}
set_name_to_node_id_map = {}
id_sets = {}
remove_super_nodes = []
for network_id in e_set.id_set_map:
id_set_object = e_set.id_set_map[network_id]
network_name = id_set_object.name
id_set = id_set_object.set
id_sets[network_id] = id_set
att = {id_attribute: list(id_set)}
node_id = similarity_graph.add_new_node(network_name, att)
gene_count = float(len(id_set_object.gene_set))
similarity_graph.set_node_attribute(node_id, "gene count", gene_count)
similarity_graph.set_node_attribute(node_id, "width", sqrt(gene_count))
similarity_graph.set_node_attribute(node_id, "ndex:internalLink", "[%s](%s)" % ("<i class='fa fa-eye' aria-hidden='true'></i> View network<br />",network_id))
if ext_link_paths is not None:
externalLink1 = "[%s](%s)" %("<i class='fa fa-download' aria-hidden='true'></i> BioPAX3 file (.owl)<br />","ftp://ftp.ndexbio.org/" + ext_link_paths.get('BiopaxFile') + "/" + network_name.replace(" ", "%20") + ".owl.gz")
externalLink2 = "[%s](%s)" % ("<i class='fa fa-download' aria-hidden='true'></i> GSEA gene set (.grp)<br />","ftp://ftp.ndexbio.org/" + ext_link_paths.get('GSEAFile') + "/" + network_name.replace(" ", "%20") + ".grp.gz")
externalLink3 = "[%s](%s)" % ("<i class='fa fa-download' aria-hidden='true'></i> CX file (.cx)","http://" + ext_link_paths.get('serverSubDomain') + ".ndexbio.org/v2/network/" + network_id + "?download=true")
similarity_graph.set_node_attribute(node_id, "ndex:externalLink", [externalLink1, externalLink2, externalLink3])
if(network_name == "NCI Pathway Interaction Database - Final Revision"):
remove_super_nodes.append(node_id)
set_name_to_node_id_map[network_id] = node_id
source_similarities = {}
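    # first pass: for each pair of networks compute the gene overlap and rank candidate neighbours
    # by how much of the other network's gene set is subsumed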
for network_id_1 in id_sets.keys():
source_node_id = set_name_to_node_id_map[network_id_1]
list_1 = list(id_sets[network_id_1])
set_1_size = len(list_1)
similarities = []
for network_id_2 in id_sets.keys():
if network_id_1 != network_id_2:
set_1 = id_sets[network_id_1]
set_2 = id_sets[network_id_2]
target_node_id = set_name_to_node_id_map[network_id_2]
list_2 = list(id_sets[network_id_2])
set_2_size = len(list_2)
overlap = list(set_1.intersection(set_2))
size_overlap=len(overlap)
if size_overlap != 0:
subsumes = size_overlap/set_2_size
#subsumes_1 = size_overlap/set_2_size
#subsumes_2 = size_overlap/set_1_size
#subsumes = min(subsumes_1, subsumes_2)
if size_overlap > 3:
print "overlap: %s %s" % (size_overlap, overlap)
similarity = {"source_node_id": source_node_id,
"target_node_id": target_node_id,
"subsumes": subsumes}
similarity["atts"] = {"subsumes": subsumes,
"overlap": overlap,
"overlap_size": size_overlap}
similarities.append(similarity)
else:
print "no overlap"
# rank the similarities
similarities = sorted(similarities, key=operator.itemgetter('subsumes'), reverse=True)
source_similarities[network_id_1] = similarities
temp_similarity_graph = similarity_graph.copy()
for network_id, similarities in source_similarities.iteritems():
count = 0
for similarity in similarities:
if count >= max_edges:
break
if count == 0 or similarity["subsumes"] > min_subsumption:
atts = similarity["atts"]
source_node_id = similarity["source_node_id"]
target_node_id = similarity["target_node_id"]
edge_id = temp_similarity_graph.add_edge_between(source_node_id, target_node_id, attr_dict=atts)
count = count + 1
# always include the most similar node to make sure that each node has at least one edge and the graph is connected
# don't connect more than max_edges
for network_id, similarities in source_similarities.iteritems():
count = 0
for similarity in similarities:
if count >= max_edges:
break
if count == 0 or similarity["subsumes"] > min_subsumption:
atts = similarity["atts"]
source_node_id = similarity["source_node_id"]
source_gene_count = similarity_graph.node[source_node_id].get("gene count")
target_node_id = similarity["target_node_id"]
target_gene_count = similarity_graph.node[target_node_id].get("gene count")
edge_overlap = float(atts["overlap_size"])
# If the edge is pointing from low gene count to high gene count we proceed.
# if the edge is pointing from high gene count to low count we check
# the edge map to see if the converse edge exists. If so we skip adding and
# let the converse edge populate
# if there is no acceptable edge we switch the source and target and proceed
if(target_gene_count > source_gene_count):
edge_function = edge_overlap / (source_gene_count + target_gene_count - edge_overlap)
edge_id = similarity_graph.add_edge_between(source_node_id, target_node_id, attr_dict=atts, interaction='shares genes with')
similarity_graph.set_edge_attribute(edge_id, "overlap metric", similarity["subsumes"])
similarity_graph.set_edge_attribute(edge_id, "edge function", edge_function)
if(similarity["subsumes"] > 0.4):
similarity_graph.set_edge_attribute(edge_id, "strength", "high")
else:
similarity_graph.set_edge_attribute(edge_id, "strength", "low")
elif(target_gene_count == source_gene_count):
if(source_node_id not in similarity_graph[target_node_id] and target_node_id not in similarity_graph[source_node_id]):
edge_function = edge_overlap / (source_gene_count + target_gene_count - edge_overlap)
edge_id = similarity_graph.add_edge_between(source_node_id, target_node_id, attr_dict=atts, interaction='shares genes with')
similarity_graph.set_edge_attribute(edge_id, "overlap metric", similarity["subsumes"])
similarity_graph.set_edge_attribute(edge_id, "edge function", edge_function)
if(similarity["subsumes"] > 0.4):
similarity_graph.set_edge_attribute(edge_id, "strength", "high")
else:
similarity_graph.set_edge_attribute(edge_id, "strength", "low")
else:
if(source_node_id in temp_similarity_graph[target_node_id]):
print "Converse edge exists. Skipping " + str(source_node_id) + ", " + str(target_node_id)
else:
edge_function = edge_overlap / (source_gene_count + target_gene_count - edge_overlap)
edge_id = similarity_graph.add_edge_between(target_node_id, source_node_id, attr_dict=atts, interaction='shares genes with')
similarity_graph.set_edge_attribute(edge_id, "overlap metric", similarity["subsumes"])
similarity_graph.set_edge_attribute(edge_id, "edge function", edge_function)
if(similarity["subsumes"] > 0.4):
similarity_graph.set_edge_attribute(edge_id, "strength", "high")
else:
similarity_graph.set_edge_attribute(edge_id, "strength", "low")
count = count + 1
for remove_this_node in remove_super_nodes:
similarity_graph.remove_node(remove_this_node)
return similarity_graph
| bsd-2-clause | 3,672,040,321,551,617,000 | 51.027778 | 249 | 0.577256 | false | 3.687008 | false | false | false |
sserrot/champion_relationships | venv/Lib/site-packages/pip/_internal/utils/setuptools_build.py | 18 | 5058 | import sys
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import List, Optional, Sequence
# Shim to wrap setup.py invocation with setuptools
#
# We set sys.argv[0] to the path to the underlying setup.py file so
# setuptools / distutils don't take the path to the setup.py to be "-c" when
# invoking via the shim. This avoids e.g. the following manifest_maker
# warning: "warning: manifest_maker: standard file '-c' not found".
_SETUPTOOLS_SHIM = (
"import sys, setuptools, tokenize; sys.argv[0] = {0!r}; __file__={0!r};"
"f=getattr(tokenize, 'open', open)(__file__);"
"code=f.read().replace('\\r\\n', '\\n');"
"f.close();"
"exec(compile(code, __file__, 'exec'))"
)
def make_setuptools_shim_args(
setup_py_path, # type: str
global_options=None, # type: Sequence[str]
no_user_config=False, # type: bool
unbuffered_output=False # type: bool
):
# type: (...) -> List[str]
"""
Get setuptools command arguments with shim wrapped setup file invocation.
:param setup_py_path: The path to setup.py to be wrapped.
:param global_options: Additional global options.
:param no_user_config: If True, disables personal user configuration.
:param unbuffered_output: If True, adds the unbuffered switch to the
argument list.
"""
args = [sys.executable]
if unbuffered_output:
args += ["-u"]
args += ["-c", _SETUPTOOLS_SHIM.format(setup_py_path)]
if global_options:
args += global_options
if no_user_config:
args += ["--no-user-cfg"]
return args
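# Illustrative example (not part of the original module), using a hypothetical path:
# make_setuptools_shim_args("pkg/setup.py", no_user_config=True, unbuffered_output=True)
# returns [sys.executable, "-u", "-c", <shim wrapping pkg/setup.py>, "--no-user-cfg"].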
def make_setuptools_bdist_wheel_args(
setup_py_path, # type: str
global_options, # type: Sequence[str]
build_options, # type: Sequence[str]
destination_dir, # type: str
):
# type: (...) -> List[str]
# NOTE: Eventually, we'd want to also -S to the flags here, when we're
# isolating. Currently, it breaks Python in virtualenvs, because it
# relies on site.py to find parts of the standard library outside the
# virtualenv.
args = make_setuptools_shim_args(
setup_py_path,
global_options=global_options,
unbuffered_output=True
)
args += ["bdist_wheel", "-d", destination_dir]
args += build_options
return args
def make_setuptools_clean_args(
setup_py_path, # type: str
global_options, # type: Sequence[str]
):
# type: (...) -> List[str]
args = make_setuptools_shim_args(
setup_py_path,
global_options=global_options,
unbuffered_output=True
)
args += ["clean", "--all"]
return args
def make_setuptools_develop_args(
setup_py_path, # type: str
global_options, # type: Sequence[str]
install_options, # type: Sequence[str]
no_user_config, # type: bool
prefix, # type: Optional[str]
home, # type: Optional[str]
use_user_site, # type: bool
):
# type: (...) -> List[str]
assert not (use_user_site and prefix)
args = make_setuptools_shim_args(
setup_py_path,
global_options=global_options,
no_user_config=no_user_config,
)
args += ["develop", "--no-deps"]
args += install_options
if prefix:
args += ["--prefix", prefix]
if home is not None:
args += ["--home", home]
if use_user_site:
args += ["--user", "--prefix="]
return args
def make_setuptools_egg_info_args(
setup_py_path, # type: str
egg_info_dir, # type: Optional[str]
no_user_config, # type: bool
):
# type: (...) -> List[str]
args = make_setuptools_shim_args(
setup_py_path, no_user_config=no_user_config
)
args += ["egg_info"]
if egg_info_dir:
args += ["--egg-base", egg_info_dir]
return args
def make_setuptools_install_args(
setup_py_path, # type: str
global_options, # type: Sequence[str]
install_options, # type: Sequence[str]
record_filename, # type: str
root, # type: Optional[str]
prefix, # type: Optional[str]
header_dir, # type: Optional[str]
home, # type: Optional[str]
use_user_site, # type: bool
no_user_config, # type: bool
pycompile # type: bool
):
# type: (...) -> List[str]
assert not (use_user_site and prefix)
assert not (use_user_site and root)
args = make_setuptools_shim_args(
setup_py_path,
global_options=global_options,
no_user_config=no_user_config,
unbuffered_output=True
)
args += ["install", "--record", record_filename]
args += ["--single-version-externally-managed"]
if root is not None:
args += ["--root", root]
if prefix is not None:
args += ["--prefix", prefix]
if home is not None:
args += ["--home", home]
if use_user_site:
args += ["--user", "--prefix="]
if pycompile:
args += ["--compile"]
else:
args += ["--no-compile"]
if header_dir:
args += ["--install-headers", header_dir]
args += install_options
return args
| mit | 8,358,058,395,605,124,000 | 26.944751 | 77 | 0.602807 | false | 3.431479 | true | false | false |
rienafairefr/nYNABapi | pynYNAB/schema/Client.py | 2 | 6165 | import logging
from functools import wraps
from sqlalchemy import Column, String, ForeignKey, Integer
from sqlalchemy.orm import relationship
from sqlalchemy.orm import sessionmaker
from pynYNAB.exceptions import BudgetNotFound, WrongPushException
from pynYNAB.schema import Base, Catalog, Budget, Knowledge, Payee, Transaction
LOG = logging.getLogger(__name__)
def operation(expected_delta):
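    # decorator: run the wrapped mutation, then push to the server and verify that exactly
    # expected_delta entities changed (push raises WrongPushException otherwise)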
def operation_decorator(fn):
@wraps(fn)
def wrapped(self, *args, **kwargs):
fn(self, *args, **kwargs)
LOG.debug('push after '+fn.__name__)
self.push(expected_delta)
return wrapped
return operation_decorator
class nYnabClient_(Base):
__tablename__ = "nynabclients"
id = Column(String, primary_key=True)
catalog_id = Column(ForeignKey('catalog.id'))
catalog = relationship('Catalog')
budget_id = Column(ForeignKey('budget.id'))
budget = relationship('Budget')
budget_version_id = Column(String)
budget_name = Column(String)
starting_device_knowledge = Column(Integer, default=0)
ending_device_knowledge = Column(Integer, default=0)
connection = None
session = None
@property
def user_id(self):
return self.id
def add_missing(self):
self.catalog = Catalog()
self.catalog.knowledge = Knowledge()
self.budget = Budget()
self.budget.knowledge = Knowledge()
self.session.add(self.catalog)
self.session.add(self.budget)
self.session.commit()
def sync(self, update_keys=None):
LOG.debug('Client.sync')
self.catalogClient.sync(update_keys)
self.select_budget(self.budget_name)
self.budgetClient.sync(update_keys)
self.catalogClient.clear_changed_entities()
self.budgetClient.clear_changed_entities()
if self.budget_version_id is None and self.budget_name is not None:
raise BudgetNotFound()
def push(self, expected_delta=1):
# ending-starting represents the number of modifications that have been done to the data ?
LOG.debug('Client.push')
catalog_changed_entities = self.catalogClient.get_changed_apidict()
budget_changed_entities = self.budgetClient.get_changed_apidict()
delta = sum(len(l) for k, l in catalog_changed_entities.items()) + \
sum(len(l) for k, l in budget_changed_entities.items())
if delta != expected_delta:
raise WrongPushException(expected_delta, delta)
if any(catalog_changed_entities) or any(budget_changed_entities):
self.ending_device_knowledge = self.starting_device_knowledge + 1
self.catalogClient.push()
self.budgetClient.push()
self.catalogClient.clear_changed_entities()
self.budgetClient.clear_changed_entities()
self.starting_device_knowledge = self.ending_device_knowledge
self.session.commit()
@operation(3)
def add_account(self, account, balance, balance_date):
payee = Payee(
entities_account_id=account.id,
enabled=True,
auto_fill_subcategory_enabled=True,
auto_fill_memo_enabled=False,
auto_fill_amount_enabled=False,
rename_on_import_enabled=False,
name="Transfer : %s" % account.account_name
)
immediateincomeid = next(
s.id for s in self.budget.be_subcategories if s.internal_name == 'Category/__ImmediateIncome__')
startingbalanceid = next(p.id for p in self.budget.be_payees if p.internal_name == 'StartingBalancePayee')
transaction = Transaction(
accepted=True,
amount=balance,
entities_subcategory_id=immediateincomeid,
cash_amount=0,
cleared='Cleared',
date=balance_date,
entities_account_id=account.id,
credit_amount=0,
entities_payee_id=startingbalanceid,
is_tombstone=False
)
self.budget.be_accounts.append(account)
self.budget.be_payees.append(payee)
self.budget.be_transactions.append(transaction)
@operation(1)
def delete_account(self, account):
self.budget.be_accounts.remove(account)
@operation(1)
def add_transaction(self, transaction):
self.budget.be_transactions.append(transaction)
def add_transactions(self, transaction_list):
@operation(len(transaction_list))
def _add_transactions_method(self, tr_list):
for tr in tr_list:
self.budget.be_transactions.append(tr)
return _add_transactions_method(transaction_list)
@operation(1)
def delete_transaction(self, transaction):
self.budget.be_transactions.remove(transaction)
@operation(1)
def delete_budget(self, budget_name):
for budget in self.catalog.ce_budgets:
if budget.budget_name == budget_name:
self.catalog.ce_budgets.remove(budget)
def select_budget(self, budget_name):
self.budget_version_id = None
for budget_version in self.catalog.ce_budget_versions:
if budget_version.version_name == budget_name:
self.budget_version_id = budget_version.id
if self.budget_version_id is None:
raise BudgetNotFound()
def create_budget(self, budget_name):
import json
currency_format = dict(
iso_code='USD',
example_format='123,456.78',
decimal_digits=2,
decimal_separator='.',
symbol_first=True,
group_separator=',',
currency_symbol='$',
display_symbol=True
)
date_format = dict(
format='MM/DD/YYYY'
)
self.connection.dorequest(opname='CreateNewBudget',
request_dic={
"budget_name": budget_name,
"currency_format": json.dumps(currency_format),
"date_format": json.dumps(date_format)
})
| mit | 5,320,574,904,237,841,000 | 34.028409 | 114 | 0.618329 | false | 4.029412 | false | false | false |
plumgrid/plumgrid-nova | nova/api/openstack/compute/contrib/console_output.py | 11 | 3611 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
# Copyright 2011 Grid Dynamics
# Copyright 2011 Eldar Nugaev, Kirill Shileev, Ilya Alekseyev
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import webob
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
from nova.openstack.common.gettextutils import _
authorize = extensions.extension_authorizer('compute', 'console_output')
class ConsoleOutputController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(ConsoleOutputController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
@wsgi.action('os-getConsoleOutput')
def get_console_output(self, req, id, body):
"""Get text console output."""
context = req.environ['nova.context']
authorize(context)
try:
instance = self.compute_api.get(context, id)
except exception.NotFound:
raise webob.exc.HTTPNotFound(_('Instance not found'))
try:
length = body['os-getConsoleOutput'].get('length')
except (TypeError, KeyError):
raise webob.exc.HTTPBadRequest(_('os-getConsoleOutput malformed '
'or missing from request body'))
if length is not None:
try:
# NOTE(maurosr): cast length into a string before cast into an
# integer to avoid thing like: int(2.5) which is 2 instead of
# raise ValueError like it would when we try int("2.5"). This
# can be removed once we have api validation landed.
int(str(length))
except ValueError:
raise webob.exc.HTTPBadRequest(_('Length in request body must '
'be an integer value'))
try:
output = self.compute_api.get_console_output(context,
instance,
length)
except exception.NotFound:
raise webob.exc.HTTPNotFound(_('Unable to get console'))
except exception.InstanceNotReady as e:
raise webob.exc.HTTPConflict(explanation=e.format_message())
# XML output is not correctly escaped, so remove invalid characters
remove_re = re.compile('[\x00-\x08\x0B-\x1F]')
output = remove_re.sub('', output)
return {'output': output}
class Console_output(extensions.ExtensionDescriptor):
"""Console log output support, with tailing ability."""
name = "ConsoleOutput"
alias = "os-console-output"
namespace = ("http://docs.openstack.org/compute/ext/"
"os-console-output/api/v2")
updated = "2011-12-08T00:00:00+00:00"
def get_controller_extensions(self):
controller = ConsoleOutputController()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
| apache-2.0 | -2,712,143,437,848,363,500 | 37.827957 | 79 | 0.625312 | false | 4.463535 | false | false | false |
lipis/hurry-app | main/user.py | 2 | 3752 | # -*- coding: utf-8 -*-
from flask.ext import wtf
from google.appengine.ext import ndb
import flask
import auth
import model
import util
from main import app
###############################################################################
# User List
###############################################################################
@app.route('/_s/user/', endpoint='user_list_service')
@app.route('/user/')
@auth.admin_required
def user_list():
user_dbs, more_cursor = util.retrieve_dbs(
model.User.query(),
limit=util.param('limit', int),
cursor=util.param('cursor'),
order=util.param('order') or '-created',
name=util.param('name'),
admin=util.param('admin', bool),
)
if flask.request.path.startswith('/_s/'):
return util.jsonify_model_dbs(user_dbs, more_cursor)
return flask.render_template(
'user/user_list.html',
html_class='user-list',
title='User List',
user_dbs=user_dbs,
more_url=util.generate_more_url(more_cursor),
has_json=True,
)
###############################################################################
# User Update
###############################################################################
class UserUpdateForm(wtf.Form):
username = wtf.StringField('Username',
[wtf.validators.required(), wtf.validators.length(min=3)],
filters=[util.email_filter],
)
name = wtf.StringField('Name',
[wtf.validators.required()], filters=[util.strip_filter],
)
email = wtf.StringField('Email',
[wtf.validators.optional(), wtf.validators.email()],
filters=[util.email_filter],
)
admin = wtf.BooleanField('Admin')
active = wtf.BooleanField('Active')
@app.route('/user/<int:user_id>/update/', methods=['GET', 'POST'])
@auth.admin_required
def user_update(user_id):
user_db = model.User.get_by_id(user_id)
if not user_db:
flask.abort(404)
form = UserUpdateForm(obj=user_db)
if form.validate_on_submit():
if not util.is_valid_username(form.username.data):
form.username.errors.append('This username is invalid.')
elif not is_username_available(form.username.data, user_db):
form.username.errors.append('This username is taken.')
else:
form.populate_obj(user_db)
if auth.current_user_id() == user_db.key.id():
user_db.admin = True
user_db.active = True
user_db.put()
return flask.redirect(flask.url_for('user_list', order='-modified'))
if flask.request.path.startswith('/_s/'):
return util.jsonify_model_db(user_db)
return flask.render_template(
'user/user_update.html',
title=user_db.name,
html_class='user-update',
form=form,
user_db=user_db,
)
###############################################################################
# User Delete
###############################################################################
@app.route('/_s/user/delete/', methods=['DELETE'])
@auth.admin_required
def user_delete_service():
user_keys = util.param('user_keys', list)
user_db_keys = [ndb.Key(urlsafe=k) for k in user_keys]
delete_user_dbs(user_db_keys)
return flask.jsonify({
'result': user_keys,
'status': 'success',
})
@ndb.transactional(xg=True)
def delete_user_dbs(user_db_keys):
ndb.delete_multi(user_db_keys)
###############################################################################
# Helpers
###############################################################################
def is_username_available(username, self_db=None):
user_dbs, more_cursor = util.retrieve_dbs(
model.User.query(),
username=username,
limit=2,
)
c = len(user_dbs)
return not (c == 2 or c == 1 and self_db and self_db.key != user_dbs[0].key)
| mit | 3,342,176,222,695,976,000 | 29.016 | 79 | 0.53678 | false | 3.782258 | false | false | false |
mlperf/training_results_v0.7 | Google/benchmarks/dlrm/implementations/dlrm-research-TF-tpu-v4-128/dlrm_estimator_main.py | 2 | 8873 | # Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""DLRM implementation with REDACTED embeddings via TPUEstimator."""
import os
import timeit
import REDACTED
from absl import app as absl_app
from absl import flags
import tensorflow.compat.v1 as tf
from REDACTED.tensorflow.contrib.tpu.python.tpu import async_checkpoint
from REDACTED.tensorflow_models.mlperf.models.rough.dlrm import dataloader
from REDACTED.tensorflow_models.mlperf.models.rough.dlrm import dlrm
from REDACTED.tensorflow_models.mlperf.models.rough.dlrm import feature_config as fc
from REDACTED.tensorflow_models.mlperf.models.rough.dlrm_tf2 import common
FLAGS = flags.FLAGS
flags.DEFINE_string("master", default=None, help="Address of the master.")
flags.DEFINE_string(name="model_dir", default=None, help="Model directory.")
def create_tpu_estimator_columns(feature_columns, params, iters_per_loop=200):
"""Creates TPU estimator using feature columns.
Args:
feature_columns: Feature columns to use.
params: Hparams for the model.
iters_per_loop: Number of iterations to use per device loop invocation.
Returns:
An instance of TPUEstimator to use when training model.
"""
dlrm_tpu_config = tf.estimator.tpu.TPUConfig(
iterations_per_loop=iters_per_loop,
per_host_input_for_training=tf.estimator.tpu.InputPipelineConfig
.PER_HOST_V2)
run_config = tf.estimator.tpu.RunConfig(
master=FLAGS.master, tpu_config=dlrm_tpu_config)
embedding_config_spec = tf.estimator.tpu.experimental.EmbeddingConfigSpec(
feature_columns=feature_columns,
optimization_parameters=tf.tpu.experimental
.StochasticGradientDescentParameters(learning_rate=FLAGS.learning_rate),
pipeline_execution_with_tensor_core=FLAGS.pipeline_execution,
partition_strategy=FLAGS.partition_strategy)
# Key "batch_size" is reserved by TPUEstimator.
tpu_params = {k: v for k, v in params.items() if k != "batch_size"}
return tf.estimator.tpu.TPUEstimator(
model_fn=dlrm.create_model_fn(),
config=run_config,
use_tpu=True,
train_batch_size=params["batch_size"],
params=tpu_params,
model_dir=FLAGS.model_dir,
embedding_config_spec=embedding_config_spec)
def create_tpu_estimator_dicts(feature_to_config_dict,
table_to_config_dict,
params,
iters_per_loop=200):
"""Creates TPU estimator using feature config dicts.
Args:
feature_to_config_dict: Feature config dicts using TableConfig values.
table_to_config_dict: Feature config dicts using FeatureConfig values.
params: Hparams for the model.
iters_per_loop: Number of iterations to use per device loop invocation.
Returns:
An instance of TPUEstimator to use when training model.
"""
per_host_train = tf.estimator.tpu.InputPipelineConfig.PER_HOST_V2
# SLICED: hangs - not supported with REDACTED?
# PER_HOST_V1: the batch dimension of the dense inputs is not sharded
# per_host_eval = tf.estimator.tpu.InputPipelineConfig.SLICED
per_host_eval = tf.estimator.tpu.InputPipelineConfig.PER_HOST_V1
# per_host_eval = tf.estimator.tpu.InputPipelineConfig.PER_HOST_V2
dlrm_tpu_config = tf.estimator.tpu.TPUConfig(
iterations_per_loop=iters_per_loop,
per_host_input_for_training=per_host_train,
eval_training_input_configuration=per_host_eval,
experimental_host_call_every_n_steps=FLAGS.summary_every_n_steps)
run_config = tf.estimator.tpu.RunConfig(
master=FLAGS.master,
model_dir=FLAGS.model_dir,
# Disable checkpointing and use async checkpointing instead.
save_checkpoints_steps=None,
save_checkpoints_secs=None,
log_step_count_steps=FLAGS.summary_every_n_steps,
tpu_config=dlrm_tpu_config
)
embedding_config_spec = tf.estimator.tpu.experimental.EmbeddingConfigSpec(
table_to_config_dict=table_to_config_dict,
feature_to_config_dict=feature_to_config_dict,
optimization_parameters=tf.tpu.experimental
.StochasticGradientDescentParameters(learning_rate=FLAGS.learning_rate),
pipeline_execution_with_tensor_core=FLAGS.pipeline_execution,
# (for quality) gradient_multiplier
partition_strategy=FLAGS.partition_strategy,
)
# Key "batch_size" is reserved by TPUEstimator.
tpu_params = {k: v for k, v in params.items() if k != "batch_size"}
return tf.estimator.tpu.TPUEstimator(
model_fn=dlrm.create_model_fn(),
config=run_config,
use_tpu=True,
train_batch_size=params["batch_size"],
eval_batch_size=params["eval_batch_size"],
params=tpu_params,
embedding_config_spec=embedding_config_spec)
def load_global_step_from_checkpoint_dir(checkpoint_dir):
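  # Return the global step recorded in the latest checkpoint under checkpoint_dir, or 0 if it cannot be read.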
try:
    checkpoint_reader = tf.train.NewCheckpointReader(
        tf.train.latest_checkpoint(checkpoint_dir))
return checkpoint_reader.get_tensor(tf.GraphKeys.GLOBAL_STEP)
except: # pylint: disable=bare-except
return 0
def main(_):
params = common.get_params()
feature_to_config_dict, table_to_config_dict = fc.get_feature_tbl_config(
params)
# Builds an estimator using FeatureConfig and TableConfig, as defined in
# third_party/tensorflow/python/tpu/tpu_embedding.py
estimator = create_tpu_estimator_dicts(
feature_to_config_dict,
table_to_config_dict,
params,
iters_per_loop=FLAGS.summary_every_n_steps)
train_input_fn = dataloader.CriteoTsvReader(
file_path="/REDACTED/mb-d/home/tpu-perf-team/tayo/criteo/terabyte_mlperf/rs=6.3/train/terabyte_train*",
is_training=True,
use_synthetic_data=params["use_synthetic_data"])
eval_input_fn = dataloader.CriteoTsvReader(
file_path="/readahead/128M/REDACTED/iz-d/home/tpu-perf-team/tayo/criteo/terabyte_mlperf/rs=6.3/eval/terabyte_eval*",
is_training=False,
use_synthetic_data=params["use_synthetic_data"])
if FLAGS.mode == "eval":
# From Pytorch logging:
# num eval batches: 1361, each 64K
# num train batches: 64014, each 64K
# 64013*4 @ 16k
# From other source:
# num_train_samples = 4195197692
if params["terabyte"]:
# TODO(tayo): The following number drops remainder.
num_eval_records = 89128960
num_eval_steps = num_eval_records // FLAGS.eval_batch_size
cycle_idx = 0
# Run evaluation when there appears a new checkpoint.
for ckpt in tf.train.checkpoints_iterator(FLAGS.model_dir, timeout=None):
try:
tf.logging.info("Beginning eval iteration {}.".format(cycle_idx + 1))
cycle_idx = cycle_idx + 1
start_time = timeit.default_timer()
eval_metrics = estimator.evaluate(
input_fn=eval_input_fn,
steps=num_eval_steps,
checkpoint_path=ckpt
# checkpoint_path="/REDACTED/mb-d/home/tpu-perf-team/tayo/dlrm/model_dir_full_precision_0/model.ckpt-256000",
)
tf.logging.info(
"Eval results: {}. Elapsed eval time: {:.4f}".format(
eval_metrics,
timeit.default_timer() - start_time))
# Terminate eval job when final checkpoint is reached
current_step = int(os.path.basename(ckpt).split("-")[1])
if current_step >= FLAGS.train_steps:
tf.logging.info(
"Evaluation finished after training step %d", current_step)
break
except tf.errors.NotFoundError:
tf.logging.info(
"Checkpoint %s no longer exists, skipping checkpoint", ckpt)
else: # FLAGS.mode == "train"
current_step = load_global_step_from_checkpoint_dir(FLAGS.model_dir)
tf.logging.info("Training for {} steps at batch_size {}.".format(
FLAGS.train_steps, FLAGS.batch_size))
start_time = timeit.default_timer()
hooks = []
hooks.append(
async_checkpoint.AsyncCheckpointSaverHook(
checkpoint_dir=FLAGS.model_dir,
save_steps=128000))
estimator.train(
input_fn=train_input_fn,
max_steps=FLAGS.train_steps,
hooks=hooks
)
if __name__ == "__main__":
tf.logging.set_verbosity(tf.logging.INFO)
tf.disable_v2_behavior()
common.define_dlrm_flags()
absl_app.run(main)
| apache-2.0 | 3,318,788,356,499,981,000 | 37.746725 | 122 | 0.691649 | false | 3.553464 | true | false | false |
simphony/tornado-webapi | tornadowebapi/traitlets.py | 1 | 3181 | """Our traits must be able to deal with Absent values, for two reasons.
First, the fact that we don't specify an optional value does not imply that
the resulting resource will have a default.
Second, when we do modification (PATCH) operations, we only specify the values
we want to change.
In practice, this means that all traits are optional. Mandatory entries
are only enforced when creating new resources or setting from scratch."""
import traitlets as _traitlets
HasTraits = _traitlets.HasTraits
TraitError = _traitlets.TraitError
Absent = _traitlets.Sentinel("Absent", "tornadowebapi.traitlets")
class Int(_traitlets.Int):
"""An int trait, with support for lack of specified value"""
default_value = Absent
def validate(self, obj, value):
if value == Absent:
return value
return super().validate(obj, value)
class Unicode(_traitlets.Unicode):
default_value = Absent
def info(self):
qualifiers = []
if self.metadata.get("strip", False):
qualifiers.append("strip")
if not self.metadata.get("allow_empty", True):
qualifiers.append("not empty")
text = ", ".join(qualifiers)
if len(text):
return self.info_text + "("+text+")"
else:
return self.info_text
def validate(self, obj, value):
if value == Absent:
return value
value = super().validate(obj, value)
if self.metadata.get("strip", False):
value = value.strip()
if not self.metadata.get("allow_empty", True) and len(value) == 0:
self.error(obj, value)
return value
class Label(Unicode):
"""A label is a string that is not none and is automatically
stripped"""
def __init__(self):
super().__init__(allow_empty=False, strip=True)
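# Illustrative use (not from the original module): declaring name = Label() on a HasTraits
# resource stores "  Alice  " as "Alice" and rejects the empty string with a TraitError.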
class Enum(_traitlets.Enum):
default_value = Absent
def validate(self, obj, value):
if value == Absent:
return value
return super().validate(obj, value)
class Bool(_traitlets.Bool):
default_value = Absent
def validate(self, obj, value):
if value == Absent:
return value
return super().validate(obj, value)
class Float(_traitlets.Float):
default_value = Absent
def validate(self, obj, value):
if value == Absent:
return value
return super().validate(obj, value)
class List(_traitlets.List):
def make_dynamic_default(self):
return Absent
def validate(self, obj, value):
if value == Absent:
return value
return super().validate(obj, value)
class Dict(_traitlets.Dict):
def make_dynamic_default(self):
return Absent
def validate(self, obj, value):
if value == Absent:
return value
return super().validate(obj, value)
class OneOf(_traitlets.Instance):
"""Marks a one to one relationship with a resource or resourcefragment."""
def make_dynamic_default(self):
return Absent
def validate(self, obj, value):
if value == Absent:
return value
return super().validate(obj, value)
| bsd-3-clause | 8,428,902,971,515,318,000 | 24.047244 | 78 | 0.624646 | false | 4.026582 | false | false | false |
frjaraur/python-deployer | deployer/options.py | 2 | 1234 |
"""
Runtime options.
"""
class Option(object):
"""
Shell option.
"""
def __init__(self, values, current_value):
self.values = values
self._value = current_value
self._callbacks = []
def on_change(self, callback):
self._callbacks.append(callback)
def get(self):
return self._value
def set(self, value):
self._value = value
for c in self._callbacks:
c()
class BooleanOption(Option):
def __init__(self, current_value):
assert isinstance(current_value, bool)
Option.__init__(self, ['on', 'off'], 'on' if current_value else 'off')
def get_value_as_bool(self):
return self._value == 'on'
class Options(object):
def __init__(self):
self._options = {
'keep-panes-open': BooleanOption(False),
# Other options to implement:
# 'colorscheme': Option(['dark_background', 'light_background'], 'dark_background'),
# 'interactive': BooleanOption(True),
# 'interactive': BooleanOption(True),
}
def __getitem__(self, name):
return self._options[name]
def items(self):
return self._options.items()
| bsd-2-clause | -8,426,590,869,415,820,000 | 22.730769 | 99 | 0.556726 | false | 4.045902 | false | false | false |
hltdi/guampa | scripts/store_wikipedia_dump.py | 1 | 2284 | #!/usr/bin/env python3
"""
Script to iterate over a directory full of wikipedia dumps as produced by
WikiExtractor.py and add them to the database.
"""
import glob
import os
import constants
import model
import sqlalchemy
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from model import Document
from model import Sentence
from model import Tag
engine = create_engine(constants.THEDB)
Session = sessionmaker(bind=engine)
Base = model.Base
Base.metadata.create_all(engine)
def save_document(title, session):
"""Create a new document; return it."""
## XXX(alexr): need to handle source languages...
document = Document(title, "bob", "en")
session.add(document)
session.commit()
print("added document:", document)
return document
def get_tag(tagname, session):
"""Return or create a Tag object for this tag name."""
tag = session.query(Tag).filter_by(text=tagname).first()
if not tag:
tag = Tag(tagname)
session.add(tag)
session.commit()
return tag
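# Input format expected by iterate_through_file below: a line starting with
# "###" begins a new article and carries "title|||tag1|||tag2|...", while
# every other line is treated as one sentence of the current article.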
def iterate_through_file(fn):
docid = None
session = Session()
with open(fn) as infile:
for line in infile:
line = line.strip()
if line.startswith("###"):
splitted = line[3:].split("|||")
title = splitted[0]
## save document, get docid.
document = save_document(title, session)
docid = document.id
tagnames = splitted[1:]
## tag that document with these tags.
for tagname in tagnames:
tag = get_tag(tagname, session)
document.tags.append(tag)
continue
## Otherwise, we have a sentence.
assert docid, "We're not currently in a document??"
sent = Sentence(line, docid)
session.add(sent)
session.commit()
def main():
import sys
document_dir = sys.argv[1]
fns = sorted(glob.glob("{0}/wiki*".format(document_dir)))
print("going through {0} files, each with many articles.".format(len(fns)))
for fn in fns:
iterate_through_file(fn)
if __name__ == "__main__": main()
| gpl-3.0 | -7,532,252,105,993,562,000 | 28.282051 | 79 | 0.616025 | false | 4.035336 | false | false | false |
chenyangh/Project4 | flappy.py | 1 | 16621 | from itertools import cycle
import random
import sys
from Tilecoder import numTilings, tilecode, numTiles, tiles
import pygame
from pygame.locals import *
from pylab import *
import random
FPS = 30
SCREENWIDTH = 280
SCREENHEIGHT = 512
# amount by which base can maximum shift to left
PIPEGAPSIZE = 100 # gap between upper and lower part of pipe
BASEY = SCREENHEIGHT * 0.79
# image, sound and hitmask dicts
IMAGES, SOUNDS, HITMASKS = {}, {}, {}
SHARED = {}
# list of all possible players (tuple of 3 positions of flap)
PLAYERS_LIST = (
# red bird
(
'assets/sprites/redbird-upflap.png',
'assets/sprites/redbird-midflap.png',
'assets/sprites/redbird-downflap.png',
),
# blue bird
(
# amount by which base can maximum shift to left
'assets/sprites/bluebird-upflap.png',
'assets/sprites/bluebird-midflap.png',
'assets/sprites/bluebird-downflap.png',
),
# yellow bird
(
'assets/sprites/yellowbird-upflap.png',
'assets/sprites/yellowbird-midflap.png',
'assets/sprites/yellowbird-downflap.png',
),
)
# list of backgrounds
BACKGROUNDS_LIST = (
'assets/sprites/background-day.png',
'assets/sprites/background-night.png',
)
# list of pipes
PIPES_LIST = (
'assets/sprites/pipe-green.png',
'assets/sprites/pipe-red.png',
)
def start():
global SCREEN, FPSCLOCK
pygame.init()
FPSCLOCK = pygame.time.Clock()
SCREEN = pygame.display.set_mode((SCREENWIDTH, SCREENHEIGHT))
pygame.display.set_caption('Flappy Bird')
# numbers sprites for score display
IMAGES['numbers'] = (
pygame.image.load('assets/sprites/0.png').convert_alpha(),
pygame.image.load('assets/sprites/1.png').convert_alpha(),
pygame.image.load('assets/sprites/2.png').convert_alpha(),
pygame.image.load('assets/sprites/3.png').convert_alpha(),
pygame.image.load('assets/sprites/4.png').convert_alpha(),
pygame.image.load('assets/sprites/5.png').convert_alpha(),
pygame.image.load('assets/sprites/6.png').convert_alpha(),
pygame.image.load('assets/sprites/7.png').convert_alpha(),
pygame.image.load('assets/sprites/8.png').convert_alpha(),
pygame.image.load('assets/sprites/9.png').convert_alpha()
)
# game over sprite
IMAGES['gameover'] = pygame.image.load('assets/sprites/gameover.png').convert_alpha()
# message sprite for welcome screen
IMAGES['message'] = pygame.image.load('assets/sprites/message.png').convert_alpha()
# base (ground) sprite
IMAGES['base'] = pygame.image.load('assets/sprites/base.png').convert_alpha()
# sounds
if 'win' in sys.platform:
soundExt = '.wav'
else:
soundExt = '.ogg'
SOUNDS['die'] = pygame.mixer.Sound('assets/audio/die' + soundExt)
SOUNDS['hit'] = pygame.mixer.Sound('assets/audio/hit' + soundExt)
SOUNDS['point'] = pygame.mixer.Sound('assets/audio/point' + soundExt)
SOUNDS['swoosh'] = pygame.mixer.Sound('assets/audio/swoosh' + soundExt)
SOUNDS['wing'] = pygame.mixer.Sound('assets/audio/wing' + soundExt)
# select random background sprites
randBg = random.randint(0, len(BACKGROUNDS_LIST) - 1)
IMAGES['background'] = pygame.image.load(BACKGROUNDS_LIST[randBg]).convert()
# select random player sprites
randPlayer = random.randint(0, len(PLAYERS_LIST) - 1)
IMAGES['player'] = (
pygame.image.load(PLAYERS_LIST[randPlayer][0]).convert_alpha(),
pygame.image.load(PLAYERS_LIST[randPlayer][1]).convert_alpha(),
pygame.image.load(PLAYERS_LIST[randPlayer][2]).convert_alpha(),
)
# select random pipe sprites
pipeindex = random.randint(0, len(PIPES_LIST) - 1)
IMAGES['pipe'] = (
pygame.transform.rotate(
pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(), 180),
pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(),
)
# hismask for pipes
HITMASKS['pipe'] = (
getHitmask(IMAGES['pipe'][0]),
getHitmask(IMAGES['pipe'][1]),
)
# hitmask for player
HITMASKS['player'] = (
getHitmask(IMAGES['player'][0]),
getHitmask(IMAGES['player'][1]),
getHitmask(IMAGES['player'][2]),
)
movementInfo = showWelcomeAnimation()
SHARED['score'] = 0
SHARED['playerIndex'] = 0
SHARED['loopIter'] = 0
SHARED['playerIndexGen'] = movementInfo['playerIndexGen']
SHARED['playerx'] = int(SCREENWIDTH * 0.2)
SHARED['playery'] = movementInfo['playery']
SHARED['basex'] = movementInfo['basex']
SHARED['baseShift'] = IMAGES['base'].get_width() - IMAGES['background'].get_width()
SHARED['newPipe'] = getRandomPipe()
SHARED['upperPipes'] = [{'x': SCREENWIDTH + 20, 'y': SHARED['newPipe'][0]['y']}]
SHARED['lowerPipes'] = [{'x': SCREENWIDTH + 20, 'y': SHARED['newPipe'][1]['y']}]
SHARED['pipeVelX'] = -5
SHARED['playerVelY'] = -9
SHARED['playerMaxVelY'] = 20
SHARED['playerAccY'] = 3
SHARED['playerFlapAcc'] = -7
SHARED['playerFlapped'] = False
SHARED['flap'] = 1
SHARED['birdY'] = 244
SHARED['pipeX'] = 300
SHARED['pipeY'] = 0
SHARED['reward'] = 1
SHARED['state'] = (None,None)
return movementInfo
def mainGame(movementInfo,SHARED):
score = SHARED['score']
playerIndex = SHARED['playerIndex']
loopIter = SHARED['loopIter']
playerIndexGen = SHARED['playerIndexGen']
playerx, playery = SHARED['playerx'],SHARED['playery']
basex = SHARED['basex']
baseShift = SHARED['baseShift']
# get 2 new pipes to add to upperPipes lowerPipes list
newPipe = SHARED['newPipe']
pipeVelX = SHARED['pipeVelX']
# player velocity, max velocity, downward accleration, accleration on flap
playerVelY = SHARED['playerVelY'] # player's velocity along Y, default same as playerFlapped
playerMaxVelY = SHARED['playerMaxVelY'] # max vel along Y, max descend speed
playerAccY = SHARED['playerAccY'] # players downward accleration
playerFlapAcc = SHARED['playerFlapAcc'] # players speed on flapping
playerFlapped = SHARED['playerFlapped'] # True when player flaps
flap = SHARED['flap']
# while True:
if(flap==1):
if playery > -2 * IMAGES['player'][0].get_height():
playerVelY = playerFlapAcc
playerFlapped = True
SHARED['playerVelY'] = playerVelY
SHARED['playerFlapped'] = True
# check for crash here
crashTest = checkCrash({'x': playerx, 'y': playery, 'index': playerIndex},
SHARED['upperPipes'], SHARED['lowerPipes'])
if crashTest[0]:
SHARED['reward'] = -1000#-abs(SHARED['playery']-SHARED['pipeY'])
return {
'y': playery,
'groundCrash': crashTest[1],
'basex': basex,
'upperPipes': SHARED['upperPipes'],
'lowerPipes': SHARED['lowerPipes'],
'score': score,
'playerVelY': playerVelY,
}
# check for score
playerMidPos = playerx + IMAGES['player'][0].get_width() / 2
for pipe in SHARED['upperPipes']:
pipeMidPos = pipe['x'] + IMAGES['pipe'][0].get_width() / 2
if pipeMidPos <= playerMidPos < pipeMidPos + 4:
score += 1
SHARED['score'] += 1
# SOUNDS['point'].play()
# playerIndex basex change
if (loopIter + 1) % 3 == 0:
playerIndex = playerIndexGen.next()
SHARED['playerIndex'] = playerIndex
loopIter = (loopIter + 1) % 30
basex = -((-basex + 100) % baseShift)
SHARED['loopIter'] = loopIter
SHARED['basex'] = basex
# player's movement
if playerVelY < playerMaxVelY and not playerFlapped:
playerVelY += playerAccY
SHARED['playerVelY'] = playerVelY
if playerFlapped:
playerFlapped = False
SHARED['playerFlapped'] = False
playerHeight = IMAGES['player'][playerIndex].get_height()
playery += min(playerVelY, BASEY - playery - playerHeight)
playery = max(0,playery)
SHARED['playery'] = playery
# move pipes to left
for uPipe, lPipe in zip(SHARED['upperPipes'], SHARED['lowerPipes']):
uPipe['x'] += pipeVelX
lPipe['x'] += pipeVelX
# add new pipe when first pipe is about to touch left of screen
if 0 < SHARED['upperPipes'][0]['x'] < 10:
SHARED['newPipe'] = getRandomPipe()
SHARED['upperPipes'].append(newPipe[0])
SHARED['lowerPipes'].append(newPipe[1])
# remove first pipe if its out of the screen
if SHARED['upperPipes'][0]['x'] < -IMAGES['pipe'][0].get_width():
SHARED['upperPipes'].pop(0)
SHARED['lowerPipes'].pop(0)
# draw sprites
SCREEN.blit(IMAGES['background'], (0,0))
for uPipe, lPipe in zip(SHARED['upperPipes'], SHARED['lowerPipes']):
SCREEN.blit(IMAGES['pipe'][0], (uPipe['x'], uPipe['y']))
SCREEN.blit(IMAGES['pipe'][1], (lPipe['x'], lPipe['y']))
SCREEN.blit(IMAGES['base'], (basex, BASEY))
# print score so player overlaps the score
showScore(score)
SCREEN.blit(IMAGES['player'][playerIndex], (playerx, playery))
SHARED['birdY'] = playery
SHARED['pipeX'] = lPipe['x']
SHARED['pipeY'] = lPipe['y']
#print flap
pygame.display.update()
FPSCLOCK.tick(FPS)
SHARED['reward'] = 1
SHARED['state'] = (SHARED['pipeX'],SHARED['pipeY']-SHARED['playery'])
return 1
def showWelcomeAnimation():
"""Shows welcome screen animation of flappy bird"""
# index of player to blit on screen
playerIndex = 0
playerIndexGen = cycle([0, 1, 2, 1])
# iterator used to change playerIndex after every 5th iteration
loopIter = 0
playerx = int(SCREENWIDTH * 0.2)
playery = int((SCREENHEIGHT - IMAGES['player'][0].get_height()) / 2)
messagex = int((SCREENWIDTH - IMAGES['message'].get_width()) / 2)
messagey = int(SCREENHEIGHT * 0.12)
basex = 0
# amount by which base can maximum shift to left
baseShift = IMAGES['base'].get_width() - IMAGES['background'].get_width()
# player shm for up-down motion on welcome screen
playerShmVals = {'val': 0, 'dir': 1}
return {'playery': playery + playerShmVals['val'],
'basex': basex,
'playerIndexGen': playerIndexGen,}
while True:
for event in pygame.event.get():
if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
pygame.quit()
sys.exit()
if event.type == KEYDOWN and (event.key == K_SPACE or event.key == K_UP):
# make first flap sound and return values for mainGame
SOUNDS['wing'].play()
return {
'playery': playery + playerShmVals['val'],
'basex': basex,
'playerIndexGen': playerIndexGen,
}
# adjust playery, playerIndex, basex
if (loopIter + 1) % 5 == 0:
playerIndex = playerIndexGen.next()
loopIter = (loopIter + 1) % 30
basex = -((-basex + 4) % baseShift)
playerShm(playerShmVals)
# draw sprites
SCREEN.blit(IMAGES['background'], (0,0))
SCREEN.blit(IMAGES['player'][playerIndex],
(playerx, playery + playerShmVals['val']))
SCREEN.blit(IMAGES['message'], (messagex, messagey))
SCREEN.blit(IMAGES['base'], (basex, BASEY))
pygame.display.update()
FPSCLOCK.tick(FPS)
def playerShm(playerShm):
"""oscillates the value of playerShm['val'] between 8 and -8"""
if abs(playerShm['val']) == 8:
playerShm['dir'] *= -1
if playerShm['dir'] == 1:
playerShm['val'] += 1
else:
playerShm['val'] -= 1
def getRandomPipe():
"""returns a randomly generated pipe"""
# y of gap between upper and lower pipe
gapY = random.randrange(0, int(BASEY * 0.6 - PIPEGAPSIZE))
gapY += int(BASEY * 0.2)
pipeHeight = IMAGES['pipe'][0].get_height()
pipeX = SCREENWIDTH + 10
return [
{'x': pipeX, 'y': gapY - pipeHeight}, # upper pipe
{'x': pipeX, 'y': gapY + PIPEGAPSIZE}, # lower pipe
]
def showScore(score):
"""displays score in center of screen"""
scoreDigits = [int(x) for x in list(str(score))]
totalWidth = 0 # total width of all numbers to be printed
for digit in scoreDigits:
totalWidth += IMAGES['numbers'][digit].get_width()
Xoffset = (SCREENWIDTH - totalWidth) / 2
for digit in scoreDigits:
SCREEN.blit(IMAGES['numbers'][digit], (Xoffset, SCREENHEIGHT * 0.1))
Xoffset += IMAGES['numbers'][digit].get_width()
def checkCrash(player, upperPipes, lowerPipes):
"""returns True if player collders with base or pipes."""
pi = player['index']
player['w'] = IMAGES['player'][0].get_width()
player['h'] = IMAGES['player'][0].get_height()
# if player crashes into ground
if player['y'] + player['h'] >= BASEY - 1:
return [True, True]
else:
playerRect = pygame.Rect(player['x'], player['y'],
player['w'], player['h'])
pipeW = IMAGES['pipe'][0].get_width()
pipeH = IMAGES['pipe'][0].get_height()
for uPipe, lPipe in zip(upperPipes, lowerPipes):
# upper and lower pipe rects
uPipeRect = pygame.Rect(uPipe['x'], uPipe['y'], pipeW, pipeH)
lPipeRect = pygame.Rect(lPipe['x'], lPipe['y'], pipeW, pipeH)
# player and upper/lower pipe hitmasks
pHitMask = HITMASKS['player'][pi]
uHitmask = HITMASKS['pipe'][0]
lHitmask = HITMASKS['pipe'][1]
# if bird collided with upipe or lpipe
uCollide = pixelCollision(playerRect, uPipeRect, pHitMask, uHitmask)
lCollide = pixelCollision(playerRect, lPipeRect, pHitMask, lHitmask)
if uCollide or lCollide:
return [True, False]
return [False, False]
def pixelCollision(rect1, rect2, hitmask1, hitmask2):
"""Checks if two objects collide and not just their rects"""
rect = rect1.clip(rect2)
if rect.width == 0 or rect.height == 0:
return False
x1, y1 = rect.x - rect1.x, rect.y - rect1.y
x2, y2 = rect.x - rect2.x, rect.y - rect2.y
for x in xrange(rect.width):
for y in xrange(rect.height):
if hitmask1[x1+x][y1+y] and hitmask2[x2+x][y2+y]:
return True
return False
def getHitmask(image):
"""returns a hitmask using an image's alpha."""
mask = []
for x in range(image.get_width()):
mask.append([])
for y in range(image.get_height()):
mask[x].append(bool(image.get_at((x,y))[3]))
return mask
def actionTileCode(F,S,A):
    tilecode(S[0],S[1],F)
F = [x + A*(numTilings*tiles*tiles) for x in F]
return F
def getExpected(q):
expectedVal = 0
a = argmax(q)
for i in range(2):
if(a==i):
expectedVal = expectedVal + (1 - (epsilon/2))*q[i]
else:
expectedVal = expectedVal + (epsilon/2)*q[i]
return expectedVal
def eligibilityTrace(zerovec,F):
zerovec = alpha*lmbda*zerovec
zerovec[F] = 1
return zerovec
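# --- Note on the training loop below (descriptive comment, added) ---
# The agent learns with an Expected-Sarsa(lambda)-style update over tile-coded
# features: actionTileCode() offsets the active tiles by the chosen action,
# getExpected() forms the epsilon-greedy expectation of the two action values,
# and eligibilityTrace() decays the trace vector by alpha*lmbda (as written)
# and sets the currently active features back to 1.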
episodeNum = 100000
count = 0
alpha = 0.1/numTilings
gamma = 1
lmbda = 0.5
epsilon = 0.1
n = numTiles * 2
F = [-1]*numTilings
w = -0.01*rand(n)
returnSum = 0
while(count<episodeNum):
moveInfo = start()
crashInfo = mainGame(moveInfo, SHARED)
S = SHARED['state']
zerovec = zeros(n)
G = 0
A = 0
F = actionTileCode(F,S,A)
zerovec[F] = 1
while(crashInfo == 1):
crashInfo = mainGame(moveInfo,SHARED)
S = SHARED['state']
R = SHARED['reward']
G = G + R
delta = R - sum(w[F])
q = zeros(2)
if(crashInfo == 1):
for a in range(2):
F = actionTileCode(F,S,a)
q[a] = sum(w[F])
else:
w = w + alpha*delta*zerovec
break
expected_q = getExpected(q)
delta = delta + expected_q
A = argmax(q) if rand() >= epsilon else random.choice([0,1])
SHARED['flap'] = A
w = w + alpha*delta*zerovec
F = actionTileCode(F,S,A)
zerovec = eligibilityTrace(zerovec,F)
count += 1
returnSum = returnSum + G
print 'Return from Episode ', count, 'is ', G
if(G>1000):
break
print returnSum/episodeNum
pygame.quit() | mit | 5,914,738,368,312,624,000 | 31.720472 | 102 | 0.595091 | false | 3.280892 | false | false | false |
TRUFA-rnaseq/trufa-web | client_example.py | 1 | 3731 | #-------------------------------------------------------------------------------
# simple client to test REST API of Job Launcher
import httplib
import json
from cStringIO import StringIO
#-------------------------------------------------------------------------------
authority = "localhost:8181"
#-------------------------------------------------------------------------------
def callGetJobStatus( joblist ):
conn = httplib.HTTPConnection( authority )
# encode the request params
params = {
'joblist': joblist,
}
payload = json.dumps( params, ensure_ascii=False )
payload.encode( 'utf-8' )
# define the params encoding
headers = { 'Content-Type': 'application/json; charset=utf-8'}
# call the remote service
cleanURI = '/jobs'
conn.request( 'GET', cleanURI, body=payload, headers=headers )
# get the result
retValues = {}
response = conn.getresponse()
if response.status == 200:
try:
retValues = json.loads( response.read() )
except ValueError:
print "error: can't decode json response"
else:
print "error :", response.status, response.reason
print retValues
#-------------------------------------------------------------------------------
def callRunJob( user, params ):
conn = httplib.HTTPConnection( authority )
# encode the request params
params = {
'user': user,
'program': 'trufa',
'params': params,
}
payload = json.dumps( params, ensure_ascii=False )
payload.encode( 'utf-8' )
# define the params encoding
headers = { 'Content-Type': 'application/json; charset=utf-8'}
# call the remote service
cleanURI = '/jobs'
conn.request( 'PUT', cleanURI, body=payload, headers=headers )
# get the result
retValues = {}
response = conn.getresponse()
if response.status == 200:
try:
retValues = json.loads( response.read() )
except ValueError:
print "error: can't decode json response"
else:
print "error :", response.status, response.reason
print retValues
#-------------------------------------------------------------------------------
def callJobStatus( jobid ):
conn = httplib.HTTPConnection( authority )
# call the remote service
cleanURI = '/jobs/'+str(jobid)
conn.request( 'GET', cleanURI )
# get the result
retValues = {}
response = conn.getresponse()
if response.status == 200:
try:
retValues = json.loads( response.read() )
except ValueError:
print "error: can't decode json response"
else:
print "error :", response.status, response.reason
print retValues
#-------------------------------------------------------------------------------
def callCancelJob( jobid ):
conn = httplib.HTTPConnection( authority )
# encode the request params
params = {
'cancel': True,
}
payload = json.dumps( params, ensure_ascii=False )
payload.encode( 'utf-8' )
# define the params encoding
headers = { 'Content-Type': 'application/json; charset=utf-8'}
# call the remote service
cleanURI = '/jobs/'+str(jobid)
conn.request( 'POST', cleanURI, body=payload, headers=headers )
# get the result
retValues = {}
response = conn.getresponse()
if response.status == 200:
try:
retValues = json.loads( response.read() )
except ValueError:
print "error: can't decode json response"
else:
print "error :", response.status, response.reason
print retValues
#-------------------------------------------------------------------------------
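# Example invocation (added for illustration; the job id is hypothetical and
# the job launcher at `authority` must be reachable for these calls to work).
if __name__ == '__main__':
    callJobStatus(1)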
| bsd-3-clause | 3,249,914,653,846,899,000 | 27.922481 | 80 | 0.522916 | false | 4.704918 | false | false | false |
bing-jian/diffusion-mri | Python/src/spherical_harmonics.py | 2 | 3821 | """
Spherical Harmonics
http://www.sjbrown.co.uk/?article=sharmonics
"""
import numpy
import math
factorial = lambda n:reduce(lambda a,b:a*(b+1),range(n),1)
def evaluate_SH(angles, degree, dl=1):
theta = angles[0]
phi = angles[1]
if (dl==2):
coeff_length = (degree+1)*(degree+2)/2
B = numpy.zeros([1,coeff_length])
Btheta = numpy.zeros([1,coeff_length])
Bphi = numpy.zeros([1,coeff_length])
elif (dl==1):
coeff_length = (degree+1)*(degree+1)
B = numpy.zeros([1,coeff_length])
Btheta = numpy.zeros([1,coeff_length])
Bphi = numpy.zeros([1,coeff_length])
for l in range(0,degree+1,dl):
if (dl==2):
center = (l+1)*(l+2)/2 - l
elif (dl==1):
center = (l+1)*(l+1) - l
lconstant = math.sqrt((2*l + 1)/(4*math.pi))
center = center - 1
Plm,dPlm = P(l,0,theta)
B[0,center] = lconstant*Plm
Btheta[0,center] = lconstant * dPlm
Bphi[0,center] = 0
for m in range(1,l+1):
precoeff = lconstant * math.sqrt(2.0)*math.sqrt(factorial(l - m)/(factorial(l + m)*1.0))
if (m % 2 == 1):
precoeff = -precoeff
Plm,dPlm = P(l,m,theta)
pre1 = precoeff*Plm
pre2 = precoeff*dPlm
B[0,center + m] = pre1*math.cos(m*phi)
B[0,center - m] = pre1*math.sin(m*phi)
Btheta[0,center+m] = pre2*math.cos(m*phi)
Btheta[0,center-m] = pre2*math.sin(m*phi)
Bphi[0,center+m] = -m*B[0,center-m]
Bphi[0,center-m] = m*B[0,center+m]
return B,Btheta,Bphi
def real_spherical_harmonics(angles, coeff, degree, dl=1):
"""
Given a real-valued spherical function represented by spherical harmonics coefficients,
this function evaluates its value and gradient at given spherical angles
SYNTAX: [f, g] = real_spherical_harmonics(angles, coeff, degree, dl);
INPUTS:
angles - [theta,phi] are colatitude and longitude, respectively
coeff - real valued coefficients [a_00, a_1-1, a_10, a_11, ... ]
degree - maximum degree of spherical harmonics ;
dl - {1} for full band; 2 for even order only
OUTPUTS:
f - Evaluated function value f = \sum a_lm*Y_lm
g - derivatives with respect to theta and phi
"""
B,Btheta,Bphi = evaluate_SH(angles, degree, dl)
f = sum(-numpy.dot(B,coeff))
g = numpy.array((-sum(numpy.dot(Btheta,coeff)), -sum(numpy.dot(Bphi,coeff))))
return f,g
def P(l,m,theta):
"""
The Legendre polynomials are defined recursively
"""
pmm = 1
dpmm = 0
x = math.cos(theta)
somx2 = math.sin(theta)
fact = 1.0
for i in range(1,m+1):
dpmm = -fact * (x*pmm + somx2*dpmm)
pmm = pmm*(-fact * somx2)
fact = fact+2
# No need to go any further, rule 2 is satisfied
if (l == m):
Plm = pmm
dPlm = dpmm
return Plm,dPlm
# Rule 3, use result of P(m,m) to calculate P(m,m+1)
pmmp1 = x * (2 * m + 1) * pmm
dpmmp1 = (2*m+1)*(x*dpmm - somx2*pmm)
# Is rule 3 satisfied?
if (l == m + 1):
Plm = pmmp1
dPlm = dpmmp1
return Plm, dPlm
# Finally, use rule 1 to calculate any remaining cases
pll = 0
dpll = 0
for ll in range(m + 2,l+1):
# Use result of two previous bands
pll = (x * (2.0 * ll - 1.0) * pmmp1 - (ll + m - 1.0) * pmm) / (ll - m)
dpll = ((2.0*ll-1.0)*( x*dpmmp1 - somx2*pmmp1 ) - (ll+m-1.0)*dpmm) / (ll - m)
# Shift the previous two bands up
pmm = pmmp1
dpmm = dpmmp1
pmmp1 = pll
dpmmp1 = dpll
Plm = pll
dPlm = dpll
return Plm,dPlm
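# --- Usage sketch (added for illustration; the coefficients are arbitrary) ---
if __name__ == '__main__':
    degree = 4
    ncoeff = (degree + 1) * (degree + 1)   # full band, dl=1
    coeff = numpy.ones(ncoeff) / ncoeff
    f, g = real_spherical_harmonics([0.5, 1.0], coeff, degree, dl=1)
    print 'f =', f, '(df/dtheta, df/dphi) =', g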
| mit | 311,085,629,541,360,450 | 29.086614 | 100 | 0.53363 | false | 2.923489 | false | false | false |
malon/EsperaDesEspera | scraping/SergasCsvDBFiles.py | 1 | 13948 |
# This file is part of EsperaDesespera.
#
# Copyright (C) 2011 by Alberto Ariza, Juan Elosua & Marta Alonso
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is provided 'as-is', without any express or implied warranty.
# In no event will the authors be held liable for any damages arising from the
# use of this software.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import csv, re
# Dictionaries
dicts = ('dictHosp','dictServ','DictTipo','dictFecha','dictInte')
# Dictionary of the different hospitals
dictHosp = {}
# Dictionary of the different service types
dictTipo = {}
# Dictionary of the different services
dictServ = {}
# Dictionary of the different analysis dates
dictFecha = {}
# Dictionary of the waiting-time intervals to take into account
dictInte = {'0 - 3 m': '1',
'3 - 6 m' : '2',
'6 - 12 m' : '3',
'más 12 m' : '4'}
# marker for the start of the interval columns
s0a3 = '0 - 3 m'
# marker for the end of the interval columns
total = 'TOTAL'
tempo = 'Tempo medio de espera'
#fIndex = 'prueba.txt'
fIndex = 'CsvfilesIndex.txt'
# The CEX (outpatient) reports changed format from 2006 onwards
CEXServChangeDate = '2006'
# From this date onwards, a report of totals per hospital was added
reportChangeDate = 200912
# Function to determine whether a field is numeric
def is_numeric(val):
try:
float(val)
except ValueError, e:
return False
return True
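# The procesar* functions below all follow the same pattern: each scraped CSV
# is read as one long list of cells, each data table inside it is located via
# the interval marker ('0 - 3 m') and the 'TOTAL' / 'Tempo medio de espera'
# columns, the row length is derived from the distance between those markers,
# and the rows are then walked to fill the output CSVs and the lookup
# dictionaries (hospitals, services, types, dates).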
# Process the hospital CSVs
def procesarHosp(l,tipo):
try:
print 'procesarHosp'
        #print 'length of the list: ', len(l)
        # Find the analysis date
i = l[1].index('/20')
fechaAnalisis = l[1][i-5:i+5]
if (not dictFecha.has_key(fechaAnalisis)):
dictFecha[fechaAnalisis] = str(len(dictFecha)+1)
if (not dictTipo.has_key(tipo)):
dictTipo[tipo] = str(len(dictTipo)+1)
i = l.index(s0a3)
j = l.index(total)
"""La longitud de cada fila hay que sumarle 1 para incluir ambos extremos
y 2 más para la cabecera y tiempo medio"""
longfila = (j-i+3)
ifila = j+2
ffila = ifila+longfila
while (ffila <= len(l)):
if (l[ifila] != total):
if (not dictHosp.has_key(l[ifila])):
dictHosp[l[ifila]] = str(len(dictHosp)+1)
wr1.writerow([dictFecha[fechaAnalisis],
dictHosp[l[ifila]],dictTipo[tipo],l[ffila-2].replace('.',''),l[ffila-1].replace(',','.')])
ifila = ffila
ffila += longfila
except Exception, e:
print 'Error', e
return False
return True
# Process the service CSVs
def procesarServ(l,tipo):
try:
print 'procesarServ'
        #print 'length of the list: ', len(l)
        # Find the analysis date
i = l[1].index('/20')
fechaAnalisis = l[1][i-5:i+5]
if (not dictFecha.has_key(fechaAnalisis)):
dictFecha[fechaAnalisis] = str(len(dictFecha)+1)
if (not dictTipo.has_key(tipo)):
dictTipo[tipo] = str(len(dictTipo)+1)
if (l[1][i+1:i+5] <= CEXServChangeDate and tipo == 'CEX'):
j = l.index('Pacientes')
longfila = 3
else:
i = l.index(s0a3)
j = l.index(total)
longfila = (j-i+3)
ifila = j+2
ffila = ifila+longfila
while (ffila <= len(l)):
if (l[ifila] != total):
if (not dictServ.has_key(l[ifila])):
dictServ[l[ifila]] = str(len(dictServ)+1)
wr2.writerow([dictFecha[fechaAnalisis],
dictServ[l[ifila]],dictTipo[tipo],
l[ffila-2].replace('.',''),l[ffila-1].replace(',','.')])
else:
wr3.writerow([dictFecha[fechaAnalisis],dictTipo[tipo],
l[ffila-2].replace('.',''),l[ffila-1].replace(',','.')])
ifila = ffila
ffila += longfila
except Exception, e:
print e
return False
return True
# Process the diagnostic-test CSVs
def procesarPrueb(l,tipo):
try:
print 'procesarPrueb'
        #print 'length of the list: ', len(l)
        # Find the analysis date
i = l[1].index('/20')
fechaAnalisis = l[1][i-5:i+5]
if (not dictFecha.has_key(fechaAnalisis)):
dictFecha[fechaAnalisis] = str(len(dictFecha)+1)
if (not dictTipo.has_key(tipo)):
dictTipo[tipo] = str(len(dictTipo)+1)
if (fechaAnalisis != '31/03/2009'):
s = '|'.join(l)
l2 = s.split('|0 - 3 m|')
l2=l2[1:]
for chunk in l2:
laux=chunk.split('|')
j = laux.index(total)
                # compute the row length from the calculated cut position
longfila = j+4
                # move to the first row
ifila = j+2
ffila = ifila+longfila
while (ffila <= len(laux)):
if (laux[ifila] != total):
if (not dictServ.has_key(laux[ifila])):
dictServ[laux[ifila]] = str(len(dictServ)+1)
wr2.writerow([dictFecha[fechaAnalisis],
dictServ[laux[ifila]],dictTipo[tipo],
laux[ffila-2].replace('.',''),laux[ffila-1].replace(',','.')])
else:
wr3.writerow([dictFecha[fechaAnalisis],dictTipo[tipo],
laux[ffila-2].replace('.',''),laux[ffila-1].replace(',','.')])
ifila = ffila
ffila += longfila
else:
i = l.index(s0a3)
j = l.index(total)
longfila = (j-i+4)
ifila = j+3
ffila = ifila+longfila
while (ffila <= len(l)):
if (l[ifila] != total):
if (not dictServ.has_key(l[ifila])):
dictServ[l[ifila]] = str(len(dictServ)+1)
wr2.writerow([dictFecha[fechaAnalisis],dictServ[l[ifila]],
dictTipo[tipo],l[ffila-3].replace('.',''),l[ffila-2].replace(',','.')])
else:
wr3.writerow([dictFecha[fechaAnalisis],
dictTipo[tipo],l[ffila-3].replace('.',''),l[ffila-2].replace(',','.')])
ifila = ffila
ffila += longfila
except Exception, e:
print e
return False
return True
# Process the hospital-and-service CSVs
def procesarHospServ(l, tipo):
try:
print 'procesarHospServ'
        # Find the analysis date
i = l[1].index('/20')
fechaAnalisis = l[1][i-5:i+5]
if (not dictFecha.has_key(fechaAnalisis)):
dictFecha[fechaAnalisis] = str(len(dictFecha)+1)
if (not dictTipo.has_key(tipo)):
dictTipo[tipo] = str(len(dictTipo)+1)
s = '|'.join(l)
if (l[1][i+1:i+5] <= CEXServChangeDate and tipo == 'CEX'):
l2 = s.split('|Pacientes|')
hospital = ''
for chunk in l2:
laux=chunk.split('|')
                # store the hospital name for the next table
if (laux.count(tempo) == 0):
hospital = laux[len(laux)-2]
if (not dictHosp.has_key(hospital)):
dictHosp[hospital] = str(len(dictHosp)+1)
continue
j = laux.index(tempo)
                # compute the row length from the calculated cut position
longfila = j+3
                # move to the first row
ifila = j+1
ffila = ifila+longfila
while (ffila <= len(laux)):
if (laux[ifila] != total):
if (not dictServ.has_key(laux[ifila])):
dictServ[laux[ifila]] = str(len(dictServ)+1)
                        # Only totals are present here, not per-interval values
wr4.writerow([dictFecha[fechaAnalisis],dictHosp[hospital],dictServ[laux[ifila]],
dictTipo[tipo],laux[ffila-2].replace('.',''),laux[ffila-1].replace(',','.')])
else:
wr1.writerow([dictFecha[fechaAnalisis],dictHosp[hospital],
dictTipo[tipo],laux[ffila-2].replace('.',''),laux[ffila-1].replace(',','.')])
ifila = ffila
ffila += longfila
hospital = laux[len(laux)-2]
if (not chunk == l2[len(l2)-1]):
if (not dictHosp.has_key(hospital)):
dictHosp[hospital] = str(len(dictHosp)+1)
else:
l2 = s.split('|0 - 3 m|')
hospital = ''
for chunk in l2:
laux=chunk.split('|')
                # store the hospital name for the next table
if (laux.count(total) == 0):
hospital = laux[len(laux)-2]
if (not dictHosp.has_key(hospital)):
dictHosp[hospital] = str(len(dictHosp)+1)
continue
j = laux.index(total)
                # compute the row length from the calculated cut position
longfila = j+4
                # move to the first row
ifila = j+2
ffila = ifila+longfila
while (ffila <= len(laux)):
if (laux[ifila] != total):
if (not dictServ.has_key(laux[ifila])):
dictServ[laux[ifila]] = str(len(dictServ)+1)
                        # drop the first column and the last two columns
longdatos = longfila-3
for i in range(longdatos):
wr5.writerow([dictFecha[fechaAnalisis],
str(i+1),dictHosp[hospital],dictServ[laux[ifila]],dictTipo[tipo],laux[ifila+1+i].replace('.','')])
wr4.writerow([dictFecha[fechaAnalisis],
dictHosp[hospital],dictServ[laux[ifila]],dictTipo[tipo],
laux[ffila-2].replace('.',''),laux[ffila-1].replace(',','.')])
else:
if(int(fechaAnalisis[6:10]+fechaAnalisis[3:5]) < reportChangeDate):
wr1.writerow([dictFecha[fechaAnalisis],dictHosp[hospital],
dictTipo[tipo],laux[ffila-2].replace('.',''),laux[ffila-1].replace(',','.')])
ifila = ffila
ffila += longfila
hospital = laux[len(laux)-2]
if (not chunk == l2[len(l2)-1]):
if (not dictHosp.has_key(hospital)):
dictHosp[hospital] = str(len(dictHosp)+1)
except Exception, e:
print e
return False
return True
# Process the hospital-and-test CSVs
def procesarHospPrueb(l, tipo):
try:
print 'procesarHospPrueb'
        # Find the analysis date
i = l[1].index('/20')
fechaAnalisis = l[1][i-5:i+5]
if (not dictFecha.has_key(fechaAnalisis)):
dictFecha[fechaAnalisis] = str(len(dictFecha)+1)
if (not dictTipo.has_key(tipo)):
dictTipo[tipo] = str(len(dictTipo)+1)
if (fechaAnalisis == '31/03/2009'):
offset = 1
else:
offset = 0
s = '|'.join(l)
l2 = s.split('|0 - 3 m|')
hospital = ''
for chunk in l2:
laux=chunk.split('|')
            # store the hospital name for the next table
if (laux.count(total) == 0):
hospital = laux[len(laux)-3+offset]
if (not dictHosp.has_key(hospital)):
dictHosp[hospital] = str(len(dictHosp)+1)
continue
j = laux.index(total)
            # compute the row length from the calculated cut position
longfila = j+4+offset
            # move to the start of the row
ifila = j+2+offset
ffila = ifila+longfila
while (ffila <= len(laux)):
if (laux[ifila] != total):
if (not dictServ.has_key(laux[ifila])):
dictServ[laux[ifila]] = str(len(dictServ)+1)
                    # drop the first column and the last two columns
longdatos = longfila-3
for i in range(longdatos):
wr5.writerow([dictFecha[fechaAnalisis],str(i+1),
dictHosp[hospital],dictServ[laux[ifila]],dictTipo[tipo],laux[ifila+1+i].replace('.','')])
wr4.writerow([dictFecha[fechaAnalisis],dictHosp[hospital],dictServ[laux[ifila]],
dictTipo[tipo],laux[ffila-(2+offset)].replace('.',''),laux[ffila-(1+offset)].replace(',','.')])
ifila = ffila
ffila += longfila
h =laux[len(laux)-3+offset].replace(',','.')
if (not is_numeric(h)):
hospital = laux[len(laux)-3+offset]
if (not chunk == l2[len(l2)-1]):
if (not dictHosp.has_key(hospital)):
dictHosp[hospital] = str(len(dictHosp)+1)
except Exception, e:
print e
return False
return True
# Input file
fileInput = open(fIndex,'r')
# Data output files
fileOutput1 = open('TotHosp.csv','wb')
wr1 = csv.writer(fileOutput1, quoting=csv.QUOTE_ALL)
fileOutput2 = open('TotServ.csv','wb')
wr2 = csv.writer(fileOutput2, quoting=csv.QUOTE_ALL)
fileOutput3 = open('TotTipoServ.csv','wb')
wr3 = csv.writer(fileOutput3, quoting=csv.QUOTE_ALL)
fileOutput4 = open('TotHospServ.csv','wb')
wr4 = csv.writer(fileOutput4, quoting=csv.QUOTE_ALL)
fileOutput5 = open('DatosPacien.csv','wb')
wr5 = csv.writer(fileOutput5, quoting=csv.QUOTE_ALL)
# Definition output files
fileOutput6 = open('DefHosp.csv','wb')
wr6 = csv.writer(fileOutput6, quoting=csv.QUOTE_ALL)
fileOutput7 = open('DefServ.csv','wb')
wr7 = csv.writer(fileOutput7, quoting=csv.QUOTE_ALL)
fileOutput8 = open('DefTipoServ.csv','wb')
wr8 = csv.writer(fileOutput8, quoting=csv.QUOTE_ALL)
fileOutput9 = open('DefFecha.csv','wb')
wr9 = csv.writer(fileOutput9, quoting=csv.QUOTE_ALL)
fileOutput10 = open('DefInte.csv','wb')
wr10 = csv.writer(fileOutput10, quoting=csv.QUOTE_ALL)
for line in fileInput.readlines():
try:
tipo = line[4:7]
ifile = open(line.rstrip(), "rb")
reader = csv.reader(ifile)
for row in reader:
if (row[0].find('por hospitais e servizos') != -1):
print line.rstrip()
procesarHospServ(row,tipo)
elif (row[0].find('por hospitais e probas') != -1 or
row[0].find('probas diagnósticas por hospitais') != -1):
print line.rstrip()
procesarHospPrueb(row,tipo)
elif (row[0].find('por servizos') != -1):
print line.rstrip()
procesarServ(row,tipo)
elif (row[0].find('probas diagnósticas') != -1):
print line.rstrip()
procesarPrueb(row,tipo)
elif (row[0].find('por hospitais') != -1):
print line.rstrip()
procesarHosp(row,tipo)
else:
print 'Categoria no esperada ', row[0]
ifile.close()
except Exception, e:
print 'Error: ', e
try:
l = dictHosp.keys()
for data in l:
wr6.writerow([dictHosp[data],data,data])
l = dictServ.keys()
for data in l:
wr7.writerow([dictServ[data],data,data])
l = dictTipo.keys()
for data in l:
wr8.writerow([dictTipo[data],data,data,data])
l = dictFecha.keys()
for data in l:
wr9.writerow([dictFecha[data],data,data,' '])
l = dictInte.keys()
for data in l:
wr10.writerow([dictInte[data],data,data])
except Exception, e:
print e
#print 'dictionary of dates: ', dictFecha
#print 'dictionary of hospitals: ', dictHosp
#print 'dictionary of services: ', dictServ
#print 'dictionary of types: ', dictTipo
#print 'dictionary of intervals: ', dictInte
fileOutput1.close()
fileOutput2.close()
fileOutput3.close()
fileOutput4.close()
fileOutput5.close()
fileOutput6.close()
fileOutput7.close()
fileOutput8.close()
fileOutput9.close()
fileOutput10.close()
fileInput.close()
| agpl-3.0 | -3,548,245,310,299,805,000 | 32.023697 | 105 | 0.655784 | false | 2.481923 | false | false | false |
some-un/RLcourse5 | valueIterationAgents.py | 1 | 5879 | # valueIterationAgents.py
# -----------------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# ([email protected]) and Dan Klein ([email protected]).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel ([email protected]).
import mdp, util
from learningAgents import ValueEstimationAgent
class ValueIterationAgent(ValueEstimationAgent):
"""
* Please read learningAgents.py before reading this.*
A ValueIterationAgent takes a Markov decision process
(see mdp.py) on initialization and runs value iteration
for a given number of iterations using the supplied
discount factor.
"""
def __init__(self, mdp, discount = 0.9, iterations = 100):
"""
Your value iteration agent should take an mdp on
construction, run the indicated number of iterations
and then act according to the resulting policy.
Some useful mdp methods you will use:
mdp.getStates()
mdp.getPossibleActions(state)
mdp.getTransitionStatesAndProbs(state, action)
mdp.getReward(state, action, nextState)
mdp.isTerminal(state)
"""
self.mdp = mdp
self.discount = discount
self.iterations = iterations
self.values = util.Counter() # A Counter is a dict with default 0
# Write value iteration code here
"*** YOUR CODE HERE ***"
AllStates = mdp.getStates()
self.values = util.Counter()
#Iterate value function
for k in range(iterations):
Vnew = util.Counter() #Batch computation
for State in AllStates:
AllActions = mdp.getPossibleActions(State)
if len(AllActions) > 0: #Some actions are available
ExpectedValueofAction = util.Counter() #Temporary counter of value of each action available at s
for Action in AllActions:
Pssa = mdp.getTransitionStatesAndProbs(State,Action) #List of ((s'), probability) for s,a
for Transition in Pssa: #sum over all possible s' = StatePrime
StatePrime = Transition[0]
Probability = Transition[1]
Reward = mdp.getReward(State,Action,StatePrime)
Vprime = self.values[StatePrime]
ExpectedValueofAction[Action] += Probability*(Reward + discount*Vprime)
#Pick the best action in ValueofActions:
SortedActions = ExpectedValueofAction.sortedKeys()
OptimalAction = SortedActions[0]
#print "State :"+str(State)+" | Optimal Action: "+OptimalAction
#Update value function
Vnew[State] = ExpectedValueofAction[OptimalAction]
#else: no available action -> don't do anything to self.values[State]
self.values = Vnew
def getValue(self, state):
"""
Return the value of the state (computed in __init__).
"""
return self.values[state]
def computeQValueFromValues(self, state, action):
"""
Compute the Q-value of action in state from the
value function stored in self.values.
"""
"*** YOUR CODE HERE ***"
Pssa = self.mdp.getTransitionStatesAndProbs(state,action)
ExpectedValueofAction = 0
for Transition in Pssa: #sum over all possible s' = StatePrime
StatePrime = Transition[0]
Probability = Transition[1]
Reward = self.mdp.getReward(state,action,StatePrime)
Vprime = self.values[StatePrime]
ExpectedValueofAction += Probability*(Reward + self.discount*Vprime)
return ExpectedValueofAction
def computeActionFromValues(self, state):
"""
The policy is the best action in the given state
according to the values currently stored in self.values.
You may break ties any way you see fit. Note that if
there are no legal actions, which is the case at the
terminal state, you should return None.
"""
"*** YOUR CODE HERE ***"
AllActions = self.mdp.getPossibleActions(state)
if len(AllActions) > 0: #Some actions are available
ExpectedValueofAction = util.Counter() #Temporary counter of value of each action available at s
for Action in AllActions:
ExpectedValueofAction[Action] = self.computeQValueFromValues(state, Action)
#Pick the best action in ValueofActions:
OptimalAction = ExpectedValueofAction.argMax()
else:
            OptimalAction = None  # no legal actions (terminal state)
return OptimalAction
def getPolicy(self, state):
return self.computeActionFromValues(state)
def getAction(self, state):
"Returns the policy at the state (no exploration)."
return self.computeActionFromValues(state)
def getQValue(self, state, action):
return self.computeQValueFromValues(state, action)
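# Usage sketch (comment only; the surrounding course framework supplies the
# mdp object): the agent is constructed as
#     agent = ValueIterationAgent(mdp, discount=0.9, iterations=100)
# and then queried through agent.getValue(state), agent.getPolicy(state) and
# agent.getQValue(state, action).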
| gpl-3.0 | 1,036,748,393,675,748,600 | 39.544828 | 116 | 0.590066 | false | 4.532768 | false | false | false |
19kestier/taiga-back | taiga/projects/attachments/management/commands/migrate_attachments.py | 3 | 4292 | import re
from django.apps import apps
from django.core.management.base import BaseCommand, CommandError
from django.core.files import File
from django.conf import settings
from django.db import transaction
from taiga.base.utils.iterators import iter_queryset
url = """
https://api-taiga.kaleidos.net/attachments/446/?user=8&token=9ac0fc593e9c07740975c6282e1e501189578faa
"""
class Command(BaseCommand):
help = "Parses all objects and try replace old attachments url with one new"
trx = r"((?:https?)://api-taiga\.kaleidos\.net/attachments/(\d+)/[^\s\"]+)"
@transaction.atomic
def handle(self, *args, **options):
settings.MEDIA_URL="https://media.taiga.io/"
self.move_user_photo()
self.move_attachments()
self.process_userstories()
self.process_issues()
self.process_wiki()
self.process_tasks()
self.process_history()
def move_attachments(self):
print("Moving all attachments to new location")
Attachment = apps.get_model("attachments", "Attachment")
qs = Attachment.objects.all()
for item in iter_queryset(qs):
try:
with transaction.atomic():
old_file = item.attached_file
item.attached_file = File(old_file)
item.save()
except FileNotFoundError:
item.delete()
def move_user_photo(self):
print("Moving all user photos to new location")
User = apps.get_model("users", "User")
qs = User.objects.all()
for item in iter_queryset(qs):
try:
with transaction.atomic():
old_file = item.photo
item.photo = File(old_file)
item.save()
except FileNotFoundError:
pass
def get_attachment_real_url(self, pk):
if isinstance(pk, str):
pk = int(pk)
Attachment = apps.get_model("attachments", "Attachment")
return Attachment.objects.get(pk=pk).attached_file.url
def replace_matches(self, data):
matches = re.findall(self.trx, data)
original_data = data
if len(matches) == 0:
return data
for url, attachment_id in matches:
new_url = self.get_attachment_real_url(attachment_id)
print("Match {} replaced by {}".format(url, new_url))
try:
data = data.replace(url, self.get_attachment_real_url(attachment_id))
except Exception as e:
print("Exception found but ignoring:", e)
assert data != original_data
return data
def process_userstories(self):
UserStory = apps.get_model("userstories", "UserStory")
qs = UserStory.objects.all()
for item in iter_queryset(qs):
description = self.replace_matches(item.description)
UserStory.objects.filter(pk=item.pk).update(description=description)
def process_tasks(self):
Task = apps.get_model("tasks", "Task")
qs = Task.objects.all()
for item in iter_queryset(qs):
description = self.replace_matches(item.description)
Task.objects.filter(pk=item.pk).update(description=description)
def process_issues(self):
Issue = apps.get_model("issues", "Issue")
qs = Issue.objects.all()
for item in iter_queryset(qs):
description = self.replace_matches(item.description)
Issue.objects.filter(pk=item.pk).update(description=description)
def process_wiki(self):
WikiPage = apps.get_model("wiki", "WikiPage")
qs = WikiPage.objects.all()
for item in iter_queryset(qs):
content = self.replace_matches(item.content)
WikiPage.objects.filter(pk=item.pk).update(content=content)
def process_history(self):
HistoryEntry = apps.get_model("history", "HistoryEntry")
qs = HistoryEntry.objects.all()
for item in iter_queryset(qs):
comment = self.replace_matches(item.comment)
comment_html = self.replace_matches(item.comment_html)
HistoryEntry.objects.filter(pk=item.pk).update(comment=comment, comment_html=comment_html)
| agpl-3.0 | -5,115,454,346,275,698,000 | 32.015385 | 105 | 0.608341 | false | 4.018727 | false | false | false |
rich-digi/wp-xml-transformer | split6.py | 1 | 1961 | # --------------------------------------------------------------------------------
# Split Wordpress XML (using Element Tree)
# --------------------------------------------------------------------------------
import sys, os, re, codecs
sys.path.append('/usr/local/lib/python2.7/site-packages/')
from lxml import etree as ET
#xmldata = 'input/dmclubcustomerblog.wordpress.2014-10-29.xml'
xmldata = 'input/wp.xml'
# Register Wordpress XML namespaces
namespaces = {
'wp' : 'http://wordpress.org/export/1.2/',
'excerpt' : 'http://wordpress.org/export/1.2/excerpt/',
'content' : 'http://purl.org/rss/1.0/modules/content/',
'wfw' : 'http://wellformedweb.org/CommentAPI/',
'dc' : 'http://purl.org/dc/elements/1.1/',
}
"""
REGISTER NAMESPACE WHEN WRITING ONLY
for prefix, uri in namespaces.iteritems():
ET.register_namespace(prefix, uri)
"""
def write_utf8_file(fp, ustr):
f = codecs.open(fp, 'w', 'utf-8');
f.write(ustr)
f.close()
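# Output layout produced below: one directory per <item>, named
# <post_type>__<post_name> (or a slug derived from the title), containing
# meta.xml (the item minus its body), content.html and excerpt.html.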
# Parse the XML using ElementTree's streaming SAX-like parser
for event, elem in ET.iterparse(xmldata, tag='item', strip_cdata=False, remove_blank_text=True):
title = elem.find('title').text
type = elem.find('wp:post_type', namespaces=namespaces).text
name = elem.find('wp:post_name', namespaces=namespaces).text
print '{:15s} {:100s} {:100s}'.format(type, title, name)
content = elem.find('content:encoded', namespaces=namespaces)
excerpt = elem.find('excerpt:encoded', namespaces=namespaces)
elem.remove(content)
elem.remove(excerpt)
if title is not None:
dir_suffix = name
if dir_suffix is None:
dir_suffix = re.sub(r'[^\w]', '_', title.lower())
dir = os.getcwd()+'/output/'+type+'__'+dir_suffix
if not os.path.exists(dir): os.makedirs(dir)
xmlstr = ET.tostring(elem, pretty_print=True, encoding='unicode', method='xml')
write_utf8_file(dir+'/meta.xml', xmlstr)
write_utf8_file(dir+'/content.html', content.text)
write_utf8_file(dir+'/excerpt.html', excerpt.text)
| mit | -6,030,310,166,997,537,000 | 31.683333 | 96 | 0.630801 | false | 2.948872 | false | false | false |
MattNolanLab/Ramsden_MEC | ABAFunctions/ABA_imageprocessing.py | 1 | 12388 | '''
Code for processing images
Copyright (c) 2014, Helen Ramsden
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
__author__ = 'helenlramsden'
import os, re
import Image
import ImageChops, ImageOps
import numpy as np
from scipy import ndimage  # used by moveimages() below
def resizeimages(infolderpath, iseriesdict):
'''
Run image resize
:param infolderpath:
:param iseriesdict:
:return:
'''
for iseries in iseriesdict.keys():
filename = iseries + '.jpg'
filename_exp = re.sub('ish','exp',filename)
borderresize(filename, infolderpath, 'ish')
borderresize(filename_exp, re.sub('ish','exp',infolderpath), 'exp')
def borderresize(filename, infolderpath, runtype):
'''
Resize ISH images
:param filename:
:param infolderpath:
:return:
'''
newimagewidth = 6000/4
newimageheight = 4500/4
if 'ishfull' in filename: newimage = Image.new('L', (newimagewidth,newimageheight), (255) ) #white image
else:newimage = Image.new('L', (newimagewidth,newimageheight), (0) ) #black image
resizefactor = 1.25 # images 0.8 of original size
try:
ishimage = Image.open(infolderpath +filename).convert("L")
except IOError:
print filename, 'Image failed'
return
# Not used previously - remove outer border to get rid of any dark borders on original image
ishimage = ImageOps.crop(ishimage, border=5)
dwidth = newimagewidth - ishimage.size[0]
dheight = newimageheight - ishimage.size[1]
newimage.paste(ishimage, (dwidth/2,dheight/2,dwidth/2+ishimage.size[0],dheight/2+ishimage.size[1]))
newimage = newimage.resize((int(float(newimagewidth)/resizefactor),int(float(newimageheight)/resizefactor)))
newimage.save(re.sub('Original_%s' % runtype,'Resize_%s' % runtype, infolderpath) + filename)
def sb_images(infolderpath, outfolderpath,iseriesdict, outfile):
'''
Write ImageJ macro to subtract background
:param infolderpath:
:param outfolderpath:
:param iseriesdict:
:param outfile:
:return:
'''
for iseries in iseriesdict.keys():
filename = iseries + '.jpg'
outfile.write('open("' + infolderpath + filename +'");\n')
outfile.write('run("8-bit");\n')
outfile.write('run("Subtract Background...", "rolling=1 light");\n') # changed from 3 for smaller images
outfile.write('saveAs("Jpeg", "' +outfolderpath+ re.sub('.jpg','_sb.jpg',filename) + '");\n')
outfile.write('run("Close All");\n')
def thresh_images(infolderpath, outfolderpath, iseriesdict, outfile):
'''
Images are thresholded for use in registration, but not for segmentation. Performance could possibly be improved by
thresholding prior to edge detection
:param infolderpath:
:param outfolderpath:
:param iseriesdict:
:param outfile:
:return:
'''
for iseries in iseriesdict.keys():
filename = iseries + '.jpg'
outfile.write('open("' + infolderpath + filename +'");\n')
outfile.write('run("Auto Threshold", "method=MinError(I) white");\n')
outfile.write('saveAs("Jpeg", "' + outfolderpath + re.sub('_sb.jpg','_sbthres.jpg',filename) + '");\n')
outfile.write('run("Close All");\n')
def find_seg_edges(infilepath,outfilepath, iseriesdict, outfile):
'''
Detect the edges
:param infilepath:
:param outfilepath:
:param iseriesdict:
:param outfile:
:return:
'''
for iseries in iseriesdict.keys():
filename = iseries + '.jpg'
outfile.write('open("' + infilepath + filename +'");\n')
outfile.write('selectWindow("' + filename + '");\n')
outfile.write('run("8-bit");\n')
outfile.write('run("FeatureJ Edges", "compute smoothing=10 lower=[] higher=[]");\n') # check size for resized images
outfile.write('selectWindow("' + filename + ' edges");\n')
outfile.write('saveAs("Jpeg", "' + outfilepath+ re.sub('.jpg','_edges.jpg',filename) + '");\n')
outfile.write('run("Close All");\n')
def create_seg_mask(infilepath, outfilepath, iseriesdict, outfile, thresh):
'''
Create segmentation mask
:param infilepath:
:param outfilepath:
:param iseriesdict:
:param outfile:
:param thresh:
:return:
'''
edgefilepath = re.sub('Segmented/','Edges/',outfilepath)
origfilepath = re.sub('Segmented/','Resize_ish/',outfilepath)
segorigfilepath = re.sub('Segmented/','Segmented/SegmentedOrig/',outfilepath)
segmaskfilepath = re.sub('Segmented/','Segmented/SegmentedMask/',outfilepath)
for iseries in iseriesdict.keys():
filename = iseries + '.jpg'
outfile.write('open("' + edgefilepath + filename + '");\n')
outfile.write('run("Auto Threshold", "method=Li white");\n') # white ensures white on black background
outfile.write('run("Fill Holes");\n')
outfile.write('run("Watershed");\n')
outfile.write('run("Analyze Particles...", "size=210000-Infinity circularity=0.00-1.00 show=Masks display clear summarize add");\n') # size needs altering for resized images
outfile.write('selectWindow("Mask of ' + filename +'"); \n')
outfile.write('saveAs("Jpeg", "' + segmaskfilepath + re.sub('_edges.jpg' , '_mask.jpg', filename) + '");\n')
outfile.write('selectWindow("' + filename +'");\n')
outfile.write('run("Close All");\n')
def apply_seg_mask(infilepath, outfilepath, iseriesdict):
'''
Apply mask to other images
:param infilepath:
:param outfilepath:
:param iseriesdict:
:return:
'''
sbinfilepath = re.sub('Segmented/SegmentedMask/','Thresh/',infilepath)
origfilepath = re.sub('Segmented/','Resize_ish/',outfilepath)
expfilepath = re.sub('Segmented/','Resize_exp/',outfilepath)
segorigfilepath = re.sub('Segmented/','Segmented/SegmentedOrig/',outfilepath)
segmaskfilepath = re.sub('Segmented/','Segmented/SegmentedMask/',outfilepath)
segsbfilepath =re.sub('Segmented/','Segmented/SegmentedThresh/',outfilepath)
segexpfilepath = re.sub('Segmented/','Segmented/SegmentedExp/',outfilepath)
for iseries in iseriesdict.keys():
seg_mask(iseries, sbinfilepath, segmaskfilepath, segsbfilepath,origfilepath,expfilepath,segexpfilepath,segorigfilepath)
def seg_mask(iseries, sbinfilepath, segmaskfilepath, segsbfilepath,origfilepath,expfilepath,segexpfilepath,segorigfilepath):
#iseries is a filename, without jpg on the end and with sb on the end
# First, apply mask to sb image - mask is black (or grey) on white background
filename = re.sub('_mask','',iseries) + '.jpg' #this is the sb image
# print 'Initial', filename
maskim = Image.open(segmaskfilepath+ re.sub('.jpg','_mask.jpg',filename)).convert("L")
# Mask not always black so first make sure it is
threshold = 141
maskim = maskim.point(lambda p: p > threshold and 255)
threshfilename = re.sub('_sb','_sbthres', filename)
sbim = Image.open(sbinfilepath + threshfilename)
try:
# print 'Get thresh'
seg_sb = ImageChops.lighter(sbim,maskim)
seg_sb.save(segsbfilepath+ re.sub('.jpg','_seg.jpg',threshfilename) )
except IOError:
print 'error in file'
#Now open the original image - get rid of sb from filename
filename = re.sub('_sb','', filename)
origim = Image.open(origfilepath + filename).convert("L")
seg_orig = ImageChops.lighter(origim,maskim)
seg_orig.save(segorigfilepath+ re.sub('.jpg','_seg_orig.jpg',filename))
#Now open the exp image and apply mask
# First make mask white on black
maskim = ImageChops.invert(maskim)
# Now extract all the pixels that are white and make this region a transparent region on the mask
maskim = maskim.convert('LA')
datas = maskim.getdata()
newData = list()
for item in datas:
if item[0] == 255:
newData.append((255, 0))
else:
newData.append(item)
maskim.putdata(newData)
#img.save("img2.png", "PNG")
l,a = maskim.split()
# Check that exp file exists
if os.path.exists(expfilepath + re.sub('ish','exp',filename)):
#seg_exp = ImageChops.logical_and(expim,maskim)
expim = Image.open(expfilepath + re.sub('ish','exp',filename)).convert("LA") # should be a grayscale image
expim.paste(maskim, mask = a)
expim = expim.convert("L")
expim.save(segexpfilepath+ re.sub('.jpg','_seg_exp.tif',filename))
else: print 'N'
def getallfiledict(filefolder, filelist, filetype, fileend='jpg'):
'''
Function finds all the files within a folder and returns a dictionary of their image series (val) and full filename
(key)
:param filefolder:
:param filelist:
:param filetype:
:param project:
:return:
'''
ffilelist = re.sub('.txt',filefolder.split('/')[-2] + '.txt',filelist)
os.system("ls %s | grep %s > %s" % (filefolder, filetype, ffilelist))
allfilesdict = dict((line.strip(),line.strip().split('_')[0]) for line in open(ffilelist, 'r')) # key = whole filename, val = iseries
iseriesdict = dict((line.strip().split('\t')[0].split('.' + fileend)[0], line.strip().split('\t'))
for line in open(ffilelist,'r')) # key = filename without jpg, filename (replace tif with jpg)
return allfilesdict, iseriesdict
def getnewdict(outfilefolder, filelist, preiseriesdict,fileendout, fileendin, fileend='jpg'):
'''
Get dictionary of images in a particular file. If file already present, don't overwrite
'''
print outfilefolder, fileendin,len(preiseriesdict.keys())
[gotfilesdict,gotiseriesdict] = getallfiledict(outfilefolder, filelist, fileendout,fileend)
gotfileskeys = [re.sub(fileendout,fileendin,g) for g in gotiseriesdict.keys()]
print len(gotfileskeys)
try:
print list(preiseriesdict.keys())[0],len(gotfileskeys),gotfileskeys[0],list(preiseriesdict.keys())[0]
except IndexError:
print 'Empty list'
allfiles = set(preiseriesdict.keys()).difference(gotfileskeys)
print 'Files to be processed: ', len(allfiles)
iseriesdict = dict((k,preiseriesdict[k]) for k in allfiles)
return iseriesdict
def moveimages(originfolder, origfilename, maskinfolder, maskfilename,expinfolder, expfilename,outorigfolder,outexpfolder):
'''
Function for moving images into the centre - we didn't use this
'''
try:
segorigimage = Image.open(originfolder +origfilename).convert("L")
segexpimage = Image.open(expinfolder +expfilename).convert("L")
maskim = Image.open(maskinfolder + maskfilename).convert("L") # need to convert to 8 bit (not rgb)
except IOError:
print origfilename, 'Image failed'
return
threshold = 141
maskim = maskim.point(lambda p: p > threshold and 255)
maskim = ImageChops.invert(maskim)
com = ndimage.measurements.center_of_mass(np.array(maskim))
dwidth = int(com[1] - 525) # centre of mass - 600 (so leftwards will be negative)
dheight = int(com[0] - 430) # centre of mass - 450 (so upwards will be negative)
newsegimage = Image.new('L', (1200,900), (255) ) # white image for seg orig
newexpimage = Image.new('L', (1200,900), (0) ) # black image for seg orig
print dwidth, dheight
    le = up = 0
    ri = segorigimage.size[0]
    lo = segorigimage.size[1]
    left = upper = 0
    if dwidth > 0: le = int(dwidth)
    else: ri = segorigimage.size[0] + int(dwidth); left = -dwidth
    if dheight > 0: up = int(dheight)
    else: lo = segorigimage.size[1] + int(dheight); upper = -dheight
box = (le, up, ri, lo)
newsegorigimage = segorigimage.crop(box)
newsegexpimage = segexpimage.crop(box)
newsegimage.paste(newsegorigimage, (left,upper,left + newsegorigimage.size[0],upper + newsegorigimage.size[1])) # left, upper, right, lower
newsegimage.save(outorigfolder + origfilename)
newexpimage.paste(newsegexpimage, (left,upper,left + newsegexpimage.size[0],upper + newsegexpimage.size[1])) # left, upper, right, lower
newexpimage.save(outexpfolder + expfilename)
| bsd-3-clause | 6,886,222,921,574,187,000 | 40.019868 | 175 | 0.723765 | false | 3.13303 | false | false | false |
sshleifer/object_detection_kitti | syntaxnet/dragnn/python/lexicon.py | 14 | 2806 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SyntaxNet lexicon utils."""
import os.path
import tensorflow as tf
from syntaxnet import task_spec_pb2
from syntaxnet.ops import gen_parser_ops
def create_lexicon_context(path):
"""Construct a SyntaxNet TaskContext file for standard lexical resources."""
context = task_spec_pb2.TaskSpec()
for name in [
'word-map', 'tag-map', 'tag-to-category', 'lcword-map', 'category-map',
'char-map', 'char-ngram-map', 'label-map', 'prefix-table', 'suffix-table'
]:
context.input.add(name=name).part.add(file_pattern=os.path.join(path, name))
return context
def build_lexicon(output_path,
training_corpus_path,
tf_master='',
training_corpus_format='conll-sentence',
morph_to_pos=False,
**kwargs):
"""Constructs a SyntaxNet lexicon at the given path.
Args:
output_path: Location to construct the lexicon.
training_corpus_path: Path to CONLL formatted training data.
tf_master: TensorFlow master executor (string, defaults to '' to use the
local instance).
training_corpus_format: Format of the training corpus (defaults to CONLL;
search for REGISTER_SYNTAXNET_DOCUMENT_FORMAT for other formats).
morph_to_pos: Whether to serialize morph attributes to the tag field,
combined with category and fine POS tag.
**kwargs: Forwarded to the LexiconBuilder op.
"""
context = create_lexicon_context(output_path)
if morph_to_pos:
context.parameter.add(name='join_category_to_pos', value='true')
context.parameter.add(name='add_pos_as_attribute', value='true')
context.parameter.add(name='serialize_morph_to_pos', value='true')
# Add the training data to the context.
resource = context.input.add()
resource.name = 'corpus'
resource.record_format.extend([training_corpus_format])
part = resource.part.add()
part.file_pattern = training_corpus_path
# Run the lexicon builder op.
with tf.Session(tf_master) as sess:
sess.run(
gen_parser_ops.lexicon_builder(
task_context_str=str(context), corpus_name='corpus', **kwargs))
| apache-2.0 | -2,600,197,595,570,163,000 | 37.438356 | 80 | 0.678902 | false | 3.881051 | false | false | false |
sinnorfin/raconteur | player.py | 1 | 10793 | import store
import tile
import math
import controls
import level
import pyglet.sprite
from pyglet.window import mouse
from pyglet.window import key
class Player(tile.Gameobject):
def __init__(self, coor, img,name = 'Player',
inv=None):
super(Player, self).__init__(img)
self.coor = coor
self.loc = store.findtile(self.coor)
self.name = name
self.inv = [] if inv is None else inv
self.itemcount = 0
self.faces = ['pcharF','pchar','pcharB','pcharR']
self.look = 'pchar'
self.mrange = 5
self.sp = pyglet.sprite.Sprite(x = store.ct(self.coor[0]),
y = store.ct(self.coor[1]),
img = store.getim(self),
batch = store.player_bt)
store.add(self,'gp')
store.add(self.sp,'spo')
def build(self,buildmenu):
if not self.cols():
if buildmenu.c[1] == 1:
buildmenu.overlay(store.findtile(self.coor),
buildmenu.blist[buildmenu.c[0]][0],
self.coor)
else:
buildmenu.build(store.findtile(self.coor),
buildmenu.blist[buildmenu.c[0]][0],
self.coor)
controls.turn()
def updateols(self):
for item in self.inv:
item.sp.x = self.sp.x
item.sp.y = self.sp.y
def updateitems(self):
for item in self.inv:
item.x = self.coor[0]
item.y = self.coor[1]
item.loc = self.loc
    def cols(self, coor=None):
        # Check for a blocking (non-passable) tile at the given coordinates;
        # defaults to the player's own position when no coordinates are passed,
        # which also keeps the existing no-argument call sites working.
        if coor is None:
            coor = self.coor
        for g_tile in store.store['gt']:
            if (g_tile.passable == False and
                g_tile.coor == coor):
                return True
        return False
def distance(self,target):
distance = [abs(self.coor[0]-target.coor[0]),
abs(self.coor[1]-target.coor[1])]
return distance
def player_bordering(self):
player_bordering = []
up = self.coor[1] + 1
down = self.coor[1] - 1
right = self.coor[0] + 1
left = self.coor[0] - 1
for g_tile in store.store['gt']:
add = False
if (g_tile.coor[1] == up and
g_tile.coor[0] == right):
add = True
ckcoll = [up,right]
elif (g_tile.coor[1] == down and
g_tile.coor[0] == left):
add = True
ckcoll = [down,left]
elif (g_tile.coor[1] == up and
g_tile.coor[0] == left):
add = True
ckcoll = [up,left]
elif (g_tile.coor[1] == down and
g_tile.coor[0] == right):
add = True
ckcoll = [down,right]
elif (g_tile.coor[1] == up and
g_tile.coor[0] == self.coor[0]):
add = True
ckcoll = [up,self.coor[0]]
elif (g_tile.coor[1] == down and
g_tile.coor[0] == self.coor[0]):
add = True
ckcoll = [down,self.coor[0]]
elif (g_tile.coor[1] == self.coor[1] and
g_tile.coor[0] == right):
add = True
ckcoll = [self.coor[1],right]
elif (g_tile.coor[1] == self.coor[1] and
g_tile.coor[0] == left):
add = True
ckcoll = [self.coor[1],left]
if (add == True and
not self.cols([ckcoll[0],ckcoll[1]])):
player_bordering.append(g_tile)
return player_bordering
def pathing(self):
self.checkmv(self.loc,True,pat=True)
Path.tagged = list(set(Path.tagged))
if Path.pl:
mincost = Path.pl[0].cost
costlist=[]
for path in Path.pl:
costlist.append(path.cost)
Path.cpath = Path.pl[costlist.index(min(costlist))]
for node in Path.cpath.nodes:
tag = pyglet.sprite.Sprite(x= store.ct(node.coor[0]),
y= store.ct(node.coor[1]),
img = store.image ['marker2'],
batch = store.debug_bt)
Path.wp.append(tag)
def moveg(self):
Path.clean_Path()
self.checkmv(self.loc,True)
Path.tagged = list(set(Path.tagged))
for tile in Path.tagged:
tag = pyglet.sprite.Sprite(x= store.ct(tile.coor[0]),
y= store.ct(tile.coor[1]),
img = store.image ['marker'],
batch = store.debug_bt)
Path.tags.append(tag)
for tagged in Path.tagged:
Path.ptagged.append(tagged)
if level.ontiles([store.cursor.mposx,store.cursor.mposy],Path.ptagged):
Path.clean_Path(tags=False)
Path.goal = store.findtile(store.cursor.coor)
store.cplayer.pathing()
def checkmv(self,tchk,first = False,pat=False,f=None):
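        # Depth-first recursive walk over neighbouring tiles (via tile.dirs):
        # Path.cost tracks the number of steps taken, passable tiles reached
        # within self.mrange steps are appended to Path.tagged, and when pat is
        # True a Path object is recorded each time the walk reaches Path.goal.
        # 'f' is the tile we arrived from and is skipped to avoid backtracking.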
checkdirs = [tchk.dirs[-1],tchk.dirs[1],
tchk.dirs[0],tchk.dirs[2]]
if f: checkdirs.pop(checkdirs.index(f))
if first == True:
st_cost = Path.cost
st_tagged = len(Path.tagged)
for ccheck in checkdirs:
Path.cost = st_cost
for i in range(len(Path.tagged)-st_tagged):
if pat==True:Path.tagged.pop()
if pat == True:
self.checkmv(ccheck,pat=True,
f=tchk)
else:
self.checkmv(ccheck,f=tchk)
if (first == False and tchk.passable == True and
Path.cost + 1 <= self.mrange):
Path.cost += 1
Path.tagged.append(tchk)
st_cost = Path.cost
st_tagged = len(Path.tagged)
if pat == True and tchk.coor == Path.goal.coor:
p = Path(Path.cost,[])
for node in Path.tagged: p.nodes.append(node)
Path.pl.append(p)
if Path.cost != self.mrange:
for ccheck in checkdirs:
Path.cost = st_cost
for i in range(len(Path.tagged)-st_tagged):
if pat == True:Path.tagged.pop()
if pat == True:
self.checkmv(ccheck,pat=True,
f=tchk)
else:
self.checkmv(ccheck,f=tchk)
def moveone(self,coor,dir,fixcoor):
if not self.coll(coor+dir):
self.coor[coor] += dir
self.look = self.faces[coor+dir]
self.sp.image=store.image [self.look]
if coor == 0:
self.sp.x = store.ct(self.coor[0])
else: self.sp.y = store.ct(self.coor[1])
self.loc = store.findtile(self.coor)
controls.turn()
self.updateols()
self.updateitems()
def coll(self,direc):
if not (self.loc.dirs[direc].passable):
return True
return False
def pmove(self,path,step):
if Path.step < len(path):
Path.node = path[step]
Path.anim = True
else:
Path.step = 0
self.coor[0] = Path.goal.coor[0]
self.coor[1] = Path.goal.coor[1]
self.loc = store.findtile(self.coor)
self.sp.x = store.ct(self.coor[0])
self.sp.y = store.ct(self.coor[1])
controls.turn()
def addplayer(self):
g_newplayer = Player(coor=[self.coor[0]+1,
self.coor[1]+1],img='pchar')
store.cid +=1
def cloak(self):
if not self.cols():
if self.img == 'pchar':
self.sp.image= store.image['pchar_1b']
self.img = 'pchar_1b'
else:
self.sp.image= store.image['pchar']
self.img = 'pchar'
controls.turn()
def hasitem_name(self,name):
for item in self.inv:
if item.name == name:
return True
return False
class Path(object):
cost = 0
tagged = []
ptagged = []
tags = []
wp = []
goal = None
pl = []
cpath = None
anim = False
step = 0
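    # Class-level scratch state shared by the movement-preview handlers:
    # 'tagged'/'ptagged' hold the reachable tiles, 'tags'/'wp' their marker
    # sprites, 'pl' the candidate paths to 'goal' with 'cpath' the cheapest,
    # and 'step'/'anim' drive the step-by-step move animation.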
@staticmethod
def clean_Path(tags=True):
Path.cost = 0
Path.tagged[:] = []
Path.pl[:] = []
if Path.wp:
for wp in Path.wp:
wp.delete
del Path.wp[:]
if tags == True:
for tag in Path.tags:
tag.delete()
del Path.tags[:]
@staticmethod
def on_key_press(symbol,modifiers):
if symbol == key.ESCAPE:
store.clevel.pop_handlers()
store.handleraltered = False
Path.clean_Path()
del Path.ptagged[:]
return True
@staticmethod
def on_mouse_motion(x,y,dx,dy):
if (x+store.ats > store.cursor.mposx + store.ts or
x+store.ats < store.cursor.mposx or
y+store.ats > store.cursor.mposy + store.ts or
y+store.ats < store.cursor.mposy ):
if level.ontiles([x,y],Path.ptagged):
store.cursor.xcoor = math.floor(x/store.ts)
store.cursor.ycoor = math.floor(y/store.ts)
store.cursor.cursor = pyglet.sprite.Sprite(
x =store.ct(store.cursor.xcoor),
y =store.ct(store.cursor.ycoor),
img = store.image['cursor'])
store.cursor.mposx = x
store.cursor.mposy = y
store.cursor.coor = [store.cursor.xcoor, store.cursor.ycoor]
store.cursor.onarea = 'm'
Path.clean_Path(tags=False)
Path.goal = store.findtile(store.cursor.coor)
store.cplayer.pathing()
return True
@staticmethod
def on_mouse_press(x,y,button,modifiers):
if button == mouse.LEFT:
if level.ontiles([x,y],Path.ptagged):
Path.clean_Path()
Path.goal = store.findtile(store.cursor.coor)
store.cplayer.pathing()
store.cplayer.pmove(Path.cpath.nodes,
Path.step)
del Path.ptagged[:]
Path.clean_Path()
store.clevel.pop_handlers()
store.handleraltered = False
return True
def __init__(self,cost,nodes):
self.cost = cost
self.nodes = nodes
| gpl-3.0 | 4,820,696,689,690,783,000 | 37.003521 | 79 | 0.470953 | false | 3.730729 | false | false | false |
REGOVAR/Regovar | regovar/api_rest/handlers/event_handler.py | 1 | 3675 |
#!env/python3
# coding: utf-8
try:
import ipdb
except ImportError:
pass
import os
import json
import aiohttp
import aiohttp_jinja2
import datetime
import time
from aiohttp import web
from urllib.parse import parse_qsl
from config import *
from core.framework.common import *
from core.model import *
from api_rest.rest import *
class EventHandler:
@user_role('Authenticated')
def list(self, request):
"""
Get list of last 100 events
"""
return rest_success(core.events.list())
@user_role('Authenticated')
async def search(self, request):
"""
Get events list corresponding to provided filters
"""
data = await request.json()
try:
data = json.loads(data) if isinstance(data, str) else data
return rest_success(core.events.list(**data))
except Exception as ex:
return rest_error("Error occured when retriving list of requested events. {}".format(ex), ex=ex)
@user_role('Authenticated')
def get(self, request):
"""
Get details about the event
"""
event_id = request.match_info.get('event_id', -1)
event = core.events.get(event_id)
if not event:
return rest_error("Unable to find the event (id={})".format(event_id))
return rest_success(event)
@user_role('Authenticated')
async def new(self, request):
"""
Create new event with provided data
"""
data = await request.json()
data = json.loads(data) if isinstance(data, str) else data
message = check_string(data.pop("message")) if "message" in data else None
details = check_string(data.pop("details")) if "detals" in data else None
author_id = 1 # TODO: retrieve author_id from session
date = check_date(data.pop("date")) if "date" in data else datetime.datetime.now()
# Create the event
try:
event = core.events.new(author_id, date, message, details, data)
except Exception as ex:
return rest_error("Error occured when creating the new event. {}".format(ex), ex=ex)
if event is None:
return rest_error("Unable to create a new event with provided information.")
return rest_success(event)
@user_role('Authenticated')
async def edit(self, request):
"""
Edit event data
"""
data = await request.json()
data = json.loads(data) if isinstance(data, str) else data
message = check_string(data.pop("message")) if "message" in data else None
author_id = 1 # TODO: retrieve author_id from session
date = check_date(data.pop("date")) if "date" in data else datetime.datetime.now()
# Edit the event
try:
event = core.events.edit(author_id, event_id, date, message, data)
except Exception as ex:
return rest_error("Error occured when creating the new event. {}".format(ex), ex=ex)
if event is None:
return rest_error("Unable to create a new event with provided information.")
return rest_success(event)
@user_role('Authenticated')
def delete(self, request):
"""
Delete the event
"""
event_id = request.match_info.get('event_id', -1)
event = core.events.delete(event_id)
if not event:
return rest_error("Unable to delete the event (id={})".format(event_id))
return rest_success(event)
| agpl-3.0 | 4,176,222,296,036,044,300 | 25.630435 | 108 | 0.587755 | false | 4.238754 | false | false | false |
mborho/baas | setup.py | 1 | 1563 | from setuptools import setup, find_packages
import sys, os
version = '0.2.2'
setup(name='baas',
version=version,
description="'Buddy as a Service' is a xmpp / wavelet robot using Yahoo YQL API, Google API and other services to do searches (web, news, reviews, wikipedia, wiktionary, imdb) and some other stuff (translations, weather forecast, etc) for you.",
long_description="""\
The XMPP bot also runs on the google appengine. BaaS is easy extensible through plugins. No API Keys required! \
See http://mborho.github.com/baas for more infos.
""",
classifiers=[
"Programming Language :: Python :: 2.5",
"Topic :: Communications :: Chat",
"Development Status :: 4 - Beta",
"License :: OSI Approved :: GNU General Public License (GPL)",
"Intended Audience :: Other Audience",
"Operating System :: POSIX :: Linux",
], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='xmpp',
author='Martin Borho',
author_email='[email protected]',
url='http://mborho.github.com/baas',
license='GNU General Public License (GPL)',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
#data_files=[('conf',"conf/baas.conf")],
include_package_data=True,
zip_safe=False,
install_requires=[
'twisted',
'feedparser',
'chardet',
'simplejson'
],
entry_points="""
[console_scripts]
baas_bot = baas.scripts.bot:main
""",
)
| gpl-3.0 | -9,197,217,250,324,064,000 | 37.121951 | 251 | 0.621881 | false | 3.812195 | false | false | false |
candsvincent/edgesense | python/edgesense/content/metrics.py | 3 | 6391 | import networkx as nx
import community as co
from edgesense.network.utils import extract_dpsg
from datetime import datetime
def extract_content_metrics(nodes_map, posts_map, comments_map, ts, timestep, timestep_window):
ts_metrics = {
'ts': ts,
'full:users_count':0., # Number of Posts total
'user:users_count':0., # Number of Posts total
'team:users_count':0., # Number of Posts total
'full:posts_count':0., # Number of Posts total
'user:posts_count':0., # - Number of Posts by contributors
'team:posts_count':0., # - Number of Posts by team
'user:posts_share':0., # - Share of User Generated Posts
'user:team_posts_share':0., # - Share of Team/User Posts
'full:ts_posts_count': 0., # - Number of Posts in period
'user:ts_posts_count':0., # - Number of Posts by contributors in period
'team:ts_posts_count':0., # - Number of Posts by team in period
'user:ts_posts_share': 0., # - Share of User Generated Posts in period
'user:ts_team_posts_share': 0., # - Share of Team/User Posts in period
'full:comments_count':0., # - Number of Comments total
'user:comments_count': 0., # - Number of Comments by contributors
'team:comments_count': 0., # - Number of Comments by team
'user:comments_share': 0., # - Share of Team/User Generated Comments
'user:team_comments_share': 0., # - Share of User Generated Comments
'full:ts_comments_count':0., # - Number of Comments total in period
'user:ts_comments_count':0., # - Number of Comments by contributors in period
'team:ts_comments_count':0., # - Number of Comments by contributors in period
'user:ts_comments_share': 0., # - Share of User Generated Comments in period
'user:ts_team_comments_share': 0., # - Share of Team/User Generated Comments in period
'user:active_count': 0.,
'user:noteam_active_count': 0.,
'user:active_share': 0.,
'user:conversations': 0.,
'user:noteam_conversations': 0.,
'user:conversations_share': 0.
}
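    # Key naming convention, as used in the loops below: 'full:' aggregates all
    # participants, 'team:' only team members, 'user:' only non-team contributors,
    # and keys containing 'ts_' are restricted to the current window
    # [ts - timestep*timestep_window, ts].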
# Users count metrics
for u in nodes_map.values():
if int(u['created_ts'])<=ts:
ts_metrics['full:users_count'] += 1
if u['team']:
ts_metrics['team:users_count'] += 1
else:
ts_metrics['user:users_count'] += 1
# Posts Count metrics
for p in posts_map.values():
if p['created_ts']<=ts:
ts_metrics['full:posts_count'] += 1
if p['team']:
ts_metrics['team:posts_count'] += 1
else:
ts_metrics['user:posts_count'] += 1
if p['created_ts']<=ts and p['created_ts']>=ts-timestep*timestep_window:
ts_metrics['full:ts_posts_count'] += 1
if p['team']:
ts_metrics['team:ts_posts_count'] += 1
else:
ts_metrics['user:ts_posts_count'] += 1
if ts_metrics['full:posts_count'] > 0:
ts_metrics['user:posts_share'] = float(ts_metrics['user:posts_count'])/float(ts_metrics['full:posts_count'])
if ts_metrics['user:posts_count'] > 0:
ts_metrics['user:team_posts_share'] = float(ts_metrics['team:posts_count'])/float(ts_metrics['user:posts_count'])
if ts_metrics['full:ts_posts_count'] > 0:
ts_metrics['user:ts_posts_share'] = float(ts_metrics['user:ts_posts_count'])/float(ts_metrics['full:ts_posts_count'])
if ts_metrics['user:ts_posts_count'] > 0:
ts_metrics['user:ts_team_posts_share'] = float(ts_metrics['team:ts_posts_count'])/float(ts_metrics['user:ts_posts_count'])
# Comments Count metrics
for c in comments_map.values():
if c['created_ts']<=ts:
ts_metrics['full:comments_count'] += 1
if c['team']:
ts_metrics['team:comments_count'] += 1
else:
ts_metrics['user:comments_count'] += 1
if c['created_ts']<=ts and c['created_ts']>=ts-timestep*timestep_window:
ts_metrics['full:ts_comments_count'] += 1
if c['team']:
ts_metrics['team:ts_comments_count'] += 1
else:
ts_metrics['user:ts_comments_count'] += 1
if ts_metrics['full:comments_count'] > 0:
ts_metrics['user:comments_share'] = float(ts_metrics['user:comments_count'])/float(ts_metrics['full:comments_count'])
if ts_metrics['user:comments_count'] > 0:
ts_metrics['user:team_comments_share'] = float(ts_metrics['team:comments_count'])/float(ts_metrics['user:comments_count'])
if ts_metrics['full:ts_comments_count'] > 0:
ts_metrics['user:ts_comments_share'] = float(ts_metrics['user:ts_comments_count'])/float(ts_metrics['full:ts_comments_count'])
if ts_metrics['user:ts_comments_count'] > 0:
ts_metrics['user:ts_team_comments_share'] = float(ts_metrics['team:ts_comments_count'])/float(ts_metrics['user:ts_comments_count'])
# - User counts
actives = set()
noteam_actives = set()
conversations = set()
noteam_conversations = set()
for c in comments_map.values():
if c['created_ts']<=ts and nodes_map.has_key(c['author_id']) and nodes_map.has_key(c['recipient_id']):
a = nodes_map[c['author_id']]
r = nodes_map[c['recipient_id']]
cnv = '-'.join(sorted([str(c['author_id']), str(c['recipient_id'])]))
if not (a['team'] and a['team_ts'] <=ts):
actives.add(c['author_id'])
conversations.add(cnv)
if not (r['team'] and r['team_ts'] <=ts):
noteam_actives.add(c['recipient_id'])
noteam_conversations.add(cnv)
ts_metrics['user:active_count'] = len(actives)
ts_metrics['user:noteam_active_count'] = len(noteam_actives)
if ts_metrics['user:active_count'] > 0:
ts_metrics['user:active_share'] = float(ts_metrics['user:noteam_active_count'])/float(ts_metrics['user:active_count'])
ts_metrics['user:conversations'] = len(conversations)
ts_metrics['user:noteam_conversations'] = len(noteam_conversations)
if ts_metrics['user:conversations'] > 0:
ts_metrics['user:conversations_share'] = float(ts_metrics['user:noteam_conversations'])/float(ts_metrics['user:conversations'])
return ts_metrics
| mit | -6,375,036,795,782,342,000 | 52.258333 | 139 | 0.595056 | false | 3.365456 | false | false | false |
lukovnikov/teafacto | teafacto/blocks/seq/memnn.py | 1 | 15185 | from teafacto.core.base import Block, Var, Val, param, tensorops as T
from IPython import embed
# TODO: INPUT MASK !!!!!!!! and attention etc
# TODO: what about memory mask?
# TODO: MEMORY POSITION EMBEDDINGS/ENCODINGS
# SYMBOLIC OUTPUT MEMORY ENABLED SEQ2SEQ
# - can place attention over all of temporary created output sequence
# - can write to any time step of output (write/erase interface)
# - can do multiple attention steps without actual output (change scalars)
# -> loss is placed over the symbolic output memory
class BulkNN(Block):
def __init__(self, inpencoder=None, memsampler=None,
memembmat=None, memencoder=None, memlen=None,
mem_pos_repr=None, inp_pos_repr=None,
inp_attention=None, mem_attention=None,
inp_addr_extractor=None, mem_addr_extractor=None,
write_addr_extractor=None, write_addr_generator=None,
write_value_generator=None, write_value_extractor=None,
mem_erase_generator=None, mem_change_generator=None,
nsteps=100, core=None, **kw):
super(BulkNN, self).__init__(**kw)
if mem_pos_repr is not None:
self._memposvecs = mem_pos_repr(memlen)
else:
self._memposvecs = None
self._inp_pos_repr = inp_pos_repr
self._nsteps = nsteps
self._memlen = memlen
self._inpencoder = inpencoder
self._inp_att = inp_attention
self._memencoder = memencoder
self._mem_att = mem_attention
self._memembmat = memembmat
self._memsampler = memsampler
self._core = core
# extractors from top core state:
self._inp_addr_extractor = inp_addr_extractor
self._mem_addr_extractor = mem_addr_extractor
self._write_addr_extractor = write_addr_extractor
self._write_addr_generator = write_addr_generator
self._write_value_extractor = write_value_extractor
self._write_value_generator = write_value_generator
self._mem_change_generator = mem_change_generator
self._mem_erase_generator = mem_erase_generator
def apply(self, inpseq): # int-(batsize, seqlen)
inpenco = self._inpencoder(inpseq) # may carry mask, based on encoder's embedder
batsize = inpenco.shape[0]
outvocsize = self._memembmat.shape[0]
mem_0 = T.concatenate([
T.ones((batsize, self._memlen, 1), dtype="float32") * 0.95,
T.ones((batsize, self._memlen, outvocsize-1), dtype="float32") * 0.05,
], axis=2) # (batsize, outseqlen, outvocsize)
mem_0 = T.softmax(mem_0)
core_init_states = self._core.get_init_info(batsize)
core_state_spec = self._core.get_statespec(flat=False)
assert(len(core_state_spec) == len(core_init_states))
h_0 = None # take last output of core states as initial state
c = 0
for ss in core_state_spec:
h_0_isout = False
for sss in ss:
if sss[0] == "output":
h_0_isout = True
h_0 = core_init_states[c]
if not h_0_isout:
h_0 = core_init_states[c]
c += 1
if self._inp_pos_repr is not None:
inpposvecs = self._inp_pos_repr(inpseq.shape[1])
inpposvecs = T.repeat(inpposvecs.dimadd(0), batsize, axis=0)
inpenc = T.concatenate([inpenco, inpposvecs], axis=2)
inpenc.mask = inpenco.mask
else:
inpenc = inpenco
outputs = T.scan(fn=self.rec,
outputs_info=[None, mem_0, h_0] + core_init_states,
n_steps=self._nsteps,
non_sequences=inpenc)
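        # Assuming theano-style scan semantics (T.scan here wraps theano.scan),
        # 'outputs' is aligned with the values returned by self.rec, so
        # outputs[0] stacks the soft output memory produced at every reasoning
        # step; its final element is returned below as the prediction.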
ret = outputs[0]
ret.push_extra_outs({"mem_0": mem_0, "h_0": h_0}) # DEBUGGING
return ret[-1], ret
def rec(self, mem_tm1, h_tm1, *args):
inpenc = args[-1]
states_tm1 = args[:-1]
batsize = inpenc.shape[0]
# mem_tm1: f(batsize, outseqlen, outvocsize)
# h_tm1: f(batsize, thinkerdim)
# inpenc: f(batsize, inplen, inpencdim)
# summarize memory
mem_tm1_sam = self._memsample(mem_tm1) # sample from mem
mem_tm1_embsum = T.dot(mem_tm1_sam, self._memembmat) # f(batsize, outseqlen, memembdim)
mem_tm1_sum = self._memencode(mem_tm1_embsum) # f(batsize, outseqlen, memsumdim)
if self._memposvecs is not None:
memposvecs = T.repeat(self._memposvecs.dimadd(0), batsize, axis=0)
mem_tm1_sum = T.concatenate([mem_tm1_sum, memposvecs], axis=2)
# input and memory read attentions
inp_ctx_t = self._get_inp_ctx(h_tm1, inpenc) # (batsize, inpencdim)
mem_ctx_t = self._get_mem_ctx(h_tm1, mem_tm1_sum) # (batsize, memsumdim)
# update thinker state
i_t = T.concatenate([inp_ctx_t, mem_ctx_t], axis=1)
rnuret = self._core.rec(i_t, *states_tm1)
h_t = rnuret[0]
states_t = rnuret[1:]
# memory change interface
mem_t_addr = self._get_addr_weights(h_t, mem_tm1_sum) # float-(batsize, outseqlen)
mem_t_write = self._get_write_weights(h_t) # (batsize, memvocsize)
e_t = self._get_erase(h_t) # (0..1)-(batsize,)
c_t = self._get_change(h_t) # (0..1)-(batsize,)
# memory change
can_mem_t = mem_tm1 - T.batched_dot(e_t, mem_tm1 * mem_t_addr.dimshuffle(0, 1, 'x')) # erase where we addressed
can_mem_t = can_mem_t + T.batched_tensordot(mem_t_addr, mem_t_write, axes=0) # write new value
mem_t = T.batched_dot(1 - c_t, mem_tm1) + T.batched_dot(c_t, can_mem_t) # interpolate between old and new value
mem_t = T.softmax(mem_t) # normalize to probabilities
return (mem_t, mem_t, h_t) + tuple(states_t)
def _memsample(self, mem):
if self._memsampler is None:
return mem
else:
return self._memsampler(mem)
def _memencode(self, mem):
if self._memencoder is None:
return mem
else:
return self._memencoder(mem)
def _get_inp_ctx(self, h, inpenc):
crit = self._inp_addr_extractor(h)
return self._inp_att(crit, inpenc)
def _get_mem_ctx(self, h, mem):
crit = self._mem_addr_extractor(h)
return self._mem_att(crit, mem)
def _get_addr_weights(self, h, mem):
crit = self._write_addr_extractor(h)
return self._write_addr_generator(crit, mem)
def _get_write_weights(self, h):
crit = self._write_value_extractor(h)
return self._write_value_generator(crit) # generate categorical write distr
def _get_erase(self, h):
return self._mem_erase_generator(h)
def _get_change(self, h):
return self._mem_change_generator(h)
from teafacto.blocks.seq.rnn import SeqEncoder, MakeRNU, RecStack, RNNWithoutInput
from teafacto.blocks.seq.rnu import GRU
from teafacto.blocks.match import CosineDistance
from teafacto.blocks.seq.attention import Attention, AttGen
from teafacto.blocks.basic import MatDot, Linear, Forward, SMO
from teafacto.blocks.activations import GumbelSoftmax
from teafacto.core.base import asblock
from teafacto.util import issequence
class SimpleBulkNN(BulkNN):
""" Parameterized simple interface for BulkNN that builds defaults for subcomponents """
def __init__(self, inpvocsize=None, inpembdim=None, inpemb=None,
inpencinnerdim=None, bidir=False, maskid=None,
dropout=False, rnu=GRU,
inpencoder=None,
memvocsize=None, memembdim=None, memembmat=None,
memencinnerdim=None,
memencoder=None,
inp_att_dist=CosineDistance(), mem_att_dist=CosineDistance(),
inp_attention=None, mem_attention=None,
coredims=None, corernu=GRU,
core=None, explicit_interface=False, scalaraggdim=None,
write_value_dim=None, nsteps=100,
posvecdim=None, mem_pos_repr=None, inp_pos_repr=None,
inp_addr_extractor=None, mem_addr_extractor=None,
write_addr_extractor=None, write_addr_generator=None,
write_addr_dist=CosineDistance(),
write_value_generator=None, write_value_extractor=None,
mem_erase_generator=None, mem_change_generator=None,
memsampler=None, memsamplemethod=None, memsampletemp=0.3,
**kw):
# INPUT ENCODING
if inpencoder is None:
inpencoder = SeqEncoder.RNN(indim=inpvocsize, inpembdim=inpembdim,
inpemb=inpemb, innerdim=inpencinnerdim, bidir=bidir,
maskid=maskid, dropout_in=dropout, dropout_h=dropout,
rnu=rnu).all_outputs()
lastinpdim = inpencinnerdim if not issequence(inpencinnerdim) else inpencinnerdim[-1]
else:
lastinpdim = inpencoder.block.layers[-1].innerdim
# MEMORY ENCODING
if memembmat is None:
memembmat = param((memvocsize, memembdim), name="memembmat").glorotuniform()
if memencoder is None:
memencoder = SeqEncoder.RNN(inpemb=False, innerdim=memencinnerdim,
bidir=bidir, dropout_in=dropout, dropout_h=dropout,
rnu=rnu, inpembdim=memembdim).all_outputs()
lastmemdim = memencinnerdim if not issequence(memencinnerdim) else memencinnerdim[-1]
else:
lastmemdim = memencoder.block.layers[-1].innerdim
# POSITION VECTORS
if posvecdim is not None and inp_pos_repr is None:
inp_pos_repr = RNNWithoutInput(posvecdim, dropout=dropout)
if posvecdim is not None and mem_pos_repr is None:
mem_pos_repr = RNNWithoutInput(posvecdim, dropout=dropout)
xtra_dim = posvecdim if posvecdim is not None else 0
# CORE RNN - THE THINKER
if core is None:
corelayers, _ = MakeRNU.fromdims([lastinpdim+lastmemdim+xtra_dim*2] + coredims,
rnu=corernu, dropout_in=dropout, dropout_h=dropout,
param_init_states=True)
core = RecStack(*corelayers)
lastcoredim = core.get_statespec()[-1][0][1][0]
# ATTENTIONS
if mem_attention is None:
mem_attention = Attention(mem_att_dist)
if inp_attention is None:
inp_attention = Attention(inp_att_dist)
if write_addr_generator is None:
write_addr_generator = AttGen(write_addr_dist)
# WRITE VALUE
if write_value_generator is None:
write_value_generator = WriteValGenerator(write_value_dim, memvocsize, dropout=dropout)
# MEMORY SAMPLER
if memsampler is not None:
assert(memsamplemethod is None)
if memsamplemethod is not None:
assert(memsampler is None)
memsampler = GumbelSoftmax(temperature=memsampletemp)
################ STATE INTERFACES #################
if not explicit_interface:
if inp_addr_extractor is None:
inp_addr_extractor = Forward(lastcoredim, lastinpdim + xtra_dim, dropout=dropout)
if mem_addr_extractor is None:
                mem_addr_extractor = Forward(lastcoredim, lastmemdim + xtra_dim, dropout=dropout)
# WRITE INTERFACE
if write_addr_extractor is None:
write_addr_extractor = Forward(lastcoredim, lastmemdim + xtra_dim, dropout=dropout)
if write_value_extractor is None:
write_value_extractor = Forward(lastcoredim, write_value_dim, dropout=dropout)
# MEM UPDATE INTERFACE
if mem_erase_generator is None:
mem_erase_generator = StateToScalar(lastcoredim, scalaraggdim)
if mem_change_generator is None:
mem_change_generator = StateToScalar(lastcoredim, scalaraggdim)
else:
inp_addr_extractor, mem_addr_extractor, write_addr_extractor, \
write_value_extractor, mem_erase_generator, mem_change_generator = \
make_vector_slicers(0, lastinpdim + xtra_dim, lastmemdim + xtra_dim,
lastmemdim + xtra_dim, write_value_dim, 1, 1)
super(SimpleBulkNN, self).__init__(inpencoder=inpencoder,
memembmat=memembmat, memencoder=memencoder,
inp_attention=inp_attention, mem_attention=mem_attention,
core=core, memsampler=memsampler, nsteps=nsteps,
inp_addr_extractor=inp_addr_extractor, mem_addr_extractor=mem_addr_extractor,
write_addr_extractor=write_addr_extractor, write_addr_generator=write_addr_generator,
mem_erase_generator=mem_erase_generator, mem_change_generator=mem_change_generator,
write_value_generator=write_value_generator, write_value_extractor=write_value_extractor,
inp_pos_repr=inp_pos_repr, mem_pos_repr=mem_pos_repr,
**kw)
class WriteValGenerator(Block):
def __init__(self, dim, vocsize, interdims=tuple(), dropout=False, **kw):
super(WriteValGenerator, self).__init__(**kw)
self.dims = (dim,) + interdims
self.vocsize = vocsize
self.layers = []
for i in range(len(self.dims)-1):
layer = Forward(self.dims[i], self.dims[i+1], dropout=dropout)
self.layers.append(layer)
self.smo = SMO(self.dims[-1], outdim=self.vocsize)
def apply(self, x):
for layer in self.layers:
x = layer(x)
ret = self.smo(x)
return ret
class StateToScalar(Block):
def __init__(self, dim, outdim, **kw):
super(StateToScalar, self).__init__(**kw)
self.block = Forward(dim, outdim)
self.agg = param((outdim,), name="scalartostate_agg").uniform()
def apply(self, x):
y = T.dot(x, self.block)
z = T.dot(y, self.agg) # (batsize,)
ret = T.nnet.sigmoid(z)
return ret
def make_vector_slicers(*sizes):
sizes = list(sizes)
boundaries = [sizes[0]]
del sizes[0]
while len(sizes) > 0:
boundaries.append(sizes[0]+boundaries[-1])
del sizes[0]
for i in range(len(boundaries) - 1):
a, b = boundaries[i], boundaries[i + 1]
yield Slicer(a, b)
class Slicer(Block):
def __init__(self, a, b, **kw):
super(Slicer, self).__init__(**kw)
self.a = a
self.b = b
def apply(self, x):
attrs = [slice(None, None, None)] * x.ndim
if self.b - self.a == 1:
attrs[-1] = self.a
else:
attrs[-1] = slice(self.a, self.b, None)
ret = x[attrs]
return ret
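# Illustrative note (the numbers below are made up): make_vector_slicers takes a
# start offset followed by chunk widths and yields one Slicer per chunk over the
# last axis, e.g.
# s1, s2, s3 = make_vector_slicers(0, 4, 2, 1) # slices [..., 0:4], [..., 4:6], [..., 6:7]
# which is how the explicit interface above carves the core output vector into
# its separate read/write/erase/change signals.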
if __name__ == "__main__":
from teafacto.blocks.seq.rnn import RNNWithoutInput
m = RNNWithoutInput(3, 2)
out = m(5)
print out.eval().shape
print out.eval()
| mit | -7,271,688,369,530,812,000 | 41.654494 | 135 | 0.591307 | false | 3.411593 | false | false | false |
grollins/foldkin | foldkin/test/test_fit_markov_collection_to_kf.py | 1 | 2733 | import nose.tools
from foldkin.scipy_optimizer import ScipyOptimizer
from foldkin.coop.coop_collection import CoopCollectionFactory
from foldkin.coop.coop_model_parameter_set import CoopModelParameterSet
from foldkin.fold_rate_judge import CoopCollectionJudge
from foldkin.fold_rate_predictor import FoldRateCollectionPredictor,\
FoldRatePredictor
from foldkin.fold_rate_target_data import FoldRateCollectionTargetData
from foldkin.file_archiver import CoopCollectionFileArchiver
@nose.tools.istest
class TestFitManyFoldRates(object):
def make_score_fcn(self, model_factory, parameter_set,
judge, data_predictor, target_data):
def f(current_parameter_array):
parameter_set.update_from_array(current_parameter_array)
current_model = model_factory.create_model(parameter_set)
score, prediction = judge.judge_prediction(current_model,
data_predictor,
target_data,
noisy=False)
# print score, parameter_set
return score
return f
@nose.tools.istest
def predicted_rate_similar_to_true_rate(self):
        '''This example fits the coop model to a collection of
        experimental folding rates spanning a range of values of
        the feature N.
        '''
target_data = FoldRateCollectionTargetData()
target_data.load_data('N')
feature_range = range(1,31)
id_list = range(len(feature_range))
model_factory = CoopCollectionFactory(id_list, 'N', feature_range)
initial_parameters = CoopModelParameterSet()
initial_parameters.set_parameter_bounds('log_k0', 5.5, 5.7)
judge = CoopCollectionJudge()
data_predictor = FoldRateCollectionPredictor(FoldRatePredictor)
score_fcn = self.make_score_fcn(model_factory, initial_parameters,
judge, data_predictor, target_data)
optimizer = ScipyOptimizer(maxfun=5)
results = optimizer.optimize_parameters(score_fcn,
initial_parameters)
new_params, score, num_iterations = results
optimized_model = model_factory.create_model(new_params)
score, prediction = judge.judge_prediction(optimized_model,
data_predictor,
target_data)
print new_params
print score
archiver = CoopCollectionFileArchiver()
archiver.save_results(target_data, prediction,
"test_many_markov_results.txt")
| bsd-2-clause | 8,924,907,142,528,452,000 | 48.690909 | 75 | 0.603 | false | 4.386838 | false | false | false |
collab-project/django-encode | encode/widgets.py | 1 | 1520 | # Copyright Collab 2014-2016
# See LICENSE for details.
"""
Widgets.
"""
from __future__ import unicode_literals
from itertools import chain
from django import forms
from django.utils.safestring import mark_safe
class MediaDisplayWidget(forms.SelectMultiple):
"""
Widget for displaying media in admin forms.
"""
class Media:
js = ("encode/js/media.js",)
def render(self, name, value, attrs=None, choices=()):
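        # Renders the underlying multi-select as usual, then collects the file
        # URLs of the currently selected MediaFile objects and appends a small
        # jQuery snippet wiring them into a collab.PreviewWidget for preview.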
paths = []
script = ''
if value is not None:
for option_value, option_label in chain(self.choices, choices):
if option_value in [int(x) for x in value]:
try:
from encode import models
path = models.MediaFile.objects.get(
title=option_label).file.url
paths.append(path)
except models.MediaFile.DoesNotExist:
pass
script = '''<script type="text/javascript">
$(document).ready(function() {
var elem = $('#id_%(name)s');
var widget = new collab.PreviewWidget(elem, %(paths)s);
});
</script>''' % {'name': name, 'paths': paths}
if attrs is None:
attrs = {}
output = super(MediaDisplayWidget, self).render(name, value, attrs,
choices)
return mark_safe(output + script)
| mit | -7,202,141,266,752,442,000 | 28.230769 | 75 | 0.511842 | false | 4.76489 | false | false | false |
tomevans/gps | gps/spgp_routines.py | 1 | 29754 | import sys, os, pdb, time
import numpy as np
import scipy.linalg
import matplotlib
import matplotlib.pyplot as plt
PERTURB = 1e-4 # 1e-3
def random_draw( gp_obj, xmesh=None, emesh=None, conditioned=True, perturb=PERTURB, ndraws=5, \
plot_draws=True, mesh_dim=0, lw=3 ):
"""
SUMMARY
Draws one or more random realisations from the gp and (optionally) plots them,
along with the mean function (black dashed line) and 1- and 2-sigma uncertainty
regions (shaded grey regions).
CALLING
draws = random_draw( gp_obj, xmesh=None, emesh=None, conditioned=True, perturb=PERTURB, \
ndraws=5, plot_draws=True, mesh_dim=0, lw=3 )
INPUTS
'xmesh' [KxD array] - input locations for the random draw points; if set to
None (default), a fine grid spanning the xtrain range will be used.
'emesh' [float] - white noise value for the random draw points; if set to
None (default) or zero, then this will be set to the value of the perturb
variable for numerical stability.
'conditioned' [bool] - if set to True (default), the GP will be trained on the
training data stored in the object; otherwise, it will be drawn from the
unconditioned prior.
'perturb' [float] - small perturbation to be added to the covariance diagonal for
numerical stability if the white noise errors are set to None/zero.
'ndraws' [integer] - the number of random draws to be made.
'plot_draws' [bool] - if set to True, the random draws will be plotted.
'mesh_dim' [integer] - for cases where D>1 (i.e. multidimensional input), a single
input dimension must be specified for the mesh to span; the other input
variables will be held fixed to the corresponding median values in the training
data set.
'lw' [integer] - thickness of plot lines.
OUTPUT
'draws' [list] - a list containing the separate random draws from the GP.
"""
xtrain = gp_obj.xtrain
dtrain = gp_obj.dtrain
etrain = gp_obj.etrain
n = np.shape( xtrain )[0]
d = np.shape( xtrain )[1]
    if xmesh is None:
nmesh = 1000
xmesh_i = np.r_[ xtrain[:,mesh_dim].min() : xtrain[:,mesh_dim].max() : 1j*nmesh ]
xmesh = np.zeros( [ nmesh, d ] )
for i in range( d ):
if i!=mesh_dim:
xmesh[:,i] = np.median( xtrain[:,i] )
else:
xmesh[:,i] = xmesh_i
else:
nmesh = np.shape( xmesh )[0]
if conditioned==True:
print( '\nDrawing from GP posterior (i.e. after being trained on data set)' )
title_str = 'posterior (i.e. trained)'
else:
print( '\nDrawing from GP prior (i.e. not trained on any data set)' )
title_str = 'prior (i.e. untrained)'
mu, cov = meancov( gp_obj, xnew=xmesh, enew=emesh, conditioned=conditioned, perturb=perturb )
sig = np.sqrt( np.diag( cov ).flatten() )
mu = mu.flatten()
sig = sig.flatten()
xmesh_i = xmesh[:,mesh_dim].flatten()
if plot_draws==True:
fig = plt.figure()
ax = fig.add_axes( [ 0.05, 0.05, 0.9, 0.9 ] )
zorder0 = 0
ax.fill_between( xmesh_i, mu-2*sig, mu+2*sig, color=[ 0.8, 0.8, 0.8 ], zorder=zorder0 )
zorder0 = 1
ax.fill_between( xmesh_i, mu-1*sig, mu+1*sig, color=[ 0.6, 0.6, 0.6 ], zorder=zorder0 )
zorder0 = 2
ax.plot( xmesh_i, mu, ls='--', c='g', lw=2, zorder=zorder0 )
ax.set_title('%i random GP draws - %s' % ( ndraws, title_str ) )
# Draw random samples from the GP:
colormap = matplotlib.cm.cool
colormap = plt.cm.ScalarMappable( cmap=colormap )
colormap.set_clim( vmin=0, vmax=1 )
line_colors = np.r_[ 0.05 : 0.95 : 1j*ndraws ]
ax.set_xlim( [ xmesh_i.min(), xmesh_i.max() ] )
draws = []
for i in range( ndraws ):
print( ' drawing %i of %i on a mesh of %i points' % ( i+1, ndraws, nmesh ) )
# The following step can be a computation bottleneck if there are too
# many points on the mesh:
draw = np.random.multivariate_normal( mu, cov )
draws += [ draw ]
if plot_draws==True:
color = colormap.to_rgba( line_colors[i] )
zorder0 = 3
ax.plot( xmesh_i, draw, ls='-', c=color, lw=lw, zorder=1 )
if ( plot_draws==True )*( conditioned==True ):
dtrain = dtrain.flatten()
zorder0 = 4
xtrain_i = xtrain[:,mesh_dim].flatten()
if n<1000:
marktype = 'o'
elif n<2000:
marktype = '.'
else:
marktype = ','
if ( np.all( etrain==0 ) )+( np.all( etrain==None ) )+( n>=2000 ):
ax.plot( xtrain_i, dtrain, marktype, mec='k', mfc='k', zorder=zorder0 )
else:
errs = etrain + np.zeros( n )
ax.errorbar( xtrain_i, dtrain, yerr=errs, fmt=marktype, mec='k', mfc='k', ecolor='k', \
capsize=0, elinewidth=2, barsabove=True, zorder=zorder0 )
return draws
def meancov( gp_obj, xnew=None, enew=None, conditioned=True, perturb=PERTURB ):
"""
SUMMARY
Returns the mean and full covariance of a gp at the locations of xnew, with
random errors enew. If conditioned==True, the gp will be conditioned on the
training data stored in the gp_obj. If etrain==None or etrain==0 (stored within
gp_obj), a perturbation term of magnitude perturb will be added to the diagonal
entries of the training covariance matrix before it is inverted for numerical
stability.
CALLING:
mu, cov = meancov( gp_obj, xnew=None, enew=None, conditioned=True, perturb=PERTURB )
INPUTS
'gp_obj' [gp class object], containing:
'mfunc', 'cfunc' [functions] - mean and covariance functions.
'mpars', 'cpars' [dictionaries] - mean and covariance function parameters.
'xtrain' [NxD array] - training data input locations.
'dtrain' [Nx1 array] - training data values.
'etrain' [float] - white noise value for the training data points.
'xnew' [PxD array] - input locations for the mean and covariance to be evaluated at;
if set to None (default), the values for xtrain will be used.
'enew' [float] - white noise value to be incorporated into the covariance diagonal;
if set to None (default) or zero, it will be set to the value of the perturb
variable for numerical stability.
'conditioned' [bool] - if set to True (default), the gp will be trained on the
training data stored in the object.
'perturb' [float] - small perturbation to be added to the covariance diagonal for
numerical stability if the white noise errors are set to None/zero.
OUTPUT
'mu' [Px1 array] - gp mean function values.
'cov' [PxP array] - gp covariance values.
"""
# Unpack the variables stored in the GP object:
mfunc = gp_obj.mfunc
mpars = gp_obj.mpars
cfunc = gp_obj.cfunc
cpars = gp_obj.cpars
xtrain = gp_obj.xtrain
xinduc = gp_obj.xinduc
dtrain = gp_obj.dtrain
etrain = gp_obj.etrain
n = np.shape( xtrain )[0]
m = np.shape( xinduc )[0]
    if xnew is None:
xnew = xtrain
conditioned = False
p = np.shape( xnew )[0]
# Ensure that etrain is formatted as an array
# and any zero entries replaced with jitter:
if np.ndim( etrain )==0:
if ( etrain==None )+( etrain==0 ):
etrain = perturb*np.ones( n )
else:
ixs = ( etrain==None )
etrain[ixs] = perturb
ixs = ( etrain==0 )
etrain[ixs] = perturb
# Do the same for enew:
if np.ndim( enew )==0:
if ( enew==None ):
enew = np.zeros( p )
else:
ixs = ( enew==None )
enew[ixs] = perturb
ixs = ( enew==0 )
enew[ixs] = perturb
if mfunc==None:
mfunc = zero_mfunc
if mpars==None:
mpars = {}
if cpars==None:
cpars = {}
# Calculate the unconditioned mean and covariance values
# at the new input locations:
mnew = mfunc( xnew, **mpars )
Km = cfunc( xinduc, xinduc, **cpars ) + ( perturb**2. ) * np.eye( m )
Kmp = cfunc( xinduc, xnew, **cpars )
Kmn = cfunc( xinduc, xtrain, **cpars )
knn = cfunc( xtrain, None, **cpars ).flatten()
kpp = cfunc( xnew, None, **cpars ).flatten()
Lm = np.linalg.cholesky( Km )
# The following lines calculate the pxp low-rank projection matrix:
# Qp = (Kmp^T)*(Km^-1)*(Kmp)
Vmp = scipy.linalg.lu_solve( scipy.linalg.lu_factor( Lm ), Kmp )
Qp = np.array( np.matrix( Vmp ).T * Vmp )
qpp = np.diag( Qp )
Deltap = np.diag( kpp - qpp )
sig2Ip = ( enew**2. ) * np.eye( p )
# If we are using the unconditioned GP, we are finished:
if conditioned==False:
mu = np.array( mnew.flatten() )
cov = np.array( Qp + Deltap + sig2Ip )
# If we want to use the conditioned GP, we still have work to do:
else:
mtrain = mfunc( xtrain, **mpars )
resids = dtrain.flatten() - mtrain.flatten()
# The following lines calculate the diagonal of the nxn Gamma matrix,
# as given by Eq C.1. To do this, we make use of the Cholesky identity
# given by Eq B.8. Note that:
# sig2*Gamma = Deltan + sig2*I
# where Deltan is the NxN diagonal matrix used in Eq 2.12.
Lm = np.linalg.cholesky( Km )
Vmn = scipy.linalg.lu_solve( scipy.linalg.lu_factor( Lm ), Kmn )
gnn = 1. + ( knn.flatten() - np.sum( Vmn**2., axis=0 ).flatten() ) / ( etrain**2. )
# To make things more concise, we will divide the rows of the Vmn and
# resids arrays by the square root of the corresponding entries on the
# Gamma matrix diagonal.
# Vmn --> Vmn * (Gamma^-0.5)
# resids --> (Gamma^-0.5) * resids
Vmn = np.matrix( Vmn / np.tile( np.sqrt( gnn ).flatten(), [ m, 1 ] ) )
resids = resids.flatten() / np.sqrt( gnn.flatten() )
resids = np.matrix( np.reshape( resids, [ n, 1 ] ) )
Vmn_resids = np.array( Vmn * resids )
# Now we need to calculate the term involving B^-1 in Eq 2.12, which
# we do using two Cholesky decompositions:
W = np.array( np.linalg.cholesky( ( enew**2. ) * np.eye( m ) + np.array( Vmn*Vmn.T ) ) )
Y = scipy.linalg.lu_solve( scipy.linalg.lu_factor( W ), Vmn_resids )
H = np.linalg.lstsq( Lm, Kmp )[0]
J = scipy.linalg.lu_solve( scipy.linalg.lu_factor( W ), H )
# Finally, we use Eqs 2.9 and 2.12 to calculate the predictive mean and
# covariance matrix of the GP:
mu = np.array( mnew.flatten() + np.array( np.matrix( J ).T * np.matrix( Y ) ).flatten() )
KmpTBinvKmp = ( enew**2. ) * np.array( np.matrix( J ).T * np.matrix( J ) )
cov = np.array( Deltap + sig2Ip + KmpTBinvKmp )
mu = np.reshape( mu, [ p, 1 ] )
cov = np.reshape( cov, [ p, p ] )
return mu, cov
def predictive( gp_obj, xnew=None, enew=None, conditioned=True, perturb=PERTURB ):
"""
SUMMARY
Returns the predictive mean and standard deviation of a gp. If conditioned==True,
the gp will be conditioned on the training data stored in the gp_obj. If
etrain==None or etrain==0 (stored within gp_obj), a perturbation term of magnitude
perturb will be added to the diagonal entries of the training covariance matrix
before it is inverted for numerical stability. This routine is very similar to
meancov, except that it only calculates the diagonal entries of the conditioned
gp's covariance matrix to save time.
CALLING:
mu, sig = predictive( gp_obj, xnew=None, enew=None, conditioned=True, perturb=PERTURB )
INPUTS:
'gp_obj' [gp class object], containing:
'mfunc', 'cfunc' [functions] - mean and covariance functions.
'mpars', 'cpars' [dictionaries] - mean and covariance function parameters.
'xtrain' [NxD array] - training data input locations.
'dtrain' [Nx1 array] - training data values.
'etrain' [float] - white noise value for the training data points.
'xnew' [PxD array] - input locations for the mean and covariance to be evaluated at;
if set to None (default), the values for xtrain will be used.
'enew' [float] - white noise value to be incorporated into the covariance diagonal;
if set to None (default) or zero, it will be set to the value of the perturb
variable for numerical stability.
'conditioned' [bool] - if set to True (default), the gp will be trained on the
training data stored in the object.
'perturb' [float] - small perturbation to be added to the covariance diagonal for
numerical stability if the white noise errors are set to None/zero.
OUTPUT:
'mu' [Px1 array] - gp mean function values.
'sig' [Px1 array] - 1-sigma marginalised uncertainties, i.e. the square roots of
the entries along the diagonal of the full covariance matrix.
"""
# Unpack the variables stored in the GP object:
mfunc = gp_obj.mfunc
mpars = gp_obj.mpars
cfunc = gp_obj.cfunc
cpars = gp_obj.cpars
xtrain = gp_obj.xtrain
xinduc = gp_obj.xinduc
dtrain = gp_obj.dtrain
etrain = gp_obj.etrain
n = np.shape( xtrain )[0]
m = np.shape( xinduc )[0]
    if mfunc==None:
        mfunc = zero_mfunc
    if mpars==None:
        mpars = {}
    if cpars==None:
        cpars = {}
    if xnew is None:
        xnew = xtrain
        conditioned = False
    p = np.shape( xnew )[0]
# Ensure that etrain is formatted as an array
# and any zero entries replaced with jitter:
if np.ndim( etrain )==0:
if ( etrain==None )+( etrain==0 ):
etrain = perturb*np.ones( n )
else:
ixs = ( etrain==None )
etrain[ixs] = perturb
ixs = ( etrain==0 )
etrain[ixs] = perturb
# Do the same for enew:
if np.ndim( enew )==0:
if ( enew==None ):
enew = np.zeros( p )
else:
ixs = ( enew==None )
enew[ixs] = perturb
ixs = ( enew==0 )
enew[ixs] = perturb
# Calculate the unconditioned mean and covariance values
# at the new input locations:
mnew = mfunc( xnew, **mpars )
kpp = cfunc( xnew, None, **cpars ).flatten()
# If we are using the unconditioned GP, we are finished:
if conditioned==False:
mu = mnew.flatten()
sig = np.sqrt( kpp.flatten() + ( enew**2. ) )
# If we want to use the conditioned GP, we still have work to do:
else:
mtrain = mfunc( xtrain, **mpars )
Km = cfunc( xinduc, xinduc, **cpars ) + ( perturb**2. ) * np.eye( m )
Kmn = cfunc( xinduc, xtrain, **cpars )
Kmp = cfunc( xinduc, xnew, **cpars )
knn = cfunc( xtrain, None, **cpars ).flatten()
resids = dtrain.flatten() - mtrain.flatten()
# The following lines calculate the diagonal of the NxN Gamma matrix,
# as given by Eq C.1. To do this, we make use of the Cholesky identity
# given by Eq B.8. Note that:
# sig2*Gamma = Delta + sig2*I
# where Delta is the diagonal matrix used in Eq 2.12.
Lm = np.linalg.cholesky( Km )
Vmn = scipy.linalg.lu_solve( scipy.linalg.lu_factor( Lm ), Kmn )
# Diagonal of QN:
Qnn_diag = np.sum( Vmn**2., axis=0 ).flatten()
# Diagonal of the D=sig2*Gamma matrix:
D_diag = knn - Qnn_diag + etrain**2.
# To make things more concise, we will divide the rows of the Vmn and
# resids arrays by the square root of the corresponding entries on the
# Gamma matrix diagonal.
# Vmn --> Vmn * (Gamma^-0.5)
# resids --> (Gamma^-0.5) * resids
Vmn = np.matrix( Vmn / np.tile( np.sqrt( D_diag ).flatten(), [ m, 1 ] ) )
resids = resids.flatten() / np.sqrt( D_diag.flatten() )
resids = np.matrix( np.reshape( resids, [ n, 1 ] ) )
Vmn_resids = np.array( Vmn * resids )
# Now we need to calculate the terms involving B^-1 in Eq 2.12, which
# we do using two Cholesky decompositions:
W = np.array( np.linalg.cholesky( np.eye( m ) + np.array( Vmn*Vmn.T ) ) )
Y = scipy.linalg.lu_solve( scipy.linalg.lu_factor( W ), Vmn_resids )
H = np.linalg.lstsq( Lm, Kmp )[0]
J = scipy.linalg.lu_solve( scipy.linalg.lu_factor( W ), H )
# Finally, we use Eq 2.12 to calculate the predictive mean and standard
# deviation of the GP:
mu = mnew.flatten() + np.array( np.matrix( J ).T * np.matrix( Y ) ).flatten()
sig = np.sqrt( kpp.flatten() + ( enew**2. ) \
- np.sum( H**2., axis=0 ).flatten() \
+ np.sum( J**2., axis=0 ).flatten() )
# Note that:
# np.sum( H**2., axis=0 ) = diagonal of (H^T)*H
# np.sum( J**2., axis=0 ) = diagonal of (J^T)*J
mu = np.reshape( mu, [ p, 1 ] )
sig = np.reshape( sig, [ p, 1 ] )
return mu, sig
def logp_builtin( gp_obj, perturb=PERTURB ):
"""
Uses the contents of the gp object to calculate its log likelihood. The
logp() routine is actually used to perform the calculation. Note that
the latter can be called directly if for some reason it is preferable to
do the precomputations separately outside the routine.
"""
xtrain = gp_obj.xtrain
dtrain = gp_obj.dtrain
etrain = gp_obj.etrain
xinduc = gp_obj.xinduc
mfunc = gp_obj.mfunc
mpars = gp_obj.mpars
cfunc = gp_obj.cfunc
cpars = gp_obj.cpars
n = np.shape( dtrain )[0]
m = np.shape( xinduc )[0]
if mpars==None:
mpars = {}
if cpars==None:
cpars = {}
# Ensure that etrain is formatted as an array
# and any zero entries replaced with jitter:
if np.ndim( etrain )==0:
if ( etrain==None )+( etrain==0 ):
etrain = perturb*np.ones( n )
else:
ixs = ( etrain==None )
etrain[ixs] = perturb
ixs = ( etrain==0 )
etrain[ixs] = perturb
if mfunc==None:
mfunc = zero_mfunc
mu = mfunc( xtrain, **mpars )
resids = dtrain.flatten() - mu.flatten()
resids = np.reshape( resids, [ n, 1 ] )
    if xinduc is None:
print( 'Must specify inducing inputs (xinduc)' )
pdb.set_trace()
Km = cfunc( xinduc, xinduc, **cpars )
Kmn = cfunc( xinduc, xtrain, **cpars )
knn = cfunc( xtrain, None, **cpars )
loglikelihood = logp( resids, Km, Kmn, knn, etrain, perturb=perturb )
return loglikelihood
def logp( resids=None, Km=None, Kmn=None, knn=None, sigw=None, perturb=PERTURB ):
"""
SUMMARY
Evaluates the log likelihood of residuals that are assumed to be generated by a
gp with a specified covariance. The mean and covariance are passed directly into
the function as inputs, to allow flexibility in how they are actually computed.
This can be useful when repeated evaluations of logp are required (eg. likelihood
maximisation or MCMC), as it may be possible to optimise how these precomputations
are done outside the function.
The loglikelihood is calculated according to:
loglikelihood = -0.5*n*np.log( 2*np.pi ) - 0.5*L1 - 0.5*L2
where 'n' is the number of data points and:
        L1 = logdet[ Qnn + D ]
        L2 = (r^T)*[ ( Qnn + D )^-1 ]*r
    with:
        Qnn = (Kmn^T)*(Km^-1)*Kmn
        D = diag[ knn - diag(Qnn) ] + (sigw^2)*I
    Internally these are evaluated through Cholesky factorisations of the m-by-m
    matrices Km and I + V*(V^T), where V = (Lm^-1)*Kmn*(D^-0.5) and Lm*(Lm^T) = Km,
    so no n-by-n matrix is ever formed or inverted explicitly.
CALLING
      loglikelihood = logp( resids, Km, Kmn, knn, sigw, perturb=PERTURB )
    INPUTS
    'resids' [Nx1 array] - residuals between the training data and the gp mean function.
    'Km' [MxM array] - covariance matrix between the M inducing inputs.
    'Kmn' [MxN array] - cross covariance between the inducing inputs and the N training inputs.
    'knn' [Nx1 array] - diagonal of the covariance matrix between the training inputs.
    'sigw' [Nx1 array or float] - white noise value to be incorporated into the covariance diagonal;
        if set to None or zero, it will be set to the value of the perturb variable
        for numerical stability.
    'perturb' [float] - small perturbation to be added to the covariance diagonal for
        numerical stability if the white noise errors are set to None/zero.
OUTPUT
'loglikelihood' [float] - the gp log likelihood.
"""
    # Unpack and prepare:
    n = np.shape( Kmn )[1] # number of data points
    m = np.shape( Kmn )[0] # number of inducing variables
    # Convert sigw to an array and replace any zero
    # entries with jitter (n must already be defined here):
    if np.ndim( sigw )==0:
        if ( sigw==None )+( sigw==0 ):
            sigw = perturb*np.ones( n )
    else:
        ixs = ( sigw==None )
        sigw[ixs] = perturb
        ixs = ( sigw==0 )
        sigw[ixs] = perturb
Km = np.matrix( Km + ( perturb**2. ) * np.eye( m ) )
Kmn = np.matrix( Kmn )
knn = ( knn + perturb**2. ).flatten()
r = np.reshape( resids, [ n, 1 ] )
Sig2_diag = sigw**2.
# Calculate the diagonal entries of the Qnn matrix, where:
# Qnn = (Kmn^T)*(Kmm^-1)*Kmn
H = np.linalg.cholesky( Km )
V = np.array( scipy.linalg.lu_solve( scipy.linalg.lu_factor( H ), Kmn ) )
Qnn_diag = np.sum( V**2., axis=0 )
# Generate an array holding the diagonal entries of the D matrix, where:
# D = Qnn + diag[ Knn - Qnn ]
D_diag = ( knn - Qnn_diag + Sig2_diag ).flatten()
# Convert V to V*(D^-0.5) and compute V*(D^-1)*V:
V = np.matrix( V/np.tile( np.sqrt( D_diag ), [ m, 1 ] ) )
VVT = V*V.T
# Convert r to (D^-0.5)*r and compute (r^T)*(D^-1)*r:
r = np.matrix( np.reshape( r.flatten()/np.sqrt( D_diag ), [ n, 1 ] ) )
# To obtain L1, compute:
# L1 = 0.5*logdet(B) + 0.5*logdet(D)
# where:
# B*(B^T) = I + V*(V^T)
# = I + (H^-1)*Kmn*(D^-1)*(Kmn^T)*(H^-T)
# = (H^-1)*[ Kmm + Kmn*(D^-1)*(Kmn^T) ]*(H^-T)
# = (H^-1)*[ Kmm + Kmn*(D^-1)*(Kmn^T) ]*(Km^-1)*H
# det[ (H^-1)*[ Kmm + Kmn*(D^-1)*(Kmn^T) ]*(Km^-1)*H ] = prod[ diag(B)^2 ]
# (this is a standard result of the Cholesky decomposition)
# --> logdet[ ( Kmm + Kmn*(D^-1)*(Kmn^T) )*(Km^-1) ] = 2*sum[ diag(B) ]
# (using standard results det[ X*Y ]=det[X]*det[Y] and det[X^-1]=1/det[X])
B = np.linalg.cholesky( np.matrix( np.eye( m ) ) + VVT )
logdetB = 2*np.sum( np.log( np.diag( B ) ) )
logdetD = np.sum( np.log( D_diag ) )
L1 = 0.5*( logdetB + logdetD )
    # To obtain L2, compute:
    #   L2 = 0.5*(r^T)*(D^-1)*r - 0.5*(Y^T)*Y
    # where r is the original residual vector and:
    #   Y = Z*(D^-0.5)*r
    #   Z = (B^-1)*(H^-1)*Kmn*(D^-0.5)
    #   Z^T = (D^-0.5)*(Kmn^T)*(H^-T)*(B^-T)
    # so that:
    #   (Y^T)*Y = (r^T)*(D^-1)*(Kmn^T)*(H^-T)*(B^-T)*(B^-1)*(H^-1)*Kmn*(D^-1)*r
    #           = norm[ (H*B)^-1*Kmn*(D^-1)*r ]^2
    # as it can be verified that:
    #   (H*B)*[(H*B)^T] = Kmm + Kmn*(D^-1)*(Kmn^T)
    # so that:
    #   (H^-T)*(B^-T)*(B^-1)*(H^-1) = ( Kmm + Kmn*(D^-1)*(Kmn^T) )^-1
rTr = float( r.T*r )
Z = np.matrix( scipy.linalg.lu_solve( scipy.linalg.lu_factor( B ), V ) )
Y = Z*r
YTY = float( Y.T*Y )
L2 = 0.5*( rTr - YTY )
L3 = 0.5*n*np.log( 2*np.pi )
return -float( L1 + L2 + L3 )
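# The identities used in the comments above rely on the standard Cholesky result
# logdet(A) = 2*sum( log( diag( chol(A) ) ) ) for a positive-definite A. Below is a
# small self-contained numerical check of that result (illustrative only; it is not
# used elsewhere in this module and the test matrix is arbitrary):
def _check_cholesky_logdet( seed=0 ):
    rng = np.random.RandomState( seed )
    X = rng.randn( 5, 5 )
    A = np.dot( X, X.T ) + 5*np.eye( 5 ) # symmetric positive definite by construction
    logdet_chol = 2*np.sum( np.log( np.diag( np.linalg.cholesky( A ) ) ) )
    sign, logdet_np = np.linalg.slogdet( A )
    return np.allclose( logdet_chol, logdet_np )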
def prep_fixedcov( gp_obj, perturb=PERTURB ):
"""
Prepares a dictionary containing variables that remain unchanged in calculating
the log likelihood when the covariance parameters are fixed. The usage of this
routine is along the lines of:
>> resids = data - model
>> kwpars = gp.prep_fixedcov()
>> logp = gp.logp_fixedcov( resids=resids, kwpars=kwpars )
"""
# Unpack the variables stored in the GP object:
mfunc = gp_obj.mfunc
mpars = gp_obj.mpars
cfunc = gp_obj.cfunc
cpars = gp_obj.cpars
xtrain = gp_obj.xtrain
xinduc = gp_obj.xinduc
dtrain = gp_obj.dtrain
sigw = gp_obj.etrain
Kmn = cfunc( xinduc, xtrain, **cpars )
n = np.shape( Kmn )[1] # number of data points
m = np.shape( Kmn )[0] # number of inducing variables
Km = cfunc( xinduc, xinduc, **cpars ) + ( perturb**2. ) * np.eye( m )
knn = cfunc( xtrain, None, **cpars ).flatten()
knn = ( knn + perturb**2. ).flatten()
# Convert sigw to an array and replace any zero
# entries with jitter:
if np.ndim( sigw )==0:
if ( sigw==None )+( sigw==0 ):
sigw = perturb*np.ones( n )
else:
ixs = ( sigw==None )
sigw[ixs] = perturb
ixs = ( sigw==0 )
sigw[ixs] = perturb
Sig2_diag = sigw**2.
# Calculate the diagonal entries of the Qnn matrix, where:
# Qnn = (Kmn^T)*(Kmm^-1)*Kmn
H = np.linalg.cholesky( Km )
V = np.array( scipy.linalg.lu_solve( scipy.linalg.lu_factor( H ), Kmn ) )
Qnn_diag = np.sum( V**2., axis=0 )
# Generate an array holding the diagonal entries of the D matrix, where:
# D = Qnn + diag[ Knn - Qnn ]
D_diag = ( knn - Qnn_diag + Sig2_diag ).flatten()
# CHECK THIS IS DOING THE RIGHT THING:
    # Convert V to V*(D^-0.5) and compute V*(D^-1)*(V^T):
V = np.matrix( V/np.tile( np.sqrt( D_diag ), [ m, 1 ] ) )
VVT = V*V.T
# To obtain L1, compute:
# L1 = 0.5*logdet(B) + 0.5*logdet(D)
# where:
# B*(B^T) = I + V*(V^T)
# = I + (H^-1)*Kmn*(D^-1)*(Kmn^T)*(H^-T)
# = (H^-1)*[ Kmm + Kmn*(D^-1)*(Kmn^T) ]*(H^-T)
# = (H^-1)*[ Kmm + Kmn*(D^-1)*(Kmn^T) ]*(Km^-1)*H
# det[ (H^-1)*[ Kmm + Kmn*(D^-1)*(Kmn^T) ]*(Km^-1)*H ] = prod[ diag(B)^2 ]
# (the above is a standard result of the Cholesky decomposition)
    # --> logdet[ ( Kmm + Kmn*(D^-1)*(Kmn^T) )*(Km^-1) ] = 2*sum[ log( diag(B) ) ]
# (using standard results det[ X*Y ]=det[X]*det[Y] and det[X^-1]=1/det[X])
B = np.linalg.cholesky( np.matrix( np.eye( m ) ) + VVT )
logdetB = 2*np.sum( np.log( np.diag( B ) ) )
logdetD = np.sum( np.log( D_diag ) )
L1 = 0.5*( logdetB + logdetD )
Z = np.matrix( scipy.linalg.lu_solve( scipy.linalg.lu_factor( B ), V ) )
L3 = 0.5*n*np.log( 2*np.pi )
sqrt_D_diag = np.reshape( np.sqrt( D_diag ), [ n, 1 ] )
kwpars = { 'L1':L1, 'L3':L3, 'Z':Z, 'sqrt_D_diag':sqrt_D_diag }
return kwpars
def logp_fixedcov( resids=None, kwpars=None ):
"""
Calculates the log likehood using a specific dictionary of arguments that
are generated using the prep_fixedcov() routine. This routine is used to
avoid re-calculating the components of the log likelihood that remain
unchanged if the covariance parameters are fixed, which can potentially
save time for things like type-II maximum likelihood. The usage of this
routine is along the lines of:
>> resids = data - model
>> kwpars = gp.prep_fixedcov()
>> logp = gp.logp_fixedcov( resids=resids, kwpars=kwpars )
"""
L1 = kwpars['L1']
L3 = kwpars['L3']
Z = kwpars['Z']
sqrt_D_diag = kwpars['sqrt_D_diag']
r = np.matrix( resids/sqrt_D_diag )
# rTr should be rT*(D^(-1))*r
rTr = float( r.T*r )
Y = Z*r
YTY = float( Y.T*Y )
L2 = 0.5*( rTr - YTY )
return -float( L1 + L2 + L3 )
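# A minimal sketch of the intended workflow for prep_fixedcov()/logp_fixedcov():
# the covariance-dependent quantities are computed once, then the log likelihood is
# re-evaluated cheaply for a sequence of trial mean models. The gp_obj, data and
# trial_models arguments below are placeholders supplied by the caller; resids must
# have shape [n,1] to match kwpars['sqrt_D_diag']:
def _example_fixedcov_scan( gp_obj, data, trial_models ):
    kwpars = prep_fixedcov( gp_obj )
    logp_vals = []
    for model in trial_models:
        resids = data - model
        logp_vals.append( logp_fixedcov( resids=resids, kwpars=kwpars ) )
    return logp_vals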
def prep_fixedcov_OLD( gp_obj, perturb=PERTURB ):
"""
Prepares a dictionary containing variables that remain unchanged in calculating
the log likelihood when the covariance parameters are fixed. The usage of this
routine is along the lines of:
>> resids = data - model
>> kwpars = gp.prep_fixedcov()
>> logp = gp.logp_fixedcov( resids=resids, kwpars=kwpars )
"""
    # Ensure that etrain is formatted as an array
    # and any zero entries replaced with jitter:
    etrain = gp_obj.etrain
    n = np.shape( gp_obj.xtrain )[0]
    if np.ndim( etrain )==0:
        if ( etrain==None )+( etrain==0 ):
            etrain = perturb*np.ones( n )
    else:
        ixs = ( etrain==None )
        etrain[ixs] = perturb
        ixs = ( etrain==0 )
        etrain[ixs] = perturb
Km = gp_obj.cfunc( gp_obj.xinduc, gp_obj.xinduc, **gp_obj.cpars )
Kmn = gp_obj.cfunc( gp_obj.xinduc, gp_obj.xtrain, **gp_obj.cpars )
knn = gp_obj.cfunc( gp_obj.xtrain, None, **gp_obj.cpars )
n = np.shape( Kmn )[1]
m = np.shape( Kmn )[0]
Km = np.matrix( Km + ( perturb**2. ) * np.eye( m ) )
Kmn = np.matrix( Kmn )
knn = np.matrix( knn + perturb**2. )
L = np.linalg.cholesky( Km )
Vmn = np.matrix( scipy.linalg.lu_solve( scipy.linalg.lu_factor( L ), Kmn ) )
gnn = 1. + ( knn.flatten() - np.sum( np.power( Vmn, 2. ), axis=0 ) ) / ( etrain**2. )
gnn = np.reshape( gnn, [ n, 1 ] )
Vmn = Vmn / np.tile( np.sqrt( gnn ).T, [ m, 1 ] )
VmnVmnT = Vmn * Vmn.T
W = np.linalg.cholesky( np.matrix( ( etrain**2. ) * np.eye( m ) ) + VmnVmnT )
Z = scipy.linalg.lu_solve( scipy.linalg.lu_factor( W ), Vmn )
Z = np.matrix( Z )
L1 = 0.5 * ( 2 * np.sum( np.log( np.diag( W ) ) ) + np.sum( np.log( gnn ) ) \
+ ( n-m ) * np.log( gp_obj.etrain**2. ) )
L3 = 0.5*n*np.log( 2*np.pi )
kwpars = { 'L1':L1, 'L3':L3, 'gnn':gnn, 'Z':Z, 'sigw':etrain }
return kwpars
def zero_mfunc( x, **kwargs ):
"""
A simple zero mean function, used whenever mfunc==None in
any of the above routines. It takes an [NxD] array as input
and returns an [Nx1] array of zeros.
"""
n = np.shape( x )[0]
return np.zeros( [ n, 1 ] )
| gpl-2.0 | -926,298,388,998,945,400 | 37.44186 | 100 | 0.57236 | false | 3.030865 | false | false | false |
Purg/SMQTK | python/smqtk/representation/data_element/url_element.py | 1 | 2346 | import mimetypes
import requests
from smqtk.representation import DataElement
__author__ = "[email protected]"
MIMETYPES = mimetypes.MimeTypes()
class DataUrlElement (DataElement):
"""
Representation of data loadable via a web URL address.
"""
@classmethod
def is_usable(cls):
# have to be able to connect to the internet
try:
# using github because that's where this repo has been hosted.
r = requests.get('http://github.com')
_ = r.content
return True
except Exception, ex:
cls.logger().warning(
"DataUrlElement not usable, cannot connect to "
"http://github.com"
)
return False
def __init__(self, url_address):
"""
:raises requests.exceptions.HTTPError: URL address provided does not
resolve into a valid GET request.
:param url_address: Web address of element
:type url_address: str
"""
super(DataUrlElement, self).__init__()
self._url = url_address
# make sure that url has a http:// or https:// prefix
if not (self._url[:7] == "http://" or self._url[:8] == "https://"):
self._url = "http://" + self._url
# Check that the URL is valid, i.e. actually points to something
requests.get(self._url).raise_for_status()
def get_config(self):
return {
"url_address": self._url
}
def content_type(self):
"""
:return: Standard type/subtype string for this data element, or None if
the content type is unknown.
:rtype: str or None
"""
return requests.get(self._url).headers['content-type']
def get_bytes(self):
"""
:return: Get the byte stream for this data element.
:rtype: bytes
:raises requests.exceptions.HTTPError: Error during request for data
via GET.
"""
# Fetch content from URL, return bytes
r = requests.get(self._url)
r.raise_for_status()
if r.ok:
return r.content
else:
            raise RuntimeError("Request response not OK. Status code returned: "
                               "%d" % r.status_code)
DATA_ELEMENT_CLASS = DataUrlElement
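# A brief usage sketch of the element above. The URL is an arbitrary example (any
# reachable HTTP/HTTPS resource will do) and network access is required; this helper
# is illustrative only and not part of the SMQTK API:
def _example_usage(url='https://www.example.com/'):
    e = DataUrlElement(url)
    return e.content_type(), len(e.get_bytes())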
| bsd-3-clause | -8,930,708,487,979,302,000 | 26.6 | 80 | 0.563086 | false | 4.360595 | false | false | false |
jwinzer/openslides-proxyvoting | openslides_proxyvoting/models.py | 1 | 2500 | from django.db import models
from openslides.motions.models import Category, Motion, MotionPoll
from openslides.users.models import User
from openslides.utils.models import RESTModelMixin
from .access_permissions import (
AbsenteeVoteAccessPermissions,
VotingShareAccessPermissions,
VotingProxyAccessPermissions,
)
class VotingShare(RESTModelMixin, models.Model):
access_permissions = VotingShareAccessPermissions()
delegate = models.ForeignKey(User, on_delete=models.CASCADE, related_name='shares')
category = models.ForeignKey(Category, on_delete=models.CASCADE)
shares = models.DecimalField(max_digits=15, decimal_places=6)
class Meta:
default_permissions = ()
unique_together = ('delegate', 'category')
def __str__(self):
return '%s, %s, %s' % (self.delegate, self.category, self.shares)
class VotingProxy(RESTModelMixin, models.Model):
access_permissions = VotingProxyAccessPermissions()
delegate = models.OneToOneField(User, on_delete=models.CASCADE)
proxy = models.ForeignKey(User, on_delete=models.CASCADE, related_name='mandates')
class Meta:
default_permissions = ()
# TODO: Review permissions.
permissions = (
('can_manage', 'Can manage proxy voting'),
)
def __str__(self):
return '%s >> %s' % (self.delegate, self.proxy)
class AbsenteeVote(RESTModelMixin, models.Model):
access_permissions = AbsenteeVoteAccessPermissions()
motion = models.ForeignKey(Motion, on_delete=models.CASCADE)
delegate = models.ForeignKey(User, on_delete=models.CASCADE)
vote = models.CharField(max_length=1)
class Meta:
default_permissions = ()
unique_together = ('motion', 'delegate')
def __str__(self):
return '%s, %s, %s' % (self.motion, self.delegate, self.vote)
class MotionPollBallot(models.Model):
poll = models.ForeignKey(MotionPoll, on_delete=models.CASCADE)
delegate = models.ForeignKey(User, on_delete=models.CASCADE, related_name='delegate_set')
# voter = models.ForeignKey(User, on_delete=models.CASCADE, related_name='voter_set')
# keypad = models.IntegerField(default=0)
vote = models.CharField(max_length=1, blank=True)
# shares = models.DecimalField(max_digits=15, decimal_places=6)
class Meta:
default_permissions = ()
unique_together = ('poll', 'delegate')
def __str__(self):
return '%s, %s, %s' % (self.poll, self.delegate, self.vote)
| mit | 2,078,180,441,357,619,500 | 32.783784 | 93 | 0.6896 | false | 3.742515 | false | false | false |
numericube/twistranet | twistranet/twistapp/views/account_views.py | 1 | 34266 | import hashlib
import urllib
import time
from django.template import Context, RequestContext, loader
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseNotFound, HttpResponseServerError
from django.forms import widgets
from django.template.loader import get_template
from django.core.exceptions import ObjectDoesNotExist, PermissionDenied
from django.core.urlresolvers import reverse
from django.contrib.auth import logout
from django.contrib.auth.models import User, UNUSABLE_PASSWORD
from django.contrib.sites.models import RequestSite
from django.contrib import messages
from django.utils.translation import ugettext as _
from django.utils.safestring import mark_safe
from django.conf import settings
from twistranet.twistapp.signals import invite_user, reset_password, user_imported
from twistranet.twistapp.models import *
from twistranet.twistapp.forms import account_forms, registration_forms
from twistranet.twistapp.lib.slugify import slugify
from twistranet.twistapp.lib.log import log
from twistranet.actions import *
from twistranet.core.views import *
class UserAccountView(BaseWallView):
"""
This is what is used as a base view for accounts
"""
context_boxes = [
'account/profile.box.html',
'actions/context.box.html',
'account/relations.box.html',
]
template_variables = BaseWallView.template_variables + [
"account",
"n_communities",
"n_network_members"
]
model_lookup = UserAccount
template = "account/view.html"
title = None
name = "account_by_id"
def prepare_view(self, *args, **kw):
"""
Add a few parameters for the view
"""
# Regular creation
super(UserAccountView, self).prepare_view(*args, **kw)
if not hasattr(self, "useraccount"):
self.useraccount = self.auth
self.account = self.useraccount
self.n_communities = self.account and self.account.communities.count() or False
self.n_network_members = self.account and self.account.network.count() or False
# Add a message for ppl who have no content
if self.template == UserAccountView.template:
if self.account and self.auth and self.account.id == self.auth.id:
if not Content.objects.filter(publisher = self.auth).exists():
messages.info(self.request, mark_safe(_("""<p>
It seems that you do not have created content yet. Maybe it's time to do so!
</p>
<p>
Creating content in twistranet is easy. For example, just tell what you're working on in the form below and click the "Send" button.
</p>
<p>
Want to learn about what you can do in twistranet? Just take a look here: [help]
</p>
""")))
def get_objects_list(self,):
return Content.objects.getActivityFeed(self.object)
def get_recent_content_list(self):
"""
Retrieve recent content list for the given account.
XXX TODO: Optimize this by adding a (first_twistable_on_home, last_twistable_on_home) values pair on the Account object.
This way we can just query objects with id > last_twistable_on_home
"""
nb_all = self.objects_list.count()
batch = self.batch_list(nb_all)
nb_from = batch[0]
nb_to = batch[1]
if nb_from < nb_all:
objects_list = self.objects_list.order_by("-id").values_list('id', flat = True)[nb_from:nb_to]
latest_list = Content.objects.__booster__.filter(id__in = tuple(objects_list)).select_related(*self.select_related_summary_fields).order_by("-created_at")
return latest_list
return []
def get_title(self,):
"""
We override get_title in a way that it could be removed easily in subclasses.
Just define a valid value for self.title and this get_title() will keep the BaseView behaviour
"""
if not self.title:
return _("%(name)s's profile") % {'name': self.account.title}
return super(UserAccountView, self).get_title()
class HomepageView(UserAccountView):
"""
Special treatment for homepage.
"""
name = "twistranet_home"
title = _("Timeline")
def get_objects_list(self):
"""
Retrieve recent content list for the given account.
XXX TODO: Optimize this by adding a (first_twistable_on_home, last_twistable_on_home) values pair on the Account object.
This way we can just query objects with id > last_twistable_on_home
"""
objects_list = None
if not self.auth.is_anonymous:
if Content.objects.filter(publisher = self.auth).exists():
objects_list = Content.objects.followed.exclude(model_name = "Comment")
if objects_list is None:
objects_list = Content.objects.exclude(model_name = "Comment")
return objects_list
def prepare_view(self, ):
"""
We just have the account set as curently-auth account.
"""
# Get the actual view instance. Not optimal, but, well, works.
if not self.auth.is_anonymous:
prep_id = self.auth.id
else:
prep_id = None
super(HomepageView, self).prepare_view(prep_id)
class PublicTimelineView(UserAccountView):
name = "timeline"
title = _("Public timeline")
def get_objects_list(self):
"""
Just return all public / available content
"""
return Content.objects.exclude(model_name = "Comment")
# #
# LISTING VIEWS #
# #
class AccountListingView(BaseView):
"""
Todo: ALL accounts listing page.
"""
title = _("Accounts")
template = "account/list.html"
template_variables = BaseView.template_variables + [
"accounts",
]
def prepare_view(self, ):
super(AccountListingView, self).prepare_view()
self.accounts = Account.objects.get_query_set()[:settings.TWISTRANET_COMMUNITIES_PER_PAGE]
class AccountNetworkView(AccountListingView, UserAccountView):
"""
All networked accounts for an account page
"""
template = AccountListingView.template
template_variables = UserAccountView.template_variables + AccountListingView.template_variables
def get_title(self,):
if self.account.id == self.auth.id:
return _("Your network")
return _("%(name)s's network" % {'name': self.account.title} )
def prepare_view(self, *args, **kw):
super(AccountNetworkView, self).prepare_view()
UserAccountView.prepare_view(self, *args, **kw)
self.accounts = self.account.network
class AccountCommunitiesView(AccountListingView, UserAccountView):
"""
All communities for an account.
"""
template = AccountListingView.template
template_variables = UserAccountView.template_variables + AccountListingView.template_variables
def get_title(self,):
if self.account.id == self.auth.id:
return _("Your communities")
return _("%(name)s's communities" % {'name': self.account.title} )
def prepare_view(self, *args, **kw):
super(AccountCommunitiesView, self).prepare_view()
UserAccountView.prepare_view(self, *args, **kw)
self.accounts = self.account.communities
class AccountAdminCommunitiesView(AccountListingView, UserAccountView):
"""
All communities administred by an account.
"""
template = AccountListingView.template
template_variables = UserAccountView.template_variables + AccountListingView.template_variables
# XXX TODO
def get_title(self,):
if self.account.id == self.auth.id:
return _("Your communities")
return _("%(name)s's communities" % {'name': self.account.title} )
def prepare_view(self, *args, **kw):
super(AccountCommunitiesView, self).prepare_view(*args, **kw)
UserAccountView.prepare_view(self, *args, **kw)
self.accounts = self.account.communities
class PendingNetworkView(AccountListingView, UserAccountView):
"""
All pending network relations for an account
"""
template = AccountListingView.template
template_variables = UserAccountView.template_variables + AccountListingView.template_variables
title = _("Pending network requests")
name = "account_pending_network"
category = ACCOUNT_ACTIONS
def as_action(self,):
"""Only return the action if there's pending nwk requests
"""
if self.auth.is_anonymous:
return
req = self.auth.get_pending_network_requests()
if not req:
return
action = BaseView.as_action(self)
action.label = mark_safe(_('<span class="badge">%(number)d</span> Pending network requests') % {"number": len(req)})
return action
def prepare_view(self, *args, **kw):
super(PendingNetworkView, self).prepare_view()
UserAccountView.prepare_view(self, self.auth.id)
self.accounts = self.account.get_pending_network_requests()
# #
# ACTION VIEWS #
# #
class AccountDelete(BaseObjectActionView):
"""
Delete a community from the base
"""
model_lookup = UserAccount
name = "account_delete"
confirm = _("Do you really want to delete this account?<br />All content for this user WILL BE DELETED.")
title = _("Delete account")
def as_action(self):
if not isinstance(getattr(self, "object", None), self.model_lookup):
return None
if not self.object.can_delete:
return None
# Can't delete myself ;)
if self.object.id == Twistable.objects.getCurrentAccount(self.request).id:
return None
return super(AccountDelete, self).as_action()
def prepare_view(self, *args, **kw):
super(AccountDelete, self).prepare_view(*args, **kw)
if not self.object.can_delete:
raise ValueError("You're not allowed to delete this account")
name = self.useraccount.title
underlying_user = self.useraccount.user
__account__ = SystemAccount.get()
# self.useraccount.delete()
underlying_user.delete()
del __account__
messages.info(
self.request,
_("'%(name)s' account has been deleted.") % {'name': name},
)
raise MustRedirect(reverse("twistranet_home"))
class AddToNetworkView(BaseObjectActionView):
"""
Add sbdy to my network, with or without authorization
"""
model_lookup = UserAccount
name = "add_to_my_network"
def as_action(self, ):
"""
as_action(self, ) => generate the proper action.
"""
if not hasattr(self, "object"):
return None
if not isinstance(self.object, UserAccount):
return None
# Networking actions
if self.object.has_pending_network_request:
return Action(
label = _("Accept in your network"),
url = reverse(self.name, args = (self.object.id, ), ),
confirm = _(
"Would you like to accept %(name)s in your network?<br />"
"He/She will be able to see your network-only content."
) % { "name": self.object.title },
category = MAIN_ACTION,
)
elif self.object.can_add_to_my_network:
return Action(
label = _("Add to your network"),
url = reverse(self.name, args = (self.object.id, ), ),
confirm = _(
"Would you like to add %(name)s to your network?<br />"
"He/She will have to agree to your request."
) % {"name": self.object.title},
category = MAIN_ACTION,
)
def prepare_view(self, *args, **kw):
super(AddToNetworkView, self).prepare_view(*args, **kw)
self.redirect = self.useraccount.get_absolute_url()
self.useraccount.add_to_my_network()
name = self.useraccount.title
if self.useraccount in self.auth.network:
messages.success(
self.request,
_("You're now connected with %(name)s.") % {'name': name}
)
else:
messages.info(
self.request,
_("A network request has been sent to %(name)s for approval.") % {'name': name}
)
class RemoveFromNetworkView(BaseObjectActionView):
"""
    Remove somebody from my network, or cancel a pending network request.
"""
model_lookup = UserAccount
name = "remove_from_my_network"
def as_action(self, ):
if not isinstance(getattr(self, "object", None), self.model_lookup):
return None
if self.object.has_received_network_request:
return Action(
category = LOCAL_ACTIONS,
label = _("Cancel your network request"),
url = reverse(self.name, args = (self.object.id, ), ),
confirm = _("Would you like to cancel your network request?"),
)
if self.object.in_my_network:
return Action(
category = LOCAL_ACTIONS,
label = _("Remove from your network"),
url = reverse(self.name, args = (self.object.id, ), ),
confirm = _("Would you like to remove %(name)s from your network?") % {"name": self.object.title},
)
def prepare_view(self, *args, **kw):
super(RemoveFromNetworkView, self).prepare_view(*args, **kw)
self.redirect = self.useraccount.get_absolute_url()
was_in_my_network = self.useraccount in self.auth.network
self.useraccount.remove_from_my_network()
name = self.useraccount.title
if was_in_my_network:
messages.success(
self.request,
_("You're not connected with %(name)s anymore.") % {'name': name}
)
else:
messages.info(
self.request,
_("Your network request to %(name)s has been canceled.") % {'name': name}
)
# #
# Edition / Creation views #
# #
class UserAccountEdit(UserAccountView):
"""
Edit form for user account. Not so far from the view itself.
"""
template = "account/edit.html"
form_class = account_forms.UserAccountForm
content_forms = []
latest_content_list = []
name = "user_account_edit"
category = LOCAL_ACTIONS
def as_action(self,):
"""
Return action only if can_edit user
"""
if not self.is_model:
return None
if self.object.can_edit:
return super(UserAccountEdit, self).as_action()
def get_title(self,):
"""
Title suitable for creation or edition
"""
if self.title:
return super(UserAccountEdit, self).get_title()
if not getattr(self, 'object', None):
return _("Create a user account")
elif self.object.id == self.auth.id:
return _("Edit your account")
return _("Edit %(name)s" % {'name' : self.object.title })
class UserAccountInvite(UserAccountEdit):
"""
UserAccount invitation. Close to the edit class!
"""
context_boxes = []
form_class = account_forms.UserInviteForm
title = _("Invite user")
category = GLOBAL_ACTIONS
name = "user_account_invite"
def as_action(self):
if not Account.objects.can_create:
return None
return BaseView.as_action(self)
def prepare_view(self):
"""
Process additional form stuff.
Here we've got a valid self.form object.
"""
super(UserAccountInvite, self).prepare_view()
is_admin = UserAccount.objects.getCurrentAccount(self.request).is_admin
if not is_admin:
self.form.fields['make_admin'].widget = widgets.HiddenInput()
if self.form_is_valid:
# Double-check that user is not already registered
email = self.form.cleaned_data['email']
if User.objects.filter(email = email).exists():
messages.error(self.request, _("This user already exists."))
self.form_is_valid = False
if self.form_is_valid:
# Generate the invitation link.
# Invitation is in two parts: the verification hash and the email address.
admin_string = ""
if is_admin:
if self.form.cleaned_data['make_admin']:
admin_string = "?make_admin=1"
h = "%s%s%s" % (settings.SECRET_KEY, email, admin_string)
h = hashlib.md5(h).hexdigest()
invite_link = reverse(AccountJoin.name, args = (h, urllib.quote_plus(email)))
# Send the invitation (as a signal)
invite_user.send(
sender = self.__class__,
inviter = UserAccount.objects.getCurrentAccount(self.request),
invitation_uri = "%s" % (invite_link, ),
target = email,
message = self.form.cleaned_data['invite_message'],
)
# Say we're happy and redirect
if self.form_is_valid:
            messages.success(self.request, _("Invitation sent successfully."))
raise MustRedirect(reverse(self.name))
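# A small illustrative helper (not used by the views above) that makes the
# invitation-link scheme explicit: UserAccountInvite embeds this digest in the link,
# and AccountJoin rebuilds it from the same SECRET_KEY + email (+ optional admin
# marker) to verify that the address has not been tampered with.
def _invitation_hash(email, make_admin = False):
    admin_string = "?make_admin=1" if make_admin else ""
    return hashlib.md5("%s%s%s" % (settings.SECRET_KEY, email, admin_string)).hexdigest()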
# #
# Account login/logout/join #
# #
class AccountJoin(UserAccountEdit):
"""
join TN
"""
template = "registration/join.html"
form_class = account_forms.UserAccountCreationForm
name = "account_join"
title = _("Join")
def prepare_view(self, check_hash, email):
"""
Render the join form.
"""
# Check if hash and email AND admin priviledge match
is_admin = False
admin_string = "?make_admin=1"
h = "%s%s%s" % (settings.SECRET_KEY, email, admin_string)
h = hashlib.md5(h).hexdigest()
if check_hash == h:
is_admin = True
else:
# Check if hash and email match.
h = "%s%s" % (settings.SECRET_KEY, email)
h = hashlib.md5(h).hexdigest()
if not check_hash == h:
raise ValidationError("Invalid email. This invitation has been manually edited.")
# If user is already registered, return to login form
if User.objects.filter(email = email).exists():
raise MustRedirect(reverse(AccountLogin.name))
# Call form processing. Prepare all arguments, esp. email and username
username = email.split('@')[0]
username = slugify(username)
self.initial = {
"email": email,
"username": username,
}
super(AccountJoin, self).prepare_view()
# Now save user info. But before, double-check that stuff is still valid
if self.form_is_valid:
cleaned_data = self.form.cleaned_data
# Check password and username
if not cleaned_data["password"] == cleaned_data["password_confirm"]:
messages.warning(self.request, _("Password and confirmation do not match"))
elif User.objects.filter(username = cleaned_data["username"]).exists():
messages.warning(self.request, _("A user with this name already exists."))
else:
# Create user and set information
__account__ = SystemAccount.get()
u = User.objects.create(
username = cleaned_data["username"],
first_name = cleaned_data["first_name"],
last_name = cleaned_data["last_name"],
email = cleaned_data["email"],
is_superuser = is_admin,
is_active = True,
)
u.set_password(cleaned_data["password"])
u.save()
useraccount = UserAccount.objects.get(user = u)
useraccount.title = u"%s %s" % (cleaned_data["first_name"], cleaned_data["last_name"])
useraccount.save()
if is_admin:
admin_community = AdminCommunity.objects.get()
if not admin_community in useraccount.communities:
admin_community.join(useraccount, is_manager = True)
del __account__
# Display a nice success message and redirect to login page
messages.success(self.request, _("Your account is now created. You can login to twistranet."))
raise MustRedirect(reverse(AccountLogin.name))
class AccountLogin(BaseView):
template = "registration/login.html"
name = "login"
title = _("Login")
template_variables = BaseView.template_variables + \
['form', 'site', 'next', ]
global_boxes = [
'registration/introduction.box.html',
]
def prepare_view(self,):
"""
request, template_name='registration/login.html',
redirect_field_name=REDIRECT_FIELD_NAME,
authentication_form=AuthenticationForm):
Displays the login form and handles the login action.
this is from django.contrib.auth.views
"""
from django.contrib.auth.views import REDIRECT_FIELD_NAME as redirect_field_name # = 'next'
from django.contrib.auth.views import AuthenticationForm as authentication_form
from django.contrib.auth.views import auth_login
from django.contrib.sites.models import Site, RequestSite
redirect_to = self.request.REQUEST.get(redirect_field_name, '')
if self.request.method == "POST":
self.form = authentication_form(data=self.request.POST)
if self.form.is_valid():
# Light security check -- make sure redirect_to isn't garbage.
if not redirect_to or ' ' in redirect_to:
redirect_to = settings.LOGIN_REDIRECT_URL
# Heavier security check -- redirects to http://example.com should
# not be allowed, but things like /view/?param=http://example.com
# should be allowed. This regex checks if there is a '//' *before* a
# question mark.
elif '//' in redirect_to and re.match(r'[^\?]*//', redirect_to):
redirect_to = settings.LOGIN_REDIRECT_URL
# Okay, security checks complete. Log the user in.
auth_login(self.request, self.form.get_user())
setattr(self, redirect_field_name, redirect_to)
if self.request.session.test_cookie_worked():
self.request.session.delete_test_cookie()
raise MustRedirect(redirect_to)
else:
# Invalid user/password
messages.warning(self.request, _("Sorry, that's not a valid username or password"))
else:
self.form = authentication_form(self.request)
self.request.session.set_test_cookie()
if Site._meta.installed:
self.site = Site.objects.get_current()
else:
self.site = RequestSite(self.request)
setattr(self, redirect_field_name, redirect_to)
class AccountForgottenPassword(AccountLogin):
"""
    Forgotten password view: emails the user a verification link to reset his/her password.
"""
name = "forgotten_password"
title = _("Forgot your password")
template = "registration/forgotten.html"
template_variables = BaseView.template_variables + ['form', ]
def prepare_view(self,):
if self.request.method == "POST":
self.form = registration_forms.ForgottenPasswordForm(data=self.request.POST)
if self.form.is_valid():
# Generate the reset password link.
# The link is in two parts: the verification hash and the email.
# The verification hash is a combination of the server's secret key, user's email,
# HASHED version of the user password and current date.
# That way, we ensure that an email/site hash/password hash combination will
# get a unique reset password link.
email = self.form.cleaned_data['email']
user = User.objects.get(email = email)
h = "%s%s%s%s" % (settings.SECRET_KEY, email, user.password, time.strftime("%Y%m%d"))
h = hashlib.md5(h).hexdigest()
reset_link = reverse(ResetPassword.name, args = (h, urllib.quote_plus(email)))
# Send the invitation (as a signal)
useraccount = UserAccount.objects.__booster__.get(user__id = user.id)
reset_password.send(
sender = self.__class__,
target = useraccount,
reset_password_uri = "%s" % (reset_link, ),
)
# Say we're happy and redirect
messages.success(self.request, _("We've sent you a password reset email."))
raise MustRedirect(reverse("twistranet_home"))
else:
self.form = registration_forms.ForgottenPasswordForm()
class ResetPassword(AccountLogin):
"""
Provide a way for users to reset their password.
Works with a hash generated in the AccountForgottenPassword view.
"""
name = "reset_password"
title = _("Reset your password")
template = "registration/reset_password.html"
template_variables = BaseView.template_variables + ['form', ]
def prepare_view(self, check_hash, email):
if self.request.method == "POST":
self.form = registration_forms.ResetPasswordForm(data=self.request.POST)
if self.form.is_valid():
# Generate the reset password link.
# The link is in two parts: the verification hash and the password hash.
# That way, we ensure that an email/site hash/password hash combination will
# get a unique reset password link.
user = User.objects.get(email = email)
if user.password == UNUSABLE_PASSWORD:
raise ValidationError(_("Can't set password on this user."))
h = "%s%s%s%s" % (settings.SECRET_KEY, email, user.password, time.strftime("%Y%m%d"))
h = hashlib.md5(h).hexdigest()
if not h == check_hash:
raise ValidationError("Attempt to access an invalid verification hash.")
# Actually change password
user.set_password(self.form.cleaned_data['password'])
user.save()
# Say we're happy and redirect
messages.success(self.request, _("Your password is set to its new value. You can now login."))
raise MustRedirect(reverse("twistranet_home"))
else:
self.form = registration_forms.ResetPasswordForm()
class ChangePassword(UserAccountEdit):
"""
Classic "change password" with former password validation.
"""
name = "change_password"
title = _("Change your password")
template = "account/edit.html"
form_class = account_forms.ChangePasswordForm
template_variables = UserAccountEdit.template_variables + ['form', ]
def as_action(self,):
"""
Display this action only on current account, with user-settable backends.
"""
if not hasattr(self, "object"):
return None
if not self.auth.id == self.object.id:
return None
if self.auth.user.password == UNUSABLE_PASSWORD:
return None
return super(ChangePassword, self).as_action()
def prepare_view(self, *args, **kw):
super(ChangePassword, self).prepare_view(*args, **kw)
if self.request.method == "POST":
self.form = account_forms.ChangePasswordForm(data=self.request.POST)
if self.form.is_valid():
# Actually change password
user = self.useraccount.user
user.set_password(self.form.cleaned_data['new_password'])
user.save()
# Say we're happy and redirect
messages.success(self.request, _("New password set."))
raise MustRedirect(reverse("twistranet_home"))
else:
self.form = account_forms.ChangePasswordForm()
class AccountLogout(BaseView):
template = "registration/login.html"
template_variables = BaseView.template_variables + ["justloggedout", ]
name = "logout"
title = _("Logged out")
def prepare_view(self,):
messages.info(self.request, mark_safe(_("You are now logged out.<br />Thanks for spending some quality time on Twistranet.")))
self.justloggedout = True
logout(self.request)
import os, random, string
from StringIO import StringIO
import csv
class CSVDialect(csv.excel):
delimiter = ';'
lineterminator = '\r\n'
class AccountsImport(BaseView):
"""
import users view
"""
template="registration/import_accounts.html"
name = "accounts_import"
title = _("Import accounts")
def prepare_view(self):
"""
Render the import form
or do the import (from csv file posted).
"""
if self.request.method == "POST" and \
self.request.FILES.get("csv_file", None):
csv_file = self.request.FILES.get("csv_file")
reader = csv.reader(csv_file, dialect=CSVDialect)
for line in reader:
if not line:
continue
# firstname;lastname;email
firstname = line[0].decode('utf8')
lastname = line[1].decode('utf8')
email = line[2]
username = email.split('@')[0]
username = slugify(username).replace('_','-')
if User.objects.filter(username = username).exists():
u = User.objects.get(username = username)
useraccount = UserAccount.objects.get(user = u)
log.info( "User account '%s' already exixts" %useraccount.title )
else:
# create user
try:
__account__ = SystemAccount.get()
u = User.objects.create(
username = username,
first_name = firstname,
last_name = lastname,
email = email,
is_superuser = False,
is_active = True,
)
chars = string.ascii_letters + string.digits
random.seed = (os.urandom(1024))
password = ''.join(random.choice(chars) for i in range(6))
u.set_password(password)
u.save()
useraccount = UserAccount.objects.get(user = u)
useraccount.title = u"%s %s" % (firstname, lastname)
useraccount.save()
log.info( "User account '%s' for %s %s (%s) created !" %(username, firstname, lastname, email))
# notify imported user (a mail is sent to prevent user)
h = "%s%s%s%s" % (settings.SECRET_KEY, email, password, time.strftime("%Y%m%d"))
h = hashlib.md5(h).hexdigest()
reset_link = reverse(ResetPassword.name, args = (h, urllib.quote_plus(email)))
user_imported.send(
sender = self.__class__,
target = useraccount,
reset_password_url = reset_link,
)
del __account__
except:
log.warning( "Impossible to create account '%s' for %s %s (%s)" %(username, firstname, lastname, email))
continue
community_title = line[3].decode('utf8')
cid = slugify(community_title)
if Community.objects.filter(slug = cid).exists():
log.info( "Community %s already exists !" %community )
else:
c = Community.objects.create(
slug = cid,
title = community_title,
permissions = "workgroup"
)
c.save()
com = Community.objects.get(slug= cid)
com.join(account=useraccount)
log.info( "user %s join the community %s !" %(useraccount.title, community_title) )
messages.info( self.request, u"import finished",)
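# The importer above expects a semicolon-delimited CSV (see CSVDialect) with one
# account per line, in the order firstname;lastname;email;community. An illustrative
# line, using purely fictional data:
EXAMPLE_IMPORT_LINE = "Jane;Doe;[email protected];Marketing Team"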
| agpl-3.0 | 4,508,740,274,224,574,500 | 39.551479 | 166 | 0.559563 | false | 4.420279 | false | false | false |
sot/mica | scripts/update_agasc_supplement.py | 1 | 3262 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Update the agasc_supplement.h5.
This file is a supplement to the stable AGASC to inform star selection
and star catalog checking.
Currently this script only has the capability to add a bad star to the
bad star table. It might end up including functionality to automatically
update another table with effective mags based on acq / guide history.
For process instructions see: https://github.com/sot/mica/wiki/AGASC-supplement
"""
import os
import argparse
from pathlib import Path
import pyyaks.logger
from astropy.table import Table
SKA = Path(os.environ['SKA'])
logger = None # Set via global in main()
def get_options(args=None):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--data-root",
default='.',
help=("Directory containing agasc_supplement.h5 (default='.')"))
parser.add_argument("--bad-star-id",
type=int,
help="AGASC ID of star to add to bad-star list")
parser.add_argument("--bad-star-source",
type=int,
help=("Source identifier indicating provenance (default=max "
"existing source + 1)"))
parser.add_argument("--log-level",
default=20,
help="Logging level (default=20 (info))")
parser.add_argument("--dry-run",
action="store_true",
help="Dry run (no actual file or database updates)")
opt = parser.parse_args(args)
return opt
def main(args=None):
global logger
# Setup for updating the sync repository
opt = get_options(args)
# Set up logging
loglevel = int(opt.log_level)
logger = pyyaks.logger.get_logger(name='mica_update_agasc_supplement', level=loglevel,
format="%(message)s")
data_root = Path(opt.data_root)
suppl_file = data_root / 'agasc_supplement.h5'
if suppl_file.exists():
logger.info(f'Updating agasc_supplement at {suppl_file}')
else:
raise IOError(f'file {suppl_file.absolute()} not found')
if opt.bad_star_id:
add_bad_star(opt.bad_star_id, opt.bad_star_source, suppl_file, opt.dry_run)
def add_bad_star(bad_star_id, bad_star_source, suppl_file, dry_run):
bad_star_id = int(bad_star_id)
dat = Table.read(str(suppl_file), format='hdf5', path='bad')
if bad_star_source is None:
bad_star_source = dat['source'].max() + 1
else:
bad_star_source = int(bad_star_source)
dat.add_row((bad_star_id, bad_star_source))
logger.info(f'Appending {bad_star_id} with source={bad_star_source} to {suppl_file}')
logger.info('')
logger.info('IMPORTANT:')
logger.info('Edit following if source ID is new:')
logger.info(' https://github.com/sot/mica/wiki/AGASC-supplement')
logger.info('')
logger.info('The wiki page also includes instructions for test, review, approval')
logger.info('and installation.')
if not dry_run:
dat.write(str(suppl_file), format='hdf5', path='bad', append=True, overwrite=True)
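# A minimal sketch of driving this script programmatically instead of from the shell;
# the data root and AGASC ID below are hypothetical example values:
#
#   main(['--data-root', '/tmp/agasc', '--bad-star-id', '36178592',
#         '--bad-star-source', '12', '--dry-run'])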
if __name__ == '__main__':
main()
| bsd-3-clause | -6,786,102,280,383,239,000 | 33.336842 | 90 | 0.622624 | false | 3.685876 | false | false | false |
ENCODE-DCC/encoded | src/encoded/types/experiment.py | 1 | 16913 | from pyramid.traversal import find_root
from snovault import (
calculated_property,
collection,
load_schema,
)
from snovault.util import Path
from .base import (
ALLOW_SUBMITTER_ADD,
Item,
paths_filtered_by_status,
SharedItem
)
from .dataset import Dataset
from .shared_calculated_properties import (
CalculatedAssaySynonyms,
CalculatedAssayTermID,
CalculatedVisualize,
CalculatedBiosampleSummary,
CalculatedSimpleSummary,
CalculatedReplicates,
CalculatedAssaySlims,
CalculatedAssayTitle,
CalculatedCategorySlims,
CalculatedTypeSlims,
CalculatedObjectiveSlims,
CalculatedReplicationType
)
from .assay_data import assay_terms
@collection(
name='experiments',
unique_key='accession',
properties={
'title': 'Experiments',
'description': 'Listing of Experiments',
})
class Experiment(Dataset,
CalculatedAssaySynonyms,
CalculatedAssayTermID,
CalculatedVisualize,
CalculatedBiosampleSummary,
CalculatedSimpleSummary,
CalculatedReplicates,
CalculatedAssaySlims,
CalculatedAssayTitle,
CalculatedCategorySlims,
CalculatedTypeSlims,
CalculatedObjectiveSlims,
CalculatedReplicationType):
item_type = 'experiment'
schema = load_schema('encoded:schemas/experiment.json')
embedded = Dataset.embedded + [
'biosample_ontology',
'files.platform',
'files.analysis_step_version.analysis_step',
'files.analysis_step_version.analysis_step.pipelines',
'files.quality_metrics',
'related_series',
'replicates.antibody',
'replicates.library',
'replicates.library.biosample.biosample_ontology',
'replicates.library.biosample.submitted_by',
'replicates.library.biosample.source',
'replicates.library.biosample.applied_modifications',
'replicates.library.biosample.organism',
'replicates.library.biosample.donor',
'replicates.library.biosample.donor.organism',
'replicates.library.biosample.part_of',
'replicates.library.biosample.part_of.donor',
'replicates.library.biosample.part_of.treatments',
'replicates.library.biosample.treatments',
'replicates.library.construction_platform',
'replicates.library.treatments',
'possible_controls',
'target.genes',
'target.organism'
]
audit_inherit = [
'original_files',
'original_files.replicate',
'original_files.platform',
'target',
'files.analysis_step_version.analysis_step.pipelines',
'revoked_files',
'revoked_files.replicate',
'submitted_by',
'lab',
'award',
'default_analysis',
'documents',
'replicates.antibody.characterizations.biosample_ontology',
'replicates.antibody.characterizations',
'replicates.antibody.targets',
'replicates.library',
'replicates.library.documents',
'replicates.library.biosample',
'replicates.library.biosample.biosample_ontology',
'replicates.library.biosample.organism',
'replicates.library.biosample.treatments',
'replicates.library.biosample.applied_modifications',
'replicates.library.biosample.donor.organism',
'replicates.library.biosample.donor',
'replicates.library.biosample.treatments',
'replicates.library.biosample.originated_from',
'replicates.library.biosample.originated_from.biosample_ontology',
'replicates.library.biosample.part_of',
'replicates.library.biosample.part_of.biosample_ontology',
'replicates.library.biosample.pooled_from',
'replicates.library.biosample.pooled_from.biosample_ontology',
'replicates.library.spikeins_used',
'replicates.library.treatments',
'target.organism',
]
set_status_up = [
'original_files',
'replicates',
'documents',
'target',
'analyses',
]
set_status_down = [
'original_files',
'replicates',
'analyses',
]
rev = Dataset.rev.copy()
rev.update({
'replicates': ('Replicate', 'experiment'),
'related_series': ('Series', 'related_datasets'),
'superseded_by': ('Experiment', 'supersedes')
})
@calculated_property(schema={
"title": "Related series",
"type": "array",
"items": {
"type": ['string', 'object'],
"linkFrom": "Series.related_datasets",
},
"notSubmittable": True,
})
def related_series(self, request, related_series):
return paths_filtered_by_status(request, related_series)
@calculated_property(schema={
"title": "Superseded by",
"type": "array",
"items": {
"type": ['string', 'object'],
"linkFrom": "Experiment.supersedes",
},
"notSubmittable": True,
})
def superseded_by(self, request, superseded_by):
return paths_filtered_by_status(request, superseded_by)
@calculated_property(schema={
"title": "Protein tags",
"description": "The protein tags introduced through the genetic modifications of biosamples investigated in the experiment.",
"comment": "Do not submit. This field is calculated through applied_modifications.",
"type": "array",
"notSubmittable": True,
"minItems": 1,
"items": {
"title": "Protein tag",
"description": "The protein tag introduced in the modification.",
"type": "object",
"additionalProperties": False,
"properties": {
"name": {
"title": "Tag name",
"type": "string",
"enum": [
"3xFLAG",
"6XHis",
"DsRed",
"eGFP",
"ER",
"FLAG",
"GFP",
"HA",
"mCherry",
"T2A",
"TagRFP",
"TRE",
"V5",
"YFP",
"mAID-mClover",
"mAID-mClover-NeoR",
"mAID-mClover-Hygro"
]
},
"location": {
"title": "Tag location",
"type": "string",
"enum": [
"C-terminal",
"internal",
"N-terminal",
"other",
"unknown"
]
},
"target": {
"title": "Tagged protein",
"type": "string",
"linkTo": "Target",
}
}
}
})
def protein_tags(self, request, replicates=None):
protein_tags = []
if replicates is not None:
for rep in replicates:
replicateObject = request.embed(rep, '@@object?skip_calculated=true')
if replicateObject['status'] in ('deleted', 'revoked'):
continue
if 'library' in replicateObject:
libraryObject = request.embed(replicateObject['library'], '@@object?skip_calculated=true')
if libraryObject['status'] in ('deleted', 'revoked'):
continue
if 'biosample' in libraryObject:
biosampleObject = request.embed(libraryObject['biosample'], '@@object')
if biosampleObject['status'] in ('deleted', 'revoked'):
continue
genetic_modifications = biosampleObject.get('applied_modifications')
if genetic_modifications:
for gm in genetic_modifications:
gm_object = request.embed(gm, '@@object?skip_calculated=true')
if gm_object.get('introduced_tags') is None:
continue
if gm_object.get('introduced_tags'):
for tag in gm_object.get('introduced_tags'):
tag_dict = {'location': tag['location'], 'name': tag['name']}
if gm_object.get('modified_site_by_target_id'):
tag_dict.update({'target': gm_object.get('modified_site_by_target_id')})
protein_tags.append(tag_dict)
if len(protein_tags) > 0:
return protein_tags
@calculated_property(schema={
"title": "Life stage and age summary",
"description": "Life stage and age display summary to be used for the mouse development matrix.",
"type": "string",
"notSubmittable": True,
})
def life_stage_age(self, request, replicates=None):
biosample_accessions = set()
all_life_stage = set()
all_age_display = set()
life_stage_age = ''
if replicates is not None:
for rep in replicates:
replicateObject = request.embed(rep, '@@object?skip_calculated=true')
if replicateObject['status'] in ('deleted', 'revoked'):
continue
if 'library' in replicateObject:
libraryObject = request.embed(replicateObject['library'], '@@object?skip_calculated=true')
if libraryObject['status'] in ('deleted', 'revoked'):
continue
if 'biosample' in libraryObject:
biosampleObject = request.embed(libraryObject['biosample'], '@@object')
if biosampleObject['status'] in ('deleted', 'revoked'):
continue
if biosampleObject['accession'] not in biosample_accessions:
biosample_accessions.add(biosampleObject['accession'])
life_stage = biosampleObject.get('life_stage')
if life_stage:
all_life_stage.add(life_stage)
age_display = biosampleObject.get('age_display')
if age_display:
all_age_display.add(age_display)
# Only return life_stage_age if all biosamples have the same life_stage and age_display
if len(all_life_stage) == 1 and len(all_age_display) == 1:
life_stage_age = ''.join(all_life_stage) + ' ' + ''.join(all_age_display)
return life_stage_age
@calculated_property(schema={
"title": "Perturbed",
"description": "A flag to indicate whether any biosamples have been perturbed with treatments or genetic modifications.",
"type": "boolean",
})
def perturbed(self, request, replicates=None):
if replicates is not None:
bio_perturbed = set()
for rep in replicates:
replicateObject = request.embed(rep, '@@object?skip_calculated=true')
if replicateObject['status'] in ('deleted', 'revoked'):
continue
if 'library' in replicateObject:
libraryObject = request.embed(replicateObject['library'], '@@object?skip_calculated=true')
if libraryObject['status'] in ('deleted', 'revoked'):
continue
if 'biosample' in libraryObject:
biosampleObject = request.embed(libraryObject['biosample'], '@@object')
if biosampleObject['status'] in ('deleted', 'revoked'):
continue
bio_perturbed.add(biosampleObject['perturbed'])
return any(bio_perturbed)
return False
matrix = {
'y': {
'group_by': ['biosample_ontology.classification', 'biosample_ontology.term_name'],
'label': 'Biosample',
},
'x': {
'group_by': 'assay_title',
'label': 'Assay',
},
}
sescc_stem_cell_matrix = {
'y': {
'group_by': ['biosample_ontology.classification', 'biosample_ontology.term_name'],
'label': 'Biosample',
},
'x': {
'group_by': ['assay_title', 'target.label'],
'label': 'Assay',
},
}
chip_seq_matrix = {
'y': {
'group_by': [
'replicates.library.biosample.donor.organism.scientific_name',
'target.label',
],
'label': 'Target',
},
'x': {
'group_by': ['biosample_ontology.classification', 'biosample_ontology.term_name', ('protein_tags.name', 'no_protein_tags')],
'label': 'Term Name',
},
}
summary_matrix = {
'x': {
'group_by': 'status'
},
'y': {
'group_by': ['replication_type']
}
}
reference_epigenome = {
'y': {
'group_by': ['biosample_ontology.classification', 'biosample_ontology.term_name'],
'label': 'Biosample',
},
'x': {
'group_by': ['assay_title', 'target.label'],
'label': 'Assay',
},
}
entex = {
'y': {
'group_by': ['biosample_ontology.classification', 'biosample_ontology.term_name'],
'label': 'Biosample',
},
'x': {
'group_by': ['assay_title', ('target.label', 'no_target'), 'replicates.library.biosample.donor.sex', 'replicates.library.biosample.donor.accession'],
'label': 'Assay',
},
}
mouse_development = {
'y': {
'group_by': ['biosample_ontology.term_name', 'life_stage_age'],
'label': 'Biosample',
},
'x': {
'group_by': ['assay_title', 'target.label'],
'label': 'Assay',
},
}
encore_matrix = {
'y': {
'group_by': ['target.label'],
'label': 'Target',
},
'x': {
'group_by': ['assay_title', 'biosample_ontology.term_name'],
'label': 'Assay',
},
}
encore_rna_seq_matrix = {
'y': {
'group_by': [('replicates.library.biosample.subcellular_fraction_term_name', 'no_term_name')],
'label': 'Subcellular localization',
},
'x': {
'group_by': ['assay_title', 'biosample_ontology.term_name'],
'label': 'Assay',
},
}
audit = {
'audit.ERROR.category': {
'group_by': 'audit.ERROR.category',
'label': 'Error'
},
'audit.INTERNAL_ACTION.category': {
'group_by': 'audit.INTERNAL_ACTION.category',
'label': 'Internal Action'},
'audit.NOT_COMPLIANT.category': {
'group_by': 'audit.NOT_COMPLIANT.category',
'label': 'Not Compliant'
},
'audit.WARNING.category': {
'group_by': 'audit.WARNING.category',
'label': 'Warning'
},
'x': {
'group_by': 'assay_title',
'label': 'Assay'
}
}
@collection(
name='replicates',
acl=ALLOW_SUBMITTER_ADD,
properties={
'title': 'Replicates',
'description': 'Listing of Replicates',
})
class Replicate(Item):
item_type = 'replicate'
schema = load_schema('encoded:schemas/replicate.json')
embedded = [
'antibody',
'experiment',
'library',
'library.biosample',
'library.biosample.donor',
'library.biosample.donor.organism',
]
set_status_up = [
'library',
'antibody',
]
set_status_down = []
def unique_keys(self, properties):
keys = super(Replicate, self).unique_keys(properties)
value = u'{experiment}/{biological_replicate_number}/{technical_replicate_number}'.format(
**properties)
keys.setdefault('replicate:experiment_biological_technical', []).append(value)
return keys
def __ac_local_roles__(self):
properties = self.upgrade_properties()
root = find_root(self)
experiment = root.get_by_uuid(properties['experiment'])
return experiment.__ac_local_roles__()
| mit | 8,760,214,666,596,681,000 | 35.529158 | 161 | 0.5183 | false | 4.128143 | false | false | false |
tensorflow/probability | tensorflow_probability/python/internal/backend/numpy/sets_lib.py | 1 | 1528 | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Numpy implementations of sets functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
from tensorflow_probability.python.internal.backend.numpy import _utils as utils
__all__ = [
'difference',
]
def _difference(a, b, aminusb=True, validate_indices=True):
if not aminusb:
raise NotImplementedError(
'Argument `aminusb != True` is currently unimplemented.')
if not validate_indices:
raise NotImplementedError(
'Argument `validate_indices != True` is currently unimplemented.')
return np.setdiff1d(a, b)
# --- Begin Public Functions --------------------------------------------------
# TODO(b/136555907): Add unit test.
difference = utils.copy_docstring(
'tf.sets.difference',
_difference)
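# Illustrative example (arbitrary values), assuming this backend module is imported
# as `sets_lib`:
#
#   sets_lib.difference(np.array([1, 2, 3, 4]), np.array([3, 4, 5]))
#   # -> array([1, 2]), via np.setdiff1d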
| apache-2.0 | -6,061,709,947,104,250,000 | 30.833333 | 80 | 0.673429 | false | 4.340909 | false | false | false |
salsh/ROA-Analysis | scripts/plot_BGP_Impact.py | 1 | 11904 | #
# This file is part of ROA-Analysis
#
# Author: Samir Al-Sheikh (Freie Universitaet, Berlin)
# [email protected]
#
# MIT License
#
# Copyright (c) 2017 The ROA-Analysis authors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import os, re, matplotlib, calendar, types, numpy, pandas, copy, time, warnings, glob
from collections import Counter
from datetime import datetime
from subprocess import Popen
from utils.parse import parse_window, parse_shell, parse_cfg
from utils.util import cc_refactoring
config_file = os.path.dirname(os.path.abspath(__file__)) + '/config.txt'
if parse_cfg(0).ssh_enabled: matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib as mpl
options = 'i:p:c:m:d:t'
## Note: Warnings ignored (Type 3 font error if tex mode is active)
warnings.filterwarnings('ignore')
def get_options():
return options
def start_bgpreader(interval, project, collectors, archive_an, cfg):
""" Starts a BGPReader instance for multiple RPKI Cache Server for a given time interval """
## Check whether LD_LIBRARY_PATH is set to prevent rtrlib linker errors
rtr_env = os.environ.copy()
if 'LD_LIBRARY_PATH' not in os.environ: exit("Warning: LD_LIBRARY_PATH is not set, add the rtrlib path\n")
## Start BGPReader and log errors to file
dump_file = archive_an + '/' + cfg.bgp_project + \
'_' + cfg.bgp_vantage_point + '_' + interval + '.csv'
if not os.path.exists('LOG/'): os.makedirs('LOG/')
log_file = 'LOG/' + 'plot_BGP_Impact_log.txt'
print 'Info: Executing BGPReader...'
with open(log_file, 'a') as f, open(os.devnull, 'w') as devnull:
command = cfg.bgpreader_path + ' -t updates -p ' + cfg.bgp_project + ' -c ' + cfg.bgp_vantage_point + \
' -H ' + '\"1,0,0,' + project + ',' + (',' + project + ',').join(collectors) + '\"' + \
' -w ' + interval + ' > \"' + dump_file + '\"'
print command
p = Popen(['/bin/bash', '-c', command], env=rtr_env, stdout=devnull, stderr=f)
p.communicate()
print 'Info: Finished BGPReader'
if not os.stat(dump_file).st_size:
return -1
return dump_file
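# For orientation, the shell command assembled above takes roughly the following shape
# (the binary path, projects, vantage point, collectors and archive path are
# illustrative placeholders taken from the config and arguments):
#
#   <bgpreader_path> -t updates -p <bgp_project> -c <bgp_vantage_point> \
#       -H "1,0,0,<rpki_project>,<collector1>,<rpki_project>,<collector2>" \
#       -w <start_unix>,<end_unix> \
#       > "<archive_an>/<bgp_project>_<bgp_vantage_point>_<start_unix>,<end_unix>.csv"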
def main():
""" Dumps BGPReader output to file and analyses the different validation status """
## Parse shell arguments and config constants and build up file infrastructure
args = parse_shell(options); cfg = parse_cfg(args)
if isinstance(args.collector, types.StringTypes):
        exit('Error: At least two collectors must be specified')
archive_an = cfg.archive_an + 'BGP_Impact/' + '_'.join(args.collector)
if not os.path.exists(archive_an): os.makedirs(archive_an)
## Get dump files and convert argument interval to unix timestamps
dump_files, dump_diff = ([] for i in range(2))
for c in args.collector:
collector_search = cfg.archive_cc + c + '/*/' + cfg.pf + '*' + cfg.sf
dump_files.append([c, sorted([e.split('/')[-1] for e in glob.iglob(collector_search)])])
start, end = args.interval.split(',')
if(start == '0' or end == '-1'): exit('Error: Open interval is not allowed')
start_unix = calendar.timegm(datetime.strptime(start, '%Y%m%d.%H%M').utctimetuple())
end_unix = calendar.timegm(datetime.strptime(end, '%Y%m%d.%H%M').utctimetuple())
inv_unix = str(start_unix) + ',' + str(end_unix)
## Start BGPReader and wait for the output
if(args.mode == 'w'):
dump_file = start_bgpreader(inv_unix, args.project, args.collector, archive_an, cfg)
if(dump_file == -1):
exit('Error: BGPStream caused an error')
        ## Create an output dump containing only the updates with differing validation results
with open(dump_file, 'r') as f:
for line in f:
if 'epoch_filetime' in line:
dump_diff.append(line); continue
if(line.split('|')[1] == 'A'):
c_count = []
for c in args.collector:
nf = line.count(c + ',notfound'); iv = line.count(c + ',invalid')
v = line.count(c + ',valid')
if c in line: c_count.append([nf, iv, v])
if(len(set(tuple(c) for c in c_count)) > 1): dump_diff.append(line)
with open(dump_file[:-4] + '_diff.csv' , 'w') as f: f.write(''.join(dump_diff))
else:
dump_file = archive_an + '/' + '-'.join(args.collector) + '_' + cfg.bgp_project + \
'_' + cfg.bgp_vantage_point + '_' + inv_unix + '.csv'
print dump_file
if not os.path.exists(dump_file):
exit('Error: BGP-Impact file does not exist, rerun with w-mode')
        ## Optional plot-only mode: reuse the CSVs exported by a previous run
nf_diff = pandas.read_csv(archive_an + '/not_found.csv', encoding='utf8', delimiter=',')
iv_diff = pandas.read_csv(archive_an + '/invalid.csv', encoding='utf8', delimiter=',')
v_diff = pandas.read_csv(archive_an + '/valid.csv', encoding='utf8', delimiter=',')
v_diff.columns = ['Timestamp'] + list(v_diff.columns.values[1:])
v_diff['Timestamp'] = pandas.to_datetime(v_diff['Timestamp'])
v_diff.set_index('Timestamp', inplace=True)
v_diff = v_diff.reindex_axis(sorted(v_diff.columns), axis=1)
iv_diff.columns = ['Timestamp'] + list(iv_diff.columns.values[1:])
iv_diff['Timestamp'] = pandas.to_datetime(iv_diff['Timestamp'])
iv_diff.set_index('Timestamp', inplace=True)
iv_diff = iv_diff.reindex_axis(sorted(iv_diff.columns), axis=1)
nf_diff.columns = ['Timestamp'] + list(nf_diff.columns.values[1:])
nf_diff['Timestamp'] = pandas.to_datetime(nf_diff['Timestamp'])
nf_diff.set_index('Timestamp', inplace=True)
nf_diff = nf_diff.reindex_axis(sorted(nf_diff.columns), axis=1)
# Debug Print
print nf_diff; print iv_diff; print v_diff
plot(nf_diff, iv_diff, v_diff, args.collector, archive_an, dump_file, args.dpi, args.tex)
return
## Open the output file and split it by interval
df = pandas.read_csv(dump_file, sep='\n', names=['Line'], dtype=str)
inds = df[df['Line'].str.contains('epoch_filetime')].index.values.tolist() + [df.index[-1]]
dumps = [(df['Line'][i].split('epoch_filetime: ')[1].split('\n')[0], \
'\n'.join(df['Line'][i:k].tolist())) for i,k in zip(inds[0::], inds[1::])]
timestamps = [d[0] for d in dumps]
del df, inds
## Count the NFs, IVs, Vs for every collector and set the dataframes
nf = pandas.DataFrame(columns=args.collector, index=timestamps).fillna(numpy.NaN)
iv = copy.deepcopy(nf); v = copy.deepcopy(nf)
for i,y in enumerate(dumps):
nf_l, iv_l, v_l = ([] for i in range(3))
for c in args.collector:
if not len(re.findall(re.escape(c), dumps[i][1])):
nf_l.append(numpy.NaN), iv_l.append(numpy.NaN), v_l.append(numpy.NaN)
else:
nf_l.append(len(re.findall(re.escape(c + ',notfound'), dumps[i][1])))
iv_l.append(len(re.findall(re.escape(c + ',invalid'), dumps[i][1])))
v_l.append(len(re.findall(re.escape(c + ',valid'), dumps[i][1])))
nf.loc[dumps[i][0]] = nf_l; iv.loc[dumps[i][0]] = iv_l; v.loc[dumps[i][0]] = v_l
timestamps = [datetime.strptime(time.strftime('%Y%m%d.%H%M', time.gmtime(float(t))), \
'%Y%m%d.%H%M') for t in timestamps]
nf.index = timestamps; iv.index = timestamps; v.index = timestamps;
nf_diff = nf[args.collector[1:]].sub(nf[args.collector[0]], axis=0)
iv_diff = iv[args.collector[1:]].sub(iv[args.collector[0]], axis=0)
v_diff = v[args.collector[1:]].sub(v[args.collector[0]], axis=0)
nf_diff.columns = cc_refactoring(nf_diff.columns.values)
nf_diff = nf_diff.reindex_axis(sorted(nf_diff.columns), axis=1)
iv_diff.columns = cc_refactoring(iv_diff.columns.values)
iv_diff = iv_diff.reindex_axis(sorted(iv_diff.columns), axis=1)
v_diff.columns = cc_refactoring(v_diff.columns.values)
v_diff = v_diff.reindex_axis(sorted(v_diff.columns), axis=1)
## Debug Print
print nf_diff; print iv_diff; print v_diff
## Export dataframe to csv to enable only plot mode
nf_diff.to_csv(archive_an + '/not_found.csv', sep=',', encoding='utf-8')
iv_diff.to_csv(archive_an + '/invalid.csv', sep=',', encoding='utf-8')
v_diff.to_csv(archive_an + '/valid.csv', sep=',', encoding='utf-8')
plot(nf_diff, iv_diff, v_diff, args.collector, archive_an, dump_file, args.dpi, args.tex)
return
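## Format sketch of the dump file parsed in main() above, reconstructed from the parsing
## logic as an assumption (not an authoritative BGPStream/BGPReader specification): each
## measurement interval starts with a line containing 'epoch_filetime: <unix timestamp>',
## followed by pipe-delimited update records whose second field is 'A' for announcements
## and which embed per-collector validation results as '<collector>,notfound|invalid|valid'
## tokens, e.g.:
##
##   epoch_filetime: 1483228800
##   update|A|...|collector1,valid|...|collector2,invalid|...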
def plot(nf_diff, iv_diff, v_diff, collector, archive_an, dump_file, dpi, tex):
""" Plot the graph for differences for the BGP validation over time """
## Set figure properties
figsize = (20, 10) if not int(tex) else (25, 16)
mpl.rcParams['figure.figsize'] = figsize; mpl.rcParams['figure.dpi'] = dpi
mpl.rcParams['figure.facecolor'] = 'w'; mpl.rcParams['figure.edgecolor'] = 'k'
if int(tex): mpl.rc('font',family='serif',serif='Latin Modern Roman',size=24)
## Plotting
fig, ax = plt.subplots(nrows=len(collector[1:]), ncols=1, sharey=True)
ax1 = fig.add_subplot(111, frameon=False)
plt.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
start, end = nf_diff.index[0].strftime('%b %d '), nf_diff.index[-2].strftime('until %b %d, %Y')
collectors = sorted(cc_refactoring(collector)); y = 1.02 if int(tex) else 1
plt.title('Impact on BGP - Differences for '+ collectors[0].replace('Web', 'W') +' vs. '+
', '.join([c.replace('Web', 'W').replace('RTR', 'R') for c in collectors[1:]]) + \
' - ' + start + end, y=y)
names = list(nf_diff.columns.values)
for i,c in enumerate(names):
fmt = '%Y-%m-%d' if not int(tex) else '%m-%d'
lab = list(set([pandas.to_datetime(str(l)).strftime(fmt) for l in list(nf_diff.index.values)]))
ax[i].set_xticklabels(sorted(lab)[::3]); ax[i].grid(axis='y')
if(i != len(names)-1): ax[i].tick_params(labelbottom='off')
nf_diff[c].plot(y=names, kind='line', ax=ax[i], label='notfound')
iv_diff[c].plot(y=names, kind='line', ax=ax[i], label='invalid')
v_diff[c].plot(y=names, kind='line', ax=ax[i], label='valid')
ax_offset = 1.02 if int(tex) else 1.01
ax[i].set_xlabel(c,rotation=270); ax[i].xaxis.set_label_coords(ax_offset,.90)
plt.setp(ax[i].xaxis.get_majorticklabels(), rotation=0); ax[i].minorticks_off()
for t in ax[i].xaxis.get_major_ticks(): t.label1.set_horizontalalignment('center')
lgd_y = -.8 if int(tex) else -.7
lgd = ax[i].legend(bbox_to_anchor=(0., lgd_y, 1., lgd_y), loc=8, ncol=3, mode='expand', borderaxespad=0)
y_offset = -.05 if int(tex) else -.03; ax1.grid(False); ax1.get_yaxis().set_label_coords(y_offset,0.5)
plt.ylabel('Differences between the validation results of BGP announcements [#prefixes]')
ax = plt.gca().get_xaxis().set_label_coords(0.5,-.05); plt.xlabel('Timestamp'); fig.tight_layout()
## Export Plot
if not os.path.exists(archive_an + '/Plots/'): os.makedirs(archive_an + '/Plots/')
dump_pdf = dump_file.split('/')[-1].replace('csv','pdf')
print '\nPlot: ' + archive_an + '/Plots/' + 'plots_bgp_impact_' + dump_pdf
fig.savefig(archive_an + '/Plots/' + 'plots_bgp_impact_' + dump_pdf, bbox_inches='tight')
if __name__ == '__main__':
main()
| mit | -3,012,274,211,403,680,000 | 48.190083 | 108 | 0.65079 | false | 3.083139 | false | false | false |
uhp/uhp | uhpweb/controller/admin.py | 1 | 39163 | # -*- coding: UTF-8 -*-
import os
import json
import logging
import time
import copy
import tornado
from sqlalchemy.orm import query,aliased
from sqlalchemy import and_,or_,desc,asc
from sqlalchemy import func
from sqlalchemy.orm.exc import NoResultFound
import async
import static_config
import database
import config
import util
import callback_lib
import mm
from controller import BaseHandler
from controller import shell_lib
from model.instance import Instance
from model.host_group_var import Host,Group,GroupHost,HostVar,GroupVar
from model.task import Task
from model.services import Service
from model.callback import CallBack
app_log = logging.getLogger("tornado.application")
class AdminHandler(BaseHandler):
@tornado.web.authenticated
def get(self):
user = self.get_current_user();
if user['type'] != 0 :
self.ret("error","this user is not admin")
return
self.render("admin.html")
class AdminBackHandler(BaseHandler):
@tornado.web.authenticated
def get(self , path):
user = self.get_current_user();
if user['type'] != 0 :
self.ret("error","this user is not admin")
return
if hasattr(self, path) :
fun = getattr(self, path);
if callable(fun):
apply(fun)
else:
self.ret("error","unsupport action")
@tornado.web.authenticated
def post(self , path):
self.get(path)
def user(self):
user = self.get_current_user();
ret = {"user":user,"menus":static_config.adminmenus}
self.ret("ok", "", ret);
    # Get static information for all services
def services_info(self):
session = database.getSession()
active = []
for service in session.query(Service):
if service.status == Service.STATUS_ACTIVE :
active.append(service.service)
services_copy = copy.deepcopy(static_config.services)
for temp in services_copy:
if temp['name'] in active:
temp['active'] = True;
else:
temp['active'] = False;
            # Compute the web URLs
if temp.has_key('web') :
urls = []
for web in temp['web'] :
port = ""
for gv in session.query(GroupVar).filter(GroupVar.name == web['port']) :
port = gv.value
for instance in session.query(Instance).filter(Instance.role == web['role']) :
url = {"role":web['role'],"host":instance.host,"port":port}
urls.append(url)
temp['urls'] = urls;
else:
temp['urls'] = []
            # Special rules
            # The dfs_namenode_support_allow_format setting controls whether the "format" action is exposed
if temp['name'] == 'hdfs' :
should_format = database.get_service_conf(session,'hdfs','dfs_namenode_support_allow_format')
if should_format != None and should_format != 'true' :
wi = 0
find = False;
for action in temp['actions']:
if action['name'] == 'format':
find = True
break;
wi = wi +1
if find:
del temp['actions'][wi]
ret = { "services" : services_copy , "role_check_map" : static_config.role_check_map }
session.close()
self.ret("ok", "", ret);
def service_info(self):
service = self.get_argument("service")
ret = { "name": service,"instances" : self.get_instance(service),"summary":self.get_service_summary(service) }
self.ret("ok", "", ret)
def get_instance(self,service):
session = database.getSession()
instances = session.query(Instance).filter(Instance.service == service )
ret = []
for instance in instances:
ret.append(instance.format())
session.close()
return ret;
def get_service_summary(self,service):
session = database.getSession()
ret = {}
for role in static_config.get_role_from_service(service):
ret[role] = {}
for instance in session.query(Instance).filter(Instance.service==service):
inst = instance.format()
if not ret[inst["role"]].has_key(inst["health"]) :
ret[inst["role"]][inst["health"]] = 0
ret[inst["role"]][inst["health"]] += 1
session.close()
return ret;
    # Get all hosts and groups
def group_host_list(self):
session = database.getSession()
groups = session.query(Group)
ret={}
temp=[]
for group in groups:
temp.append( {"name" : group.group});
ret["groups"]=temp
hosts = session.query(Host).filter(Host.status==Host.STATUS_READY)
temp=[];
for host in hosts:
temp.append( {"name" : host.hostname});
ret["hosts"]=temp
session.close()
self.ret("ok", "", ret);
    # Endpoint for fetching configuration variables; handles both group vars and host vars (host vars are not filtered by hostname)
def conf_var(self):
service = self.get_argument("service")
group = self.get_argument("group","all")
showType = self.get_argument("showType")
temp = []
session = database.getSession()
if showType=="group":
groupVars = session.query(GroupVar).filter(and_( GroupVar.service == service , GroupVar.group == group ) )
for groupVar in groupVars:
temp.append( groupVar.format() );
else:
hostVars = session.query(HostVar).filter( HostVar.service == service )
for hostVar in hostVars:
temp.append( hostVar.format() );
session.close()
self.ret("ok", "", {"conf":temp})
    # Get the fair scheduler information
def fair_scheduler_config(self):
session = database.getSession()
        # Get the queues
queues = database.get_service_conf(session,"yarn","fair_scheduler_queues")
yarn_app_mapreduce_am_resource_mb = database.get_service_conf(session,"yarn","yarn_app_mapreduce_am_resource_mb")
mapreduce_map_memory_mb = database.get_service_conf(session,"yarn","mapreduce_map_memory_mb")
        mapreduce_reduce_memory_mb = database.get_service_conf(session,"yarn","mapreduce_reduce_memory_mb")
        # Count the nodemanager nodes
nodes = 0;
node = []
for instance in session.query(Instance).filter(Instance.role == "nodemanager"):
nodes = nodes + 1
node.append(instance.host)
session.query()
node_memory = database.get_conf_from_host(session,node,"yarn","yarn_nm_resource_memory_mb")
        # Compute the total memory across hosts
total_memory = 0;
for (node,memory) in node_memory.items():
total_memory = total_memory + int(memory)
self.ret("ok","",{"fair_scheduler_queues":queues,"yarn_app_mapreduce_am_resource_mb":yarn_app_mapreduce_am_resource_mb,
"mapreduce_map_memory_mb":mapreduce_map_memory_mb,"mapreduce_reduce_memory_mb":mapreduce_reduce_memory_mb,
"total_memory":total_memory,"nodes":nodes,"node_memory":node_memory
})
    # Save, modify or delete a group variable or a host variable
    # TODO: distinguish whether this is the first insert
def save_conf_var(self):
service = self.get_argument("service")
showType = self.get_argument("showType")
group = self.get_argument("group","")
host = self.get_argument("host","")
name = self.get_argument("name")
value = self.get_argument("value")
type = self.get_argument("type")
text = self.get_argument("text","")
showdel = self.get_argument("del","")
self.save_var_todb(service,showType,group,host,name,value,type,text,showdel)
self.ret("ok", "", {})
def save_var_todb(self,service,showType,group,host,name,value,type,text,showdel=""):
value = str(value)
session = database.getSession()
if showType=="group":
groupVar = GroupVar(group,service,name,value,type,text)
if showdel=="del":
for groupVar in session.query(GroupVar).filter( and_( GroupVar.service == service , GroupVar.group == group , GroupVar.name == name )) :
session.delete(groupVar)
session.commit()
else:
session.merge(groupVar)
session.commit()
else:
hostVar = HostVar(host,service,name,value,type,text)
if showdel=="del":
for hostVar in session.query(HostVar).filter( and_( HostVar.service == service , HostVar.host == host , HostVar.name == name )) :
session.delete(hostVar)
session.commit()
else:
session.merge(hostVar)
session.commit()
session.close()
    # Submit an execution task
    # Currently an instance that receives "start" is directly marked with the start status
    # Currently an instance that receives "stop" is directly marked with the stop status
    # TODO: add starting/stopping states and check them
#
def send_action(self):
taskType = self.get_argument("taskType","ansible")
service = self.get_argument("service")
actionType = self.get_argument("actionType")
instances = self.get_argument("instances","")
taskName = self.get_argument("taskName")
running_id = []
session = database.getSession()
        # Before executing the action, check whether the role counts meet the requirements
        # If they do not, return a warning message
ret_msg = []
        # Role count check
(check,warn_msg) = self.check_role_num_by_service(session, service, "It could make the task fail.")
if not check:
ret_msg += warn_msg
if actionType=="service":
            # Operation on the whole service
self.update_with_service_action(session,service,taskName)
taskid = database.build_task(session,taskType,service,"","",taskName)
running_id.append(taskid)
elif actionType=="instance":
for instance in instances.split(","):
(host,role) = Instance.split_instance_name(instance)
if host != None and role != None :
self.update_with_instance_action(session,service,host,role,taskName)
taskid = database.build_task(session,taskType,service,host,role,taskName)
running_id.append(taskid)
else:
self.ret("error","split instance name %s error" % instance)
return
else:
self.ret("error", "unsport actionType")
return
session.commit()
session.close()
        # Send the message to the MQ
msg = ','.join([str(rid) for rid in running_id])
if not mm.send(msg):
ret_msg.append("send message to worker error")
ret_msg_str = ""
if len(ret_msg) != 0:
ret_msg_str = ",".join(ret_msg)
self.ret("ok", ret_msg_str, {"runningid": running_id})
    # Send a kill command for a given task
def kill_task(self):
taskid = self.get_argument("taskid")
        # Send the message to the MQ
if not mm.kill_task(int(taskid)):
self.ret("error", "killing task failed")
else:
self.ret("ok", "")
    # Try to re-run a failed task
def rerun_task(self):
taskid = self.get_argument("taskid")
session = database.getSession()
try:
task = session.query(Task).filter(Task.id == taskid).one()
except NoResultFound:
return self.ret("error", "Cant't find the task with id: %s" % taskid)
newTaskid = database.build_task(session,task.taskType,task.service,task.host,task.role,task.task)
for cb in session.query(CallBack).filter(CallBack.taskid == taskid):
callback_lib.add_callback(session,newTaskid,cb.func,json.loads(cb.params) )
        # Send the message to the MQ
retMsg = ""
msg = str(newTaskid)
if not mm.send(msg):
retMsg = "send message to worker error"
app_log.info("send msg to mq")
session.close()
self.ret("ok", retMsg, {"taskid":newTaskid} )
def update_with_service_action(self,session,service,taskName):
'''
        Receive an action and update instance status accordingly.
        Performs the state management.
'''
if taskName == "start" :
session.query(Instance).filter(Instance.service==service) \
.update({Instance.status:Instance.STATUS_START,\
Instance.uptime:int(time.time())})
session.commit();
elif taskName == "stop" :
session.query(Instance).filter(Instance.service==service) \
.update({Instance.status:Instance.STATUS_STOP,
Instance.uptime:0})
session.commit();
if taskName == "aux" and service == "hive" :
upload_path = config.aux_upload_dir
aux_list = []
for file in os.listdir(upload_path):
if file.startswith('.'):
continue
file_path = os.path.join(upload_path,file)
if os.path.isfile(file_path):
aux_list.append("file://" + file_path)
session.query(GroupVar).filter( and_((GroupVar.service==service),(GroupVar.name=="hive_aux_jars_path")) ) \
.update({GroupVar.value : ','.join(aux_list) })
session.commit();
def update_with_instance_action(self,session,service,host,role,taskName):
if taskName == "start" :
session.query(Instance).filter(and_(Instance.service==service, \
Instance.host == host, Instance.role == role )) \
.update({Instance.status:Instance.STATUS_START,
Instance.uptime:int(time.time())})
session.commit();
elif taskName == "stop" :
session.query(Instance).filter(and_(Instance.service==service, \
Instance.host == host, Instance.role == role )) \
.update({Instance.status:Instance.STATUS_STOP,
Instance.uptime:0})
session.commit();
    # Add a host
    # Port, user, password etc. may be empty; they are filled in during the asynchronous connection
def add_host(self):
hosts = self.get_argument("hosts")
port = self.get_argument("port","")
user = self.get_argument("user","")
passwd = self.get_argument("passwd","")
sudopasswd = self.get_argument("sudopasswd","")
host_array = hosts.split(",")
(check,msg) = self.check_add_host(host_array)
if not check:
self.ret("error", msg)
return
id = async.async_setup()
async.async_run(async.add_host,(id,host_array,(user,port,passwd,sudopasswd)))
self.ret("ok", "", {"runningId": [id]})
def check_add_host(self,hostArray):
session = database.getSession()
for host in hostArray:
num = session.query(Host).filter(Host.hostname==host).count()
if util.look_like_ip(host) :
return (False,host+" look like ip, please check")
if num != 0 :
return (False,host+" is already in host table")
session.close()
return (True,"")
    # Query progress
def query_progress(self):
idList = self.get_argument("id")
ids = json.loads(idList)
progress = 0;
progress_msg = "";
session = database.getSession()
for nid in ids:
(pg,msg) = self.query_id_process(session,nid)
if nid < 0:
progress_msg += "SyncTask taskid: (%d) %s \n" % (-nid,msg);
else:
progress_msg += "Task taskid:(%d) %s \n" % (nid,msg);
progress += int(pg)
session.close()
progress /= len(ids)
self.ret("ok", "", {"id": ids,"progress":progress,"progressMsg":progress_msg } )
def query_id_process(self,session,nid):
if nid <0 :
            # sync task (negative id, tracked by the async module)
return (async.async_get(nid,"progress","0"),async.async_pop(nid,"progressMsg",""))
else:
            # worker task
queryTask = session.query(Task).filter(Task.id==nid)
if queryTask.count() == 0:
                return (0, str(nid) + " doesn't exist")
else:
nowTask = queryTask[0]
return (nowTask.getProcess(),nowTask.msg)
    # Get the host list
def hosts(self):
session = database.getSession()
hosts = session.query(Host)
ret={}
for host in hosts:
ret[host.hostname]={"info":host.format()}
session.close()
self.ret("ok", "", {"hosts":ret})
def set_rack(self):
hosts = self.get_argument("hosts")
rack = self.get_argument("rack")
session = database.getSession()
session.query(Host).filter(Host.hostname.in_(hosts.split(","))).update( { Host.rack:rack },synchronize_session="fetch" )
session.commit()
session.close()
self.ret("ok","")
def del_host(self):
hosts = self.get_argument("hosts")
session = database.getSession()
(check,msg)=self.check_del_host(session,hosts)
if not check:
self.ret("error", msg)
return
        # Delete the hosts
queryHosts = session.query(Host).filter(Host.hostname.in_(hosts.split(",")))
for host in queryHosts:
session.delete(host)
        # Delete the group membership records
queryGH = session.query(GroupHost).filter(GroupHost.hostname.in_(hosts.split(",")))
for gh in queryGH:
session.delete(gh)
session.commit()
session.close()
self.ret("ok", "")
def check_del_host(self,session,hosts):
num = session.query(Instance).filter(Instance.host.in_(hosts.split(","))).count()
if num != 0 :
return (False,"some host find in instance.please remove them first")
return (True,""+str(num))
    # Query the host-to-role relationships
def host_role(self):
session= database.getSession()
active=[]
for service in session.query(Service):
if service.status == Service.STATUS_ACTIVE :
active.append(service.service)
roles = {};
for service in static_config.services:
if service["name"] in active:
roles[service["name"]] = service["role"]
hostroles = {}
doing=[]
        # First fill in the complete host list
hosts = session.query(Host).filter(Host.status == Host.STATUS_READY)
for host in hosts:
hostname = host.hostname;
hostroles[hostname]={};
hostroles[hostname]['role']=[]
instances = session.query(Instance)
for instance in instances:
role = instance.role
host = instance.host
hostroles[host]['role'].append(role)
if instance.status == Instance.STATUS_SETUP or instance.status == Instance.STATUS_REMOVING :
doing.append({"host":host,"role":role,"status":instance.status})
session.close()
self.ret("ok", "",{"roles":roles,"hostroles":hostroles,"doing":doing})
    # Query operations that are in progress
def doing(self):
doing = []
session = database.getSession()
instances = session.query(Instance)
for instance in instances:
role = instance.role
host = instance.host
if instance.status == Instance.STATUS_SETUP or instance.status == Instance.STATUS_REMOVING :
doing.append({"host":host,"role":role,"status":instance.status})
session.close()
self.ret("ok", "",{"doing":doing})
    # Add a service
def add_service(self):
service = self.get_argument("service")
add_args = self.get_argument("add")
var_args = self.get_argument("vars","[]")
add_instance = json.loads(add_args)
        # Set some required variables
varArgs = json.loads(var_args)
for var in varArgs:
self.save_var_todb(var['service'],var['showType'],var['group'],
var['host'],var['name'],var['value'],var['type'],
var['text'])
        # Activate the service
new_ser = Service(service,Service.STATUS_ACTIVE)
session = database.getSession()
session.merge(new_ser)
session.commit()
session.close()
self.inner_add_del_instance(add_instance, [])
def can_del_service(self):
service = self.get_argument("service")
session = database.getSession()
instances = [];
for instance in session.query(Instance).filter(Instance.service == service):
instances.append(instance.get_instance_name(instance.host, instance.role))
session.close()
if len(instances) == 0:
self.ret("ok", "")
else:
self.ret("error","some instance is exist please remove then first. instances:"+(",".join(instances)))
def del_service(self):
service = self.get_argument("service")
        # Deactivate the service
new_ser = Service(service,Service.STATUS_INIT)
session = database.getSession()
session.merge(new_ser)
session.commit()
session.close()
self.ret("ok", "")
    # Add or delete instances
    # Deletion submits a task and polls whether the task has finished
    # Once the task finishes, the instance is removed
def add_del_instance(self):
add_args = self.get_argument("add","[]")
del_args = self.get_argument("del","[]")
var_args = self.get_argument("vars","[]")
        # Set some required variables
var_args = json.loads(var_args)
for var in var_args:
self.save_var_todb(var['service'],var['showType'],var['group'],
var['host'],var['name'],var['value'],var['type'],
var['text'])
add_instance = json.loads(add_args)
del_instance = json.loads(del_args)
self.inner_add_del_instance(add_instance,del_instance)
def inner_add_del_instance(self,add_instance,del_instance):
session = database.getSession()
ret_msg = []
(check,msg) = self.check_add_del_instance( session, add_instance, del_instance)
if not check:
self.ret("error", msg);
return;
else:
if msg != "" and isinstance(msg, list) :
ret_msg += msg
elif isinstance(msg, str) :
ret_msg.append(msg)
add_running_id = self.add_instance( add_instance )
del_running_id = self.del_instance( del_instance )
for taskid in add_running_id:
callback_lib.add_callback(session,taskid,"dealAddInstance")
for taskid in del_running_id:
callback_lib.add_callback(session,taskid,"dealDelInstance")
session.close()
        # Send the message to the MQ
msg = ','.join([str(id) for id in (add_running_id + del_running_id)])
if not mm.send(msg):
ret_msg.append("send message to worker error")
self.ret("ok", '\n'.join(ret_msg), {"addRunningId":add_running_id,"delRunningId":del_running_id})
def add_instance(self,addInstance):
        # Insert the additions into the instance table
session = database.getSession()
for add_inst in addInstance:
temp_service = static_config.get_service_from_role(add_inst["role"])
new_in = Instance(temp_service,add_inst["host"],add_inst["role"])
new_in.status = Instance.STATUS_SETUP
session.merge(new_in)
session.commit()
        # Submit the setup tasks
running_id=[]
for add_inst in addInstance:
temp_service = static_config.get_service_from_role(add_inst["role"])
taskid = database.build_task(session,"ansible",temp_service,add_inst["host"],add_inst["role"],"setup")
running_id.append(taskid)
session.commit()
session.close()
return running_id
def del_instance(self,delInstance):
        # Update the matching rows in the instance table to the removing status
session = database.getSession()
for delInst in delInstance:
session.query(Instance).filter(and_(Instance.host==delInst["host"],Instance.role==delInst["role"])) \
.update({Instance.status:Instance.STATUS_REMOVING})
session.commit()
        # Submit the removal tasks
running_id=[]
for delInst in delInstance:
tempService = static_config.get_service_from_role(delInst["role"])
#newTask = Task("ansible",tempService,delInst["host"],delInst["role"],"remove")
#session.add(newTask)
#session.flush();
#running_id.append(newTask.id)
new_taskid = database.build_task(session,"ansible",tempService,delInst["host"],delInst["role"],"remove")
running_id.append(new_taskid)
session.commit()
session.close()
return running_id
def check_add_del_instance(self,session,add_instance,del_instance):
if len(add_instance) == 0 and len(del_instance) == 0:
self.ret("error", "no instance need to add or del");
return;
        # Role count check
role_num_query = session.query(Instance.role,func.count(Instance.id)).group_by(Instance.role)
role_num = {}
for record in role_num_query:
role_num[record[0]] = record[1]
add_del_num = {}
for add_inst in add_instance:
num = session.query(Instance).filter(and_(Instance.host == add_inst["host"], \
Instance.role == add_inst["role"])).count()
if num == 1:
return (False,"instance is exist (%s,%s) " % ( add_inst["host"], add_inst["role"]) )
if add_del_num.has_key( add_inst["role"] ) :
add_del_num[add_inst["role"]] = add_del_num[add_inst["role"]] + 1
else:
add_del_num[add_inst["role"]] = 1;
for del_inst in del_instance:
query = session.query(Instance).filter(and_(Instance.host == del_inst["host"], \
Instance.role == del_inst["role"]))
num = query.count();
if num == 0 or num > 1:
return (False,"instance is not exist ( %s,%s) " % ( del_inst["host"] ,del_inst["role"] ))
else:
for instance in query:
if instance.status != "stop":
return (False,"instance's status is not stop (%s,%s) " % ( del_inst["host"], del_inst["role"]) )
if add_del_num.has_key( del_inst["role"] ) :
add_del_num[del_inst["role"]] = add_del_num[del_inst["role"]] - 1
else:
add_del_num[del_inst["role"]] = -1;
        # Merge role_num and add_del_num, then check whether the resulting role counts are valid
warn_msg = []
for (role,new_num) in add_del_num.items():
old_num = 0;
if role_num.has_key(role) :
old_num = role_num[role]
(check,msg) = self.check_role_num( role, old_num+new_num )
if not check :
warn_msg.append(msg)
return (True, warn_msg)
def check_role_num_by_service(self, session, service, add_more_msg=""):
        # Role count check
role_num_query = session.query(Instance.role,func.count(Instance.id)).group_by(Instance.role)
checkResult = True
warnMsg = []
for record in role_num_query:
(check,msg) = self.check_role_num( record[0], record[1], add_more_msg )
if not check:
checkResult = False
warnMsg.append(msg)
return ( checkResult, warnMsg )
def check_role_num(self, role, new_num, add_more_msg=""):
"""
        Check whether the number of instances of this role meets the requirements
"""
if static_config.role_check_map.has_key( role ) :
temp = static_config.role_check_map[role]
if temp.has_key("min") and new_num < temp["min"] :
return (False, "role %s 's number %d shoule more than or equal %d.%s"
% ( role, new_num, temp["min"], add_more_msg) )
if temp.has_key("max") and new_num > temp["max"] :
return (False, "role %s 's number %d shoule less than or equal %d.%s"
% ( role, new_num, temp["max"], add_more_msg) )
if temp.has_key("equal") and new_num != temp["equal"] :
return (False, "role %s 's number %d shoule equal to %d.%s"
% ( role, new_num, temp["equal"], add_more_msg) )
return (True,"")
    # Query tasks
#dir=desc&limit=50&offset=0&orderby=id&search=aaa
def tasks(self):
search = self.get_argument("search","")
orderby = self.get_argument("orderby","")
dir = self.get_argument("dir","")
offset = self.get_argument("offset","")
limit = self.get_argument("limit","")
session = database.getSession()
query = session.query(Task)
if search != "" :
search='%'+search+'%'
query = query.filter(or_(Task.id.like(search),Task.taskType.like(search),Task.service.like(search), \
Task.host.like(search),Task.role.like(search),Task.task.like(search), \
Task.status.like(search), Task.result.like(search)))
total_task = query.count();
if dir=="asc":
query = query.order_by(asc(orderby))[int(offset):int(offset)+int(limit)]
else :
query = query.order_by(desc(orderby))[int(offset):int(offset)+int(limit)]
task_list=[]
for task in query:
task_list.append(task.format())
session.close()
self.ret("ok", "", {"tasks":task_list,"totalTask":total_task})
    # Query the details of a single task
def task_detail(self):
taskid = self.get_argument("taskid")
session = database.getSession()
task = session.query(Task).filter(Task.id==taskid).first()
tf = task.format()
tf['msg'] = task.msg
session.close()
self.ret("ok", "", {"task":tf})
    # Query the host-to-group relationships
def host_group(self):
session = database.getSession()
groups = {}
hostgroups = {}
for host in session.query(Host).filter(Host.status == Host.STATUS_READY ):
hostgroups[host.hostname]={}
hostgroups[host.hostname]["group"]=['all']
for group in session.query(Group):
groups[group.group]=group.text
for gh in session.query(GroupHost):
hostgroups[gh.hostname]["group"].append(gh.group)
session.close()
self.ret("ok","",{"groups":groups,"hostgroups":hostgroups})
    # Save a group
def save_group(self):
name = self.get_argument("group")
text = self.get_argument("text","")
toDel = self.get_argument("del","")
nowGroup = Group(name,text)
session = database.getSession()
if toDel=="del":
for group in session.query(Group).filter(Group.group==name):
session.delete(group)
session.commit()
else:
session.merge(nowGroup)
session.commit()
session.close()
self.ret("ok","")
    # Modify the host-to-group relationships
def setup_group(self):
add_args = self.get_argument("add")
del_args = self.get_argument("del")
add_groups = json.loads(add_args)
del_groups = json.loads(del_args)
session = database.getSession()
for addGroup in add_groups:
gh = GroupHost(addGroup['group'],addGroup['host'])
session.merge(gh)
            session.commit()
for delGroup in del_groups:
query = session.query(GroupHost).filter(and_(GroupHost.hostname==delGroup['host'],GroupHost.group==delGroup['group']))
for gh in query:
session.delete(gh)
session.commit()
session.close()
self.ret("ok","")
#******************************************************
    # Get all template files
def template_list(self):
templates={}
for dir in os.listdir(config.template_dir):
if dir.startswith('.') :
continue;
dirPath = os.path.join(config.template_dir,dir)
if os.path.exists(dirPath) and os.path.isdir(dirPath):
templates[dir] = []
for file in os.listdir(dirPath):
filePath = os.path.join(dirPath,file)
app_log.info(filePath)
if os.path.exists(filePath) and os.path.isfile(filePath):
file = file.replace(".j2","")
templates[dir].append(file);
templates[dir].sort()
self.ret("ok","",{"templates":templates})
    # Get the content of the specified file
def template_file(self):
dir = self.get_argument("dir")
file = self.get_argument("file")
file = file+".j2"
filePath = os.path.join(config.template_dir,dir,file)
if os.path.exists(filePath) and os.path.isfile(filePath):
content = open(filePath, "r").read()
self.ret("ok","",{"content":content,"row":self.get_content_row(content)})
else:
self.ret("error","file not exist")
def template_build_file(self):
'''
        Get the rendered configuration file
'''
dir = self.get_argument("dir")
file = self.get_argument("file")
file = file+".j2"
host = self.get_argument("host")
(content,output) = shell_lib.get_template_file(host,dir,file);
if content != "":
self.ret("ok","",{"content":content,"row":self.get_content_row(content) })
else:
self.ret("error",output)
def template_download_file(self):
'''
        Generate the configuration files for the whole service
'''
dir = self.get_argument("dir")
host = self.get_argument("host")
(url,output) = shell_lib.download_template_file(host,dir);
if url != None and url != "":
self.ret("ok","",{"url" : url })
else:
self.ret("error",output)
def get_content_row(self,content):
count = 0 ;
for c in content:
if c == "\n" :
count = count+1
return count;
def save_template_file(self):
dir = self.get_argument("dir")
file = self.get_argument("file")
file = file+".j2"
content = self.get_argument("content")
filePath = os.path.join(config.template_dir,dir,file)
fd = open(filePath,"w")
fd.write(content.encode('utf8'));
time.sleep(2)
self.ret("ok","")
#****************************************************************************************
    # Manual mode: get the database tables
def manual_metadata(self):
table={}
models = database.get_all_models()
temp = {}
for model in models:
temp = {}
temp['column']=[]
temp['primary']=[]
for col in model.__table__.columns:
if col.primary_key:
temp['primary'].append(col.name)
else:
temp['column'].append(col.name)
table[model.__tablename__]=temp
self.ret("ok","",{"table":table})
def manual_query(self):
sql = self.get_argument("sql")
session = database.getSession()
result = session.execute(sql)
data = []
for record in result:
temp = [];
for value in record:
temp.append(value)
data.append(temp);
session.close()
self.ret("ok","",{"column":result.keys(),"data":data})
    # Modify the database; merge is used directly to apply the changes
def manual_execute(self):
sql = self.get_argument("sql")
session = database.getSession()
result = session.execute(sql)
session.commit()
session.flush()
session.close()
self.ret("ok","")
    # Below are the aux-related configuration handlers
def aux_get(self):
upload_path = config.aux_upload_dir
file_list = []
if not os.path.exists(upload_path) :
os.makedirs(upload_path)
for file in os.listdir(upload_path):
if file.startswith('.'):
continue
file_path = os.path.join(upload_path,file)
if os.path.isfile(file_path):
size = os.path.getsize(file_path)
file_list.append({"name":file,"size":size})
self.ret("ok","",{"files":file_list})
def aux_upload(self):
upload_path = config.aux_upload_dir
file_metas = self.request.files['file']
result = {}
for meta in file_metas:
filename = meta['filename']
filepath = os.path.join(upload_path,filename)
with open(filepath,'wb') as up:
up.write(meta['body'])
result[filename] = "ok"
self.ret("ok", "", {"result":result})
def aux_delete(self):
upload_path = config.aux_upload_dir
file_name = self.get_argument("filename")
file_path = os.path.join(upload_path, file_name)
try:
os.remove(file_path)
self.ret("ok","")
except:
self.ret("error","delete file %s error" % file_path)
| gpl-2.0 | 4,349,474,049,535,728,000 | 36.879482 | 152 | 0.530068 | false | 3.773665 | true | false | false |
pierotofy/WebODM | plugins/cloudimport/platform_helper.py | 1 | 1995 | import importlib
from os import path, listdir
from app.plugins import logger
from .platform_extension import PlatformExtension
platforms = None
def get_all_platforms():
# Cache platforms search
global platforms
if platforms == None:
platforms = read_platform_from_files()
return platforms
def get_platform_by_name(platform_name):
platforms = get_all_platforms()
for platform in platforms:
if platform.name == platform_name:
return platform
return None
def get_all_extended_platforms():
return [platform for platform in get_all_platforms() if isinstance(platform, PlatformExtension)]
def read_platform_from_files():
platforms_path = get_platforms_path()
platforms = []
for platform_script in [platform for platform in listdir(platforms_path) if path.isfile(path.join(platforms_path, platform))]:
# Each python script must have a class called Platform
# Instantiate the platform
try:
module_path = "plugins.cloudimport.platforms.{}".format(path.splitext(platform_script)[0])
module = importlib.import_module(module_path)
platform = (getattr(module, "Platform"))()
platforms.append(platform)
except Exception as e:
logger.warning("Failed to instantiate platform {}: {}".format(platform_script, e))
assert_all_platforms_are_called_differently(platforms)
return platforms
def assert_all_platforms_are_called_differently(platforms):
platform_names = []
for platform in platforms:
if platform.name in platform_names:
# ToDo: throw an error
logger.warning('Found multiple platforms with the name {}. This will cause problems...'.format(platform.name))
else:
platform_names.append(platform.name)
def get_platforms_path():
current_path = path.dirname(path.realpath(__file__))
return path.abspath(path.join(current_path, 'platforms'))
| mpl-2.0 | 957,944,366,230,847,200 | 36.641509 | 130 | 0.677193 | false | 4.365427 | false | false | false |
Brazelton-Lab/bio_utils | bio_utils/tests/test_entry_verifier.py | 1 | 2721 | #! /usr/bin/env python3
"""Test bio_utils' entry_verifier
Copyright:
test_entry_verifier.py test bio_utils' entry_verifier
Copyright (C) 2015 William Brazelton, Alex Hyer
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from bio_utils.verifiers import entry_verifier
from bio_utils.verifiers import FormatError
import os
__author__ = 'Alex Hyer'
__email__ = '[email protected]'
__license__ = 'GPLv3'
__maintainer__ = 'Alex Hyer'
__status__ = 'Production'
__version__ = '1.0.0'
def test_entry_verifier():
"""Test bio_utils' entry_verifier with multiple regex and subjects"""
# Store multiple entries for testing
# Good data
regex_solo = r'^>.+{0}[ACGTU]+{0}$'.format(os.linesep)
subject_solo = '>entry1{0}AGGAATTCGGCTAGCTTGAC{0}'.format(os.linesep)
delimiter_solo = r'{0}'.format(os.linesep)
# Bad set 1
regex_set = r'^@.+\t[ACGTURYKMSWBDHVNX]+\t\+.*\t.+\t$'
subject_set = [r'@entry1\tAAGGATTCG\t+\t142584412\t'
r'@entry\tAGGTZCCCCG\t+\t1224355221\t',
r'@entry3\tGCCTAGC\t+\t6443284\t']
delimiter_set = r'\t'
# Bad set 2
regex_set2 = r'^@.+\\r\\n[ACGTURYKMSWBDHVNX]+\\r\\n\+.*\\r\\n.+\\r\\n$'
subject_set2 = [r'@entry1\r\nAAGGATTCG\r\n+\r\n142584412\r\n'
r'@entry\r\nAGGTGCCCCG\r\n+\r\n1224355221\r\n',
r'4entry3\r\nGCCTAGC\r\n+\r\n6443284\r\n']
delimiter_set2 = r'\\r\\n'
# Next line will throw a FormatError if entry_verifier is broken
# and doesn't deem subject_solo to match regex_solo
entry_verifier([subject_solo], regex_solo, delimiter_solo)
# Test set
try:
entry_verifier(subject_set, regex_set, delimiter_set)
except FormatError as error:
assert error.template == r'^[ACGTURYKMSWBDHVNX]+$'
assert error.subject == 'AGGTZCCCCG'
assert error.part == 1
# Test set 2
try:
entry_verifier(subject_set2, regex_set2, delimiter_set2)
except FormatError as error:
assert error.template == r'^@.+$'
assert error.subject == '4entry3'
assert error.part == 0
| gpl-3.0 | -6,426,005,740,206,852,000 | 33.884615 | 75 | 0.651599 | false | 3.189918 | true | false | false |
DittmarLab/HGTector | hgtector/search.py | 1 | 80853 | #!/usr/bin/env python3
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, Qiyun Zhu and Katharina Dittmar.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import re
from os import remove, makedirs, cpu_count
from os.path import join, isdir, isfile
from shutil import which, rmtree
from tempfile import mkdtemp
from time import time, sleep
from math import log
from urllib.parse import quote
from urllib.request import urlopen, HTTPError, URLError
from hgtector.util import (
timestamp, load_configs, get_config, arg2bool, list_from_param, file2id,
id2file_map, read_taxdump, read_input_prots, read_prot2taxid,
get_product, seqid2accver, write_fasta, run_command, contain_words,
is_latin, is_capital, is_ancestral, taxid_at_rank)
description = """batch sequence homology searching and filtering"""
arguments = [
'basic',
['-i|--input', 'input protein set file, or directory where one or more '
'input files are located', {'required': True}],
['-o|--output', 'directory where search results are to be saved',
{'required': True}],
['-m|--method', 'search method',
{'choices': ['auto', 'diamond', 'blast', 'remote',
'precomp'], 'default': 'auto'}],
['-s|--precomp', 'file or directory of precomputed search results (when '
'method = precomp)'],
'database',
['-d|--db', 'reference protein sequence database'],
['-t|--taxdump', 'directory of taxonomy database files (nodes.dmp and '
'names.dmp)'],
['--taxmap', 'sequence Id to taxId mapping file (not necessary if '
'protein database already contains taxonomy)'],
'search behaviors',
['-k|--maxhits', 'maximum number of hits to preserve per query (0 for '
'unlimited)', {'type': int}],
['--minsize', 'minimum length of query sequence (aa)', {'type': int}],
['--queries', 'number of queries per run (0 for whole sample)',
{'type': int}],
['--maxchars', 'maximum number of characters per run (0 for unlimited)',
{'type': int}],
'search cutoffs',
['--maxseqs', 'maximum number of sequences to return', {'type': int}],
['--evalue', 'maximum E-value cutoff', {'type': float}],
['--identity', 'minimum percent identity cutoff', {'type': int}],
['--coverage', 'minimum percent query coverage cutoff', {'type': int}],
['--extrargs', 'extra arguments for choice of search method'],
'taxonomic filters',
['--tax-include', 'include taxa under those taxIds'],
['--tax-exclude', 'exclude taxa under those taxIds'],
['--tax-unique', 'ignore more than one hit with same taxID',
{'choices': ['yes', 'no']}],
['--tax-unirank', 'ignore more than one hit under same taxon at this '
'rank'],
['--tax-capital', 'ignore taxon names that are not capitalized',
{'choices': ['yes', 'no']}],
['--tax-latin', 'ignore species names that are not Latinate',
{'choices': ['yes', 'no']}],
['--tax-block', 'ignore taxon names containing those words'],
'local search behaviors',
['-p|--threads', 'number of threads (0 for all CPU cores)',
{'type': int}],
['--tmpdir', 'temporary directory'],
['--diamond', 'diamond executable'],
['--blastp', 'blastp executable'],
['--blastdbcmd', 'blastdbcmd executable'],
'remote search behaviors',
['--algorithm', 'remote search algorithm'],
['--retries', 'maximum number of retries per search',
{'type': int}],
['--delay', 'seconds between two search requests',
{'type': int}],
['--timeout', 'seconds before program gives up waiting',
{'type': int}],
['--entrez', 'entrez query text'],
['--server', 'remote search server URL'],
'self-alignment options',
['--aln-method', 'self-alignment method',
{'choices': ['auto', 'native', 'fast', 'lookup',
'precomp']}],
['--aln-precomp', 'file or directory of precomputed sequence Id to score '
'maps (when self-alignment method = precomp)'],
['--aln-server', 'remote align server URL'],
'remote fetch options',
['--fetch-enable', 'whether to enable remote fetch',
{'choices': ['auto', 'yes', 'no']}],
['--fetch-queries', 'maximum number of query entries per search'],
['--fetch-retries', 'maximum number of retries per search'],
['--fetch-delay', 'seconds between two fetch requests',
{'type': int}],
['--fetch-timeout', 'seconds before program gives up waiting',
{'type': int}],
['--fetch-server', 'remote fetch server URL'],
]
class Search(object):
def __init__(self):
self.arguments = arguments
self.description = description
def __call__(self, args):
print(f'Homology search started at {timestamp()}.')
# load configurations
self.cfg = load_configs()
# read and validate arguments
self.args_wf(args)
# read and validate input data
self.input_wf()
# perform homology search for each sample
for sid, sample in sorted(self.data.items()):
if 'done' in sample:
continue
prots = sample['prots']
print(f'Batch homology search of {sid} started at {timestamp()}.')
# collect sequences to search
id2idx = {}
seqs = []
for i, prot in enumerate(prots):
if 'hits' in prot:
continue
id_ = prot['id']
seqs.append((id_, prot['seq']))
id2idx[id_] = i
print(f'Number of queries: {len(seqs)}.')
# divide sequences into batches
batches = ([seqs] if self.method == 'precomp' else
self.subset_seqs(seqs, self.queries, self.maxchars))
# run search for each batch
n = 0
for batch in batches:
# batch homology search
res = self.search_wf(
batch, self.pcmap[sid] if self.method == 'precomp'
else None)
# update taxIds of hits
self.taxid_wf(res)
# update taxonomic information of new taxIds
self.taxinfo_wf(res)
# perform taxonomy-based filtering
self.taxfilt_wf(res)
# update samples with search results
indices = [id2idx[x[0]] for x in batch]
self.update_search_results(prots, res, set(indices))
# perform self-alignment
seqs2a = [x for x in batch if 'score' not in prots[
id2idx[x[0]]]]
if seqs2a:
for id_, score in self.selfaln_wf(seqs2a, res).items():
prots[id2idx[id_]]['score'] = score
# write search results to file
with open(join(self.output, f'{sid}.tsv'), 'a') as f:
self.write_search_results(f, prots, indices)
n += len(batch)
print(f' {n} queries completed.')
print(f'Batch homology search of {sid} ({len(prots)} proteins) '
f'ended at {timestamp()}.')
# clean up
if hasattr(self, 'mkdtemp'):
rmtree(self.tmpdir)
print(f'Batch homology search finished at {timestamp()}.')
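    # A minimal sketch of the batching performed by subset_seqs (called in __call__ above
    # and defined elsewhere in this class); this is an assumption about its behavior, not
    # its actual implementation, and it ignores the "0 means unlimited" cases for brevity:
    # sequences are grouped greedily so that each batch holds at most `queries` entries
    # and at most `maxchars` total residues.
    #
    #   batches, cur, chars = [], [], 0
    #   for id_, seq in seqs:
    #       if cur and (len(cur) >= queries or chars + len(seq) > maxchars):
    #           batches.append(cur)
    #           cur, chars = [], 0
    #       cur.append((id_, seq))
    #       chars += len(seq)
    #   if cur:
    #       batches.append(cur)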
"""master workflows"""
def args_wf(self, args):
"""Workflow for validating and setting arguments.
Parameters
----------
args : dict
command-line arguments
Notes
-----
Workflow:
1. Load command-line arguments.
2. Update arguments from configurations.
3. Validate input and output directories.
4. Determine search method and parameters.
5. Determine taxonomy database and map.
6. Determine self-alignment method and parameters.
7. Determine remote fetch method and parameters.
8. Print major settings if desired.
"""
# load arguments
for key, val in vars(args).items():
setattr(self, key, val)
# check input directory and data
if isfile(self.input):
self.input_map = {file2id(self.input): self.input}
elif isdir(self.input):
self.input_map = {k: join(self.input, v) for k, v in id2file_map(
self.input).items()}
if len(self.input_map) == 0:
raise ValueError(
f'No input data are found under: {self.input}.')
else:
raise ValueError(
f'Invalid input data file or directory: {self.input}.')
# check / create output directory
makedirs(self.output, exist_ok=True)
self.prev_map = id2file_map(self.output, 'tsv')
"""determine search strategy"""
# load search parameters
get_config(self, 'evalue', 'search.evalue', float)
for key in ('method', 'minsize', 'maxseqs', 'identity', 'coverage'):
get_config(self, key, f'search.{key}')
for key in ('diamond', 'blastp', 'blastdbcmd'):
get_config(self, key, f'program.{key}')
if self.method not in {'auto', 'diamond', 'blast', 'remote',
'precomp'}:
raise ValueError(f'Invalid search method: {self.method}.')
# look for precomputed search results
if self.method == 'precomp' and not self.precomp:
raise ValueError('Must specify location of pre-computed search '
'results.')
if self.precomp:
if isfile(self.precomp):
if len(self.input_map) > 1:
raise ValueError('A directory of multiple pre-computed '
'search result files is needed.')
self.pcmap = {file2id(self.precomp): self.precomp}
elif isdir(self.precomp):
self.pcmap = {k: join(self.precomp, v) for k, v in id2file_map(
self.precomp).items()}
if len(self.pcmap) == 0:
raise ValueError('Cannot locate any pre-computed search '
f'results at: {self.precomp}.')
else:
raise ValueError('Invalid pre-computed search result file or '
f'directory: {self.precomp}.')
if self.method == 'auto':
self.method = 'precomp'
# check local search executables and databases
diamond_db = self.check_diamond()
blast_db = self.check_blast()
# choose a local search method if available, or do remote search
if self.method == 'auto':
if diamond_db:
self.method = 'diamond'
self.db = diamond_db
elif blast_db:
self.method = 'blast'
self.db = blast_db
else:
self.method = 'remote'
# load method-specific arguments
for key in ('queries', 'maxchars', 'extrargs'):
get_config(self, key, f'{self.method}.{key}')
# load remote search settings
if self.method == 'remote':
for key in ('db', 'algorithm', 'delay', 'timeout', 'entrez'):
get_config(self, key, f'remote.{key}')
get_config(self, 'server', 'server.search')
# determine number of threads
if self.method in ('diamond', 'blast') and not self.threads:
# use all available CPUs if threads is set to zero or left empty
self.threads = cpu_count()
# do single-threading if CPU count not working
if self.threads is None:
print('WARNING: Cannot determine number of CPUs. Will do '
'single-threading instead.')
self.threads = 1
# apply BLAST CPU number cap
if self.method == 'blast' and self.threads > 8:
print('WARNING: BLAST can only use a maximum of 8 CPUs.')
self.threads = 8
# check / create temporary directory
if self.method in ('diamond', 'blast'):
dir_ = self.tmpdir
if not dir_:
self.tmpdir = mkdtemp()
setattr(self, 'mkdtemp', True) # mark for cleanup
elif not isdir(dir_):
raise ValueError(f'Invalid temporary directory: {dir_}.')
"""determine taxonomy database and filters"""
# initialize protein-to-taxId map
self.prot2tid = {}
# assign taxonomy database
for key in ('taxdump', 'taxmap'):
get_config(self, key, f'database.{key}')
if self.method != 'remote':
# check local taxonomy database
if not self.taxdump:
print('WARNING: Local taxonomy database is not specified. '
'Will try to retrieve taxonomic information from remote '
'server.')
elif not isdir(self.taxdump):
raise ValueError(
f'Invalid taxonomy database directory: {self.taxdump}.')
else:
for fname in ('names.dmp', 'nodes.dmp'):
if not isfile(join(self.taxdump, fname)):
raise ValueError(
f'Taxonomy database file {fname} is not found.')
# check local taxonomy map
if self.taxmap and not isfile(self.taxmap):
raise ValueError(
f'Invalid protein-to-taxId map: {self.taxmap}.')
# load taxonomic filters and convert to lists
for key in ('include', 'exclude', 'block'):
attr = f'tax_{key}'
get_config(self, attr, f'taxonomy.{key}')
setattr(self, attr, list_from_param(getattr(self, attr)))
# load taxonomy switches
for key in ('unique', 'unirank', 'capital', 'latin'):
get_config(self, f'tax_{key}', f'taxonomy.{key}')
"""determine self-alignment strategy"""
# load configurations
get_config(self, 'aln_method', 'search.selfaln')
get_config(self, 'aln_server', 'server.selfaln')
met_ = self.aln_method
if met_ not in ('auto', 'native', 'fast', 'lookup', 'precomp'):
raise ValueError(f'Invalid self-alignment method: {met_}.')
# look for precomputed self-alignment results
if met_ == 'precomp' and not self.aln_precomp:
raise ValueError('Must specify location of pre-computed self-'
'alignment scores.')
pre_ = self.aln_precomp
if pre_:
if isfile(pre_):
if len(self.input_map) > 1:
raise ValueError('A directory of multiple pre-computed '
'self-alignment result files is needed.')
self.aln_pcmap = {file2id(pre_): pre_}
elif isdir(pre_):
self.aln_pcmap = {k: join(pre_, v) for k, v in
id2file_map(pre_).items()}
if len(self.aln_pcmap) == 0:
raise ValueError('Cannot locate any pre-computed self-'
f'alignment results at: {pre_}.')
else:
raise ValueError('Invalid pre-computed self-alignment result '
f'file or directory: {pre_}.')
if met_ == 'auto':
self.aln_method = 'precomp'
# use the same search method for self-alignment, otherwise use fast
if met_ in ('auto', 'native'):
self.aln_method = 'fast' if self.method == 'precomp' else 'native'
"""determine fetch strategy"""
# load configurations
get_config(self, 'fetch_server', 'server.fetch')
for key in ('enable', 'queries', 'retries', 'delay', 'timeout'):
get_config(self, f'fetch_{key}', f'fetch.{key}')
# determine remote or local fetching
if self.fetch_enable == 'auto':
self.fetch_enable = 'yes' if (
self.method == 'remote' or not self.taxdump) else 'no'
"""final steps"""
# convert boolean values
for key in ('tax_unique', 'tax_capital', 'tax_latin'):
setattr(self, key, arg2bool(getattr(self, key, None)))
# convert fractions to percentages
for metric in ('identity', 'coverage'):
val = getattr(self, metric)
if val and val < 1:
setattr(self, metric, val * 100)
# print major settings
print('Settings:')
print(f' Search method: {self.method}.')
print(f' Self-alignment method: {self.aln_method}.')
print(f' Remote fetch enabled: {self.fetch_enable}.')
def check_diamond(self):
"""Check if DIAMOND is available.
Returns
-------
str or None
valid path to DIAMOND database, or None if not available
Raises
------
ValueError
If settings conflict.
"""
if self.method in ('diamond', 'auto'):
if not self.diamond:
self.diamond = 'diamond'
if which(self.diamond):
try:
db = self.db or self.cfg['database']['diamond']
except KeyError:
pass
if db:
if isfile(db) or isfile(f'{db}.dmnd'):
return db
elif self.method == 'diamond':
raise ValueError(f'Invalid DIAMOND database: {db}.')
elif self.method == 'diamond':
raise ValueError(
'A protein database is required for DIAMOND search.')
elif self.method == 'diamond':
raise ValueError(
f'Invalid diamond executable: {self.diamond}.')
def check_blast(self):
"""Check if BLAST is available.
Returns
-------
str or None
valid path to BLAST database, or None if not available
Raises
------
ValueError
If settings conflict.
"""
if self.method in ('blast', 'auto'):
if not self.blastp:
self.blastp = 'blastp'
if which(self.blastp):
try:
db = self.db or self.cfg['database']['blast']
except KeyError:
pass
if db:
if all(isfile(f'{db}.{x}') for x in ('phr', 'pin', 'psq')):
return db
elif self.method == 'blast':
raise ValueError(f'Invalid BLAST database: {db}.')
elif self.method == 'blast':
raise ValueError(
'A protein database is required for BLAST search.')
elif self.method == 'blast':
raise ValueError(f'Invalid blastp executable: {self.blastp}.')
def input_wf(self):
"""Master workflow for processing input data.
Notes
-----
Workflow:
1. Read proteins of each protein set.
2. Process search results from previous runs.
3. Import precomputed self-alignment scores.
4. Fetch sequences for proteins to be searched.
5. Drop sequences shorter than threshold.
6. Read or initiate taxonomy database.
7. Read protein-to-taxId map.
"""
# initiate data structure
self.data = {}
# read proteins of each sample
print('Reading input proteins...')
nprot = 0
for id_, fname in self.input_map.items():
prots = read_input_prots(fname)
n = len(prots)
if n == 0:
raise ValueError(f'No protein entries are found for {id_}.')
print(f' {id_}: {n} proteins.')
self.data[id_] = {'prots': prots}
nprot += n
print(f'Done. Read {nprot} proteins from {len(self.data)} samples.')
# process search results from previous runs
ndone = 0
if self.prev_map:
print('Processing search results from previous runs...')
for id_ in self.data:
if id_ not in self.prev_map:
continue
prots_ = self.data[id_]['prots']
n = len(prots_)
m = len(self.parse_prev_results(join(
self.output, self.prev_map[id_]), prots_))
if m == n:
self.data[id_]['done'] = True
ndone += m
print(f'Done. Found results for {ndone} proteins, remaining '
f'{nprot - ndone} to search.')
# check if search is already completed
if (ndone == nprot):
return
# import precomputed self-alignment scores
if self.aln_method == 'precomp' and hasattr(self, 'aln_pcmap'):
n, m = 0, 0
print('Importing precomputed self-alignment scores...', end='')
for sid, file in self.aln_pcmap.items():
# read scores
scores = {}
with open(file, 'r') as f:
for line in f:
line = line.rstrip('\r\n')
if not line or line.startswith('#'):
continue
id_, score = line.split('\t')
scores[id_] = float(score)
# assign scores if not already
for prot in self.data[sid]['prots']:
if 'score' in prot:
continue
n += 1
id_ = prot['id']
try:
prot['score'] = scores[id_]
m += 1
except KeyError:
pass
print(' done.')
print(f' Imported scores for {n} proteins.')
dif = n - m
if dif > 0:
raise ValueError(f'Missing scores for {dif} proteins.')
# fetch sequences for unsearched proteins
seqs2q = self.check_missing_seqs(self.data)
n = len(seqs2q)
if n > 0:
print(f'Sequences of {n} proteins are to be retrieved.')
if self.method == 'blast':
print('Fetching sequences from local BLAST database...',
end='')
seqs = self.blast_seqinfo(seqs2q)
n = self.update_prot_seqs(seqs)
print(' done.')
print(f' Obtained sequences of {n} proteins.')
seqs2q = self.check_missing_seqs(self.data)
n = len(seqs2q)
if n > 0:
print(f' Remaining {n} proteins.')
if n > 0 and self.fetch_enable == 'yes':
print(f'Fetching {n} sequences from remote server...',
flush=True)
seqs = self.remote_seqinfo(seqs2q)
n = self.update_prot_seqs(seqs)
print(f'Done. Obtained sequences of {n} proteins.')
seqs2q = self.check_missing_seqs(self.data)
n = len(seqs2q)
if n > 0:
raise ValueError(f' Cannot obtain sequences of {n} proteins.')
# drop short sequences
if self.minsize:
print(f'Dropping sequences shorter than {self.minsize} aa...',
end='')
for sid, sample in self.data.items():
for i in reversed(range(len(sample['prots']))):
if len(sample['prots'][i]['seq']) < self.minsize:
del sample['prots'][i]
print(' done.')
# read or initiate taxonomy database
# read external taxdump
if self.taxdump is not None:
print('Reading local taxonomy database...', end='')
self.taxdump = read_taxdump(self.taxdump)
print(' done.')
print(f' Read {len(self.taxdump)} taxa.')
# read taxdump generated by previous runs
elif (isfile(join(self.output, 'names.dmp')) and
isfile(join(self.output, 'nodes.dmp'))):
print('Reading custom taxonomy database...', end='')
self.taxdump = read_taxdump(self.output)
print(' done.')
print(f' Read {len(self.taxdump)} taxa.')
# build taxdump from scratch
else:
print('Initiating custom taxonomy database...', end='')
self.taxdump = {'1': {
'name': 'root', 'parent': '1', 'rank': 'no rank'}}
self.update_dmp_files(['1'])
print(' done.')
# record invalid taxIds to save compute
self.badtaxids = set()
def search_wf(self, seqs, file=None):
"""Master workflow for batch homology search.
Parameters
----------
seqs : list of tuple
query sequences (Id, sequence)
file : str, optional
file of precomputed results
Returns
-------
dict
sequence Id to hit table
Notes
-----
Workflow:
1. Generate an Id-to-length map.
2. Import precomputed search results (if method = precomp).
3. Call choice of method (remote, blast or diamond) to search.
"""
# generate an Id-to-length map
lenmap = {x[0]: len(x[1]) for x in seqs}
# import pre-computed search results
if self.method == 'precomp':
print('Importing pre-computed search results...', end='')
res = self.parse_hit_table(file, lenmap)
print(' done.')
print(f' Found hits for {len(res)} proteins.')
# run de novo search
elif self.method == 'remote':
res = self.remote_search(seqs)
sleep(self.delay)
elif self.method == 'blast':
res = self.blast_search(seqs)
elif self.method == 'diamond':
res = self.diamond_search(seqs)
return res
def taxid_wf(self, prots):
"""Master workflow for associating hits with taxIds.
Parameters
----------
prots : dict of list
proteins (search results)
Notes
-----
Workflow:
1. Update taxmap with taxIds directly available from hit tables.
2. Get taxIds for sequence Ids without them:
2.1. Query external taxon map, if available.
2.2. Do local fetch (from a BLAST database) if available.
2.3. Do remote fetch (from NCBI server) if available.
3. Delete hits without associated taxIds.
"""
added_taxids = set()
# update taxon map with taxIds already in hit tables
ids2q, added = self.update_hit_taxids(prots)
added_taxids.update(added)
# attempt to look up taxIds from external taxon map
if ids2q and self.method != 'remote' and self.taxmap is not None:
# load taxon map on first use (slow and memory-hungry)
if isinstance(self.taxmap, str):
print('Reading protein-to-TaxID map (WARNING: may be slow and '
'memory-hungry)...', end='', flush=True)
self.taxmap = read_prot2taxid(self.taxmap)
print(' done.')
print(f' Read {len(self.taxmap)} records.')
ids2q, added = self.update_hit_taxids(prots, self.taxmap)
added_taxids.update(added)
# attempt to look up taxIds from local BLAST database
if (ids2q and self.method in ('blast', 'precomp') and self.db
and self.blastdbcmd):
newmap = {x[0]: x[1] for x in self.blast_seqinfo(ids2q)}
ids2q, added = self.update_hit_taxids(prots, newmap)
added_taxids.update(added)
# attempt to look up taxIds from remote server
if ids2q and self.fetch_enable == 'yes':
print(f'Fetching taxIds of {len(ids2q)} sequences from remote '
'server...', flush=True)
newmap = {x[0]: x[1] for x in self.remote_seqinfo(ids2q)}
print(f'Done. Obtained taxIds of {len(newmap)} sequences.')
ids2q, added = self.update_hit_taxids(prots, newmap)
added_taxids.update(added)
# drop hits whose taxIds cannot be identified
n = len(ids2q)
if n > 0:
print(f'WARNING: Cannot obtain taxIds for {n} sequences. These '
'hits will be dropped.')
for hits in prots.values():
for i in reversed(range(len(hits))):
if hits[i]['id'] in ids2q:
del hits[i]
def taxinfo_wf(self, prots):
"""Master workflow for associating hits with taxonomic information.
Parameters
----------
prots : dict of list
proteins (search results)
Notes
-----
Workflow:
1. Obtain a list of taxIds represented by hits.
2. List taxIds that are missing in current taxonomy database.
3. Get taxonomic information for taxIds by remote fetch, if available.
4. Append new taxonomic information to dump files, if available.
5. Delete hits whose taxIds are not associated with information.
"""
# list taxIds without information
tids2q = set()
for prot, hits in prots.items():
for hit in hits:
tid = hit['taxid']
if tid not in self.taxdump:
tids2q.add(tid)
tids2q = sorted(tids2q)
# retrieve information for these taxIds
if tids2q and self.fetch_enable == 'yes':
print(f'Fetching {len(tids2q)} taxIds and ancestors from remote '
'server...', flush=True)
xml = self.remote_taxinfo(tids2q)
added = self.parse_taxonomy_xml(xml)
print(f'Done. Obtained taxonomy of {len(tids2q)} taxIds.')
self.update_dmp_files(added)
tids2q = [x for x in tids2q if x not in added]
# drop taxIds whose information cannot be obtained
if tids2q:
print(f'WARNING: Cannot obtain information of {len(tids2q)} '
'taxIds. These hits will be dropped.')
tids2q = set(tids2q)
for hits in prots.values():
for i in reversed(range(len(hits))):
if hits[i]['taxid'] in tids2q:
del hits[i]
            self.badtaxids.update(tids2q)
def taxfilt_wf(self, prots):
"""Workflow for filtering hits by taxonomy.
Parameters
----------
prots : dict of list of dict
proteins (search results)
Notes
-----
Workflow:
1. Bottom-up filtering (delete in place)
1.1. Delete taxIds already marked as bad.
1.2. Delete taxIds whose ancestors are not in the "include" list.
1.3. Delete taxIds, any of whose ancestors is in the "exclude" list.
1.4. Delete empty taxon names.
1.5. Delete taxon names that are not capitalized.
1.6. Delete taxon names in which any word is in the "block" list.
2. Bottom-up filtering (mark and batch delete afterwards)
2.1. Mark taxIds that already appeared in hit table for deletion.
2.2. Mark taxIds whose ancestor at given rank already appeared in hit
table for deletion.
"""
# filtering of taxIds and taxon names
for id_, hits in prots.items():
# bottom-up filtering by independent criteria
for i in reversed(range(len(hits))):
todel = False
tid = hits[i]['taxid']
taxon = self.taxdump[tid]['name']
if tid in self.badtaxids:
todel = True
elif (self.tax_include and not
is_ancestral(tid, self.tax_include, self.taxdump)):
todel = True
elif (self.tax_exclude and
is_ancestral(tid, self.tax_exclude, self.taxdump)):
todel = True
elif taxon == '':
todel = True
elif self.tax_capital and not is_capital(taxon):
todel = True
elif self.tax_block and contain_words(taxon, self.tax_block):
todel = True
elif self.tax_latin:
tid_ = taxid_at_rank(tid, 'species', self.taxdump)
if not tid_ or not is_latin(tid_):
todel = True
if todel:
del hits[i]
self.badtaxids.add(tid)
# top-down filtering by sorting-based criteria
todels = []
used = set()
used_at_rank = set()
for i in range(len(hits)):
tid = hits[i]['taxid']
if self.tax_unique:
if tid in used:
todels.append(i)
continue
else:
used.add(tid)
if self.tax_unirank:
tid_ = taxid_at_rank(tid, self.tax_unirank, self.taxdump)
if tid_ and tid_ in used_at_rank:
todels.append(i)
else:
used_at_rank.add(tid_)
for i in reversed(todels):
del hits[i]
def selfaln_wf(self, seqs, prots=None):
"""Master workflow for protein sequence self-alignment.
Parameters
----------
seqs : list of tuple
query sequences (Id, sequence)
prots : dict of list of dict, optional
hit tables, only relevant when amet = lookup
Returns
-------
dict
Id-to-score map
Notes
-----
Workflow:
1. If amet = lookup, just look up, and raise if any sequences don't
have self-hits.
2. If amet = fast, run fast built-in algorithm on each sequence.
3. If amet = native, find the corresponding search method.
4. Run self-alignment in batches only when method = precomp, otherwise
the query sequences are already subsetted into batches.
5. If some sequences don't have self hits in batch self-alignments,
try to get them via single self-alignments.
6. If some sequences still don't have self hits, do built-in algorithm,
but note that the output may be slightly different from others.
7. If some sequences still don't have self hits, raise.
"""
res = []
# just look up (will fail if some are not found)
if self.aln_method == 'lookup':
res = self.lookup_selfaln(seqs, prots)
# use built-in algorithm
elif self.aln_method == 'fast':
for id_, seq in seqs:
bitscore, evalue = self.fast_selfaln(seq)
res.append((id_, bitscore, evalue))
# use the same search method for self-alignment
elif self.aln_method == 'native':
# divide sequences into batches, when search results are
# precomputed
batches = ([seqs] if self.method != 'precomp' else
self.subset_seqs(seqs, self.queries, self.maxchars))
# do self-alignments in batches to save compute
for batch in batches:
res_ = []
# call search method
if self.method == 'remote':
res_ = self.remote_selfaln(batch)
sleep(self.delay)
elif self.method == 'blast':
res_ = self.blast_selfaln(batch)
elif self.method == 'diamond':
res_ = self.diamond_selfaln(batch)
# merge results
res += res_
# if some hits are not found, do single alignments
left = set([x[0] for x in seqs]) - set([x[0] for x in res])
if left:
print('WARNING: The following sequences cannot be self-aligned '
'in a batch. Do individual alignments instead.')
print(' ' + ', '.join(left))
for id_, seq in seqs:
if id_ not in left:
continue
res_ = None
# call search method
if self.method == 'remote':
res_ = self.remote_selfaln([(id_, seq)])
sleep(self.delay)
elif self.method == 'blast':
res_ = self.blast_selfaln([(id_, seq)])
elif self.method == 'diamond':
res_ = self.diamond_selfaln([(id_, seq)])
# if failed, do built-in alignment
if not res_:
print(f'WARNING: Sequence {id_} cannot be self-aligned '
'using the native method. Do fast alignment '
'instead.')
bitscore, evalue = self.fast_selfaln(seq)
res_ = [(id_, bitscore, evalue)]
# merge results
res += res_
# check if all sequences have results
left = set([x[0] for x in seqs]) - set([x[0] for x in res])
if left:
raise ValueError('Cannot calculate self-alignment metrics for '
'the following sequences:\n ' + ', '.join(
sorted(left)))
return {x[0]: x[1] for x in res}
"""input/output functions"""
@staticmethod
def subset_seqs(seqs, queries=None, maxchars=None):
"""Generate subsets of sequences based on cutoffs.
Parameters
----------
seqs : list of tuple
sequences to subset (id, sequence)
queries : int, optional
number of query sequences per subset
maxchars : int, optional
maximum total length of query sequences per subset
Returns
-------
list of list of tuple
subsets
Raises
------
ValueError
If any sequence exceeds maxchars.
"""
if not maxchars:
# no subsetting
if not queries:
return [seqs]
# subsetting only by queries
subsets = []
for i in range(0, len(seqs), queries):
subsets.append(seqs[i:i + queries])
return subsets
# subsetting by maxchars, and by queries if applicable
subsets = [[]]
cquery, cchars = 0, 0
for id_, seq in seqs:
chars = len(seq)
if chars > maxchars:
raise ValueError(f'Sequence {id_} exceeds maximum allowed '
f'length {maxchars} for search.')
if cchars + chars > maxchars or queries == cquery > 0:
subsets.append([])
cquery, cchars = 0, 0
subsets[-1].append((id_, seq))
cquery += 1
cchars += chars
return subsets
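    # Illustrative usage (hypothetical sequences, not from the original code):
    # subset_seqs splits a query batch by count and/or total length, e.g.
    #   seqs = [('p1', 'MKV'), ('p2', 'MKVL'), ('p3', 'MK')]
    #   subset_seqs(seqs, queries=2)
    #   # => [[('p1', 'MKV'), ('p2', 'MKVL')], [('p3', 'MK')]]
    #   subset_seqs(seqs, maxchars=5)
    #   # => [[('p1', 'MKV')], [('p2', 'MKVL')], [('p3', 'MK')]]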
def update_search_results(self, prots, res, indices=set()):
"""Update proteins with new search results.
Parameters
----------
prots : list of dict
proteins to update
res : dict
search results
indices : set of int, optional
indices of proteins to be updated
if omitted, only proteins with hits will be updated
"""
for i, prot in enumerate(prots):
if 'hits' in prot:
continue
if indices and i not in indices:
continue
id_ = prot['id']
if id_ in res:
prot['hits'] = []
n = 0
for hit in res[id_]:
prot['hits'].append(hit)
n += 1
if self.maxhits and n == self.maxhits:
break
elif indices and i in indices:
prot['hits'] = []
@staticmethod
def write_search_results(f, prots, indices=None):
"""Write search results to a file.
Parameters
----------
f : file handle
file to write to (in append mode)
        prots : list of dict
protein set
indices : list of int, optional
limit to these proteins
"""
for i in indices if indices else range(len(prots)):
prot = prots[i]
f.write(f'# ID: {prot["id"]}\n')
f.write(f'# Length: {len(prot["seq"])}\n')
f.write(f'# Product: {prot["product"]}\n')
f.write(f'# Score: {prot["score"]}\n')
f.write(f'# Hits: {len(prot["hits"])}\n')
for hit in prot['hits']:
f.write('\t'.join([hit[x] for x in (
'id', 'identity', 'evalue', 'score', 'coverage',
'taxid')]) + '\n')
@staticmethod
def parse_prev_results(fp, prots):
"""Parse previous search results.
Parameters
----------
        fp : str
file containing search results
prots : list
protein records
Returns
-------
list of str
completed protein Ids
"""
done = []
with open(fp, 'r') as f:
for line in f:
if line.startswith('# ID: '):
done.append(line[6:].rstrip('\r\n'))
doneset = set(done)
for prot in prots:
if prot['id'] in doneset:
prot['score'] = 0
prot['hits'] = []
return done
@staticmethod
def check_missing_seqs(data):
"""Get a list of proteins whose sequences remain to be retrieved.
Parameters
----------
data : dict
protein sets
Returns
-------
list of str
Ids of proteins without sequences
"""
res = set()
for sid, sample in data.items():
if 'done' in sample:
continue
for prot in sample['prots']:
if not prot['seq'] and 'hits' not in prot:
res.add(prot['id'])
return sorted(res)
def update_dmp_files(self, ids):
"""Write added taxonomic information to custom taxdump files.
Parameters
----------
ids : list of str
added taxIds
Notes
-----
Taxonomic information will be appended to nodes.dmp and names.dmp in
the working directory.
"""
fo = open(join(self.output, 'nodes.dmp'), 'a')
fa = open(join(self.output, 'names.dmp'), 'a')
for id_ in sorted(ids, key=int):
fo.write('\t|\t'.join((
id_, self.taxdump[id_]['parent'],
self.taxdump[id_]['rank'])) + '\t|\n')
fa.write('\t|\t'.join((
id_, self.taxdump[id_]['name'])) + '\t|\n')
fo.close()
fa.close()
"""sequence query functions"""
def blast_seqinfo(self, ids):
"""Retrieve information of given sequence Ids from local BLAST database.
Parameters
----------
ids : list of str
query sequence Ids
Returns
-------
list of tuple
(id, taxid, product, sequence)
Notes
-----
When making database (using makeblastdb), one should do -parse_seqids
to enable search by name (instead of sequence) and -taxid_map with a
seqId-to-taxId map to enable taxId query.
"""
# run blastdbcmd
# fields: accession, taxid, sequence, title
cmd = ' '.join((
self.blastdbcmd,
'-db', self.db,
'-entry', ','.join(ids),
'-outfmt', '"%a %T %s %t"'))
out = run_command(cmd)[1]
# parse output
res = []
header = True
for line in out:
# catch invalid database error
if header:
# letter case is dependent on BLAST version
if line.lower().startswith('blast database error'):
raise ValueError(f'Invalid BLAST database: {self.db}.')
header = False
# if one sequence Id is not found, program will print:
# Error: [blastdbcmd] Entry not found: NP_123456.1
# if none of sequence Ids are found, program will print:
# Error: [blastdbcmd] Entry or entries not found in BLAST
# database
if (line.startswith('Error') or 'not found' in line or
line.startswith('Please refer to')):
continue
# limit to 4 partitions because title contains spaces
x = line.split(None, 3)
# if database was not compiled with -taxid_map, taxIds will be 0
if x[1] in ('0', 'N/A'):
x[1] = ''
# title will be empty if -parse_seqids was not triggered
if len(x) == 3:
x.append('')
# parse title to get product
else:
x[3] = get_product(x[3])
res.append((x[0], x[1], x[3], x[2]))
return res
def remote_seqinfo(self, ids):
"""Retrieve information of given sequence Ids from remote server.
Parameters
----------
ids : list of str
query sequence Ids (e.g., accessions)
Returns
-------
list of tuple
(id, taxid, product, sequence)
Raises
------
ValueError
All sequence Ids are invalid.
Failed to retrieve info from server.
"""
return self.parse_fasta_xml(self.remote_fetches(
ids, 'db=protein&rettype=fasta&retmode=xml&id={}'))
def remote_fetches(self, ids, urlapi):
"""Fetch information from remote server in batch mode
Parameters
----------
ids : list of str
query entries (e.g., accessions)
urlapi : str
URL API, with placeholder for query entries
Returns
-------
str
fetched information
Notes
-----
The function dynamically determines the batch size, starting from a
large number and reducing by half on every other retry. This is because
the NCBI server is typically busy and frequently runs into the "502 Bad
Gateway" issue. To resolve, one may subset queries and retry.
"""
cq = self.fetch_queries # current number of queries
cids = ids # current list of query Ids
res = ''
while True:
batches = [cids[i:i + cq] for i in range(0, len(cids), cq)]
failed = []
# batch fetch sequence information
for batch in batches:
try:
res += self.remote_fetch(urlapi.format(','.join(batch)))
print(f' Fetched information of {len(batch)} entries.',
flush=True)
sleep(self.fetch_delay)
except ValueError:
failed.extend(batch)
# reduce batch size by half on each trial
if failed and cq > 1:
cids = failed
cq = int(cq / 2)
print('Retrying with smaller batch size...', flush=True)
else:
cids = []
break
if cids:
print(f'WARNING: Cannot retrieve information of {len(cids)} '
'entries.')
return res
def remote_fetch(self, urlapi):
"""Fetch information from remote server.
Parameters
----------
urlapi : str
URL API
Returns
-------
str
fetched information
Raises
------
ValueError
Fetch failed.
"""
url = f'{self.fetch_server}?{urlapi}'
for i in range(self.fetch_retries):
if i:
print('Retrying...', end=' ', flush=True)
sleep(self.fetch_delay)
try:
with urlopen(url, timeout=self.fetch_timeout) as response:
return response.read().decode('utf-8')
except (HTTPError, URLError) as e:
print(f'{e.code} {e.reason}.', end=' ', flush=True)
print('', flush=True)
raise ValueError('Failed to fetch information from remote server.')
def update_prot_seqs(self, seqs):
"""Update protein sets with retrieved sequences.
Parameters
----------
seqs : list of tuple
protein sequences (id, taxid, product, sequence)
Returns
-------
int
number of proteins with sequence added
Notes
------
Different protein sets may contain identical protein Ids.
"""
# hash proteins by id
prots = {x[0]: (x[2], x[3]) for x in seqs}
# if queries are accessions without version, NCBI will add version
acc2ver = {}
for id_, info in prots.items():
acc_ = re.sub(r'\.\d+$', '', id_)
acc2ver[acc_] = id_
n = 0
for sid, sample in self.data.items():
for prot in sample['prots']:
if prot['seq']:
continue
# try to match protein Id (considering de-versioned accession)
id_ = prot['id']
if id_ not in prots:
try:
id_ = acc2ver[id_]
except KeyError:
continue
if id_ not in prots:
continue
# update protein information
for i, key in enumerate(['product', 'seq']):
if not prot[key]:
prot[key] = prots[id_][i]
n += 1
return n
@staticmethod
def parse_fasta_xml(xml):
"""Parse sequence information in FASTA/XML format retrieved from NCBI
server.
Parameters
----------
xml : str
sequence information in XML format
Returns
-------
list of str
[id, taxid, product, sequence]
Notes
-----
        NCBI EFetch record type = TinySeq XML
.. _NCBI RESTful API:
https://www.ncbi.nlm.nih.gov/books/NBK25499/table/chapter4.T._valid_
values_of__retmode_and/?report=objectonly
.. _NCBI RESTful API example:
https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=protein
&rettype=fasta&retmode=xml&id=NP_454622.1,NP_230502.1,NP_384288.1
"""
seqs = []
for m in re.finditer(r'<TSeq>(.+?)<\/TSeq>', xml, re.DOTALL):
s_ = m.group(1)
seq = []
for key in (('accver', 'taxid', 'defline', 'sequence')):
m_ = re.search(r'<TSeq_%s>(.+)<\/TSeq_%s>' % (key, key), s_)
seq.append(m_.group(1) if m_ else '')
seq[2] = get_product(seq[2])
seqs.append(seq)
return seqs
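    # Illustrative sketch (hypothetical record, not from the original code):
    # a single TinySeq XML entry and what parse_fasta_xml extracts from it.
    #   xml = ('<TSeq>'
    #          '<TSeq_accver>NP_000001.1</TSeq_accver>'
    #          '<TSeq_taxid>9606</TSeq_taxid>'
    #          '<TSeq_defline>example protein [Homo sapiens]</TSeq_defline>'
    #          '<TSeq_sequence>MKVLAA</TSeq_sequence>'
    #          '</TSeq>')
    #   parse_fasta_xml(xml)
    #   # => [['NP_000001.1', '9606', <product parsed from defline>, 'MKVLAA']]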
"""homology search functions"""
def blast_search(self, seqs):
"""Run BLAST to search a sequence against a database.
Parameters
----------
seqs : list of tuple
query sequences (id, sequence)
Returns
-------
dict of list of dict
hit table per query sequence
Raises
------
ValueError
If BLAST run fails.
Notes
-----
- In ncbi-blast+ 2.7.1, the standard tabular format (-outfmt 6) is:
qaccver saccver pident length mismatch gapopen qstart qend sstart
send evalue bitscore
- In older versions, fields 1 and 2 are qseqid and sseqid. The
difference is that an sseqid may read "ref|NP_123456.1|" instead of
"NP_123456.1".
- staxids are ;-delimited, will be "N/A" if not found or the database
does not contain taxIds.
- The BLAST database should ideally be prepared as:
makeblastdb -in seqs.faa -dbtype prot -out db -parse_seqids \
          -taxid_map seq2taxid.txt
- Unlike blastn, blastp does not have -perc_identity.
"""
tmpin = join(self.tmpdir, 'tmp.in')
with open(tmpin, 'w') as f:
write_fasta(seqs, f)
cmd = [self.blastp,
'-query', tmpin,
'-db', self.db]
args = {x: getattr(self, x, None) for x in (
'evalue', 'coverage', 'maxseqs', 'threads', 'extrargs')}
if args['evalue']:
cmd.extend(['-evalue', str(args['evalue'])])
if args['coverage']:
cmd.extend(['-qcov_hsp_perc', str(args['coverage'])])
if args['maxseqs']:
cmd.extend(['-max_target_seqs', str(args['maxseqs'])])
if args['threads'] is not None:
cmd.extend(['-num_threads', str(args['threads'])])
if args['extrargs']:
cmd.append(args['extrargs'])
cmd.append('-outfmt "6 qaccver saccver pident evalue bitscore qcovhsp'
' staxids"')
ec, out = run_command(' '.join(cmd))
remove(tmpin)
if ec:
raise ValueError(f'blastp failed with error code {ec}.')
return self.parse_def_table(out)
def diamond_search(self, seqs):
"""Run DIAMOND to search a sequence against a database.
Parameters
----------
seqs : list of tuple
query sequences (id, sequence)
Returns
-------
dict of list of dict
hit table per query sequence
Raises
------
ValueError
If DIAMOND run fails.
Notes
-----
The DIAMOND database should ideally be prepared as:
diamond makedb --in seqs.faa --db db \
--taxonmap prot.accession2taxid
"""
tmpin = join(self.tmpdir, 'tmp.in')
with open(tmpin, 'w') as f:
write_fasta(seqs, f)
cmd = [self.diamond, 'blastp',
'--query', tmpin,
'--db', self.db,
'--threads', str(self.threads),
'--tmpdir', self.tmpdir]
args = {x: getattr(self, x, None) for x in (
'evalue', 'identity', 'coverage', 'maxseqs', 'extrargs')}
if args['evalue']:
cmd.extend(['--evalue', str(args['evalue'])])
if args['identity']:
cmd.extend(['--id', str(args['identity'])])
if args['coverage']:
            cmd.extend(['--query-cover', str(args['coverage'])])
if args['maxseqs']:
cmd.extend(['--max-target-seqs', str(args['maxseqs'])])
if args['extrargs']:
cmd.append(args['extrargs'])
cmd.extend(['--outfmt',
'6 qseqid sseqid pident evalue bitscore qcovhsp staxids'])
ec, out = run_command(' '.join(cmd), merge=False)
remove(tmpin)
if ec:
raise ValueError(f'diamond failed with error code {ec}.')
return self.parse_def_table(out)
def remote_search(self, seqs):
"""Perform BLAST search through a remote server.
Parameters
----------
seqs : list of tuple
query sequences (id, sequence)
Returns
-------
dict of list of dict
hit table per query sequence
.. _NCBI's official reference of RESTful APIs:
https://ncbi.github.io/blast-cloud/dev/using-url-api.html
.. _NCBI's official sample Perl script:
https://blast.ncbi.nlm.nih.gov/docs/web_blast.pl
.. _NCBI has restrictions on the frequency and bandwidth of remote
BLAST searches. See this page:
https://blast.ncbi.nlm.nih.gov/Blast.cgi?CMD=Web&PAGE_TYPE=
BlastDocs&DOC_TYPE=DeveloperInfo
.. _Instead, NCBI recommends setting up custom BLAST servers. See:
https://ncbi.github.io/blast-cloud/
"""
# generate query URL
query = ''.join([f'>{id_}\n{seq}\n' for id_, seq in seqs])
url = f'{self.server}?CMD=Put&PROGRAM=blastp&DATABASE={self.db}'
if self.algorithm:
url += '&BLAST_PROGRAMS=' + self.algorithm
if self.evalue:
url += '&EXPECT=' + str(self.evalue)
if self.maxseqs:
url += '&MAX_NUM_SEQ=' + str(self.maxseqs)
if self.entrez:
url += '&EQ_TEXT=' + quote(self.entrez)
if self.extrargs:
url += '&' + self.extrargs.lstrip('&')
url += '&QUERY=' + quote(query)
print(f'Submitting {len(seqs)} queries for search.', end='',
flush=True)
trial = 0
while True:
if trial:
if trial == (self.retries or 0) + 1:
raise ValueError(
f'Remote search failed after {trial} trials.')
print(f'Retry {trial} times.', end='', flush=True)
sleep(self.delay)
trial += 1
# get request Id
with urlopen(url) as response:
res = response.read().decode('utf-8')
m = re.search(r'^ RID = (.*$)', res, re.MULTILINE)
if not m:
print('WARNING: Failed to obtain RID.')
continue
rid = m.group(1)
print(f' RID: {rid}.', end='', flush=True)
sleep(1)
# check status
url_ = f'{self.server}?CMD=Get&FORMAT_OBJECT=SearchInfo&RID={rid}'
starttime = time()
success = False
while True:
with urlopen(url_) as response:
res = response.read().decode('utf-8')
m = re.search(r'\s+Status=(.+)', res, re.MULTILINE)
if not m:
print('WARNING: Failed to retrieve remote search status.')
break
status = m.group(1)
if status == 'WAITING':
if time() - starttime > self.timeout:
print('WARNING: Remote search timeout.')
break
print('.', end='', flush=True)
sleep(self.delay)
continue
elif status in ('FAILED', 'UNKNOWN'):
print('WARNING: Remote search failed.')
break
elif status == 'READY':
if 'ThereAreHits=yes' not in res:
print('WARNING: Remote search returned no result.')
break
success = True
break
else:
print(f'WARNING: Unknown remote search status: {status}.')
break
if not success:
continue
sleep(1)
# retrieve result
url_ = (f'{self.server}?CMD=Get&ALIGNMENT_VIEW=Tabular'
f'&FORMAT_TYPE=Text&RID={rid}')
if self.maxseqs:
url_ += (f'&MAX_NUM_SEQ={self.maxseqs}'
f'&DESCRIPTIONS={self.maxseqs}')
with urlopen(url_) as response:
res = response.read().decode('utf-8')
if '# blastp' not in res or '# Query: ' not in res:
print('WARNING: Invalid format of remote search results.')
continue
print(' Results retrieved.')
break
# fields (as of 2018): query acc.ver, subject acc.ver, % identity,
# alignment length, mismatches, gap opens, q. start, q. end, s. start,
# s. end, evalue, bit score, % positives
m = re.search(r'<PRE>(.+?)<\/PRE>', res, re.DOTALL)
out = m.group(1).splitlines()
lenmap = {id_: len(seq) for id_, seq in seqs}
return self.parse_m8_table(out, lenmap)
def parse_hit_table(self, file, lenmap=None):
"""Determine hit table type and call corresponding parser.
Parameters
----------
file : str
hit table file
lenmap : dict, optional
map of sequence Ids to lengths (only needed for m8)
Returns
-------
list of dict
hit table
"""
ism8 = None
lines = []
with open(file, 'r') as f:
for line in f:
line = line.rstrip('\r\n')
if line and not line.startswith('#'):
lines.append(line)
if ism8 is None:
x = line.split('\t')
ism8 = len(x) > 8
return (self.parse_m8_table(lines, lenmap) if ism8 else
self.parse_def_table(lines))
def parse_def_table(self, lines):
"""Parse search results in default tabular format.
Parameters
----------
lines : list of str
search result in default tabular format
fields: qseqid sseqid pident evalue bitscore qcovhsp staxids
Returns
-------
dict of list of dict
hits per query
"""
res = {}
ths = {x: getattr(self, x, 0) for x in (
'evalue', 'identity', 'coverage', 'maxhits')}
for line in lines:
line = line.rstrip('\r\n')
if not line or line.startswith('#'):
continue
x = line.split('\t')
# filter by thresholds
if ths['evalue']:
if x[3] != '*' and ths['evalue'] < float(x[3]):
continue
if ths['identity']:
if x[2] != '*' and ths['identity'] > float(x[2]):
continue
if ths['coverage']:
if x[5] != '*' and ths['coverage'] > float(x[5]):
continue
# pass if maximum targets reached
if ths['maxhits']:
if x[0] in res and ths['maxhits'] == len(res[x[0]]):
continue
# add hit to list
res.setdefault(x[0], []).append({
'id': seqid2accver(x[1]), 'identity': x[2], 'evalue': x[3],
'score': x[4], 'coverage': x[5], 'taxid': x[6] if x[6] not
in {'', 'N/A', '0'} else ''})
return res
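    # Illustrative sketch (hypothetical hit, not from the original code): one
    # line of the default tabular format and the parsed record, assuming all
    # thresholds are unset and seqid2accver leaves a bare accession unchanged.
    #   line = 'q1\tWP_000001.1\t85.3\t1e-50\t200\t95.0\t9606'
    #   self.parse_def_table([line])
    #   # => {'q1': [{'id': 'WP_000001.1', 'identity': '85.3',
    #   #             'evalue': '1e-50', 'score': '200',
    #   #             'coverage': '95.0', 'taxid': '9606'}]}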
def parse_m8_table(self, lines, lenmap):
"""Parse search results in BLAST's standard tabular format (m8).
Parameters
----------
lines : list of str
search result in BLAST m8 tabular format
fields: qseqid sseqid pident length mismatch gapopen qstart qend
sstart send evalue bitscore
lenmap : dict
map of sequence Ids to lengths (needed for calculating coverage)
Returns
-------
list of dict
hit table
Raises
------
ValueError
Query Id not found in length map.
"""
res = {}
ths = {x: getattr(self, x, 0) for x in (
'evalue', 'identity', 'coverage', 'maxhits')}
for line in lines:
line = line.rstrip('\r\n')
if not line or line.startswith('#'):
continue
x = line.split('\t')
# calculate coverage
if x[0] not in lenmap:
raise ValueError(f'Invalid query sequence Id: {x[0]}.')
try:
cov = (int(x[7]) - int(x[6]) + 1) / lenmap[x[0]] * 100
except ValueError:
cov = 0
# filter by thresholds
if ths['evalue']:
if x[10] != '*' and ths['evalue'] < float(x[10]):
continue
if ths['identity']:
if x[2] != '*' and ths['identity'] > float(x[2]):
continue
if ths['coverage']:
if cov and ths['coverage'] > cov:
continue
# pass if maximum targets reached
if ths['maxhits']:
if x[0] in res and ths['maxhits'] == len(res[x[0]]):
continue
# add hit to list
res.setdefault(x[0], []).append({
'id': seqid2accver(x[1]), 'identity': x[2], 'evalue': x[10],
'score': x[11], 'coverage': f'{cov:.2f}', 'taxid': ''})
return res
"""taxonomy query functions"""
def update_hit_taxids(self, prots, taxmap={}):
"""Update hits with taxIds, and update master sequence Id to taxId map.
Parameters
----------
prots : dict of list of dict
proteins (e.g., search results)
taxmap : dict, optional
reference sequence Id to taxId map
Returns
-------
list, list
sequence Ids still without taxIds
taxIds added to master map
"""
idsotid = set() # proteins without taxIds
newtids = set() # newly added taxIds
for prot, hits in prots.items():
for hit in hits:
id_, tid = hit['id'], hit['taxid']
# taxId already in hit table
if tid:
if id_ not in self.prot2tid:
self.prot2tid[id_] = tid
newtids.add(tid)
continue
# taxId already in taxon map
try:
hit['taxid'] = self.prot2tid[id_]
continue
except KeyError:
pass
# taxId in reference taxon map:
try:
tid = taxmap[id_]
hit['taxid'] = tid
self.prot2tid[id_] = tid
newtids.add(tid)
continue
except KeyError:
pass
# not found
idsotid.add(id_)
return sorted(idsotid), sorted(newtids)
def remote_taxinfo(self, ids):
"""Retrieve complete taxonomy information of given taxIds from remote
server.
Parameters
----------
ids : list of str
query taxIds
Returns
-------
str
taxonomy information in XML format
Raises
------
ValueError
TaxID list is invalid.
ValueError
Failed to retrieve info from server.
"""
res = self.remote_fetches(ids, 'db=taxonomy&id={}')
# this error occurs when taxIds are not numeric
if '<ERROR>ID list is empty' in res:
raise ValueError('Invalid taxId list.')
return res
def parse_taxonomy_xml(self, xml):
"""Parse taxonomy information in XML format retrieved from NCBI server.
Parameters
----------
xml : str
taxonomy information in XML format
Returns
-------
list of str
taxIds added to taxonomy database
Notes
-----
The function will update taxonomy database.
"""
added = []
# get result for each query
p = re.compile(r'<Taxon>\n'
r'\s+<TaxId>(\d+)<\/TaxId>\n.+?'
r'\s+<ScientificName>([^<>]+)<\/ScientificName>.+?'
r'\s+<ParentTaxId>(\d+)<\/ParentTaxId>.+?'
r'\s+<Rank>([^<>]+?)<\/Rank>(.+?)\n'
r'<\/Taxon>',
re.DOTALL | re.VERBOSE)
for m in p.finditer(xml):
tid = m.group(1)
if tid in self.taxdump:
continue
# add query taxId to taxdump
self.taxdump[tid] = {
'name': m.group(2), 'parent': m.group(3), 'rank': m.group(4)}
added.append(tid)
# get lineage
m1 = re.search(r'<LineageEx>(.+?)<\/LineageEx>', m.group(5),
re.DOTALL)
if not m1:
continue
# move up through lineage
p1 = re.compile(r'\s+<Taxon>\n'
r'\s+<TaxId>(\d+)<\/TaxId>\n'
r'\s+<ScientificName>([^<>]+)<\/ScientificName>\n'
r'\s+<Rank>([^<>]+)<\/Rank>\n'
r'\s+<\/Taxon>\n',
re.DOTALL | re.VERBOSE)
for m2 in reversed(list(p1.finditer(m1.group(1)))):
tid_ = m2.group(1)
pid = self.taxdump[tid]['parent']
if pid == '':
self.taxdump[tid]['parent'] = tid_
elif pid != tid_:
raise ValueError(
f'Broken lineage for {tid}: {pid} <=> {tid_}.')
tid = tid_
if tid in self.taxdump:
continue
self.taxdump[tid] = {
'name': m2.group(2), 'parent': '', 'rank': m2.group(3)}
added.append(tid)
# stop at root
if self.taxdump[tid]['parent'] == '':
self.taxdump[tid]['parent'] = '1'
return added
"""self-alignment functions"""
@staticmethod
def lookup_selfaln(seqs, hits):
"""Look up self-alignment metrics of sequences from their hit tables.
Parameters
----------
seqs : list of tuple
query sequences (id, sequence)
hits : dict of list of dict
hit tables
Returns
-------
list of tuple
(id, bitscore, evalue)
"""
res = []
for id_, seq in seqs:
msg = (f'Cannot find a self-hit for sequence {id_}. Consider '
'setting self-alignment method to other than "lookup".')
if id_ not in hits:
raise ValueError(msg)
found = False
for hit in hits[id_]:
if hit['id'] == id_:
res.append((id_, hit['score'], hit['evalue']))
found = True
break
if not found:
raise ValueError(msg)
return res
@staticmethod
def fast_selfaln(seq):
"""Calculate self-alignment statistics using built-in algorithm.
Parameters
----------
seq : str
query sequence
Returns
-------
tuple of (str, str)
bitscore and evalue
Notes
-----
Statistics are calculated following:
.. _Official BLAST documentation:
https://www.ncbi.nlm.nih.gov/BLAST/tutorial/Altschul-1.html
Default BLASTp parameters are assumed (matrix = BLOSUM62, gapopen
= 11, gapextend = 1), except for that the composition based statistics
is switched off (comp-based-stats = 0).
Result should be identical to that by DIAMOND, but will be slightly
different from that by BLAST.
"""
# BLOSUM62 is the default aa substitution matrix for BLAST / DIAMOND
blosum62 = {'A': 4, 'R': 5, 'N': 6, 'D': 6, 'C': 9,
'Q': 5, 'E': 5, 'G': 6, 'H': 8, 'I': 4,
'L': 4, 'K': 5, 'M': 5, 'F': 6, 'P': 7,
'S': 4, 'T': 5, 'W': 11, 'Y': 7, 'V': 4}
# calculate raw score (S)
n, raw = 0, 0
for c in seq.upper():
try:
n += 1
raw += blosum62[c]
# in case there are non-basic amino acids
except KeyError:
pass
# BLAST's empirical values when gapopen = 11, gapextend = 1. See:
# ncbi-blast-2.7.1+-src/c++/src/algo/blast/core/blast_stat.c, line #268
lambda_, K = 0.267, 0.041
# calculate bit score (S')
bit = (lambda_ * raw - log(K)) / log(2)
# calculate e-value (E)
e = n ** 2 * 2 ** -bit
return f'{bit:.1f}', f'{e:.3g}'
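    # Worked example (assumed input, not from the original code): for a 10-aa
    # sequence whose BLOSUM62 diagonal scores sum to S = 50, the statistics
    # above give
    #   S' = (0.267 * 50 - ln(0.041)) / ln(2) ~= 23.87 -> '23.9'
    #   E  = 10**2 * 2**-23.87               ~= 6.53e-06 -> '6.53e-06'
    # so fast_selfaln would return ('23.9', '6.53e-06') for such a sequence.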
def blast_selfaln(self, seqs):
"""Run BLAST to align sequences to themselves.
Parameters
----------
seqs : list of tuple
query sequences (id, sequence)
Returns
-------
list of tuple
(id, bitscore, evalue)
"""
tmpin = join(self.tmpdir, 'tmp.in')
with open(tmpin, 'w') as f:
write_fasta(seqs, f)
cmd = ' '.join((
self.blastp,
'-query', tmpin,
'-subject', tmpin,
'-num_threads', str(self.threads),
'-outfmt', '6'))
extrargs = getattr(self, 'extrargs', None)
if extrargs:
cmd += ' ' + extrargs
ec, out = run_command(cmd)
if ec:
raise ValueError(f'blastp failed with error code {ec}.')
remove(tmpin)
        return self.parse_self_m8(out)
def diamond_selfaln(self, seqs):
"""Run DIAMOND to align sequences to themselves.
Parameters
----------
seqs : list of tuple
query sequences (id, sequence)
Returns
-------
list of tuple
(id, bitscore, evalue)
"""
# generate temporary query file
tmpin = join(self.tmpdir, 'tmp.in')
with open(tmpin, 'w') as f:
write_fasta(seqs, f)
# generate temporary database
tmpdb = join(self.tmpdir, 'tmp.dmnd')
cmd = ' '.join((
self.diamond, 'makedb',
'--in', tmpin,
'--db', tmpdb,
'--threads', str(self.threads),
'--tmpdir', self.tmpdir))
ec, out = run_command(cmd, merge=False)
if ec:
raise ValueError(f'diamond failed with error code {ec}.')
# perform search
cmd = ' '.join((
self.diamond, 'blastp',
'--query', tmpin,
'--db', tmpdb,
'--threads', str(self.threads),
'--tmpdir', self.tmpdir))
extrargs = getattr(self, 'extrargs', None)
if extrargs:
cmd += ' ' + extrargs
ec, out = run_command(cmd, merge=False)
if ec:
raise ValueError(f'diamond failed with error code {ec}.')
remove(tmpin)
remove(tmpdb)
        return self.parse_self_m8(out)
def remote_selfaln(self, seqs):
"""Perform BLAST search through a remote server.
Parameters
----------
seqs : list of tuple
query sequences (id, sequence)
Returns
-------
list of tuple
(id, bitscore, evalue)
"""
# further split sequences into halves (to comply with URI length limit)
batches = self.subset_seqs(seqs, maxchars=int(
(self.maxchars + 1) / 2)) if self.maxchars else [seqs]
result = []
for batch in batches:
# generate query URL
query = ''.join([f'>{id_}\n{seq}\n' for id_, seq in batch])
query = quote(query)
url = (f'{self.aln_server}?CMD=Put&PROGRAM=blastp&'
f'DATABASE={self.db}&QUERY={query}&SUBJECTS={query}')
if self.extrargs:
url += '&' + self.extrargs.lstrip('&')
print(f'Submitting {len(batch)} queries for self-alignment.',
end='', flush=True)
trial = 0
while True:
if trial:
if trial == (self.retries or 0) + 1:
raise ValueError('Remote self-alignment failed after '
f'{trial} trials.')
print(f'Retry {trial} times.', end='', flush=True)
sleep(self.delay)
trial += 1
# get request Id
with urlopen(url) as response:
res = response.read().decode('utf-8')
m = re.search(r'^ RID = (.*$)', res, re.MULTILINE)
if not m:
print('WARNING: Failed to obtain RID.')
continue
rid = m.group(1)
print(f' RID: {rid}.', end='', flush=True)
sleep(1)
# check status
url_ = (f'{self.aln_server}?CMD=Get&FORMAT_OBJECT=SearchInfo&'
f'RID={rid}')
starttime = time()
success = False
while True:
with urlopen(url_) as response:
res = response.read().decode('utf-8')
m = re.search(r'\s+Status=(.+)', res, re.MULTILINE)
if not m:
print('WARNING: Failed to retrieve remote self-'
'alignment status.')
break
status = m.group(1)
if status == 'WAITING':
if time() - starttime > self.timeout:
print('WARNING: Remote self-alignment timeout.')
break
print('.', end='', flush=True)
sleep(self.delay)
continue
elif status in ('FAILED', 'UNKNOWN'):
print('WARNING: Remote self-alignment failed.')
break
elif status == 'READY':
if 'ThereAreHits=yes' not in res:
print('WARNING: Remote self-alignment returned no '
'result.')
break
success = True
break
else:
print('WARNING: Unknown remote self-alignment status: '
f'{status}.')
break
if not success:
continue
sleep(1)
# retrieve result
url_ = (f'{self.aln_server}?CMD=Get&ALIGNMENT_VIEW=Tabular&'
f'FORMAT_TYPE=Text&RID={rid}')
with urlopen(url_) as response:
res = response.read().decode('utf-8')
if '# blastp' not in res or '# Query: ' not in res:
print('WARNING: Invalid format of remote self-alignment '
'results.')
continue
print(' Results retrieved.')
break
m = re.search(r'<PRE>(.+?)<\/PRE>', res, re.DOTALL)
out = m.group(1).splitlines()
result += self.parse_self_m8(out)
return result
@staticmethod
def parse_self_m8(lines):
"""Extract self-alignment results from m8 format table.
Parameters
----------
lines : list of str
hit table in BLAST m8 format
fields: qseqid sseqid pident length mismatch gapopen qstart qend
sstart send evalue bitscore
Returns
-------
list of tuple
hit table (id, bitscore, evalue)
"""
res = []
used = set()
for line in lines:
x = line.rstrip('\r\n').split('\t')
if x[0].startswith('#'):
continue
if len(x) < 12:
continue
if x[0] != x[1]:
continue
if x[0] in used:
continue
res.append((x[1], x[11], x[10]))
used.add(x[0])
return res
| bsd-3-clause | 3,409,914,012,500,449,300 | 34.902753 | 80 | 0.490718 | false | 4.306189 | false | false | false |
NDManh/numbbo | code-experiments/tools/cocoutils.py | 2 | 6790 | ## -*- mode: python -*-
## Lots of utility functions to abstract away platform differences.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
import os
from shutil import copyfile, copytree, rmtree
from subprocess import CalledProcessError, call, STDOUT
try:
from subprocess import check_output
except ImportError:
import subprocess
def check_output(*popenargs, **kwargs):
r"""Run command with arguments and return its output as a byte string.
Backported from Python 2.7 as it's implemented as pure python on stdlib.
>>> check_output(['/usr/bin/python', '--version'])
Python 2.6.2
"""
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
error = subprocess.CalledProcessError(retcode, cmd)
error.output = output
raise error
return output
def hg(args):
"""Run a Mercurial command and return its output.
All errors are deemed fatal and the system will quit."""
full_command = ['hg']
full_command.extend(args)
try:
output = check_output(full_command, env=os.environ, universal_newlines=True)
output = output.rstrip()
except CalledProcessError as e:
print('Failed to execute hg.')
raise
return output
def git(args):
"""Run a git command and return its output.
All errors are deemed fatal and the system will quit."""
full_command = ['git']
full_command.extend(args)
try:
output = check_output(full_command, env=os.environ,
stderr=STDOUT, universal_newlines=True)
output = output.rstrip()
except CalledProcessError as e:
# print('Failed to execute "%s"' % str(full_command))
raise
return output
def is_dirty():
"""Return True if the current working copy has uncommited changes."""
raise NotImplementedError()
return hg(['hg', 'id', '-i'])[-1] == '+'
def git_version(pep440=True):
"""Return somewhat readible version number from git, like
'0.1-6015-ga0a3769' if not pep440 else '0.1.6015'"""
try:
res = git(['describe', '--tags'])
if pep440:
return '.'.join(res.split('-')[:2])
else:
return res
except:
# print('git version call failed')
return ''
def git_revision():
"""Return unreadible git revision identifier, like
a0a3769da32436c27df84d1b9b0915447aebf4d0"""
try:
return git(['rev-parse', 'HEAD'])
except:
# print('git revision call failed')
return ""
def run(directory, args):
print("RUN\t%s in %s" % (" ".join(args), directory))
oldwd = os.getcwd()
try:
os.chdir(directory)
output = check_output(args, stderr=STDOUT, env=os.environ,
universal_newlines=True)
# print(output)
except CalledProcessError as e:
print("ERROR: return value=%i" % e.returncode)
print(e.output)
raise
finally:
os.chdir(oldwd)
def python(directory, args, env=None):
print("PYTHON\t%s in %s" % (" ".join(args), directory))
oldwd = os.getcwd()
if os.environ.get('PYTHON') is not None:
## Use the Python interpreter specified in the PYTHON
## environment variable.
full_command = [os.environ['PYTHON']]
else:
## No interpreter specified. Use the Python interpreter that
## is used to execute this script.
full_command = [sys.executable]
full_command.extend(args)
try:
os.chdir(directory)
output = check_output(full_command, stderr=STDOUT, env=os.environ,
universal_newlines=True)
# print(output)
except CalledProcessError as e:
print("ERROR: return value=%i" % e.returncode)
print(e.output)
raise
finally:
os.chdir(oldwd)
def rscript(directory, args, env=None):
print("RSCRIPT\t%s in %s" % (" ".join(args), directory))
oldwd = os.getcwd()
if os.environ.get('RSCRIPT') is not None:
## Use the Rscript interpreter specified in the RSCRIPT
## environment variable.
full_command = [os.environ['RSCRIPT']]
else:
## No interpreter specified. Try to find an Rscript interpreter.
full_command = ['Rscript']
full_command.extend(args)
try:
os.chdir(directory)
output = check_output(full_command, stderr=STDOUT, env=os.environ,
universal_newlines=True)
except CalledProcessError as e:
print("ERROR: return value=%i" % e.returncode)
print(e.output)
raise
finally:
os.chdir(oldwd)
def copy_file(source, destination):
print("COPY\t%s -> %s" % (source, destination))
copyfile(source, destination)
def copy_tree(source_directory, destination_directory):
"""CAVEAT: this removes the destination tree if present!"""
if os.path.isdir(destination_directory):
rmtree(destination_directory)
print("COPY\t%s -> %s" % (source_directory, destination_directory))
copytree(source_directory, destination_directory)
def write_file(string, destination):
print("WRITE\t%s" % destination)
with open(destination, 'w') as fd:
fd.write(string)
def make(directory, target):
"""Run make to build a target"""
print("MAKE\t%s in %s" % (target, directory))
oldwd = os.getcwd()
try:
os.chdir(directory)
# prepare makefile(s)
if ((('win32' in sys.platform) or ('win64' in sys.platform)) and
('cygwin' not in os.environ['PATH'])):
# only if under Windows and without Cygwin, we need a specific
# Windows makefile
copy_file('Makefile_win_gcc.in', 'Makefile')
else:
copy_file('Makefile.in', 'Makefile')
output = check_output(['make', target], stderr=STDOUT, env=os.environ,
universal_newlines=True)
except CalledProcessError as e:
print("ERROR: return value=%i" % e.returncode)
print(e.output)
raise
finally:
os.chdir(oldwd)
def expand_file(source, destination, dictionary):
print("EXPAND\t%s to %s" % (source, destination))
from string import Template
with open(source, 'r') as fd:
content = Template(fd.read())
with open(destination, "w") as outfd:
outfd.write(content.safe_substitute(dictionary))
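# Illustrative note (hypothetical file and keys, not from the original script):
# expand_file fills ${...} placeholders via string.Template.safe_substitute,
# e.g. a 'Makefile.in' line "VERSION = ${version}" expanded with
# expand_file('Makefile.in', 'Makefile', {'version': '1.0'}) becomes
# "VERSION = 1.0"; unknown placeholders are left untouched.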
| bsd-3-clause | 6,164,873,409,866,953,000 | 32.95 | 84 | 0.609426 | false | 3.989424 | false | false | false |
x2Ident/x2Ident_test | mitmproxy/mitmproxy/console/grideditor/col_bytes.py | 2 | 3158 | from __future__ import absolute_import, print_function, division
import os
import urwid
from mitmproxy.console import signals
from mitmproxy.console.grideditor import base
from netlib import strutils
def read_file(filename, callback, escaped):
# type: (str, Callable[...,None], bool) -> Optional[str]
if not filename:
return
filename = os.path.expanduser(filename)
try:
with open(filename, "r" if escaped else "rb") as f:
d = f.read()
except IOError as v:
return str(v)
if escaped:
try:
d = strutils.escaped_str_to_bytes(d)
except ValueError:
return "Invalid Python-style string encoding."
# TODO: Refactor the status_prompt_path signal so that we
# can raise exceptions here and return the content instead.
callback(d)
class Column(base.Column):
def Display(self, data):
return Display(data)
def Edit(self, data):
return Edit(data)
def blank(self):
return b""
def keypress(self, key, editor):
if key == "r":
if editor.walker.get_current_value() is not None:
signals.status_prompt_path.send(
self,
prompt="Read file",
callback=read_file,
args=(editor.walker.set_current_value, True)
)
elif key == "R":
if editor.walker.get_current_value() is not None:
signals.status_prompt_path.send(
self,
prompt="Read unescaped file",
callback=read_file,
args=(editor.walker.set_current_value, False)
)
elif key == "e":
o = editor.walker.get_current_value()
if o is not None:
n = editor.master.spawn_editor(o)
n = strutils.clean_hanging_newline(n)
editor.walker.set_current_value(n)
elif key in ["enter"]:
editor.walker.start_edit()
else:
return key
class Display(base.Cell):
def __init__(self, data):
# type: (bytes) -> Display
self.data = data
escaped = strutils.bytes_to_escaped_str(data)
w = urwid.Text(escaped, wrap="any")
super(Display, self).__init__(w)
def get_data(self):
return self.data
class Edit(base.Cell):
def __init__(self, data):
# type: (bytes) -> Edit
data = strutils.bytes_to_escaped_str(data)
w = urwid.Edit(edit_text=data, wrap="any", multiline=True)
w = urwid.AttrWrap(w, "editfield")
super(Edit, self).__init__(w)
def get_data(self):
# type: () -> bytes
txt = self._w.get_text()[0].strip()
try:
return strutils.escaped_str_to_bytes(txt)
except ValueError:
signals.status_message.send(
self,
message="Invalid Python-style string encoding.",
expire=1000
)
raise
| gpl-3.0 | -3,493,163,022,185,555,000 | 28.660194 | 66 | 0.526282 | false | 4.111979 | false | false | false |
PowerShellEmpire/Empire | lib/modules/powershell/collection/ninjacopy.py | 10 | 4109 | from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Invoke-NinjaCopy',
'Author': ['@JosephBialek'],
'Description': ('Copies a file from an NTFS partitioned volume by reading the '
'raw volume and parsing the NTFS structures.'),
'Background' : True,
'OutputExtension' : None,
'NeedsAdmin' : True,
'OpsecSafe' : False,
'Language' : 'powershell',
'MinLanguageVersion' : '2',
'Comments': [
'https://github.com/mattifestation/PowerSploit/blob/master/Exfiltration/Invoke-NinjaCopy.ps1',
'https://clymb3r.wordpress.com/2013/06/13/using-powershell-to-copy-ntds-dit-registry-hives-bypass-sacls-dacls-file-locks/'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'Path' : {
'Description' : 'The full path of the file to copy (example: c:\\windows\\ntds\\ntds.dit)',
'Required' : True,
'Value' : ''
},
'LocalDestination' : {
'Description' : 'A file path to copy the file to on the local computer.',
'Required' : False,
'Value' : ''
},
'RemoteDestination' : {
'Description' : 'A file path to copy the file to on the remote computer. If this isn\'t used, LocalDestination must be specified.',
'Required' : False,
'Value' : ''
},
'ComputerName' : {
'Description' : 'An array of computernames to run the script on.',
'Required' : False,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
# read in the common module source code
moduleSource = self.mainMenu.installPath + "/data/module_source/collection/Invoke-NinjaCopy.ps1"
if obfuscate:
helpers.obfuscate_module(moduleSource=moduleSource, obfuscationCommand=obfuscationCommand)
moduleSource = moduleSource.replace("module_source", "obfuscated_module_source")
try:
f = open(moduleSource, 'r')
except:
print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
return ""
moduleCode = f.read()
f.close()
script = moduleCode
scriptEnd = "$null = Invoke-NinjaCopy "
for option,values in self.options.iteritems():
if option.lower() != "agent":
if values['Value'] and values['Value'] != '':
if values['Value'].lower() == "true":
# if we're just adding a switch
scriptEnd += " -" + str(option)
else:
scriptEnd += " -" + str(option) + " " + str(values['Value'])
scriptEnd += "; Write-Output 'Invoke-NinjaCopy Completed'"
if obfuscate:
scriptEnd = helpers.obfuscate(self.mainMenu.installPath, psScript=scriptEnd, obfuscationCommand=obfuscationCommand)
script += scriptEnd
return script
| bsd-3-clause | 4,989,571,720,808,305,000 | 37.046296 | 151 | 0.506449 | false | 4.394652 | false | false | false |
openedbox/bigdata-all-in-one | tools/hbase.py | 1 | 3710 | import os
from helper import SshClient, ScpClient, HadoopConfigGen
class HBase:
def __init__(self):
pass
@staticmethod
def install(options):
hbase = HadoopConfigGen()
hbase.add_property('hbase.rootdir', 'hdfs://%s/hbase' % options.hadoop_cluster_name)
hbase.add_property('hbase.cluster.distributed', 'true')
zookeeper_names = []
for zookeeper_node in options.all_zookeeper_hosts:
zookeeper_names.append(options.host_names[zookeeper_node])
hbase.add_property('hbase.zookeeper.quorum', ','.join(zookeeper_names))
hbase.add_property('hbase.zookeeper.property.clientPort', '2181')
hbase.add_property('hbase.zookeeper.property.dataDir', options.zookeeper_data_dir)
hbase.add_property('hbase.tmp.dir', options.hbase_tmp_dir)
hbase.save('./tmp/hbase-site.xml')
regionserver_path = './tmp/regionservers'
if os.path.exists(regionserver_path):
os.remove(regionserver_path)
regionservers = open(regionserver_path, 'w')
for hb in options.hbase_nodes:
regionservers.write('%s\n' % options.host_names[hb])
regionservers.close()
for host in options.all_hbase_hosts:
ssh = SshClient(host, options.root_password)
if 'cannot open' in ssh.execute("file " + options.hbase_path):
ScpClient.local2remote(host, options.root_password, options.hbase_package_path,
'/usr/local/hbase')
ssh.execute('tar zxf /usr/local/hbase -C /usr/local')
ssh.execute('rm -rf /usr/local/hbase')
ssh.execute(
'echo export HBASE_HOME=%s >>/etc/profile' % options.hbase_path)
ssh.execute('echo export PATH=\$HBASE_HOME/bin:\$PATH >>/etc/profile')
ssh.execute('source /etc/profile')
ssh.execute('rm -rf %s/lib/slf4j-log4j12*.jar' % options.hbase_path)
ssh.execute(
'sed -i \'s:# export JAVA_HOME=/usr/java/jdk1.6.0/:export JAVA_HOME=%s:\' %s/conf/hbase-env.sh' % (
options.jdk_path, options.hbase_path))
ssh.execute(
'sed -i \'s:# export HBASE_MANAGES_ZK=true:export HBASE_MANAGES_ZK=false:\' %s/conf/hbase-env.sh' %
options.hbase_path)
ScpClient.local2remote(host, options.root_password, './tmp/hbase-site.xml',
'%s/conf/' % options.hbase_path)
ScpClient.local2remote(host, options.root_password, './tmp/regionservers',
'%s/conf/' % options.hbase_path)
ssh.close()
@staticmethod
def start(options):
master = SshClient(options.all_hbase_hosts[0], options.root_password)
print 'start hbase cluster'
master.execute('$HBASE_HOME/bin/hbase-daemon.sh start master')
master.close()
for regionserver in options.all_hbase_hosts:
region = SshClient(regionserver, options.root_password)
region.execute('$HBASE_HOME/bin/hbase-daemon.sh start regionserver')
region.close()
@staticmethod
def stop(options):
for regionserver in options.all_hbase_hosts:
region = SshClient(regionserver, options.root_password)
region.execute('$HBASE_HOME/bin/hbase-daemon.sh stop regionserver')
region.close()
master = SshClient(options.all_hbase_hosts[0], options.root_password)
print 'stop hbase cluster'
master.execute('$HBASE_HOME/bin/hbase-daemon.sh stop master')
master.close()
| mit | -8,632,675,921,342,916,000 | 41.159091 | 119 | 0.594609 | false | 3.626588 | false | false | false |
tukeJonny/NTPAmpMitigator | infra/NTPAmp/httpget.py | 1 | 4905 | #-*- coding: utf-8 -*-
import sys
import time
import argparse
import logging
from decimal import Decimal
import requests
try:
from requests.exceptions import ConnectTimeout
MyTimeoutException=ConnectTimeout
except:
from requests.exceptions import Timeout
MyTimeoutException=Timeout
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
class StaticGraph(object):
def __init__(self, elapsed_times, timeout):
self.elapsed = {
'x': [x for x in range(1, len(elapsed_times)+1)],
'y': elapsed_times
}
self.timeout = {
'x': [x for x in range(1, len(elapsed_times)+1)],
'y': [timeout]*len(elapsed_times)
}
def make(self):
plt.title("Elapsed Times")
plt.xlabel("time [sec]")
plt.ylabel("elapsed time [sec]")
plt.xlim([1,len(self.elapsed['x'])])
plt.ylim([0,self.timeout['y'][0]+1])
plt.legend(loc='upper right')
plt.grid()
plt.plot(self.timeout['x'],self.timeout['y'], color='r')
plt.plot(self.elapsed['x'],self.elapsed['y'])
plt.savefig("elapsed.png")
plt.show()
class Response(object):
def __init__(self, url, elapsed, status_code):
self.url = url
if not isinstance(elapsed, int): #Not Error Number
self._elapsed = elapsed.total_seconds()
else:
self._elapsed = elapsed
self.status_code = status_code
        self.is_timeout = elapsed == -1 or status_code == -1
@property
def elapsed(self):
return self._elapsed
def __str__(self):
if not self.is_timeout:
msg = "[{status_code}] from {url}: Time= {elapsed}[sec]".format( \
status_code=self.status_code,url=self.url,elapsed=self._elapsed)
else:
msg = "[!] from {url}: Request timeout"
return msg
make_response = lambda d: Response(d['url'],d['elapsed'],d['status_code'])
class HTTPTest(object):
"""
HTTP GET Tester
"""
def __init__(self, url, count, timeout):
self.url = url
self.count = count
self.timeout = timeout
self.fail_count = 0
self.elapsed_times = []
self.INTERVAL = 1
self.logger = logging.getLogger("HTTPTest")
self.logger.setLevel(logging.DEBUG)
# File
handler = logging.FileHandler('http_get.log', mode='w')
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
self.logger.addHandler(handler)
#Stdout
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
handler.setFormatter(formatter)
self.logger.addHandler(handler)
def do_get(self):
"""
Do HTTP GET with requests module.
"""
try:
res = requests.get(self.url, timeout=self.timeout)
response = make_response(res.__dict__)
self.elapsed_times.append(response.elapsed)
self.logger.info( str(response) )
except MyTimeoutException:
response = make_response({'url':self.url,'elapsed':-1,'status_code':-1})
self.elapsed_times.append(self.timeout)
self.logger.info( str(response) )
self.fail_count += 1
def display_statics(self):
        pktloss_ratio = Decimal(self.fail_count) / Decimal(self.count) * Decimal('100')
self.logger.info("+++++ HTTP GET Tester Statics +++++")
self.logger.info("Send: {}".format(self.count))
self.logger.info("Recv: {}".format(self.count-self.fail_count))
self.logger.info("Loss: {}".format(self.fail_count))
self.logger.info("{}% Packet Loss".format(pktloss_ratio))
self.logger.info("+++++++++++++++++++++++++++++++++++")
# Make static graph images
statgraph = StaticGraph(self.elapsed_times, self.timeout)
statgraph.make()
def start(self):
"""
        call do_get <self.count> times.
"""
self.logger.info("[+] Start {} times HTTP GET Test to {}!".format(self.count, self.url))
for i in range(self.count):
self.do_get()
time.sleep(self.INTERVAL)
self.display_statics()
def parse_argument():
parser = argparse.ArgumentParser(description='ping like HTTP GET tester.')
parser.add_argument("-u", "--url", type=str, help='Request to this url.', default="http://www.yahoo.co.jp/")
parser.add_argument("-c", "--count", type=int, help='HTTP GET test count.', default=2000)
parser.add_argument("-t", "--timeout", type=int, help='Request timeout limit.', default=1)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_argument()
tester = HTTPTest(args.url, args.count, args.timeout)
tester.start() | mit | 4,381,088,293,051,387,400 | 32.37415 | 112 | 0.58736 | false | 3.844044 | true | false | false |
yarbroughw/JMDE | JMDE/treeclassifier.py | 1 | 6463 | from __future__ import print_function
import json
import warnings
import pickle
import itertools
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
import scipy.optimize as opt
import retrieve
class Node:
def __init__(self, n, pipeline):
self.name = n
self.children = {}
self.pipeline = pipeline
self.threshold = 0.00
def hasChildren(self):
return self.children != dict()
def getset(self, amount):
return list(retrieve.entities(amount, self.name))
def train(self, entities):
if not entities:
return
with warnings.catch_warnings():
warnings.simplefilter("ignore")
corpus = features(entities)
target = labels(entities)
self.pipeline = self.pipeline.fit(corpus, target)
assert hasattr(self.pipeline.steps[0][1], "vocabulary_")
def test(self):
        if not getattr(self, 'testset', None):
print(self.name + "'s test set not initialized!")
return
corpus = features(self.testset)
target = labels(self.testset)
return self.pipeline.score(corpus, target)
def predict(self, x):
''' take an entity and classify it into either a child node (if
confident about prediction) or self (if unconfident)
'''
fs = features([x])
proba = max(self.pipeline.predict_proba(fs)[0])
if proba < self.threshold:
label = self.name
else:
label = self.pipeline.predict(fs)[0]
return label
def isTrained(self):
return hasattr(self.pipeline.steps[0][1], "vocabulary_")
def distance(self, predicted, label):
''' error function for optimization of node thresholds.
correct classification is a 0, withholded classification is a 1,
and misclassification is a 2
'''
if predicted == label:
return 0
elif predicted == self.name:
return 1
else:
return 2
def score(self, threshold):
''' gets entities from this node and its children, to score how
well the node classifies the entities (using "distance")
'''
self.threshold = threshold
total = sum([self.distance(self.predict(e), e["class"])
for e in self.testset])
return total / len(self.testset)
def learnthreshold(self):
print("loading test set.")
self.testset = list(itertools.chain(retrieve.direct(100, self.name),
retrieve.entities(200, self.name)))
print("optimizing.")
result = opt.minimize_scalar(self.score,
bounds=(0.0, 1.0),
method='bounded')
print(result)
self.threshold = result.x
print(self.name, "threshold set to", self.threshold)
class TreeClassifier:
def __init__(self, subclass=None):
# make tree from nested ontology
with open("../data/nestedontology.json", 'r') as f:
ontology = json.load(f)[0]
self.root = self._buildtree(ontology)
if subclass:
self.root = self.getsubnode(subclass)
def getsubnode(self, classname):
''' returns the node in the tree that matches classname '''
for node in iter(self):
if node.name == classname:
return node
raise ValueError(classname + " is not a valid class name!")
def _buildtree(self, json_tree):
''' build tree from nested json '''
root = Node(json_tree["name"], pipeline())
for child in json_tree["children"]:
root.children[child["name"]] = (self._buildtree(child))
return root
def __iter__(self):
''' BFS traversal of tree '''
queue = [self.root]
while queue != []:
current = queue.pop(0)
queue.extend(list(current.children.values()))
yield current
def train(self, entities):
''' train each node's classifier '''
for node in iter(self):
print("Training", node.name)
node.train(entities)
def autotrain(self, amount):
''' train each node's classifier '''
for node in iter(self):
entities = node.getset(amount)
node.train(entities)
def learnthresholds(self):
for node in iter(self):
if node.isTrained():
print("learning threshold for", node.name, end='. ')
node.learnthreshold()
def predict(self, entity):
''' returns predicted classes for entity.
predicts downwards in tree from root node
'''
node = self.root
while node.hasChildren() and node.isTrained():
predicted_label = node.predict(entity)
if predicted_label == node.name:
break
node = node.children[predicted_label]
return node.name
def predictions(self, entities):
        ''' runs the predict function over a set of entities '''
for entity in entities:
self.predict(entity)
def score(self, entities):
total = 0
for entity in entities:
realclass = entity["deepest"]
predicted = self.predict(entity)
if predicted == realclass:
total += 1
return total / len(entities)
def features(dataset):
return [ ' '.join(x["properties"]) for x in dataset]
def labels(dataset):
return [ x["class"] for x in dataset ]
def pipeline():
return Pipeline([('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', MultinomialNB())])
def dump(trainnum, testnum, filename):
tree = TreeClassifier("owl:Thing")
print("Tree created with", tree.root.name, "as root.")
    tree.autotrain(trainnum)
with open(filename, 'wb') as f:
pickle.dump(tree, f)
def load(filename):
with open(filename, 'rb') as f:
tree = pickle.load(f)
return tree
if __name__ == "__main__":
tree = TreeClassifier()
    tree.autotrain(1000)
entities = [e for e in retrieve.entities(10, "owl:Thing")]
for e in entities:
print(e["name"], e["class"], tree.predict(e))
| mit | -445,018,581,275,244,600 | 30.681373 | 79 | 0.578369 | false | 4.343414 | true | false | false |
Pluto-tv/chromium-crosswalk | tools/perf/page_sets/tough_compositor_cases.py | 35 | 3128 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import shared_page_state
from telemetry import story
class ToughCompositorPage(page_module.Page):
def __init__(self, url, page_set):
super(ToughCompositorPage, self).__init__(
url=url, page_set=page_set, credentials_path = 'data/credentials.json',
shared_page_state_class=shared_page_state.SharedMobilePageState)
self.archive_data_file = 'data/tough_compositor_cases.json'
def RunNavigateSteps(self, action_runner):
super(ToughCompositorPage, self).RunNavigateSteps(action_runner)
# TODO(epenner): Remove this wait (http://crbug.com/366933)
action_runner.Wait(5)
class ToughCompositorScrollPage(ToughCompositorPage):
def __init__(self, url, page_set):
super(ToughCompositorScrollPage, self).__init__(url=url, page_set=page_set)
def RunPageInteractions(self, action_runner):
# Make the scroll longer to reduce noise.
with action_runner.CreateGestureInteraction('ScrollAction'):
action_runner.ScrollPage(direction='down', speed_in_pixels_per_second=300)
class ToughCompositorWaitPage(ToughCompositorPage):
def __init__(self, url, page_set):
super(ToughCompositorWaitPage, self).__init__(url=url, page_set=page_set)
def RunPageInteractions(self, action_runner):
# We scroll back and forth a few times to reduce noise in the tests.
with action_runner.CreateInteraction('Animation'):
action_runner.Wait(8)
class ToughCompositorCasesPageSet(story.StorySet):
""" Touch compositor sites """
def __init__(self):
super(ToughCompositorCasesPageSet, self).__init__(
archive_data_file='data/tough_compositor_cases.json',
cloud_storage_bucket=story.PUBLIC_BUCKET)
scroll_urls_list = [
# Why: Baseline CC scrolling page. A long page with only text. """
'http://jsbin.com/pixavefe/1/quiet?CC_SCROLL_TEXT_ONLY',
# Why: Baseline JS scrolling page. A long page with only text. """
'http://jsbin.com/wixadinu/2/quiet?JS_SCROLL_TEXT_ONLY',
# Why: Scroll by a large number of CC layers """
'http://jsbin.com/yakagevo/1/quiet?CC_SCROLL_200_LAYER_GRID',
# Why: Scroll by a large number of JS layers """
'http://jsbin.com/jevibahi/4/quiet?JS_SCROLL_200_LAYER_GRID',
]
wait_urls_list = [
# Why: CC Poster circle animates many layers """
'http://jsbin.com/falefice/1/quiet?CC_POSTER_CIRCLE',
# Why: JS poster circle animates/commits many layers """
'http://jsbin.com/giqafofe/1/quiet?JS_POSTER_CIRCLE',
# Why: JS invalidation does lots of uploads """
'http://jsbin.com/beqojupo/1/quiet?JS_FULL_SCREEN_INVALIDATION',
# Why: Creates a large number of new tilings """
'http://jsbin.com/covoqi/1/quiet?NEW_TILINGS',
]
for url in scroll_urls_list:
self.AddStory(ToughCompositorScrollPage(url, self))
for url in wait_urls_list:
self.AddStory(ToughCompositorWaitPage(url, self))
| bsd-3-clause | -621,170,004,393,088,100 | 39.102564 | 80 | 0.703645 | false | 3.261731 | false | false | false |
bradallred/gemrb | gemrb/GUIScripts/pst/MessageWindow.py | 1 | 4025 | # -*-python-*-
# GemRB - Infinity Engine Emulator
# Copyright (C) 2003 The GemRB Project
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# MessageWindow.py - scripts and GUI for main (walk) window
###################################################
import GemRB
import GUIClasses
import GUICommon
import GUICommonWindows
import CommonWindow
import GUIWORLD
from GameCheck import MAX_PARTY_SIZE
from GUIDefines import *
MWindow = 0
ActionsWindow = 0
PortraitWindow = 0
OptionsWindow = 0
MessageTA = 0
def OnLoad():
global MWindow, ActionsWindow, PortraitWindow, OptionsWindow
# TODO: we can uncomment the "HIDE_CUT" lines below to hide the windows for cutscenes
# the original doesn't hide them and it looks like there is a map drawing bug at the bottom of the screen due to the bottom
# row of tiles getting squished for not fitting perfectly on screen (tho I havent seen this in BG2, but maybe wasnt paying attention)
ActionsWindow = GemRB.LoadWindow(0, GUICommon.GetWindowPack(), WINDOW_BOTTOM|WINDOW_LEFT)
ActionsWindow.AddAlias("ACTWIN")
#ActionsWindow.AddAlias("HIDE_CUT", 1)
ActionsWindow.AddAlias("NOT_DLG", 0)
ActionsWindow.SetFlags(WF_BORDERLESS|IE_GUI_VIEW_IGNORE_EVENTS, OP_OR)
OptionsWindow = GemRB.LoadWindow(2, GUICommon.GetWindowPack(), WINDOW_BOTTOM|WINDOW_RIGHT)
OptionsWindow.AddAlias("OPTWIN")
#OptionsWindow.AddAlias("HIDE_CUT", 2)
OptionsWindow.AddAlias("NOT_DLG", 1)
OptionsWindow.SetFlags(WF_BORDERLESS|IE_GUI_VIEW_IGNORE_EVENTS, OP_OR)
MWindow = GemRB.LoadWindow(7, GUICommon.GetWindowPack(), WINDOW_BOTTOM|WINDOW_HCENTER)
MWindow.SetFlags(WF_DESTROY_ON_CLOSE, OP_NAND)
MWindow.AddAlias("MSGWIN")
MWindow.AddAlias("HIDE_CUT", 0)
MWindow.SetFlags(WF_BORDERLESS|IE_GUI_VIEW_IGNORE_EVENTS, OP_OR)
PortraitWindow = GUICommonWindows.OpenPortraitWindow (1, WINDOW_BOTTOM|WINDOW_HCENTER)
#PortraitWindow.AddAlias("HIDE_CUT", 3)
PortraitWindow.AddAlias("NOT_DLG", 2)
PortraitWindow.SetFlags(WF_BORDERLESS|IE_GUI_VIEW_IGNORE_EVENTS, OP_OR)
pframe = PortraitWindow.GetFrame()
pframe['x'] -= 16
PortraitWindow.SetFrame(pframe)
MessageTA = MWindow.GetControl (1)
MessageTA.SetFlags (IE_GUI_TEXTAREA_AUTOSCROLL|IE_GUI_TEXTAREA_HISTORY)
MessageTA.SetResizeFlags(IE_GUI_VIEW_RESIZE_ALL)
MessageTA.AddAlias("MsgSys", 0)
CloseButton= MWindow.GetControl (0)
CloseButton.SetText(28082)
CloseButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, lambda: MWindow.Close())
CloseButton.SetFlags (IE_GUI_BUTTON_MULTILINE, OP_OR)
CloseButton.MakeDefault()
OpenButton = OptionsWindow.GetControl (10)
OpenButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, lambda: MWindow.Focus())
# Select all
Button = ActionsWindow.GetControl (1)
Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, GUICommon.SelectAllOnPress)
# Select all
Button = ActionsWindow.GetControl (3)
Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, GUICommonWindows.ActionStopPressed)
FormationButton = ActionsWindow.GetControl (4)
FormationButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, GUIWORLD.OpenFormationWindow)
GUICommonWindows.SetupClockWindowControls (ActionsWindow)
GUICommonWindows.SetupMenuWindowControls (OptionsWindow)
UpdateControlStatus ()
def UpdateControlStatus ():
if GemRB.GetGUIFlags() & (GS_DIALOGMASK|GS_DIALOG):
Label = MWindow.GetControl (0x10000003)
Label.SetText (str (GemRB.GameGetPartyGold ()))
MWindow.Focus()
elif MWindow:
MWindow.Close()
| gpl-2.0 | -7,083,244,787,348,088,000 | 34.9375 | 134 | 0.766708 | false | 3.067835 | false | false | false |
tomviner/pytest | src/_pytest/mark/structures.py | 1 | 13120 | import inspect
import warnings
from collections import namedtuple
from collections.abc import MutableMapping
from operator import attrgetter
from typing import Set
import attr
from ..compat import ascii_escaped
from ..compat import getfslineno
from ..compat import NOTSET
from _pytest.outcomes import fail
from _pytest.warning_types import PytestUnknownMarkWarning
EMPTY_PARAMETERSET_OPTION = "empty_parameter_set_mark"
def alias(name, warning=None):
getter = attrgetter(name)
def warned(self):
warnings.warn(warning, stacklevel=2)
return getter(self)
return property(getter if warning is None else warned, doc="alias for " + name)
def istestfunc(func):
return (
hasattr(func, "__call__")
and getattr(func, "__name__", "<lambda>") != "<lambda>"
)
def get_empty_parameterset_mark(config, argnames, func):
from ..nodes import Collector
requested_mark = config.getini(EMPTY_PARAMETERSET_OPTION)
if requested_mark in ("", None, "skip"):
mark = MARK_GEN.skip
elif requested_mark == "xfail":
mark = MARK_GEN.xfail(run=False)
elif requested_mark == "fail_at_collect":
f_name = func.__name__
_, lineno = getfslineno(func)
raise Collector.CollectError(
"Empty parameter set in '%s' at line %d" % (f_name, lineno + 1)
)
else:
raise LookupError(requested_mark)
fs, lineno = getfslineno(func)
reason = "got empty parameter set %r, function %s at %s:%d" % (
argnames,
func.__name__,
fs,
lineno,
)
return mark(reason=reason)
class ParameterSet(namedtuple("ParameterSet", "values, marks, id")):
@classmethod
def param(cls, *values, marks=(), id=None):
if isinstance(marks, MarkDecorator):
marks = (marks,)
else:
assert isinstance(marks, (tuple, list, set))
if id is not None:
if not isinstance(id, str):
raise TypeError(
"Expected id to be a string, got {}: {!r}".format(type(id), id)
)
id = ascii_escaped(id)
return cls(values, marks, id)
@classmethod
def extract_from(cls, parameterset, force_tuple=False):
"""
:param parameterset:
a legacy style parameterset that may or may not be a tuple,
and may or may not be wrapped into a mess of mark objects
:param force_tuple:
enforce tuple wrapping so single argument tuple values
don't get decomposed and break tests
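
        For example, ``extract_from(("a", 1))`` keeps ``"a"`` and ``1`` as two
        separate values, while ``extract_from(("a", 1), force_tuple=True)``
        wraps the whole tuple as a single value.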
"""
if isinstance(parameterset, cls):
return parameterset
if force_tuple:
return cls.param(parameterset)
else:
return cls(parameterset, marks=[], id=None)
@staticmethod
def _parse_parametrize_args(argnames, argvalues, *args, **kwargs):
if not isinstance(argnames, (tuple, list)):
argnames = [x.strip() for x in argnames.split(",") if x.strip()]
force_tuple = len(argnames) == 1
else:
force_tuple = False
return argnames, force_tuple
@staticmethod
def _parse_parametrize_parameters(argvalues, force_tuple):
return [
ParameterSet.extract_from(x, force_tuple=force_tuple) for x in argvalues
]
@classmethod
def _for_parametrize(cls, argnames, argvalues, func, config, function_definition):
argnames, force_tuple = cls._parse_parametrize_args(argnames, argvalues)
parameters = cls._parse_parametrize_parameters(argvalues, force_tuple)
del argvalues
if parameters:
# check all parameter sets have the correct number of values
for param in parameters:
if len(param.values) != len(argnames):
msg = (
'{nodeid}: in "parametrize" the number of names ({names_len}):\n'
" {names}\n"
"must be equal to the number of values ({values_len}):\n"
" {values}"
)
fail(
msg.format(
nodeid=function_definition.nodeid,
values=param.values,
names=argnames,
names_len=len(argnames),
values_len=len(param.values),
),
pytrace=False,
)
else:
# empty parameter set (likely computed at runtime): create a single
# parameter set with NOTSET values, with the "empty parameter set" mark applied to it
mark = get_empty_parameterset_mark(config, argnames, func)
parameters.append(
ParameterSet(values=(NOTSET,) * len(argnames), marks=[mark], id=None)
)
return argnames, parameters
@attr.s(frozen=True)
class Mark:
#: name of the mark
name = attr.ib(type=str)
#: positional arguments of the mark decorator
args = attr.ib() # List[object]
#: keyword arguments of the mark decorator
kwargs = attr.ib() # Dict[str, object]
def combined_with(self, other):
"""
:param other: the mark to combine with
:type other: Mark
:rtype: Mark
combines by appending args and merging the mappings
"""
assert self.name == other.name
return Mark(
self.name, self.args + other.args, dict(self.kwargs, **other.kwargs)
)
@attr.s
class MarkDecorator:
""" A decorator for test functions and test classes. When applied
it will create :class:`Mark` objects which are often created like this::
mark1 = pytest.mark.NAME # simple MarkDecorator
mark2 = pytest.mark.NAME(name1=value) # parametrized MarkDecorator
and can then be applied as decorators to test functions::
@mark2
def test_function():
pass
When a MarkDecorator instance is called it does the following:
1. If called with a single class as its only positional argument and no
additional keyword arguments, it attaches itself to the class so it
gets applied automatically to all test cases found in that class.
2. If called with a single function as its only positional argument and
no additional keyword arguments, it attaches a MarkInfo object to the
function, containing all the arguments already stored internally in
the MarkDecorator.
3. When called in any other case, it performs a 'fake construction' call,
i.e. it returns a new MarkDecorator instance with the original
MarkDecorator's content updated with the arguments passed to this
call.
Note: The rules above prevent MarkDecorator objects from storing only a
single function or class reference as their positional argument with no
additional keyword or positional arguments.
"""
mark = attr.ib(validator=attr.validators.instance_of(Mark))
name = alias("mark.name")
args = alias("mark.args")
kwargs = alias("mark.kwargs")
@property
def markname(self):
return self.name # for backward-compat (2.4.1 had this attr)
def __eq__(self, other):
return self.mark == other.mark if isinstance(other, MarkDecorator) else False
def __repr__(self):
return "<MarkDecorator {!r}>".format(self.mark)
def with_args(self, *args, **kwargs):
""" return a MarkDecorator with extra arguments added
unlike call this can be used even if the sole argument is a callable/class
:return: MarkDecorator
"""
mark = Mark(self.name, args, kwargs)
return self.__class__(self.mark.combined_with(mark))
def __call__(self, *args, **kwargs):
""" if passed a single callable argument: decorate it with mark info.
otherwise add *args/**kwargs in-place to mark information. """
if args and not kwargs:
func = args[0]
is_class = inspect.isclass(func)
if len(args) == 1 and (istestfunc(func) or is_class):
store_mark(func, self.mark)
return func
return self.with_args(*args, **kwargs)
def get_unpacked_marks(obj):
"""
obtain the unpacked marks that are stored on an object
"""
mark_list = getattr(obj, "pytestmark", [])
if not isinstance(mark_list, list):
mark_list = [mark_list]
return normalize_mark_list(mark_list)
def normalize_mark_list(mark_list):
"""
normalizes marker decorating helpers to mark objects
:type mark_list: List[Union[Mark, Markdecorator]]
:rtype: List[Mark]
"""
extracted = [
getattr(mark, "mark", mark) for mark in mark_list
] # unpack MarkDecorator
for mark in extracted:
if not isinstance(mark, Mark):
raise TypeError("got {!r} instead of Mark".format(mark))
return [x for x in extracted if isinstance(x, Mark)]
def store_mark(obj, mark):
"""store a Mark on an object
this is used to implement the Mark declarations/decorators correctly
"""
assert isinstance(mark, Mark), mark
# always reassign name to avoid updating pytestmark
# in a reference that was only borrowed
obj.pytestmark = get_unpacked_marks(obj) + [mark]
class MarkGenerator:
""" Factory for :class:`MarkDecorator` objects - exposed as
a ``pytest.mark`` singleton instance. Example::
import pytest
@pytest.mark.slowtest
def test_function():
pass
will set a 'slowtest' :class:`MarkInfo` object
on the ``test_function`` object. """
_config = None
_markers = set() # type: Set[str]
def __getattr__(self, name):
if name[0] == "_":
raise AttributeError("Marker name must NOT start with underscore")
if self._config is not None:
# We store a set of markers as a performance optimisation - if a mark
# name is in the set we definitely know it, but a mark may be known and
# not in the set. We therefore start by updating the set!
if name not in self._markers:
for line in self._config.getini("markers"):
# example lines: "skipif(condition): skip the given test if..."
# or "hypothesis: tests which use Hypothesis", so to get the
# marker name we split on both `:` and `(`.
marker = line.split(":")[0].split("(")[0].strip()
self._markers.add(marker)
# If the name is not in the set of known marks after updating,
# then it really is time to issue a warning or an error.
if name not in self._markers:
if self._config.option.strict_markers:
fail(
"{!r} not found in `markers` configuration option".format(name),
pytrace=False,
)
else:
warnings.warn(
"Unknown pytest.mark.%s - is this a typo? You can register "
"custom marks to avoid this warning - for details, see "
"https://docs.pytest.org/en/latest/mark.html" % name,
PytestUnknownMarkWarning,
)
return MarkDecorator(Mark(name, (), {}))
MARK_GEN = MarkGenerator()
class NodeKeywords(MutableMapping):
def __init__(self, node):
self.node = node
self.parent = node.parent
self._markers = {node.name: True}
def __getitem__(self, key):
try:
return self._markers[key]
except KeyError:
if self.parent is None:
raise
return self.parent.keywords[key]
def __setitem__(self, key, value):
self._markers[key] = value
def __delitem__(self, key):
raise ValueError("cannot delete key in keywords dict")
def __iter__(self):
seen = self._seen()
return iter(seen)
def _seen(self):
seen = set(self._markers)
if self.parent is not None:
seen.update(self.parent.keywords)
return seen
def __len__(self):
return len(self._seen())
def __repr__(self):
return "<NodeKeywords for node {}>".format(self.node)
@attr.s(cmp=False, hash=False)
class NodeMarkers:
"""
internal structure for storing marks belonging to a node
..warning::
unstable api
"""
own_markers = attr.ib(default=attr.Factory(list))
def update(self, add_markers):
"""update the own markers
"""
self.own_markers.extend(add_markers)
def find(self, name):
"""
find markers in own nodes or parent nodes
needs a better place
"""
for mark in self.own_markers:
if mark.name == name:
yield mark
def __iter__(self):
return iter(self.own_markers)
| mit | 7,383,742,529,174,535,000 | 31.964824 | 97 | 0.588338 | false | 4.348691 | true | false | false |
duncanwp/cis_esp | web/datasets/load_MODIS.py | 1 | 1887 | import django
django.setup()
def gring_to_obj(gring):
from datasets.utils import lat_lon_points_to_polygon
line = lat_lon_points_to_polygon(gring[:4], gring[4:])
return line
def _read_modis_geoData(dirpath, test_set):
import pandas as pd
import glob
from os.path import join
from datetime import timedelta
files = glob.glob(join(dirpath, "*.txt"))
if test_set:
files = files[::100]
df = pd.concat(pd.read_csv(f, header=2, parse_dates=[1]) for f in files)
# We only want the day-time files
df = df[df.DayNightFlag == "D"]
# Create a Multi-point object for each set of GRings
df["poly"] = df.filter(regex="GRing").apply(gring_to_obj, axis=1)
# The granules are 5 minutes each
df['EndDateTime'] = df['StartDateTime'] + timedelta(minutes=5)
return df
def load_modis_geoData(dirpath, test_set=False):
from datasets.models import MeasurementFile, Measurement
df = _read_modis_geoData(dirpath, test_set)
aod = Measurement(measurement_type='AOD')
aod.save()
aod.measurementvariable_set.create(variable_name='AOD_550_Dark_Target_Deep_Blue_Combined')
for _index, row in df.iterrows():
mf = MeasurementFile(time_start=row['StartDateTime'], time_end=row['EndDateTime'],
spatial_extent=row['poly'].wkt, name=row['# GranuleID'])
mf.save()
aod.measurementfile_set.add(mf)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('path', help="MODIS input path")
parser.add_argument('--test_set', help="Only do a subset", action='store_true')
# Gets command line args by default
args = parser.parse_args()
load_modis_geoData(args.path, args.test_set)
| lgpl-3.0 | -7,313,040,373,078,914,000 | 29.435484 | 94 | 0.650238 | false | 3.46875 | true | false | false |
Bergurth/aes_cmdl.py | daes_cmdl.py | 1 | 2117 | import os, random, struct, sys
from Crypto.Cipher import AES
from optparse import OptionParser
import getpass
import hashlib
parser = OptionParser()
parser.add_option("-p")
(options, args) = parser.parse_args()
if(len(sys.argv) < 2):
print "usage: python daes_cmdl.py input_file_name <output_file_name> -p <password>"
sys.exit()
in_file = sys.argv[1]
if(len(sys.argv) == 3):
    out_filename = sys.argv[2]
    no_out_file = False
else:
    no_out_file = True
    out_filename = os.path.splitext(in_file)[0] + '1234-09876'
cwd = os.getcwd()
if(options.p):
password = options.p
else:
#password = raw_input("please specify your password")
password = getpass.getpass("please specify your password")
key = hashlib.sha256(password).digest()
def decrypt_file(key, in_filename, out_filename=None, chunksize=24*1024):
""" Decrypts a file using AES (CBC mode) with the
given key. Parameters are similar to encrypt_file,
with one difference: out_filename, if not supplied
will be in_filename without its last extension
(i.e. if in_filename is 'aaa.zip.enc' then
out_filename will be 'aaa.zip')
"""
if not out_filename:
out_filename = os.path.splitext(in_filename)[0] + '1234-09876'
#print out_filename
with open(in_filename, 'rb') as infile:
origsize = struct.unpack('<Q', infile.read(struct.calcsize('Q')))[0]
iv = infile.read(16)
decryptor = AES.new(key, AES.MODE_CBC, iv)
with open(out_filename, 'wb') as outfile:
while True:
chunk = infile.read(chunksize)
if len(chunk) == 0:
break
outfile.write(decryptor.decrypt(chunk))
outfile.truncate(origsize)
decrypt_file(key, in_file, out_filename)
with open(cwd + "/" + out_filename,"r") as f:
#minlen = 12
for line in f:
sys.stdout.write(line)
if(no_out_file):
if sys.platform.startswith("linux"):
os.system("shred "+ cwd + "/" + out_filename)
os.remove(cwd + "/" + out_filename)
else:
os.remove(cwd + "/" + out_filename)
sys.exit(0)
| gpl-3.0 | 5,057,111,491,352,913,000 | 24.506024 | 87 | 0.616438 | false | 3.36566 | false | false | false |
gbook/nidb | src/gdcm/Examples/Python/ReadAndDumpDICOMDIR.py | 1 | 8113 | ################################################################################
#
# Program: GDCM (Grassroots DICOM). A DICOM library
#
# Copyright (c) 2006-2011 Mathieu Malaterre
# All rights reserved.
# See Copyright.txt or http://gdcm.sourceforge.net/Copyright.html for details.
#
# This software is distributed WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the above copyright notice for more information.
#
# File: ReadAndDumpDICOMDIR.py
#
# Author: Lukas Batteau (lbatteau gmail)
#
# This example shows how to read and dump a DICOMDIR File.
# Based on Tom Marynowski's (lordglub gmail) example.
#
# Usage:
# python ReadAndDumpDICOMDIR.py [DICOMDIR file]
############################################################################
import sys
import gdcm
if __name__ == "__main__":
# Check arguments
if (len(sys.argv) < 2):
# No filename passed
print "No input filename found"
quit()
filename = sys.argv[1]
# Read file
reader = gdcm.Reader()
reader.SetFileName(filename)
if (not reader.Read()):
print "Unable to read %s" % (filename)
quit()
file = reader.GetFile()
# Retrieve header information
fileMetaInformation = file.GetHeader()
print fileMetaInformation
# Retrieve data set
dataSet = file.GetDataSet()
#print dataSet
# Check media storage
mediaStorage = gdcm.MediaStorage()
mediaStorage.SetFromFile(file)
if (gdcm.MediaStorage.GetMSType(str(mediaStorage)) != gdcm.MediaStorage.MediaStorageDirectoryStorage):
# File is not a DICOMDIR
print "This file is not a DICOMDIR (Media storage type: %s)" % (str(mediaStorage))
quit()
# Check Media Storage SOP Class
if (fileMetaInformation.FindDataElement(gdcm.Tag(0x0002, 0x0002))):
sopClassUid = str(fileMetaInformation.GetDataElement(gdcm.Tag(0x0002, 0x0002)).GetValue())
# Check SOP UID
if (sopClassUid != "1.2.840.10008.1.3.10"):
# File is not a DICOMDIR
print "This file is not a DICOMDIR"
else:
# Not present
print "Media Storage SOP Class not present"
quit()
# Iterate through the DICOMDIR data set
iterator = dataSet.GetDES().begin()
while (not iterator.equal(dataSet.GetDES().end())):
dataElement = iterator.next()
# Check the element tag
if (dataElement.GetTag() == gdcm.Tag(0x004, 0x1220)):
# The 'Directory Record Sequence' element
sequence = dataElement.GetValueAsSQ()
# Loop through the sequence items
itemNr = 1
while (itemNr < sequence.GetNumberOfItems()):
item = sequence.GetItem(itemNr)
# Check the element tag
if (item.FindDataElement(gdcm.Tag(0x0004, 0x1430))):
# The 'Directory Record Type' element
value = str(item.GetDataElement(gdcm.Tag(0x0004, 0x1430)).GetValue())
# PATIENT
while (value.strip() == "PATIENT"):
print value.strip()
# Print patient name
if (item.FindDataElement(gdcm.Tag(0x0010, 0x0010))):
value = str(item.GetDataElement(gdcm.Tag(0x0010, 0x0010)).GetValue())
print value
# Print patient ID
if (item.FindDataElement(gdcm.Tag(0x0010, 0x0020))):
value = str(item.GetDataElement(gdcm.Tag(0x0010, 0x0020)).GetValue())
print value
# Next
itemNr = itemNr + 1
item = sequence.GetItem(itemNr)
if (item.FindDataElement(gdcm.Tag(0x0004, 0x1430))):
value = str(item.GetDataElement(gdcm.Tag(0x0004, 0x1430)).GetValue())
# STUDY
while (value.strip() == "STUDY"):
print value.strip()
# Print study UID
if (item.FindDataElement(gdcm.Tag(0x0020, 0x000d))):
value = str(item.GetDataElement(gdcm.Tag(0x0020, 0x000d)).GetValue())
print value
# Print study date
if (item.FindDataElement(gdcm.Tag(0x0008, 0x0020))):
value = str(item.GetDataElement(gdcm.Tag(0x0008, 0x0020)).GetValue())
print value
# Print study description
if (item.FindDataElement(gdcm.Tag(0x0008, 0x1030))):
value = str(item.GetDataElement(gdcm.Tag(0x0008, 0x1030)).GetValue())
print value
# Next
itemNr = itemNr + 1
item = sequence.GetItem(itemNr)
if (item.FindDataElement(gdcm.Tag(0x0004, 0x1430))):
value = str(item.GetDataElement(gdcm.Tag(0x0004, 0x1430)).GetValue())
# SERIES
while (value.strip() == "SERIES"):
print value.strip()
# Print series UID
if (item.FindDataElement(gdcm.Tag(0x0020, 0x000e))):
value = str(item.GetDataElement(gdcm.Tag(0x0020, 0x000e)).GetValue())
print value
# Print series modality
if (item.FindDataElement(gdcm.Tag(0x0008, 0x0060))):
value = str(item.GetDataElement(gdcm.Tag(0x0008, 0x0060)).GetValue())
print "Modality"
print value
# Print series description
if (item.FindDataElement(gdcm.Tag(0x0008, 0x103e))):
value = str(item.GetDataElement(gdcm.Tag(0x0008, 0x103e)).GetValue())
print "Description"
print value
# Next
itemNr = itemNr + 1
item = sequence.GetItem(itemNr)
if (item.FindDataElement(gdcm.Tag(0x0004, 0x1430))):
value = str(item.GetDataElement(gdcm.Tag(0x0004, 0x1430)).GetValue())
# IMAGE
while (value.strip() == "IMAGE"):
print value.strip()
# Print image UID
if (item.FindDataElement(gdcm.Tag(0x0004, 0x1511))):
value = str(item.GetDataElement(gdcm.Tag(0x0004, 0x1511)).GetValue())
print value
# Next
if (itemNr < sequence.GetNumberOfItems()):
itemNr = itemNr + 1
else:
break
item = sequence.GetItem(itemNr)
if (item.FindDataElement(gdcm.Tag(0x0004, 0x1430))):
value = str(item.GetDataElement(gdcm.Tag(0x0004, 0x1430)).GetValue())
# Next
itemNr = itemNr + 1
| gpl-3.0 | -3,555,645,555,328,756,000 | 42.385027 | 113 | 0.459016 | false | 4.570704 | false | false | false |
jianlirong/incubator-hawq | contrib/hawq-hadoop/hawq-mapreduce-tool/generate_mr_report.py | 12 | 2279 | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
############################################################################
# A script to generate pulse readable report.
infile = file('inputformat.logs')
outfile = file('inputformat.report','w')
testsuitename = ''
outline = ''
success = False
line = infile.readline()
while line:
if line.find('Executing test case: ')!=-1:
if len(outline) != 0:
if success:
outline = outline + "|Test Status|PASS"
else:
outline = outline + "|Test Status|FAILED"
outfile.write(outline+'\n')
outline="Test Suite Name|" + testsuitename + "|Test Case Name|"
success = False
startIndex = line.find('Executing test case: ') + len('Executing test case: ')
endIndex=len(line)
testcasename = line[startIndex:endIndex-1]
outline = outline + testcasename + "|Test Detail|" + testcasename + " (0.00 ms)"
elif line.find('Executing test suite: ')!=-1:
startIndex = line.find('Executing test suite: ') + len('Executing test suite: ')
endIndex = len(line)
testsuitename = line[startIndex:endIndex-1]
elif line.find('Successfully finish test case: ')!=-1:
success = True
line = infile.readline()
if len(outline) != 0:
if success:
outline = outline + "|Test Status|PASS"
else:
outline = outline + "|Test Status|FAILED"
outfile.write(outline+'\n')
outfile.flush()
infile.close()
outfile.close()
| apache-2.0 | -7,884,257,589,737,035,000 | 33.530303 | 88 | 0.642387 | false | 4.05516 | true | false | false |
fidelram/deepTools | deeptools/deepBlue.py | 1 | 10950 | #!/usr/bin/env python
try:
# python 2
import xmlrpclib
except:
# python 3
import xmlrpc.client as xmlrpclib
import time
import tempfile
import os.path
import sys
import pyBigWig
from deeptools.utilities import mungeChromosome
from deeptoolsintervals import GTF
import datetime
def isDeepBlue(fname):
"""
Returns true if the file ends in .wig, .wiggle, or .bedgraph, since these indicate a file on the deepBlue server
"""
if fname.endswith(".wig"):
return True
if fname.endswith(".wiggle"):
return True
if fname.endswith(".bedgraph"):
return True
if fname.startswith("http") or fname.startswith("ftp"):
return False
# For ENCODE samples, the "Name" is just the ENCODE sample ID, so as a fallback check for files that aren't there.
if not os.path.exists(fname):
return True
return False
def mergeRegions(regions):
"""
Given a list of [(chrom, start, end), ...], merge all overlapping regions
This returns a dict, where values are sorted lists of [start, end].
"""
bar = sorted(regions)
out = dict()
last = [None, None, None]
for reg in bar:
if reg[0] == last[0] and reg[1] <= last[2]:
if reg[2] > last[2]:
last[2] = reg[2]
continue
else:
if last[0]:
if last[0] not in out:
out[last[0]] = list()
out[last[0]].append([last[1], last[2]])
last = [reg[0], reg[1], reg[2]]
if last[0] not in out:
out[last[0]] = list()
out[last[0]].append([last[1], last[2]])
return out
def makeTiles(db, args):
"""
Given a deepBlue object, return a list of regions that will be queried
"""
out = []
for (k, v) in db.chromsTuple:
start = 0
        while start < v:
            end = start + args.binSize
            if end > v:
                end = v
            out.append([k, start, end])
            start = end + args.distanceBetweenBins
return out
def makeChromTiles(db):
"""
Make a region for each chromosome
"""
out = []
for (k, v) in db.chromsTuple:
out.append([k, 0, v])
return out
def makeRegions(BED, args):
"""
Given a list of BED/GTF files, make a list of regions.
These are vaguely extended as appropriate. For simplicity, the maximum of --beforeRegionStartLength
and --afterRegionStartLength are tacked on to each end and transcripts are used for GTF files.
"""
itree = GTF(BED, transcriptID=args.transcriptID, transcript_id_designator=args.transcript_id_designator)
o = []
extend = 0
# The before/after stuff is specific to computeMatrix
if "beforeRegionStartLength" in args:
extend = max(args.beforeRegionStartLength, args.afterRegionStartLength)
for chrom in itree.chroms:
regs = itree.findOverlaps(chrom, 0, 4294967295) # bigWig files use 32 bit coordinates
for reg in regs:
o.append([chrom, max(0, reg[0] - extend), reg[1] + extend])
del itree
return o
def preloadWrapper(foo):
"""
This is a wrapper around the preload function for multiprocessing
"""
args = foo[2]
regs = foo[3]
res = deepBlue(foo[0], url=args.deepBlueURL, userKey=args.userKey)
return res.preload(regs, tmpDir=args.deepBlueTempDir)
class deepBlue(object):
def __init__(self, sample, url="http://deepblue.mpi-inf.mpg.de/xmlrpc", userKey="anonymous_key"):
"""
Connect to the requested deepblue server with the given user key and request the specifed sample from it.
>>> sample = "S002R5H1.ERX300721.H3K4me3.bwa.GRCh38.20150528.bedgraph"
>>> db = deepBlue(sample) # doctest: +SKIP
>>> assert(db.chroms("chr1") == 248956422) # doctest: +SKIP
"""
self.sample = sample
self.url = url
self.userKey = userKey
self.server = xmlrpclib.Server(url, allow_none=True)
self.info = None
self.experimentID = None
self.genome = None
self.chromsDict = None
self.chromsTuple = None
# Set self.experimentID
experimentID = self.getEID()
if not experimentID:
raise RuntimeError("The requested sample({}) has no associated experiment! If you did not intend to use samples on deepBlue, then it appears either you misspelled a file name or (if you're using BAM files for input) one of your BAM files is lacking a valid index.".format(sample))
# Set self.info
(status, resp) = self.server.info(self.experimentID, userKey)
if status != "okay":
raise RuntimeError("Received the following error while fetching information about '{}': {}".format(resp, sample))
self.info = resp[0]
# Set self.genome
genome = self.getGenome()
if not genome:
raise RuntimeError("Unable to determine an appropriate genome for '{}'".format(sample))
# Set self.chroms
chroms = self.getChroms()
if not chroms:
raise RuntimeError("Unable to determine chromosome names/sizes for '{}'".format(sample))
def getEID(self):
"""
Given a sample name, return its associated experiment ID (or None on error).
self.experimentID is then the internal ID (e.g., e52525)
"""
(status, resps) = self.server.search(self.sample, "experiments", self.userKey)
if status != "okay":
raise RuntimeError("Received an error ({}) while searching for the experiment associated with '{}'".format(resps, self.sample))
for resp in resps:
if resp[1] == self.sample:
self.experimentID = resp[0]
return resp[0]
return None
def getGenome(self):
"""
Determines and sets the genome assigned to a given sample. On error, this raises a runtime exception.
self.genome is then the internal genome ID.
"""
if "genome" in self.info.keys():
self.genome = self.info["genome"]
return self.genome
def getChroms(self):
"""
Determines and sets the chromosome names/sizes for a given sample. On error, this raises a runtime exception.
self.chroms is then a dictionary of chromosome:length pairs
"""
(status, resp) = self.server.chromosomes(self.genome, self.userKey)
if status != "okay":
raise RuntimeError("Received an error while fetching chromosome information for '{}': {}".format(self.sample, resp))
self.chromsDict = {k: v for k, v in resp}
self.chromsTuple = [(k, v) for k, v in resp]
return resp
def chroms(self, chrom=None):
"""
Like the chroms() function in pyBigWig, returns either chromsDict (chrom is None) or the length of a given chromosome
"""
if chrom is None:
return self.chromsDict
elif chrom in self.chromsDict:
return self.chromsDict[chrom]
return None
def close(self):
pass
def preload(self, regions, tmpDir=None):
"""
Given a sample and a set of regions, write a bigWig file containing the underlying signal.
This function returns the file name, which needs to be deleted by the calling function at some point.
This sends queries one chromosome at a time, due to memory limits on deepBlue
"""
startTime = datetime.datetime.now()
regions2 = mergeRegions(regions)
# Make a temporary file
f = tempfile.NamedTemporaryFile(delete=False, dir=tmpDir)
fname = f.name
f.close()
# Start with the bigWig file
bw = pyBigWig.open(fname, "w")
bw.addHeader(self.chromsTuple, maxZooms=0) # This won't work in IGV!
        # Make a string out of everything in a reasonable order
for k, v in self.chromsTuple:
# Munge chromosome names as appropriate
chrom = mungeChromosome(k, regions2.keys())
if not chrom:
continue
if chrom not in regions2 or len(regions2) == 0:
continue
regionsStr = "\n".join(["{}\t{}\t{}".format(k, reg[0], reg[1]) for reg in regions2[chrom]])
regionsStr += "\n"
# Send the regions
(status, regionsID) = self.server.input_regions(self.genome, regionsStr, self.userKey)
if status != "okay":
raise RuntimeError("Received the following error while sending regions for '{}': {}".format(regionsID, self.sample))
# Get the experiment information
(status, queryID) = self.server.select_experiments(self.sample, k, None, None, self.userKey)
if status != "okay":
raise RuntimeError("Received the following error while running select_experiments on file '{}': {}".format(self.sample, queryID))
if not queryID:
raise RuntimeError("Somehow, we received None as a query ID (file '{}')".format(self.sample))
# Intersect
(status, intersectID) = self.server.intersection(queryID, regionsID, self.userKey)
if status != "okay":
raise RuntimeError("Received the following error while running intersection on file '{}': {}".format(self.sample, intersectID))
if not intersectID:
raise RuntimeError("Somehow, we received None as an intersect ID (file '{}')".format(self.sample))
# Query the regions
(status, reqID) = self.server.get_regions(intersectID, "START,END,VALUE", self.userKey)
if status != "okay":
raise RuntimeError("Received the following error while fetching regions in file '{}': {}".format(self.sample, reqID))
# Wait for the server to process the data
(status, info) = self.server.info(reqID, self.userKey)
request_status = info[0]["state"]
while request_status != "done" and request_status != "failed":
time.sleep(0.1)
(status, info) = self.server.info(reqID, self.userKey)
request_status = info[0]["state"]
# Get the actual data
(status, resp) = self.server.get_request_data(reqID, self.userKey)
if status != "okay":
raise RuntimeError("Received the following error while fetching data in file '{}': {}".format(self.sample, resp))
for intervals in resp.split("\n"):
interval = intervals.split("\t")
if interval[0] == '':
continue
bw.addEntries([k], [int(interval[0]) - 1], ends=[int(interval[1]) - 1], values=[float(interval[2])])
bw.close()
sys.stderr.write("{} done (took {})\n".format(self.sample, datetime.datetime.now() - startTime))
sys.stderr.flush()
return fname
| gpl-3.0 | -8,865,133,240,584,388,000 | 37.286713 | 292 | 0.603288 | false | 4.000731 | false | false | false |
Apstra/aeon-venos | tests/test_opx.py | 1 | 2524 | import mock
import pytest
from pylib.aeon.opx import device
from pylib.aeon.cumulus import connector
g_facts = {
'hw_version': None,
'hw_part_number': None,
'hostname': 'opx221_vm',
'serial_number': '525400A5EC36',
'fqdn': 'opx221_vm',
'os_version': '2.2.1',
'virtual': True,
'hw_model': 'S6000-VM',
'vendor': 'OPX',
'mac_address': '52:54:00:A5:EC:36',
'os_name': 'OPX',
'service_tag': None
}
ip_link_show_out = '''
1: lo: <LOOPBACK> mtu 65536 qdisc noop state DOWN mode DEFAULT group default
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP mode DEFAULT group default qlen 1000
link/ether 52:54:00:a5:ec:36 brd ff:ff:ff:ff:ff:ff
'''
hostname_out = "opx221_vm"
grep_version_out = '2.2.1'
grep_platform_out = 'S6000-VM'
@mock.patch('pylib.aeon.opx.connector.paramiko.SSHClient')
@pytest.fixture()
def opx_connector(mock_ssh):
hostname = '1.1.1.1'
port = 22
proto = 'ssh'
user = 'test_user'
passwd = 'test_passwd'
con = connector.Connector(hostname, port=port, proto=proto, user=user, passwd=passwd)
return con
@mock.patch('pylib.aeon.opx.device.BaseDevice.probe')
@mock.patch('pylib.aeon.opx.device.Connector')
@pytest.fixture()
def opx_device(mock_connector, mock_probe, request):
def mock_execute(args, **kwargs):
results = []
for arg in args:
# os_version
if arg == """grep -oP '^OS_VERSION=[\"]?\K.*\d' /etc/OPX-release-version""":
results.append({'stdout': grep_version_out})
# platform
if arg == """grep -oP '^PLATFORM=[\"]?\K.*\w' /etc/OPX-release-version""":
results.append({'stdout': grep_platform_out})
# hostname
elif arg == 'hostname':
results.append({'stdout': hostname_out})
elif arg =='ip link show':
results.append({'stdout': ip_link_show_out})
return True, results
mock_connector.return_value.execute.side_effect = mock_execute
mock_probe.return_value = True, 10
target = '1.1.1.1'
user = 'test_user'
passwd = 'test_passwd'
dev = device.Device(target, user=user, passwd=passwd)
return dev
def test_opx_device(opx_device):
dev = opx_device
assert dev.OS_NAME == 'OPX'
assert dev.DEFAULT_PROBE_TIMEOUT == 10
assert dev.user == 'test_user'
assert dev.passwd == 'test_passwd'
assert dev.facts == g_facts
| apache-2.0 | 7,499,508,265,810,249,000 | 29.409639 | 114 | 0.614105 | false | 3.063107 | true | false | false |
Far0/Battleports | Code/score.py | 1 | 4923 | import pygame
import psycopg2
import collections
from buttons import Buttons
from quit import process_events
from itertools import chain
blue = (30, 15, 170) #RGB numbers for various needed colours
yellow = (255, 255, 0)
white = (255, 255, 255)
def data_shredder(a): #removes the tuple and the list
res = ''
if isinstance(a, collections.Iterable):
for item in a:
res += str(data_shredder(item)) + ' '
else:
res = str(a)
return res
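# For example (hypothetical value): data_shredder([(42,)]) flattens the nested
# list/tuple into the string '42' followed by trailing spaces, which the later
# int() conversions tolerate.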
def interact_with_database(command):
# Connect and set up cursor
connection = psycopg2.connect("dbname=project2db user=postgres password=root")
cursor = connection.cursor()
# Execute the command
cursor.execute(command)
connection.commit()
# Save results
results = None
try:
results = cursor.fetchall()
except psycopg2.ProgrammingError:
# Nothing to fetch
pass
# Close connection
cursor.close()
connection.close()
return results
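# For example (hypothetical contents): interact_with_database("SELECT wins FROM score")
# returns fetchall()-style rows such as [(3,), (0,)]; commands with no result set
# (INSERT/UPDATE) return None.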
def download_names(): #get the items from the database
return interact_with_database("SELECT name FROM score Order By wins DESC")
def download_wins():
return interact_with_database("SELECT wins FROM score Order By wins DESC")
def download_losses():
return interact_with_database("SELECT losses FROM score Order By wins DESC")
def download_ratio():
interact_with_database("SELECT score FROM score")
def update_wins(name, wins): #not used atm
interact_with_database("UPDATE score SET wins = {} WHERE name = '{}' ".format(wins, name))
def update_losses(name, losses):
interact_with_database("UPDATE score SET losses = {} WHERE name = '{}' ")
def amount(): #gets the number of items in the table
tuple_list = interact_with_database("SELECT count(name) FROM score")
return data_shredder(tuple_list) #removes the tuple and the list
def ratio(id):
    losses = interact_with_database("SELECT losses FROM score WHERE name = '{}'".format(id))
    wins = interact_with_database("SELECT wins FROM score WHERE name = '{}'".format(id))
    losses = int(data_shredder(losses))
    wins = int(data_shredder(wins))
    total = wins + losses
    ratio = float(losses) / total
    score = (1 - ratio) * 100
    interact_with_database("UPDATE score SET score = {} WHERE name = '{}'".format(score, id))
def highscore():
pygame.init()
dispinfo = pygame.display.Info()
size = (dispinfo.current_w, dispinfo.current_h)
width = dispinfo.current_w #Size for the screen in pixels
height = dispinfo.current_h
screen = pygame.display.set_mode(size, pygame.FULLSCREEN) #Make a window with the size stated above
bg = pygame.image.load("black.jpg") #Load in a background
bg = pygame.transform.scale(bg,(width,height))
leave = Buttons(width, height, 0.8, 0.9, "Return")
name = Buttons(width, height, 0.000000001, 0.9, "Highscore")
myfont = pygame.font.SysFont(None, 30)
playerName = list(download_names())
playerWins = list(download_wins()) #store the list from the database
playerLosses = list(download_losses())
screen.blit(bg, (0, 0))#Draw the background
y = 50
l = myfont.render("Highscore",1,(255,255,0)) #display each item on the screen
screen.blit(l, (750,y))
a = 0
a = len(playerName)
if a > 10:
a = 10
for i in range(a):
tempplayername = playerName[i][0]
tempplayerwins = playerWins[i][0]
tempplayerlosses = playerLosses[i][0]
playerName.append(tempplayername)
playerWins.append(tempplayerwins)
playerLosses.append(tempplayerlosses)
y += 25
l = myfont.render("{} Name: {}".format(i+1, playerName[i]),1,(255,255,0)) #display each item on the screen
screen.blit(l, (750,y))
y += 25
l = myfont.render(" Wins: {}".format(playerWins[i]),1,(255,255,0)) #display each item on the screen
screen.blit(l, (750,y))
y += 25
l = myfont.render(" Losses: {}".format(playerLosses[i]),1,(255,255,0)) #display each item on the screen
screen.blit(l, (750,y))
y += 25
#change the pos of the next textbox so they dont overlap
while not process_events(): #Loop to keep refreshing the screen while the window is not closed
name.draw(screen, 400, 70) #draw the name on the screen
screen.blit(name.write(100), ((int(name.width * name.xpos), int(name.height * name.ypos))))
leave.draw(screen, 300, 47) #draw the exit button
screen.blit(leave.write(65), ((int(leave.width * leave.xpos), int(leave.height * leave.ypos))))
if pygame.mouse.get_pressed()[0] and leave.clicked():
from menu import program
program()
if leave.clicked():
leave.tcolor = yellow
leave.bcolor = blue
if not leave.clicked():
leave.bcolor = blue
leave.tcolor = white
pygame.display.flip() #Flips the screen so that everything updates | mit | 1,107,863,158,769,956,100 | 31.183007 | 120 | 0.644526 | false | 3.652077 | false | false | false |
Lynx187/script.module.urlresolver | lib/urlresolver/plugins/hugefiles.py | 3 | 2840 | '''
Hugefiles urlresolver plugin
Copyright (C) 2013 Vinnydude
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from t0mm0.common.net import Net
from urlresolver.plugnplay.interfaces import UrlResolver
from urlresolver.plugnplay.interfaces import PluginSettings
from urlresolver.plugnplay import Plugin
import re
from urlresolver import common
from lib import captcha_lib
class HugefilesResolver(Plugin, UrlResolver, PluginSettings):
implements = [UrlResolver, PluginSettings]
name = "hugefiles"
domains = ["hugefiles.net"]
def __init__(self):
p = self.get_setting('priority') or 100
self.priority = int(p)
self.net = Net()
def get_media_url(self, host, media_id):
url = self.get_url(host, media_id)
common.addon.log_debug('HugeFiles - Requesting GET URL: %s' % url)
html = self.net.http_GET(url).content
if 'File Not Found' in html:
raise UrlResolver.ResolverError('File Not Found or removed')
#Set POST data values
data = {}
r = re.findall(r'type="hidden"\s+name="([^"]+)"\s+value="([^"]+)', html)
if r:
for name, value in r:
data[name] = value
else:
raise UrlResolver.ResolverError('Cannot find data values')
data['method_free'] = 'Free Download'
data.update(captcha_lib.do_captcha(html))
common.addon.log_debug('HugeFiles - Requesting POST URL: %s DATA: %s' % (url, data))
html = self.net.http_POST(url, data).content
r = re.search('fileUrl\s*=\s*"([^"]+)', html)
if r:
return r.group(1)
raise UrlResolver.ResolverError('Unable to resolve HugeFiles Link')
def get_url(self, host, media_id):
return 'http://hugefiles.net/%s' % media_id
def get_host_and_id(self, url):
r = re.search('//(.+?)/([0-9a-zA-Z]+)', url)
if r:
return r.groups()
else:
return False
return('host', 'media_id')
def valid_url(self, url, host):
if self.get_setting('enabled') == 'false': return False
return (re.match('http://(www.)?hugefiles.net/' +
'[0-9A-Za-z]+', url) or
'hugefiles' in host)
| gpl-2.0 | 8,769,949,715,246,539,000 | 34.5 | 92 | 0.630986 | false | 3.801874 | false | false | false |
Conan-Kudo/librepo | examples/python/yum_repo_simple_download.py | 4 | 1122 | #!/usr/bin/python
"""
Example: Simple download whole repository
This example uses more "pythonic" way of usage.
Instead of use setopt() method it uses class properties.
Use case:
We have a metalink url of a repository and we
want do download complete repository metadata.
"""
import librepo
# Metalink URL
METALINK_URL = "https://mirrors.fedoraproject.org/metalink?repo=fedora-19&arch=x86_64"
# Destination directory (note: This directory must exist!)
DESTDIR = "downloaded_metadata"
if __name__ == "__main__":
h = librepo.Handle()
r = librepo.Result()
# Repository with repodata in the rpm-md format
h.repotype = librepo.LR_YUMREPO
# Set metalink url
h.mirrorlist = METALINK_URL
# Destination directory for metadata
h.destdir = DESTDIR
# Use the fastest mirror
h.fastestmirror = True
try:
h.perform(r)
except librepo.LibrepoException as e:
# rc - Return code (integer value)
# msg - Detailed error message (string)
# general_msg - Error message based on rc (string)
rc, msg, general_msg = e
print "Error: %s" % msg
| lgpl-2.1 | 4,813,575,545,057,048,000 | 26.365854 | 86 | 0.678253 | false | 3.50625 | false | false | false |
minh5/foster_care_proj | foster_care_proj/notebooks/processing.py | 1 | 6803 | import pandas as pd
class Component:
def __init__(self, name, calculate):
self.name = name
accepted_calculations = ['total', 'average', 'dummy', 'sum']
if calculate in accepted_calculations:
self.calculate = calculate
else:
raise Exception('calculate method must be', str(accepted_calculations))
def __str__(self):
return self.name
class DataSet:
def __init__(self, data, components):
self.data = data
self.components = []
if len(components) > 0:
for component in components:
self.components.append(component)
def add_component(self, component):
self.components.append(component)
@property
def unique_id(self):
return 'unique_id'
@property
def id_column(self):
return self.data[self.unique_id]
@staticmethod
def create_unique_id(row):
return ('_').join([str(row['FACILITY_ID']), str(row['CLIENT_ID']), str(row['HOME_RMVL_KEY'])])
@property
def base_df(self):
self.data[self.unique_id] = self.data.apply(lambda x: self.create_unique_id(x), axis=1)
base_df = pd.DataFrame(data=self.data.ix[:, self.unique_id])
base_df.columns = [self.unique_id]
return base_df.drop_duplicates(self.unique_id)
@property
def interim_df(self):
"""
Used for debugging purposes
"""
data = self.data
data[self.unique_id] = data.apply(lambda x: self.create_unique_id(x), axis=1)
return data
@staticmethod
def restructure_column_names(df, component):
new_names = [component.name + '_' + str(column) for column in df.columns.values]
df.columns = new_names
return df
def get_totals(self, component):
grouped = self.data.groupby(self.unique_id).count()[component.name]
grouped = pd.DataFrame(index=grouped.index, data=grouped)
grouped.columns = [component.name + '_count']
return grouped
def get_dummy(self, component):
crosstab = pd.crosstab(self.id_column, self.data[component.name])
new_names = [component.name + '_' + str(column) for column in crosstab.columns.values]
crosstab.columns = new_names
return crosstab
def get_average(self, component):
grouped = self.data.groupby(self.unique_id).mean()[component.name]
grouped = pd.DataFrame(index=grouped.index, data=grouped)
grouped.columns = [component.name + '_avg']
return grouped
def get_sum(self, component):
grouped = self.data.groupby(self.unique_id).sum()[component.name]
grouped = pd.DataFrame(index=grouped.index, data=grouped)
grouped.columns = [component.name + '_sum']
return grouped
def run_calculation(self, component):
if component.calculate == 'average':
calc = self.get_average(component)
elif component.calculate == 'total':
calc = self.get_totals(component)
elif component.calculate == 'dummy':
calc = self.get_dummy(component)
elif component.calculate == 'sum':
calc = self.get_sum(component)
else:
raise Exception('calculations for {comp} component not supported'.format(comp=component.name))
return calc
@staticmethod
def outcome_function(row, desirability):
desirability = desirability[0].upper() + desirability[1:]
if row['desirability_spell'] == desirability:
return 1
else:
return 0
def create_outcome_var(self):
return self.data.apply(lambda x: self.outcome_function(x, 'good'), axis=1)
def finalize_df(self):
data = self.base_df
for component in self.components:
print('working on', component.name)
calc = self.run_calculation(component)
data = pd.merge(data, calc, left_on=self.unique_id, right_index=True, how='left')
data.columns = [col.replace(' ', '_').lower() for col in data.columns.values]
data['outcome'] = self.create_outcome_var()
return data
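# Hedged usage sketch (not called anywhere in this module): shows how Component
# and DataSet are meant to be combined. `df` is assumed to be a pandas DataFrame
# carrying the FACILITY_ID / CLIENT_ID / HOME_RMVL_KEY columns used by
# create_unique_id and the desirability_spell column used by the outcome variable.
def _example_build_dataset(df):
    comps = [Component('RMVL_LOS', 'average'), Component('GENDER', 'dummy')]
    ds = DataSet(df, comps)
    ds.add_component(Component('RMVL_AGE', 'total'))
    return ds.finalize_df()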
causes = [
'Abandonment', 'Alcohol Use/Abuse - Caretaker',
'Alcohol Use/Abuse - Child', 'Death of Parent(s)',
'Domestic Violence', 'Drug Use/Abuse - Caretaker',
'Drug Use/Abuse - Child', 'Incarceration of Parent/Guardian(s)',
"JPO Removal (Child's Behavior Problem)",
'Mental/Emotional Injuries', 'Neglect - educational needs',
'Neglect - hygiene/clothing needs', 'Neglect - medical needs',
'Neglect - No/Inadequate Housing', 'Neglect - nutritional needs',
'Neglect - supervision and safety needs',
"Parent's inability to cope",
'Parent lacks skills for providing care',
'Parent not seeking BH treatment',
'Parent not seeking BH treatmnt for child', 'Parent/Child Conflict',
'Parent/Guardian lacks skills to provide', 'Physical Abuse',
'Relinquishment', 'Resumption', 'Sexual Abuse', 'Truancy', 'Provider.Type', 'Capacity',
'Willing.to.Adopt', 'Gender.Served', 'Age.Range.Served',
'lower_age_served', 'upper_age_served', 'Family Foster Care',
'Foster Care', 'Group Home', 'Non-Relative/Kinship',
'Non-Relative/Non-Kinship', 'Pre-Adoptive', 'Pre-Adoptive Home',
'Pre-Adoptive Teen Mother with Non-Dependent Child', 'Regular',
'Regular Teen Mother with Non-Dependent Child',
'Regular Teen Mother with Two Dependent Children',
'Relative/Kinship', 'Residential', 'Residential / Institution',
'Residential Treatment Facility (RTF)', 'RTF Room and Board',
'Shelter', 'Shelter Teen Mother with Non-Dependent Child',
'Teen Family Foster Care (ages 12-21 years)',
'Teen mother with 2 non-dependent children',
'Teen Mother with 2 Non-Dependent Children',
'Teen mother with non-dependent child',
'Teen Parent Family Foster Care (ages 12-21) plus one non-dependent child',
'Therapeutic Foster Care']
comp_dict = {}
comp_dict['RMVL_LOS'] = 'average'
comp_dict['CASE_REF_ID'] = 'total'
comp_dict['CLIENT_ID'] = 'total'
comp_dict['GENDER'] = 'dummy'
comp_dict['RACE_GROUP'] = 'dummy'
comp_dict['RMVL_TYPE'] = 'dummy'
comp_dict['RMVL_AGE'] = 'total'
comp_dict['PLCMNT_TYPE'] = 'dummy'
comp_dict['TYPE_PLACEMENT'] = 'dummy'
comp_dict['ANALYSIS_CARETYPE'] = 'dummy'
comp_dict['NUM_SPELLS'] = 'average'
comp_dict['NUM_MOVES'] = 'average'
comp_dict['NUM_SPELLS'] = 'total'
comp_dict['NUM_MOVES'] = 'total'
comp_dict['NUM_SPELLS'] = 'sum'
comp_dict['NUM_MOVES'] = 'sum'
for cause in causes:
comp_dict[cause] = 'total'
components = []
for key, value in comp_dict.items():
comp = Component(key, value)
components.append(comp) | mit | -5,855,531,884,169,251,000 | 36.59116 | 106 | 0.628546 | false | 3.485143 | false | false | false |
frreiss/tensorflow-fred | tensorflow/python/layers/utils_test.py | 15 | 4880 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.layers.utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.layers import utils
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class ConvUtilsTest(test.TestCase):
def testConvertDataFormat(self):
self.assertEqual('NCDHW', utils.convert_data_format('channels_first', 5))
self.assertEqual('NCHW', utils.convert_data_format('channels_first', 4))
self.assertEqual('NCW', utils.convert_data_format('channels_first', 3))
self.assertEqual('NHWC', utils.convert_data_format('channels_last', 4))
self.assertEqual('NWC', utils.convert_data_format('channels_last', 3))
self.assertEqual('NDHWC', utils.convert_data_format('channels_last', 5))
with self.assertRaises(ValueError):
utils.convert_data_format('invalid', 2)
def testNormalizeTuple(self):
self.assertEqual((2, 2, 2), utils.normalize_tuple(2, n=3, name='strides'))
self.assertEqual(
(2, 1, 2), utils.normalize_tuple((2, 1, 2), n=3, name='strides'))
with self.assertRaises(ValueError):
utils.normalize_tuple((2, 1), n=3, name='strides')
with self.assertRaises(ValueError):
utils.normalize_tuple(None, n=3, name='strides')
def testNormalizeDataFormat(self):
self.assertEqual(
'channels_last', utils.normalize_data_format('Channels_Last'))
self.assertEqual(
'channels_first', utils.normalize_data_format('CHANNELS_FIRST'))
with self.assertRaises(ValueError):
utils.normalize_data_format('invalid')
def testNormalizePadding(self):
self.assertEqual('same', utils.normalize_padding('SAME'))
self.assertEqual('valid', utils.normalize_padding('VALID'))
with self.assertRaises(ValueError):
utils.normalize_padding('invalid')
def testConvOutputLength(self):
self.assertEqual(4, utils.conv_output_length(4, 2, 'same', 1, 1))
self.assertEqual(2, utils.conv_output_length(4, 2, 'same', 2, 1))
self.assertEqual(3, utils.conv_output_length(4, 2, 'valid', 1, 1))
self.assertEqual(2, utils.conv_output_length(4, 2, 'valid', 2, 1))
self.assertEqual(5, utils.conv_output_length(4, 2, 'full', 1, 1))
self.assertEqual(3, utils.conv_output_length(4, 2, 'full', 2, 1))
self.assertEqual(2, utils.conv_output_length(5, 2, 'valid', 2, 2))
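    # The expectations above are consistent with conv_output_length's formula:
    # with the dilated kernel size k' = (kernel - 1) * dilation + 1, the output
    # length is ceil(input / stride) for 'same', ceil((input - k' + 1) / stride)
    # for 'valid', and ceil((input + k' - 1) / stride) for 'full'.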
def testConvInputLength(self):
self.assertEqual(3, utils.conv_input_length(4, 2, 'same', 1))
self.assertEqual(2, utils.conv_input_length(2, 2, 'same', 2))
self.assertEqual(4, utils.conv_input_length(3, 2, 'valid', 1))
self.assertEqual(4, utils.conv_input_length(2, 2, 'valid', 2))
self.assertEqual(3, utils.conv_input_length(4, 2, 'full', 1))
self.assertEqual(4, utils.conv_input_length(3, 2, 'full', 2))
def testDeconvOutputLength(self):
self.assertEqual(4, utils.deconv_output_length(4, 2, 'same', 1))
self.assertEqual(8, utils.deconv_output_length(4, 2, 'same', 2))
self.assertEqual(5, utils.deconv_output_length(4, 2, 'valid', 1))
self.assertEqual(8, utils.deconv_output_length(4, 2, 'valid', 2))
self.assertEqual(3, utils.deconv_output_length(4, 2, 'full', 1))
self.assertEqual(6, utils.deconv_output_length(4, 2, 'full', 2))
class ConstantValueTest(test.TestCase):
@test_util.run_deprecated_v1
def testConstantValue(self):
f1 = lambda: constant_op.constant(5)
f2 = lambda: constant_op.constant(32)
# Boolean pred
self.assertEqual(5, utils.constant_value(utils.smart_cond(True, f1, f2)))
self.assertEqual(32, utils.constant_value(utils.smart_cond(False, f1, f2)))
# Integer pred
self.assertEqual(5, utils.constant_value(utils.smart_cond(1, f1, f2)))
self.assertEqual(32, utils.constant_value(utils.smart_cond(0, f1, f2)))
# Unknown pred
pred = array_ops.placeholder_with_default(True, shape=())
self.assertIsNone(utils.constant_value(utils.smart_cond(pred, f1, f2)))
#Error case
with self.assertRaises(TypeError):
utils.constant_value(5)
if __name__ == '__main__':
test.main()
| apache-2.0 | 6,092,373,260,875,308,000 | 40.008403 | 80 | 0.690574 | false | 3.439042 | true | false | false |
vitalti/sapl | sapl/relatorios/templates/pdf_protocolo_gerar.py | 1 | 6253 | # parameters=sessao,imagem,data,lst_protocolos,dic_cabecalho,lst_rodape,dic_filtro
"""relatorio_protocolo.py
External method para gerar o arquivo rml do resultado de uma pesquisa de protocolos
Autor: Luciano De Fazio
Empresa: OpenLegis Consultoria
versão: 1.0
"""
import time
from trml2pdf import parseString
def cabecalho(dic_cabecalho, imagem):
"""Gera o codigo rml do cabecalho"""
tmp_data = ''
tmp_data += '\t\t\t\t<image x="2.1cm" y="25.7cm" width="59" height="62" file="' + imagem + '"/>\n'
tmp_data += '\t\t\t\t<lines>2cm 25.4cm 19cm 25.4cm</lines>\n'
tmp_data += '\t\t\t\t<setFont name="Helvetica-Bold" size="15"/>\n'
tmp_data += '\t\t\t\t<drawString x="5cm" y="27.2cm">' + \
dic_cabecalho['nom_casa'] + '</drawString>\n'
tmp_data += '\t\t\t\t<setFont name="Helvetica" size="12"/>\n'
tmp_data += '\t\t\t\t<drawString x="5cm" y="26.6cm">Sistema de Apoio ao Processo Legislativo</drawString>\n'
tmp_data += '\t\t\t\t<setFont name="Helvetica-Bold" size="13"/>\n'
tmp_data += '\t\t\t\t<drawString x="2.2cm" y="24.6cm">Relatório de Controle do Protocolo</drawString>\n'
return tmp_data
def rodape(lst_rodape):
"""Gera o codigo rml do rodape"""
tmp_data = ''
tmp_data += '\t\t\t\t<lines>2cm 3.2cm 19cm 3.2cm</lines>\n'
tmp_data += '\t\t\t\t<setFont name="Helvetica" size="8"/>\n'
tmp_data += '\t\t\t\t<drawString x="2cm" y="3.3cm">' + \
lst_rodape[2] + '</drawString>\n'
tmp_data += '\t\t\t\t<drawString x="17.9cm" y="3.3cm">Página <pageNumber/></drawString>\n'
tmp_data += '\t\t\t\t<drawCentredString x="10.5cm" y="2.7cm">' + \
lst_rodape[0] + '</drawCentredString>\n'
tmp_data += '\t\t\t\t<drawCentredString x="10.5cm" y="2.3cm">' + \
lst_rodape[1] + '</drawCentredString>\n'
return tmp_data
def paraStyle():
"""Gera o codigo rml que define o estilo dos paragrafos"""
tmp_data = ''
tmp_data += '\t<stylesheet>\n'
tmp_data += '\t\t<blockTableStyle id="Standard_Outline">\n'
tmp_data += '\t\t\t<blockAlignment value="LEFT"/>\n'
tmp_data += '\t\t\t<blockValign value="TOP"/>\n'
tmp_data += '\t\t</blockTableStyle>\n'
tmp_data += '\t\t<initialize>\n'
tmp_data += '\t\t\t<paraStyle name="all" alignment="justify"/>\n'
tmp_data += '\t\t</initialize>\n'
tmp_data += '\t\t<paraStyle name="P1" fontName="Helvetica-Bold" fontSize="10.0" leading="10" alignment="CENTER"/>\n'
tmp_data += '\t\t<paraStyle name="P2" fontName="Helvetica" fontSize="10.0" leading="13" alignment="justify"/>\n'
tmp_data += '\t</stylesheet>\n'
return tmp_data
def protocolos(lst_protocolos):
"""Gera o codigo rml do conteudo da pesquisa de protocolos"""
tmp_data = ''
# inicio do bloco que contem os flowables
tmp_data += '\t<story>\n'
for dic in lst_protocolos:
# espaco inicial
tmp_data += '\t\t<para style="P2">\n'
tmp_data += '\t\t\t<font color="white"> </font>\n'
tmp_data += '\t\t</para>\n'
tmp_data += '\t\t<para style="P2">\n'
tmp_data += '\t\t\t<font color="white"> </font>\n'
tmp_data += '\t\t</para>\n'
# condicao para a quebra de pagina
tmp_data += '\t\t<condPageBreak height="4cm"/>\n'
# protocolos
if dic['titulo'] != None:
tmp_data += '\t\t<para style="P1">Protocolo ' + \
dic['titulo'] + '</para>\n'
tmp_data += '\t\t<para style="P1">\n'
tmp_data += '\t\t\t<font color="white"> </font>\n'
tmp_data += '\t\t</para>\n'
if dic['txt_assunto'] != None:
txt_assunto = dic['txt_assunto'].replace('&', '&')
tmp_data += '\t\t<para style="P2">' + txt_assunto + '</para>\n'
if dic['txt_interessado'] != None:
tmp_data += '\t\t<para style="P2"><b>Interessado:</b> ' + \
dic['txt_interessado'] + '</para>\n'
elif dic['nom_autor'] != None:
tmp_data += '\t\t<para style="P2"><b>Autor:</b> ' + \
dic['nom_autor'] + '</para>\n'
if dic['natureza'] != None:
tmp_data += '\t\t<para style="P2"><b>Natureza Processo:</b> ' + \
dic['natureza'] + '</para>\n'
if dic['processo'] != None:
tmp_data += '\t\t<para style="P2"><b>Classificação:</b> ' + \
dic['processo'] + '</para>\n'
if dic['data'] != None:
tmp_data += '\t\t<para style="P2"><b>Data Protocolo:</b> ' + \
dic['data'] + '</para>\n'
if dic['anulado'] != "":
tmp_data += '\t\t<para style="P2"><b>** PROTOCOLO ANULADO **</b> ' '</para>\n'
tmp_data += '\t</story>\n'
return tmp_data
def principal(imagem, lst_protocolos, dic_cabecalho, lst_rodape):
"""Funcao pricipal que gera a estrutura global do arquivo rml"""
arquivoPdf = str(int(time.time() * 100)) + ".pdf"
tmp_data = ''
tmp_data += '<?xml version="1.0" encoding="utf-8" standalone="no" ?>\n'
tmp_data += '<!DOCTYPE document SYSTEM "rml_1_0.dtd">\n'
tmp_data += '<document filename="relatorio.pdf">\n'
tmp_data += '\t<template pageSize="(21cm, 29.7cm)" title="Relatório de Protocolos" author="Luciano De Fazio" allowSplitting="20">\n'
tmp_data += '\t\t<pageTemplate id="first">\n'
tmp_data += '\t\t\t<pageGraphics>\n'
tmp_data += cabecalho(dic_cabecalho, imagem)
tmp_data += rodape(lst_rodape)
tmp_data += '\t\t\t</pageGraphics>\n'
tmp_data += '\t\t\t<frame id="first" x1="2cm" y1="3cm" width="17cm" height="21cm"/>\n'
tmp_data += '\t\t</pageTemplate>\n'
tmp_data += '\t</template>\n'
tmp_data += paraStyle()
tmp_data += protocolos(lst_protocolos)
tmp_data += '</document>\n'
tmp_pdf = parseString(tmp_data)
return tmp_pdf
# if hasattr(context.temp_folder,arquivoPdf):
# context.temp_folder.manage_delObjects(ids=arquivoPdf)
# context.temp_folder.manage_addFile(arquivoPdf)
# arq=context.temp_folder[arquivoPdf]
# arq.manage_edit(title='Arquivo PDF temporário.',filedata=tmp_pdf,content_type='application/pdf')
# return "/temp_folder/"+arquivoPdf
# return
# principal(sessao,imagem,data,lst_protocolos,dic_cabecalho,lst_rodape,dic_filtro)
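# Hedged usage sketch (values are made up; the dict/list keys follow the lookups
# done in cabecalho(), rodape() and protocolos() above):
def _example_usage():
    dic_cabecalho = {'nom_casa': 'Camara Municipal'}
    lst_rodape = ['Rua Exemplo, 100', 'Tel: (11) 0000-0000', 'SAPL']
    lst_protocolos = [{'titulo': '123/2016', 'txt_assunto': 'Assunto de teste',
                       'txt_interessado': 'Interessado', 'nom_autor': None,
                       'natureza': 'Administrativo', 'processo': 'Legislativo',
                       'data': '01/01/2016', 'anulado': ''}]
    return principal('logo.png', lst_protocolos, dic_cabecalho, lst_rodape)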
| gpl-3.0 | 7,242,856,074,353,114,000 | 40.092105 | 136 | 0.576849 | false | 2.674946 | false | false | false |
yanchen036/tensorflow | tensorflow/python/kernel_tests/distributions/student_t_test.py | 3 | 20207 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Student t distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import math
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import student_t
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def try_import(name): # pylint: disable=invalid-name
module = None
try:
module = importlib.import_module(name)
except ImportError as e:
tf_logging.warning("Could not import %s: %s" % (name, str(e)))
return module
stats = try_import("scipy.stats")
@test_util.run_all_in_graph_and_eager_modes
class StudentTTest(test.TestCase):
def testStudentPDFAndLogPDF(self):
with self.test_session():
batch_size = 6
df = constant_op.constant([3.] * batch_size)
mu = constant_op.constant([7.] * batch_size)
sigma = constant_op.constant([8.] * batch_size)
df_v = 3.
mu_v = 7.
sigma_v = 8.
t = np.array([-2.5, 2.5, 8., 0., -1., 2.], dtype=np.float32)
student = student_t.StudentT(df, loc=mu, scale=-sigma)
log_pdf = student.log_prob(t)
self.assertEquals(log_pdf.get_shape(), (6,))
log_pdf_values = self.evaluate(log_pdf)
pdf = student.prob(t)
self.assertEquals(pdf.get_shape(), (6,))
pdf_values = self.evaluate(pdf)
if not stats:
return
expected_log_pdf = stats.t.logpdf(t, df_v, loc=mu_v, scale=sigma_v)
expected_pdf = stats.t.pdf(t, df_v, loc=mu_v, scale=sigma_v)
self.assertAllClose(expected_log_pdf, log_pdf_values)
self.assertAllClose(np.log(expected_pdf), log_pdf_values)
self.assertAllClose(expected_pdf, pdf_values)
self.assertAllClose(np.exp(expected_log_pdf), pdf_values)
def testStudentLogPDFMultidimensional(self):
with self.test_session():
batch_size = 6
df = constant_op.constant([[1.5, 7.2]] * batch_size)
mu = constant_op.constant([[3., -3.]] * batch_size)
sigma = constant_op.constant([[-math.sqrt(10.), math.sqrt(15.)]] *
batch_size)
df_v = np.array([1.5, 7.2])
mu_v = np.array([3., -3.])
sigma_v = np.array([np.sqrt(10.), np.sqrt(15.)])
t = np.array([[-2.5, 2.5, 4., 0., -1., 2.]], dtype=np.float32).T
student = student_t.StudentT(df, loc=mu, scale=sigma)
log_pdf = student.log_prob(t)
log_pdf_values = self.evaluate(log_pdf)
self.assertEqual(log_pdf.get_shape(), (6, 2))
pdf = student.prob(t)
pdf_values = self.evaluate(pdf)
self.assertEqual(pdf.get_shape(), (6, 2))
if not stats:
return
expected_log_pdf = stats.t.logpdf(t, df_v, loc=mu_v, scale=sigma_v)
expected_pdf = stats.t.pdf(t, df_v, loc=mu_v, scale=sigma_v)
self.assertAllClose(expected_log_pdf, log_pdf_values)
self.assertAllClose(np.log(expected_pdf), log_pdf_values)
self.assertAllClose(expected_pdf, pdf_values)
self.assertAllClose(np.exp(expected_log_pdf), pdf_values)
def testStudentCDFAndLogCDF(self):
with self.test_session():
batch_size = 6
df = constant_op.constant([3.] * batch_size)
mu = constant_op.constant([7.] * batch_size)
sigma = constant_op.constant([-8.] * batch_size)
df_v = 3.
mu_v = 7.
sigma_v = 8.
t = np.array([-2.5, 2.5, 8., 0., -1., 2.], dtype=np.float32)
student = student_t.StudentT(df, loc=mu, scale=sigma)
log_cdf = student.log_cdf(t)
self.assertEquals(log_cdf.get_shape(), (6,))
log_cdf_values = self.evaluate(log_cdf)
cdf = student.cdf(t)
self.assertEquals(cdf.get_shape(), (6,))
cdf_values = self.evaluate(cdf)
if not stats:
return
expected_log_cdf = stats.t.logcdf(t, df_v, loc=mu_v, scale=sigma_v)
expected_cdf = stats.t.cdf(t, df_v, loc=mu_v, scale=sigma_v)
self.assertAllClose(expected_log_cdf, log_cdf_values, atol=0., rtol=1e-5)
self.assertAllClose(
np.log(expected_cdf), log_cdf_values, atol=0., rtol=1e-5)
self.assertAllClose(expected_cdf, cdf_values, atol=0., rtol=1e-5)
self.assertAllClose(
np.exp(expected_log_cdf), cdf_values, atol=0., rtol=1e-5)
def testStudentEntropy(self):
df_v = np.array([[2., 3., 7.]]) # 1x3
mu_v = np.array([[1., -1, 0]]) # 1x3
sigma_v = np.array([[1., -2., 3.]]).T # transposed => 3x1
with self.test_session():
student = student_t.StudentT(df=df_v, loc=mu_v, scale=sigma_v)
ent = student.entropy()
ent_values = self.evaluate(ent)
# Help scipy broadcast to 3x3
ones = np.array([[1, 1, 1]])
sigma_bc = np.abs(sigma_v) * ones
mu_bc = ones.T * mu_v
df_bc = ones.T * df_v
if not stats:
return
expected_entropy = stats.t.entropy(
np.reshape(df_bc, [-1]),
loc=np.reshape(mu_bc, [-1]),
scale=np.reshape(sigma_bc, [-1]))
expected_entropy = np.reshape(expected_entropy, df_bc.shape)
self.assertAllClose(expected_entropy, ent_values)
def testStudentSample(self):
with self.test_session():
df = constant_op.constant(4.)
mu = constant_op.constant(3.)
sigma = constant_op.constant(-math.sqrt(10.))
df_v = 4.
mu_v = 3.
sigma_v = np.sqrt(10.)
n = constant_op.constant(200000)
student = student_t.StudentT(df=df, loc=mu, scale=sigma)
samples = student.sample(n, seed=123456)
sample_values = self.evaluate(samples)
n_val = 200000
self.assertEqual(sample_values.shape, (n_val,))
self.assertAllClose(sample_values.mean(), mu_v, rtol=1e-2, atol=0)
self.assertAllClose(
sample_values.var(),
sigma_v**2 * df_v / (df_v - 2),
rtol=1e-2,
atol=0)
self._checkKLApprox(df_v, mu_v, sigma_v, sample_values)
# Test that sampling with the same seed twice gives the same results.
def testStudentSampleMultipleTimes(self):
with self.test_session():
df = constant_op.constant(4.)
mu = constant_op.constant(3.)
sigma = constant_op.constant(math.sqrt(10.))
n = constant_op.constant(100)
random_seed.set_random_seed(654321)
student = student_t.StudentT(
df=df, loc=mu, scale=sigma, name="student_t1")
samples1 = self.evaluate(student.sample(n, seed=123456))
random_seed.set_random_seed(654321)
student2 = student_t.StudentT(
df=df, loc=mu, scale=sigma, name="student_t2")
samples2 = self.evaluate(student2.sample(n, seed=123456))
self.assertAllClose(samples1, samples2)
def testStudentSampleSmallDfNoNan(self):
with self.test_session():
df_v = [1e-1, 1e-5, 1e-10, 1e-20]
df = constant_op.constant(df_v)
n = constant_op.constant(200000)
student = student_t.StudentT(df=df, loc=1., scale=1.)
samples = student.sample(n, seed=123456)
sample_values = self.evaluate(samples)
n_val = 200000
self.assertEqual(sample_values.shape, (n_val, 4))
self.assertTrue(np.all(np.logical_not(np.isnan(sample_values))))
def testStudentSampleMultiDimensional(self):
with self.test_session():
batch_size = 7
df = constant_op.constant([[3., 7.]] * batch_size)
mu = constant_op.constant([[3., -3.]] * batch_size)
sigma = constant_op.constant([[math.sqrt(10.), math.sqrt(15.)]] *
batch_size)
df_v = [3., 7.]
mu_v = [3., -3.]
sigma_v = [np.sqrt(10.), np.sqrt(15.)]
n = constant_op.constant(200000)
student = student_t.StudentT(df=df, loc=mu, scale=sigma)
samples = student.sample(n, seed=123456)
sample_values = self.evaluate(samples)
self.assertEqual(samples.get_shape(), (200000, batch_size, 2))
self.assertAllClose(
sample_values[:, 0, 0].mean(), mu_v[0], rtol=1e-2, atol=0)
self.assertAllClose(
sample_values[:, 0, 0].var(),
sigma_v[0]**2 * df_v[0] / (df_v[0] - 2),
rtol=1e-1,
atol=0)
self._checkKLApprox(df_v[0], mu_v[0], sigma_v[0], sample_values[:, 0, 0])
self.assertAllClose(
sample_values[:, 0, 1].mean(), mu_v[1], rtol=1e-2, atol=0)
self.assertAllClose(
sample_values[:, 0, 1].var(),
sigma_v[1]**2 * df_v[1] / (df_v[1] - 2),
rtol=1e-1,
atol=0)
self._checkKLApprox(df_v[0], mu_v[0], sigma_v[0], sample_values[:, 0, 1])
def _checkKLApprox(self, df, mu, sigma, samples):
n = samples.size
np.random.seed(137)
if not stats:
return
sample_scipy = stats.t.rvs(df, loc=mu, scale=sigma, size=n)
covg = 0.99
r = stats.t.interval(covg, df, loc=mu, scale=sigma)
bins = 100
hist, _ = np.histogram(samples, bins=bins, range=r)
hist_scipy, _ = np.histogram(sample_scipy, bins=bins, range=r)
self.assertGreater(hist.sum(), n * (covg - .01))
self.assertGreater(hist_scipy.sum(), n * (covg - .01))
hist_min1 = hist + 1. # put at least one item in each bucket
hist_norm = hist_min1 / hist_min1.sum()
hist_scipy_min1 = hist_scipy + 1. # put at least one item in each bucket
hist_scipy_norm = hist_scipy_min1 / hist_scipy_min1.sum()
kl_appx = np.sum(np.log(hist_scipy_norm / hist_norm) * hist_scipy_norm)
self.assertLess(kl_appx, 1)
def testBroadcastingParams(self):
def _check(student):
self.assertEqual(student.mean().get_shape(), (3,))
self.assertEqual(student.variance().get_shape(), (3,))
self.assertEqual(student.entropy().get_shape(), (3,))
self.assertEqual(student.log_prob(2.).get_shape(), (3,))
self.assertEqual(student.prob(2.).get_shape(), (3,))
self.assertEqual(student.sample(37, seed=123456).get_shape(), (37, 3,))
_check(student_t.StudentT(df=[2., 3., 4.,], loc=2., scale=1.))
_check(student_t.StudentT(df=7., loc=[2., 3., 4.,], scale=1.))
_check(student_t.StudentT(df=7., loc=3., scale=[2., 3., 4.,]))
def testBroadcastingPdfArgs(self):
def _assert_shape(student, arg, shape):
self.assertEqual(student.log_prob(arg).get_shape(), shape)
self.assertEqual(student.prob(arg).get_shape(), shape)
def _check(student):
_assert_shape(student, 2., (3,))
xs = np.array([2., 3., 4.], dtype=np.float32)
_assert_shape(student, xs, (3,))
xs = np.array([xs])
_assert_shape(student, xs, (1, 3))
xs = xs.T
_assert_shape(student, xs, (3, 3))
_check(student_t.StudentT(df=[2., 3., 4.,], loc=2., scale=1.))
_check(student_t.StudentT(df=7., loc=[2., 3., 4.,], scale=1.))
_check(student_t.StudentT(df=7., loc=3., scale=[2., 3., 4.,]))
def _check2d(student):
_assert_shape(student, 2., (1, 3))
xs = np.array([2., 3., 4.], dtype=np.float32)
_assert_shape(student, xs, (1, 3))
xs = np.array([xs])
_assert_shape(student, xs, (1, 3))
xs = xs.T
_assert_shape(student, xs, (3, 3))
_check2d(student_t.StudentT(df=[[2., 3., 4.,]], loc=2., scale=1.))
_check2d(student_t.StudentT(df=7., loc=[[2., 3., 4.,]], scale=1.))
_check2d(student_t.StudentT(df=7., loc=3., scale=[[2., 3., 4.,]]))
def _check2d_rows(student):
_assert_shape(student, 2., (3, 1))
xs = np.array([2., 3., 4.], dtype=np.float32) # (3,)
_assert_shape(student, xs, (3, 3))
xs = np.array([xs]) # (1,3)
_assert_shape(student, xs, (3, 3))
xs = xs.T # (3,1)
_assert_shape(student, xs, (3, 1))
_check2d_rows(student_t.StudentT(df=[[2.], [3.], [4.]], loc=2., scale=1.))
_check2d_rows(student_t.StudentT(df=7., loc=[[2.], [3.], [4.]], scale=1.))
_check2d_rows(student_t.StudentT(df=7., loc=3., scale=[[2.], [3.], [4.]]))
def testMeanAllowNanStatsIsFalseWorksWhenAllBatchMembersAreDefined(self):
with self.test_session():
mu = [1., 3.3, 4.4]
student = student_t.StudentT(df=[3., 5., 7.], loc=mu, scale=[3., 2., 1.])
mean = self.evaluate(student.mean())
self.assertAllClose([1., 3.3, 4.4], mean)
def testMeanAllowNanStatsIsFalseRaisesWhenBatchMemberIsUndefined(self):
with self.test_session():
mu = [1., 3.3, 4.4]
student = student_t.StudentT(
df=[0.5, 5., 7.], loc=mu, scale=[3., 2., 1.],
allow_nan_stats=False)
with self.assertRaisesOpError("x < y"):
self.evaluate(student.mean())
def testMeanAllowNanStatsIsTrueReturnsNaNForUndefinedBatchMembers(self):
with self.test_session():
mu = [-2, 0., 1., 3.3, 4.4]
sigma = [5., 4., 3., 2., 1.]
student = student_t.StudentT(
df=[0.5, 1., 3., 5., 7.], loc=mu, scale=sigma,
allow_nan_stats=True)
mean = self.evaluate(student.mean())
self.assertAllClose([np.nan, np.nan, 1., 3.3, 4.4], mean)
def testVarianceAllowNanStatsTrueReturnsNaNforUndefinedBatchMembers(self):
with self.test_session():
# df = 0.5 ==> undefined mean ==> undefined variance.
# df = 1.5 ==> infinite variance.
df = [0.5, 1.5, 3., 5., 7.]
mu = [-2, 0., 1., 3.3, 4.4]
sigma = [5., 4., 3., 2., 1.]
student = student_t.StudentT(
df=df, loc=mu, scale=sigma, allow_nan_stats=True)
var = self.evaluate(student.variance())
## scipy uses inf for variance when the mean is undefined. When mean is
# undefined we say variance is undefined as well. So test the first
# member of var, making sure it is NaN, then replace with inf and compare
# to scipy.
self.assertTrue(np.isnan(var[0]))
var[0] = np.inf
if not stats:
return
expected_var = [
stats.t.var(d, loc=m, scale=s) for (d, m, s) in zip(df, mu, sigma)
]
self.assertAllClose(expected_var, var)
def testVarianceAllowNanStatsFalseGivesCorrectValueForDefinedBatchMembers(
self):
with self.test_session():
# df = 1.5 ==> infinite variance.
df = [1.5, 3., 5., 7.]
mu = [0., 1., 3.3, 4.4]
sigma = [4., 3., 2., 1.]
student = student_t.StudentT(df=df, loc=mu, scale=sigma)
var = self.evaluate(student.variance())
if not stats:
return
expected_var = [
stats.t.var(d, loc=m, scale=s) for (d, m, s) in zip(df, mu, sigma)
]
self.assertAllClose(expected_var, var)
def testVarianceAllowNanStatsFalseRaisesForUndefinedBatchMembers(self):
with self.test_session():
# df <= 1 ==> variance not defined
student = student_t.StudentT(
df=1., loc=0., scale=1., allow_nan_stats=False)
with self.assertRaisesOpError("x < y"):
self.evaluate(student.variance())
with self.test_session():
# df <= 1 ==> variance not defined
student = student_t.StudentT(
df=0.5, loc=0., scale=1., allow_nan_stats=False)
with self.assertRaisesOpError("x < y"):
self.evaluate(student.variance())
def testStd(self):
with self.test_session():
# Defined for all batch members.
df = [3.5, 5., 3., 5., 7.]
mu = [-2.2]
sigma = [5., 4., 3., 2., 1.]
student = student_t.StudentT(df=df, loc=mu, scale=sigma)
# Test broadcast of mu across shape of df/sigma
stddev = self.evaluate(student.stddev())
mu *= len(df)
if not stats:
return
expected_stddev = [
stats.t.std(d, loc=m, scale=s) for (d, m, s) in zip(df, mu, sigma)
]
self.assertAllClose(expected_stddev, stddev)
def testMode(self):
with self.test_session():
df = [0.5, 1., 3]
mu = [-1, 0., 1]
sigma = [5., 4., 3.]
student = student_t.StudentT(df=df, loc=mu, scale=sigma)
# Test broadcast of mu across shape of df/sigma
mode = self.evaluate(student.mode())
self.assertAllClose([-1., 0, 1], mode)
def testPdfOfSample(self):
student = student_t.StudentT(df=3., loc=np.pi, scale=1.)
num = 20000
samples = student.sample(num, seed=123456)
pdfs = student.prob(samples)
mean = student.mean()
mean_pdf = student.prob(student.mean())
sample_vals, pdf_vals, mean_val, mean_pdf_val = self.evaluate(
[samples, pdfs, student.mean(), mean_pdf])
self.assertEqual(samples.get_shape(), (num,))
self.assertEqual(pdfs.get_shape(), (num,))
self.assertEqual(mean.get_shape(), ())
self.assertNear(np.pi, np.mean(sample_vals), err=0.02)
self.assertNear(np.pi, mean_val, err=1e-6)
# Verify integral over sample*pdf ~= 1.
# Tolerance increased since eager was getting a value of 1.002041.
self._assertIntegral(sample_vals, pdf_vals, err=3e-3)
if not stats:
return
self.assertNear(stats.t.pdf(np.pi, 3., loc=np.pi), mean_pdf_val, err=1e-6)
def testPdfOfSampleMultiDims(self):
student = student_t.StudentT(df=[7., 11.], loc=[[5.], [6.]], scale=3.)
self.assertAllEqual([], student.event_shape)
self.assertAllEqual([], self.evaluate(student.event_shape_tensor()))
self.assertAllEqual([2, 2], student.batch_shape)
self.assertAllEqual([2, 2], self.evaluate(student.batch_shape_tensor()))
num = 50000
samples = student.sample(num, seed=123456)
pdfs = student.prob(samples)
sample_vals, pdf_vals = self.evaluate([samples, pdfs])
self.assertEqual(samples.get_shape(), (num, 2, 2))
self.assertEqual(pdfs.get_shape(), (num, 2, 2))
self.assertNear(5., np.mean(sample_vals[:, 0, :]), err=.03)
self.assertNear(6., np.mean(sample_vals[:, 1, :]), err=.03)
self._assertIntegral(sample_vals[:, 0, 0], pdf_vals[:, 0, 0], err=0.02)
self._assertIntegral(sample_vals[:, 0, 1], pdf_vals[:, 0, 1], err=0.02)
self._assertIntegral(sample_vals[:, 1, 0], pdf_vals[:, 1, 0], err=0.02)
self._assertIntegral(sample_vals[:, 1, 1], pdf_vals[:, 1, 1], err=0.02)
if not stats:
return
self.assertNear(
stats.t.var(7., loc=0., scale=3.), # loc d.n. effect var
np.var(sample_vals[:, :, 0]),
err=.4)
self.assertNear(
stats.t.var(11., loc=0., scale=3.), # loc d.n. effect var
np.var(sample_vals[:, :, 1]),
err=.4)
def _assertIntegral(self, sample_vals, pdf_vals, err=1.5e-3):
s_p = zip(sample_vals, pdf_vals)
prev = (sample_vals.min() - 1000, 0)
total = 0
for k in sorted(s_p, key=lambda x: x[0]):
pair_pdf = (k[1] + prev[1]) / 2
total += (k[0] - prev[0]) * pair_pdf
prev = k
self.assertNear(1., total, err=err)
def testNegativeDofFails(self):
with self.test_session():
with self.assertRaisesOpError(r"Condition x > 0 did not hold"):
student = student_t.StudentT(
df=[2, -5.], loc=0., scale=1., validate_args=True, name="S")
self.evaluate(student.mean())
def testStudentTWithAbsDfSoftplusScale(self):
with self.test_session():
df = constant_op.constant([-3.2, -4.6])
mu = constant_op.constant([-4.2, 3.4])
sigma = constant_op.constant([-6.4, -8.8])
student = student_t.StudentTWithAbsDfSoftplusScale(
df=df, loc=mu, scale=sigma)
self.assertAllClose(
math_ops.floor(self.evaluate(math_ops.abs(df))),
self.evaluate(student.df))
self.assertAllClose(self.evaluate(mu), self.evaluate(student.loc))
self.assertAllClose(
self.evaluate(nn_ops.softplus(sigma)), self.evaluate(student.scale))
if __name__ == "__main__":
test.main()
| apache-2.0 | -7,335,599,630,733,818,000 | 37.934489 | 80 | 0.603801 | false | 3.094013 | true | false | false |
AnhellO/DAS_Sistemas | Ago-Dic-2019/Luis Llanes/segundo_parcial/DataBase.py | 1 | 5673 | import sqlite3
from artista import artist
from disquillos import album
def Crear_Tabla_Artistas():
try:
conexion = sqlite3.connect('musicBrainz.db')
cursor = conexion.cursor()
print('Conectado')
query = """CREATE TABLE IF NOT EXISTS artistas(
id TEXT,
nombre TEXT ,
tags TEXT,
area TEXT,
ExtScore TEXT,
tipo TEXT
);"""
cursor.execute(query)
print('Tabla creada con exito')
cursor.close()
except sqlite3.Error as error:
print('Error con la conexion',error)
finally:
if(conexion):
conexion.close()
def Agregar_Elemento_Artista(artist):
try:
conexion = sqlite3.connect('musicBrainz.db')
cursor = conexion.cursor()
print('Conectado')
query = """INSERT INTO artistas VALUES ('{}', '{}', '{}', '{}', '{}', '{}')""".format(artist._id, artist._name, artist._tags, artist._area, artist._extScore, artist._type)
resultado = cursor.execute(query)
conexion.commit()
print('Valor Insertado Correctamente', resultado)
cursor.close()
except sqlite3.Error as error:
print('Error con la conexion',error)
finally:
if(conexion):
conexion.close()
def Ver_Todo_Artistas():
try:
conexion = sqlite3.connect('musicBrainz.db')
cursor = conexion.cursor()
print('Conectado')
query = 'SELECT * FROM artistas;'
cursor.execute(query)
rows = cursor.fetchall()
print('Total de registros: ', len(rows))
print('------------Registros-------------')
for row in rows:
print('Id: {}\nNombre: {}\nTags: {}\nArea: {}\nExtScore: {}\nTipo: {}'.format(*row))
print('-------------------------------')
print('Total de registros: ', len(rows))
cursor.close()
except sqlite3.Error as error:
print('Error con la conexion',error)
finally:
if(conexion):
conexion.close()
def Ver_Nombres():
try:
conexion = sqlite3.connect('musicBrainz.db')
cursor = conexion.cursor()
print('Conectado')
query = 'SELECT nombre FROM artistas;'
cursor.execute(query)
rows = cursor.fetchall()
print('Total de registros: ', len(rows))
print('------------Registros-------------')
lista = []
for row in rows:
lista.append(row[0])
print('Total de registros: ', len(rows))
cursor.close()
except sqlite3.Error as error:
print('Error con la conexion',error)
finally:
if(conexion):
conexion.close()
return lista
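# Hedged usage sketch (not called here): the artist objects passed to
# Agregar_Elemento_Artista are expected to expose _id, _name, _tags, _area,
# _extScore and _type, matching the INSERT above; constructing `artist`
# instances depends on the artista module and is not shown.
def _example_artist_workflow():
    Crear_Tabla_Artistas()
    Ver_Todo_Artistas()
    return Ver_Nombres()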
def Crear_Tabla_Albums():
try:
conexion = sqlite3.connect('musicBrainz.db')
cursor = conexion.cursor()
print('Conectado')
query = """CREATE TABLE IF NOT EXISTS albums(
id TEXT,
artista TEXT ,
titulo TEXT,
status TEXT,
type TEXT
);"""
cursor.execute(query)
print('Tabla creada con exito')
cursor.close()
except sqlite3.Error as error:
print('Error con la conexion',error)
finally:
if(conexion):
conexion.close()
def Agregar_Elemento_Album(album):
try:
conexion = sqlite3.connect('musicBrainz.db')
cursor = conexion.cursor()
print('Conectado')
query = """INSERT INTO albums VALUES ('{}', '{}', '{}', '{}', '{}')""".format(album._id, album._artista, album._titulo, album._status, album._type)
resultado = cursor.execute(query)
conexion.commit()
print('Valor Insertado Correctamente', resultado)
cursor.close()
except sqlite3.Error as error:
print('Error con la conexion',error)
finally:
if(conexion):
conexion.close()
def Ver_Todo_Albums():
try:
conexion = sqlite3.connect('musicBrainz.db')
cursor = conexion.cursor()
print('Conectado')
query = 'SELECT * FROM albums;'
cursor.execute(query)
rows = cursor.fetchall()
print('Total de registros: ', len(rows))
print('------------Registros-------------')
for row in rows:
print('Id: {}\nArtista: {}\nTitulo: {}\nStatus: {}\nType: {}'.format(*row))
print('-------------------------------')
print('Total de registros: ', len(rows))
cursor.close()
except sqlite3.Error as error:
print('Error con la conexion',error)
finally:
if(conexion):
conexion.close()
def borrar_tabla_albums():
try:
conexion = sqlite3.connect('musicBrainz.db')
cursor = conexion.cursor()
print('Conectado')
query = 'DROP TABLE albums;'
cursor.execute(query)
print('registros eliminados')
cursor.close()
except sqlite3.Error as error:
print('Error con la conexion',error)
finally:
if(conexion):
conexion.close()
def borrar_tabla_Artistas():
try:
conexion = sqlite3.connect('musicBrainz.db')
cursor = conexion.cursor()
print('Conectado')
query = 'DROP TABLE artistas;'
cursor.execute(query)
print('registros eliminados')
cursor.close()
except sqlite3.Error as error:
print('Error con la conexion',error)
finally:
if(conexion):
conexion.close() | mit | -2,657,901,512,989,142,500 | 25.027523 | 179 | 0.532346 | false | 3.749504 | false | false | false |
arizvisa/syringe | template/video/h264.py | 1 | 2279 | from ptypes import *
v = 0 # FIXME: this file format is busted
class seq_parameter_set_rbsp(pbinary.struct):
class __pic_order_type_1(pbinary.struct):
_fields_ = [
(1, 'delta_pic_order_always_zero_flag'),
(v, 'offset_for_non_ref_pic'),
(v, 'offset_for_top_to_bottom_field'),
(v, 'num_ref_frames_in_pic_order_cnt_cycle'),
(lambda s: dyn.array( dyn.clone(pbinary.struct,_fields_=[(v,'offset_for_ref_frame')]), s['num_ref_frames_in_pic_order_cnt_cycle']), 'ref_frames')
]
def __pic_order(self):
type = self['pic_order_cnt_type']
if type == 0:
return dyn.clone(pbinary.struct, _fields_=[(v, 'log2_max_pic_order_cnt_lsb')])
elif type == 1:
            return self.__pic_order_type_1    # access via self so the name-mangled nested class resolves
raise NotImplementedError(type)
class __frame_crop_offset(pbinary.struct):
_fields_ = [
(v, 'frame_crop_left_offset'),
(v, 'frame_crop_right_offset'),
(v, 'frame_crop_top_offset'),
(v, 'frame_crop_bottom_offset'),
]
def __frame_crop(self):
if self['frame_cropping_flag']:
            return self.__frame_crop_offset    # access via self so the name-mangled nested class resolves
return dyn.clone(pbinary.struct,_fields_=[])
def __rbsp_trailing_bits(self):
return 0
_fields_ = [
(8, 'profile_idc'),
(1, 'constraint_set0_flag'),
(1, 'constraint_set1_flag'),
(1, 'constraint_set2_flag'),
(5, 'reserved_zero_5bits'),
(8, 'level_idc'),
(v, 'seq_parameter_set_id'),
(v, 'pic_order_cnt_type'),
(__pic_order, 'pic_order'),
(v, 'num_ref_frames'),
(1, 'gaps_in_frame_num_value_allowed_flag'),
(v, 'pic_width_in_mbs_minus1'),
(v, 'pic_height_in_map_units_minus1'),
(1, 'frame_mbs_only_flag'),
(lambda s: [0,1][s['frame_mbs_only_flag']], 'mb_adaptive_frame_field_flag'),
(1, 'direct_8x8_inference_flag'),
(1, 'frame_cropping_flag'),
(__frame_crop, 'frame_crop'),
(1, 'vul_parameters_present_flag'),
(lambda s: [dyn.clone(pbinary.struct,_fields_=[]),__vul_parameters][s['vul_parameters_present_flag']], 'vul_parameters'),
(__rbsp_trailing_bits, 'rbsp_trailing_bits'),
]
| bsd-2-clause | -6,038,716,225,828,748,000 | 36.360656 | 157 | 0.540588 | false | 3.10068 | false | false | false |
EmokitAlife/EmokitVisualizer | sample/tests.py | 1 | 2520 | """ ps_QPainter_drawRect101.py
explore the PySide GUI toolkit to draw rectangles in different colors
there are a number of ways colors can be specified
fill colors are set with the brush
perimeter colors are set with the pen
QColor can be given a transparency value
(PySide is the official LGPL-licensed version of PyQT)
for Python33 you can use the Windows self-extracting installer
PySide-1.1.2.win32-py3.3.exe
(PyQT483 equivalent) from:
http://qt-project.org/wiki/PySide
or:
http://www.lfd.uci.edu/~gohlke/pythonlibs/
for Qpainter methods see:
http://srinikom.github.com/pyside-docs/PySide/QtGui/
QPainter.html?highlight=qpainter#PySide.QtGui.PySide.QtGui.QPainter
tested with Python27 and Python33 by vegaseat 14jan2013
"""
from PySide.QtCore import *
from PySide.QtGui import *
class MyWindow(QWidget):
def __init__(self):
QWidget.__init__(self)
# setGeometry(x_pos, y_pos, width, height)
# upper left corner coordinates (x_pos, y_pos)
self.setGeometry(300, 300, 370, 100)
self.setWindowTitle('Colors set with brush and pen')
def paintEvent(self, e):
'''
the method paintEvent() is called automatically
the QPainter class does all the low-level drawing
coded between its methods begin() and end()
'''
qp = QPainter()
qp.begin(self)
self.drawRectangles(qp)
qp.end()
def drawRectangles(self, qp):
'''use QPainter (instance qp) methods to do drawings'''
# there are several different ways to reference colors
# use HTML style color string #RRGGBB with values 00 to FF
black = "#000000"
# QPen(color, width, style)
qp.setPen(black)
# use QColor(r, g, b) with values 0 to 255
qp.setBrush(QColor(255, 0, 0))
# drawRect(int x, int y, int width, int height)
# upper left corner coordinates (x, y)
qp.drawRect(10, 15, 90, 60)
# there are some preset named colors
qp.setBrush(QColor(Qt.green))
qp.drawRect(160, 25, 90, 60)
# this rectangle will overlap the previous one
# you can give it some transparency alpha 0 to 255
# QColor(int r, int g, int b, int alpha=255)
qp.setBrush(QColor(0, 0, 255, 100))
qp.drawRect(130, 15, 90, 60)
# some colors can be given as strings
qp.setBrush(QColor('yellow'))
qp.drawRect(265, 25, 90, 60)
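        # Added note (sketch only): a pen can also carry a width and line style,
        # e.g. qp.setPen(QPen(QColor('black'), 3, Qt.DashLine)) would draw the
        # next rectangle's outline as a thick dashed black line.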
app = QApplication([])
win = MyWindow()
win.show()
# run the application event loop
app.exec_() | mit | -5,538,054,166,984,605,000 | 37.784615 | 69 | 0.660714 | false | 3.461538 | false | false | false |
nathanielvarona/airflow | airflow/providers/jenkins/operators/jenkins_job_trigger.py | 1 | 11064 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import ast
import json
import socket
import time
from typing import Any, Dict, Iterable, List, Mapping, Optional, Union
from urllib.error import HTTPError, URLError
import jenkins
from jenkins import Jenkins, JenkinsException
from requests import Request
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.jenkins.hooks.jenkins import JenkinsHook
JenkinsRequest = Mapping[str, Any]
ParamType = Optional[Union[str, Dict, List]]
def jenkins_request_with_headers(jenkins_server: Jenkins, req: Request) -> Optional[JenkinsRequest]:
"""
    We need the headers in addition to the response body
    in order to read the queue item location from them.
    This function uses the jenkins_request method from the python-jenkins library
    with just the return call changed.
:param jenkins_server: The server to query
:param req: The request to execute
:return: Dict containing the response body (key body)
and the headers coming along (headers)
"""
try:
response = jenkins_server.jenkins_request(req)
response_body = response.content
response_headers = response.headers
if response_body is None:
raise jenkins.EmptyResponseException(
f"Error communicating with server[{jenkins_server.server}]: empty response"
)
return {'body': response_body.decode('utf-8'), 'headers': response_headers}
except HTTPError as e:
# Jenkins's funky authentication means its nigh impossible to distinguish errors.
if e.code in [401, 403, 500]:
raise JenkinsException(f'Error in request. Possibly authentication failed [{e.code}]: {e.reason}')
elif e.code == 404:
raise jenkins.NotFoundException('Requested item could not be found')
else:
raise
except socket.timeout as e:
raise jenkins.TimeoutException(f'Error in request: {e}')
except URLError as e:
raise JenkinsException(f'Error in request: {e.reason}')
return None
class JenkinsJobTriggerOperator(BaseOperator):
"""
    Trigger a Jenkins Job and monitor its execution.
    This operator depends on the python-jenkins library,
    version >= 0.4.15, to communicate with the jenkins server.
You'll also need to configure a Jenkins connection in the connections screen.
:param jenkins_connection_id: The jenkins connection to use for this job
:type jenkins_connection_id: str
:param job_name: The name of the job to trigger
:type job_name: str
:param parameters: The parameters block provided to jenkins for use in
the API call when triggering a build. (templated)
:type parameters: str, Dict, or List
:param sleep_time: How long will the operator sleep between each status
request for the job (min 1, default 10)
:type sleep_time: int
:param max_try_before_job_appears: The maximum number of requests to make
        while waiting for the job to appear on the jenkins server (default 10)
:type max_try_before_job_appears: int
:param allowed_jenkins_states: Iterable of allowed result jenkins states, default is ``['SUCCESS']``
:type allowed_jenkins_states: Optional[Iterable[str]]
"""
template_fields = ('parameters',)
template_ext = ('.json',)
ui_color = '#f9ec86'
def __init__(
self,
*,
jenkins_connection_id: str,
job_name: str,
parameters: ParamType = "",
sleep_time: int = 10,
max_try_before_job_appears: int = 10,
allowed_jenkins_states: Optional[Iterable[str]] = None,
**kwargs,
):
super().__init__(**kwargs)
self.job_name = job_name
self.parameters = parameters
self.sleep_time = max(sleep_time, 1)
self.jenkins_connection_id = jenkins_connection_id
self.max_try_before_job_appears = max_try_before_job_appears
self.allowed_jenkins_states = list(allowed_jenkins_states) if allowed_jenkins_states else ['SUCCESS']
def build_job(self, jenkins_server: Jenkins, params: ParamType = "") -> Optional[JenkinsRequest]:
"""
This function makes an API call to Jenkins to trigger a build for 'job_name'
        It returns a dict with 2 keys: body and headers.
headers contains also a dict-like object which can be queried to get
the location to poll in the queue.
:param jenkins_server: The jenkins server where the job should be triggered
:param params: The parameters block to provide to jenkins API call.
:return: Dict containing the response body (key body)
and the headers coming along (headers)
"""
# Since params can be either JSON string, dictionary, or list,
# check type and pass to build_job_url
if params and isinstance(params, str):
params = ast.literal_eval(params)
# We need a None to call the non-parametrized jenkins api end point
if not params:
params = None
request = Request(method='POST', url=jenkins_server.build_job_url(self.job_name, params, None))
return jenkins_request_with_headers(jenkins_server, request)
def poll_job_in_queue(self, location: str, jenkins_server: Jenkins) -> int:
"""
        This method polls the jenkins queue until the job is executed.
        When we trigger a job through an API call,
        the job is first put in the queue without having a build number assigned.
        Thus we have to wait for the job to exit the queue to know its build number.
        To do so, we have to add /api/json (or /api/xml) to the location
        returned by the build_job call and poll this file.
        When an 'executable' block appears in the json, it means the job execution started
        and the field 'number' then contains the build number.
:param location: Location to poll, returned in the header of the build_job call
:param jenkins_server: The jenkins server to poll
:return: The build_number corresponding to the triggered job
"""
try_count = 0
location += '/api/json'
# TODO Use get_queue_info instead
# once it will be available in python-jenkins (v > 0.4.15)
self.log.info('Polling jenkins queue at the url %s', location)
while try_count < self.max_try_before_job_appears:
location_answer = jenkins_request_with_headers(
jenkins_server, Request(method='POST', url=location)
)
if location_answer is not None:
json_response = json.loads(location_answer['body'])
if 'executable' in json_response:
build_number = json_response['executable']['number']
self.log.info('Job executed on Jenkins side with the build number %s', build_number)
return build_number
try_count += 1
time.sleep(self.sleep_time)
raise AirflowException(
"The job hasn't been executed after polling " f"the queue {self.max_try_before_job_appears} times"
)
def get_hook(self) -> JenkinsHook:
"""Instantiate jenkins hook"""
return JenkinsHook(self.jenkins_connection_id)
def execute(self, context: Mapping[Any, Any]) -> Optional[str]:
if not self.jenkins_connection_id:
self.log.error(
'Please specify the jenkins connection id to use.'
'You must create a Jenkins connection before'
' being able to use this operator'
)
raise AirflowException(
'The jenkins_connection_id parameter is missing, impossible to trigger the job'
)
if not self.job_name:
self.log.error("Please specify the job name to use in the job_name parameter")
raise AirflowException('The job_name parameter is missing,impossible to trigger the job')
self.log.info(
'Triggering the job %s on the jenkins : %s with the parameters : %s',
self.job_name,
self.jenkins_connection_id,
self.parameters,
)
jenkins_server = self.get_hook().get_jenkins_server()
jenkins_response = self.build_job(jenkins_server, self.parameters)
if jenkins_response:
build_number = self.poll_job_in_queue(jenkins_response['headers']['Location'], jenkins_server)
time.sleep(self.sleep_time)
keep_polling_job = True
build_info = None
# pylint: disable=too-many-nested-blocks
while keep_polling_job:
try:
build_info = jenkins_server.get_build_info(name=self.job_name, number=build_number)
if build_info['result'] is not None:
keep_polling_job = False
# Check if job ended with not allowed state.
if build_info['result'] not in self.allowed_jenkins_states:
raise AirflowException(
'Jenkins job failed, final state : %s.'
'Find more information on job url : %s'
% (build_info['result'], build_info['url'])
)
else:
self.log.info('Waiting for job to complete : %s , build %s', self.job_name, build_number)
time.sleep(self.sleep_time)
except jenkins.NotFoundException as err:
# pylint: disable=no-member
raise AirflowException(f'Jenkins job status check failed. Final error was: {err.resp.status}')
except jenkins.JenkinsException as err:
raise AirflowException(
f'Jenkins call failed with error : {err}, if you have parameters '
'double check them, jenkins sends back '
'this exception for unknown parameters'
'You can also check logs for more details on this exception '
'(jenkins_url/log/rss)'
)
if build_info:
# If we can we return the url of the job
# for later use (like retrieving an artifact)
return build_info['url']
return None
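# Hedged usage sketch (commented out; the connection id, job name and parameters
# are placeholders -- the operator would normally be instantiated inside a DAG):
#
#   trigger = JenkinsJobTriggerOperator(
#       task_id="trigger_jenkins_build",
#       jenkins_connection_id="jenkins_default",
#       job_name="my-jenkins-job",
#       parameters={"BRANCH": "main"},
#       sleep_time=30,
#       dag=dag,
#   )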
| apache-2.0 | 1,856,690,641,726,038,500 | 44.159184 | 110 | 0.639642 | false | 4.427371 | false | false | false |
smilusingjavascript/blink | Tools/GardeningServer/alerts.py | 2 | 5805 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import calendar
import datetime
import json
import logging
import webapp2
import zlib
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.datastore import datastore_query
from google.appengine.ext import ndb
LOGGER = logging.getLogger(__name__)
class DateTimeEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime.datetime):
return calendar.timegm(obj.timetuple())
# Let the base class default method raise the TypeError.
return json.JSONEncoder.default(self, obj)
class AlertsJSON(ndb.Model):
type = ndb.StringProperty()
json = ndb.BlobProperty(compressed=True)
date = ndb.DateTimeProperty(auto_now_add=True)
class AlertsHandler(webapp2.RequestHandler):
ALERTS_TYPE = 'alerts'
# Has no 'response' member.
# pylint: disable=E1101
def send_json_headers(self):
self.response.headers.add_header('Access-Control-Allow-Origin', '*')
self.response.headers['Content-Type'] = 'application/json'
# Has no 'response' member.
# pylint: disable=E1101
def send_json_data(self, data):
self.send_json_headers()
self.response.write(data)
def generate_json_dump(self, alerts):
return json.dumps(alerts, cls=DateTimeEncoder, indent=1)
def get_from_memcache(self, memcache_key):
compressed = memcache.get(memcache_key)
if not compressed:
self.send_json_headers()
return
uncompressed = zlib.decompress(compressed)
self.send_json_data(uncompressed)
def get(self):
self.get_from_memcache(AlertsHandler.ALERTS_TYPE)
def post_to_history(self, alerts_type, alerts):
last_query = AlertsJSON.query().filter(AlertsJSON.type == alerts_type)
last_entry = last_query.order(-AlertsJSON.date).get()
last_alerts = json.loads(last_entry.json) if last_entry else {}
# Only changes to the fields with 'alerts' in the name should cause a
# new history entry to be saved.
def alert_fields(alerts_json):
filtered_json = {}
for key, value in alerts_json.iteritems():
if 'alerts' in key:
filtered_json[key] = value
return filtered_json
if alert_fields(last_alerts) != alert_fields(alerts):
new_entry = AlertsJSON(
json=self.generate_json_dump(alerts),
type=alerts_type)
new_entry.put()
# Has no 'response' member.
# pylint: disable=E1101
def post_to_memcache(self, memcache_key, alerts):
uncompressed = self.generate_json_dump(alerts)
compression_level = 1
compressed = zlib.compress(uncompressed, compression_level)
memcache.set(memcache_key, compressed)
def parse_alerts(self, alerts_json):
try:
alerts = json.loads(alerts_json)
except ValueError:
warning = 'content field was not JSON'
self.response.set_status(400, warning)
LOGGER.warn(warning)
return
alerts.update({'date': datetime.datetime.utcnow()})
return alerts
def update_alerts(self, alerts_type):
alerts = self.parse_alerts(self.request.get('content'))
if alerts:
self.post_to_memcache(alerts_type, alerts)
self.post_to_history(alerts_type, alerts)
def post(self):
self.update_alerts(AlertsHandler.ALERTS_TYPE)
class AlertsHistory(webapp2.RequestHandler):
MAX_LIMIT_PER_PAGE = 100
def get_entry(self, query, key):
try:
key = int(key)
except ValueError:
self.response.set_status(400, 'Invalid key format')
return {}
ndb_key = ndb.Key(AlertsJSON, key)
result = query.filter(AlertsJSON.key == ndb_key).get()
if result:
return json.loads(result.json)
else:
self.response.set_status(404, 'Failed to find key %s' % key)
return {}
def get_list(self, query):
cursor = self.request.get('cursor')
if cursor:
cursor = datastore_query.Cursor(urlsafe=cursor)
limit = int(self.request.get('limit', self.MAX_LIMIT_PER_PAGE))
limit = min(self.MAX_LIMIT_PER_PAGE, limit)
if cursor:
alerts, next_cursor, has_more = query.fetch_page(limit,
start_cursor=cursor)
else:
alerts, next_cursor, has_more = query.fetch_page(limit)
return {
'has_more': has_more,
'cursor': next_cursor.urlsafe() if next_cursor else '',
'history': [alert.key.integer_id() for alert in alerts]
}
def get(self, key=None):
query = AlertsJSON.query().order(-AlertsJSON.date)
result_json = {}
user = users.get_current_user()
result_json['login-url'] = users.create_login_url(self.request.uri)
# Return only public alerts for non-internal users.
if not user or not user.email().endswith('@google.com'):
query = query.filter(AlertsJSON.type == AlertsHandler.ALERTS_TYPE)
if key:
result_json.update(self.get_entry(query, key))
else:
result_json.update(self.get_list(query))
self.response.headers['Content-Type'] = 'application/json'
self.response.out.write(json.dumps(result_json))
app = webapp2.WSGIApplication([
('/alerts', AlertsHandler),
('/alerts-history', AlertsHistory),
('/alerts-history/(.*)', AlertsHistory),
])
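# Hedged usage note (host/port are placeholders for a local dev_appserver run):
# posting expects a form field named "content" carrying the alerts JSON, and
# reads return the latest compressed snapshot or the stored history keys.
#
#   curl -d 'content={"alerts": []}' http://localhost:8080/alerts
#   curl http://localhost:8080/alerts
#   curl http://localhost:8080/alerts-history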
| bsd-3-clause | -8,315,493,951,139,623,000 | 31.982955 | 81 | 0.621016 | false | 3.943614 | false | false | false |
WorldViews/Spirals | scripts/genImagesP2.py | 1 | 2863 |
from math import log, pow
import os, traceback
import Image, ImageDraw
def lg(x):
return log(x)/log(2.0)
def truncDown(n):
return int(pow(2,int(lg(n))))
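# Sketch of the helper above: truncDown(1000) == 512 and truncDown(300) == 256,
# i.e. the largest power of two not exceeding the input; genImagePow2 below relies on
# this to pick texture-friendly output dimensions when none are given explicitly.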
def verifyDir(path):
if not os.path.exists(path):
print "Creating", path
os.mkdir(path)
def add_corners(im, rad):
circle = Image.new('L', (rad * 2, rad * 2), 0)
draw = ImageDraw.Draw(circle)
draw.ellipse((0, 0, rad * 2, rad * 2), fill=255)
alpha = Image.new('L', im.size, 255)
w, h = im.size
alpha.paste(circle.crop((0, 0, rad, rad)), (0, 0))
alpha.paste(circle.crop((0, rad, rad, rad * 2)), (0, h - rad))
alpha.paste(circle.crop((rad, 0, rad * 2, rad)), (w - rad, 0))
alpha.paste(circle.crop((rad, rad, rad * 2, rad * 2)), (w - rad, h - rad))
im.putalpha(alpha)
return im
def genImagePow2(path, opath, ow=None, oh=None, cornerRad=200):
path = path.replace("\\", "/")
if not (path.endswith(".jpg") or path.endswith(".png")):
return
print "Opening", path, os.path.exists(path)
im = Image.open(path)
w,h = im.size
if not ow:
ow = truncDown(w)
if not oh:
oh = truncDown(h)
size = im.size
im = im.resize((ow,oh), Image.ANTIALIAS)
if cornerRad:
im = add_corners(im, cornerRad)
print "Saving", opath, w, h, ow, oh
im.save(opath)
def genImagesPow2(inputDir, outputDir):
verifyDir(outputDir)
names = os.listdir(inputDir)
for name in names:
path = os.path.join(inputDir, name)
opath = os.path.join(outputDir, name)
try:
genImagePow2(path, opath)
except:
traceback.print_exc()
def genImagesPow2Rename(inputDir, outputDir, cornerRad=None):
verifyDir(outputDir)
names = os.listdir(inputDir)
i = 0
for name in names:
if not (name.lower().endswith(".jpg") or name.lower().endswith(".png")):
continue
i += 1
#oname = "image%03d.png"
oname = "image%d.png" % i
path = os.path.join(inputDir, name)
opath = os.path.join(outputDir, oname)
try:
genImagePow2(path, opath, cornerRad=cornerRad)
except:
traceback.print_exc()
if __name__ == '__main__':
"""
genImagesPow2Rename("../images", "../imagesPow2")
genImagesPow2Rename("../images", "../imagesRoundedPow2", cornerRad=200)
genImagesPow2Rename("../images/FXPAL/src", "../images/FXPAL/imagesPow2")
genImagesPow2Rename("../images/FXPAL/src", "../images/FXPAL/imagesRoundedPow2",
cornerRad=200)
"""
genImagesPow2Rename("../images/Spirals/src", "../images/Spirals/imagesPow2")
genImagesPow2Rename("../images/Spirals/src", "../images/Spirals/imagesRoundedPow2",
cornerRad=200)
| mit | -3,266,906,963,975,171,000 | 30.119565 | 87 | 0.571079 | false | 3.174058 | false | false | false |
rtapadar/pscan | pscan/__main__.py | 1 | 1758 | # Copyright 2016 Rudrajit Tapadar
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from pscan import scan
def main():
parser = argparse.ArgumentParser()
parser.add_argument('target', action="store",
help="IP or CIDR. eg. 10.10.10.10 or 10.10.10.0/24")
parser.add_argument('-p', dest='PORT',
help="port or port-range. eg. 80 or 80-100.")
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument('-sU', action='store_true', default=False,
help="UDP scan.")
group.add_argument('-sT', action='store_true', default=True,
help="TCP scan. Default is TCP scan.")
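# Example invocations implied by the arguments above (targets are hypothetical, not from the original):
#   pscan 10.10.10.10 -p 80 -sT
#   pscan 10.10.10.0/24 -p 80-100 -sU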
args = parser.parse_args()
try:
ports = None
if args.target:
target = args.target
if args.PORT:
ports = args.PORT
s = scan.Scan(target, ports)
print("")
print("Starting Pscan 1.0\n")
if args.sU:
s.udp()
else:
s.tcp()
s.show()
print("")
except Exception as e:
print(e.__class__.__name__ + ":" + e.message + "\n")
parser.print_help()
if __name__ == '__main__':
main()
| apache-2.0 | -3,271,357,822,958,191,000 | 33.470588 | 76 | 0.591581 | false | 3.846827 | false | false | false |
remico/vision_0 | goods2excel.py | 1 | 5421 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" Parse a goods database table and create an ms excel's workbook from it
"""
__author__ = 'remico'
from xml.etree.ElementTree import ElementTree
from abc import ABC, abstractmethod
from bs4 import BeautifulSoup, Tag
import xlsxwriter
import sys, os, glob
class IBuilder(ABC):
@abstractmethod
def convert_articul(self, text):
pass
@abstractmethod
def convert_sizes(self, text):
pass
@abstractmethod
def convert_description(self, text):
pass
@abstractmethod
def convert_price(self, text):
pass
@abstractmethod
def convert_price_retail(self, text):
pass
@abstractmethod
def increment_row(self):
pass
class XlsxBuilder(IBuilder):
def __init__(self):
self.filename = "output_.xlsx"
self.book = xlsxwriter.Workbook(self.filename)
self.sheet = self.book.add_worksheet("goods")
self.fill_header()
self.current_row = 2 # there is the header in the first row
self.cell_format = self.book.add_format()
self.cell_format.set_text_wrap()
self.cell_format.set_align('vjustify')
# self.cell_format.set_align('top')
def fill_header(self):
header_format = self.book.add_format()
header_format.set_align('center')
header_format.set_align('vcenter')
header_format.set_bg_color('yellow')
header_format.set_bold()
self.sheet.write_string('A1', 'Артикул')
self.sheet.write_string('B1', 'Описание')
self.sheet.write_string('C1', 'Цена')
self.sheet.write_string('D1', 'Розничная цена')
self.sheet.write_string('E1', 'Размеры')
self.sheet.set_column('A:A', 50)
self.sheet.set_column('B:B', 80)
self.sheet.set_column('C:C', 20)
self.sheet.set_column('D:D', 20)
self.sheet.set_column('E:E', 20)
self.sheet.set_row(0, 25, header_format)
self.sheet.set_default_row(35)
def get_result(self):
self.book.close()
print("'%s' created" % self.filename)
return self.book
def increment_row(self):
self.current_row += 1
def convert_articul(self, text=""):
cleantext = text.replace('"', '"') if text is not None else ""
self.sheet.write('A%d' % self.current_row, cleantext, self.cell_format)
def convert_description(self, text=""):
cleantext = ""
if text is not None:
soup = BeautifulSoup(text)
rows = []
# utilize the direct child objects
for tag in soup.children:
if not isinstance(tag, Tag):
continue
# parse an html table
if tag.name == 'table':
for row in tag.find_all('tr'):
r = ' '.join([col.get_text().strip()
for col in row.find_all('td')])
rows.append(r)
# parse simple html paragraphs
else:
rows.append(tag.get_text().strip())
cleantext = "\n".join(rows).strip()
self.sheet.write('B%d' % self.current_row, cleantext, self.cell_format)
def convert_price(self, text=""):
self.sheet.write('C%d' % self.current_row, text, self.cell_format)
def convert_price_retail(self, text=""):
self.sheet.write('D%d' % self.current_row, text, self.cell_format)
def convert_sizes(self, text=""):
self.sheet.write('E%d' % self.current_row, text, self.cell_format)
class GoodsReader(object):
def __init__(self, filename, IBuilder_builder):
self.doc = ElementTree(file=filename)
self.database = self.doc.find("database")
if self.database is None:
raise LookupError("It seems that the input file is not a dump of "
"'gloowi_goods' database table")
print("Database: '%s'" % self.database.get("name"))
self.builder = IBuilder_builder
def parse_goods(self):
goods = self.database.findall('table')
len_ = len(goods)
denominator_ = 20
part_ = max(1, len_ // denominator_)  # guard against modulo-by-zero when the table has fewer than 20 rows
records = ({column.get('name'): column.text
for column in item.getiterator('column')}
for item in goods)
for i, rec in enumerate(records):
self.builder.convert_articul(rec['name'])
self.builder.convert_description(rec['content'])
self.builder.convert_price(rec['price'])
self.builder.convert_price_retail(rec['price_retail'])
self.builder.convert_sizes(rec['har_size'])
self.builder.increment_row()
# indicate progress
if not i % part_:
print('#', end='' if i < part_*denominator_ else '\n')
sys.stdout.flush()
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: %s <xmlFile>" % (sys.argv[0],))
sys.exit(-1)
# clear garbage
for file in glob.glob("output_*.xlsx"):
os.remove(file)
print("'%s' removed" % file)
input_file = sys.argv[1]
try:
builder = XlsxBuilder()
parser = GoodsReader(input_file, builder)
parser.parse_goods()
finally:
builder.get_result()
| gpl-3.0 | -302,040,297,643,390,460 | 31.618182 | 79 | 0.566146 | false | 3.661224 | false | false | false |
revarbat/epubber | epubber/views/main.py | 1 | 2968 | from __future__ import absolute_import
import re, os, sys
from clay import app
import clay.config
from flask import make_response, request, redirect, render_template, url_for
from epubber.fimfic_epubgen import FimFictionEPubGenerator
site_epub_classes = [
FimFictionEPubGenerator
]
accesslog = clay.config.get_logger('epubber_access')
#####################################################################
# Main App Views Section
#####################################################################
@app.route('/', methods=['GET', 'POST'])
def main_view():
story = request.args.get("story") or None
if story:
data = None
for epgenclass in site_epub_classes:
epgen = epgenclass()
if epgen.handle_url(story):
epub_file, data = epgen.gen_epub()
accesslog.info('%(title)s - %(url)s' % epgen.metas)
del epgen
response = make_response(data)
response.headers["Content-Type"] = "application/epub+zip"
response.headers["Content-Disposition"] = "attachment; filename=%s" % epub_file
return response
del epgen
return ("Cannot generate epub for this URL.", 400)
return render_template("main.html")
#####################################################################
# Secondary Views Section
#####################################################################
@app.route('/health', methods=['GET'])
def health_view():
'''
Heartbeat view, because why not?
'''
return ('OK', 200)
#####################################################################
# URL Shortener Views Section
#####################################################################
@app.route('/img/<path>', methods=['GET', 'POST'])
def static_img_proxy_view(path):
'''
Make shorter URLs for image files.
'''
path = re.sub(r'[^A-Za-z0-9_.-]', r'_', path)
return redirect(url_for('static', filename=os.path.join('img', path)))
@app.route('/js/<path>', methods=['GET', 'POST'])
def static_js_proxy_view(path):
'''
Make shorter URLs for javascript files.
'''
path = re.sub(r'[^A-Za-z0-9_+.-]', r'_', path)
return redirect(url_for('static', filename=os.path.join('js', path)))
@app.route('/css/<path>', methods=['GET', 'POST'])
def static_css_proxy_view(path):
'''
Make shorter URLs for CSS files.
'''
path = re.sub(r'[^A-Za-z0-9_+.-]', r'_', path)
return redirect(url_for('static', filename=os.path.join('css', path)))
#####################################################################
# Main
#####################################################################
def main():
# Make templates copacetic with UTF8
reload(sys)
sys.setdefaultencoding('utf-8')
# App Config
app.secret_key = clay.config.get('flask.secret_key')
main()
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 nowrap
| bsd-2-clause | 5,776,971,009,247,673,000 | 25.981818 | 95 | 0.495283 | false | 4.043597 | false | false | false |
geminy/aidear | oss/qt/qt-everywhere-opensource-src-5.9.0/qtwebengine/src/3rdparty/chromium/third_party/grpc/tools/buildgen/plugins/expand_version.py | 36 | 4112 | # Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Buildgen package version plugin
This parses the list of targets from the yaml build file, and creates
a custom version string for each language's package.
"""
import re
LANGUAGES = [
'core',
'cpp',
'csharp',
'node',
'objc',
'php',
'python',
'ruby',
]
class Version:
def __init__(self, s):
self.tag = None
if '-' in s:
s, self.tag = s.split('-')
self.major, self.minor, self.patch = [int(x) for x in s.split('.')]
def __str__(self):
"""Version string in a somewhat idiomatic style for most languages"""
s = '%d.%d.%d' % (self.major, self.minor, self.patch)
if self.tag:
s += '-%s' % self.tag
return s
def pep440(self):
"""Version string in Python PEP440 style"""
s = '%d.%d.%d' % (self.major, self.minor, self.patch)
if self.tag:
# we need to translate from grpc version tags to pep440 version
# tags; this code is likely to be a little ad-hoc
if self.tag == 'dev':
s += '.dev0'
elif len(self.tag) >= 3 and self.tag[0:3] == 'pre':
s += 'rc%d' % int(self.tag[3:])
else:
raise Exception('Don\'t know how to translate version tag "%s" to pep440' % self.tag)
return s
def ruby(self):
"""Version string in Ruby style"""
if self.tag:
return '%d.%d.%d.%s' % (self.major, self.minor, self.patch, self.tag)
else:
return '%d.%d.%d' % (self.major, self.minor, self.patch)
def php(self):
"""Version string for PHP PECL package"""
s = '%d.%d.%d' % (self.major, self.minor, self.patch)
if self.tag:
if self.tag == 'dev':
s += 'dev'
elif len(self.tag) >= 3 and self.tag[0:3] == 'pre':
s += 'RC%d' % int(self.tag[3:])
else:
raise Exception('Don\'t know how to translate version tag "%s" to PECL version' % self.tag)
return s
def php_composer(self):
"""Version string for PHP Composer package"""
return '%d.%d.%d' % (self.major, self.minor, self.patch)
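# Illustrative sketch (not part of the original plugin): with v = Version('1.0.1-dev'),
# str(v) == '1.0.1-dev', v.pep440() == '1.0.1.dev0', v.ruby() == '1.0.1.dev',
# v.php() == '1.0.1dev' and v.php_composer() == '1.0.1'; untagged versions such as
# Version('1.0.1') render identically in every style.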
def mako_plugin(dictionary):
"""Expand version numbers:
- for each language, ensure there's a language_version tag in
settings (defaulting to the master version tag)
- expand version strings to major, minor, patch, and tag
"""
settings = dictionary['settings']
master_version = Version(settings['version'])
settings['version'] = master_version
for language in LANGUAGES:
version_tag = '%s_version' % language
if version_tag in settings:
settings[version_tag] = Version(settings[version_tag])
else:
settings[version_tag] = master_version
| gpl-3.0 | 6,242,880,577,715,036,000 | 33.847458 | 99 | 0.667072 | false | 3.769019 | false | false | false |
mozilla/socorro | webapp-django/crashstats/crashstats/tests/test_models.py | 1 | 17410 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import random
from urllib.parse import urlparse, parse_qs
import mock
import pytest
from django.core.cache import cache
from django.conf import settings
from django.utils import dateparse
from crashstats.crashstats import models
from crashstats.crashstats.tests.conftest import Response
from crashstats.crashstats.tests.testbase import DjangoTestCase
from socorro.lib import BadArgumentError
from socorro.unittest.external.pubsub import get_config_manager, PubSubHelper
class TestGraphicsDevices(DjangoTestCase):
def setUp(self):
super().setUp()
cache.clear()
def test_get_pairs(self):
"""Test get_pairs() works correctly
The GraphicsDevice.get_pairs() lets you expand a bunch of (vendor, adapter)
pairs at the same time. It's more performant since it does a single query.
"""
models.GraphicsDevice.objects.create(
vendor_hex="vhex3",
vendor_name="V 3",
adapter_hex="ahex3",
adapter_name="A 3",
)
models.GraphicsDevice.objects.create(
vendor_hex="vhex2",
vendor_name="V 2",
adapter_hex="ahex2",
adapter_name="A 2",
)
models.GraphicsDevice.objects.create(
vendor_hex="vhex1",
vendor_name="V 1",
adapter_hex="ahex1",
adapter_name="A 1",
)
r = models.GraphicsDevice.objects.get_pairs(
["vhex1", "vhex2"], ["ahex1", "ahex2"]
)
expected = {
("vhex1", "ahex1"): ("V 1", "A 1"),
("vhex2", "ahex2"): ("V 2", "A 2"),
}
assert r == expected
r = models.GraphicsDevice.objects.get_pairs(
["vhex2", "vhex3"], ["ahex2", "ahex3"]
)
assert len(r) == 2
expected = {
("vhex2", "ahex2"): ("V 2", "A 2"),
("vhex3", "ahex3"): ("V 3", "A 3"),
}
assert r == expected
class TestBugs(DjangoTestCase):
def setUp(self):
super().setUp()
cache.clear()
def test_get_one(self):
models.BugAssociation.objects.create(bug_id="999999", signature="OOM | small")
api = models.Bugs()
resp = api.get(signatures=["OOM | small"])
assert resp == {
"hits": [{"id": 999999, "signature": "OOM | small"}],
"total": 1,
}
def test_get_multiple(self):
models.BugAssociation.objects.create(bug_id="999999", signature="OOM | small")
models.BugAssociation.objects.create(bug_id="1000000", signature="OOM | large")
api = models.Bugs()
resp = api.get(signatures=["OOM | small", "OOM | large"])
assert resp == {
"hits": [
{"id": 999999, "signature": "OOM | small"},
{"id": 1000000, "signature": "OOM | large"},
],
"total": 2,
}
def test_related(self):
models.BugAssociation.objects.create(bug_id="999999", signature="OOM | small")
models.BugAssociation.objects.create(bug_id="999999", signature="OOM | medium")
models.BugAssociation.objects.create(bug_id="1000000", signature="OOM | large")
api = models.Bugs()
resp = api.get(signatures=["OOM | small"])
assert resp == {
"hits": [
{"id": 999999, "signature": "OOM | medium"},
{"id": 999999, "signature": "OOM | small"},
],
"total": 2,
}
class TestSignaturesByBugs(DjangoTestCase):
def setUp(self):
super().setUp()
cache.clear()
def test_get_one(self):
models.BugAssociation.objects.create(bug_id="999999", signature="OOM | small")
api = models.SignaturesByBugs()
resp = api.get(bug_ids=["999999"])
assert resp == {
"hits": [{"id": 999999, "signature": "OOM | small"}],
"total": 1,
}
def test_get_multiple(self):
models.BugAssociation.objects.create(bug_id="999999", signature="OOM | small")
models.BugAssociation.objects.create(bug_id="1000000", signature="OOM | large")
api = models.SignaturesByBugs()
resp = api.get(bug_ids=["999999", "1000000"])
assert resp == {
"hits": [
{"id": 999999, "signature": "OOM | small"},
{"id": 1000000, "signature": "OOM | large"},
],
"total": 2,
}
class TestSignatureFirstDate(DjangoTestCase):
def setUp(self):
super().setUp()
cache.clear()
def test_get_one(self):
some_date = dateparse.parse_datetime("2018-10-06T00:22:58.074859+00:00")
models.Signature.objects.create(
signature="OOM | Small", first_build="20180920131237", first_date=some_date
)
models.Signature.objects.create(
signature="OOM | Large", first_build="20180920131237", first_date=some_date
)
api = models.SignatureFirstDate()
resp = api.get(signatures="OOM | Small")
assert resp["total"] == 1
assert resp["hits"] == [
{
"first_build": "20180920131237",
"first_date": "2018-10-06T00:22:58.074859+00:00",
"signature": "OOM | Small",
}
]
def test_get_two(self):
some_date = dateparse.parse_datetime("2018-10-06T00:22:58.074859+00:00")
models.Signature.objects.create(
signature="OOM | Small", first_build="20180920131237", first_date=some_date
)
models.Signature.objects.create(
signature="OOM | Large", first_build="20180920131237", first_date=some_date
)
api = models.SignatureFirstDate()
resp = api.get(signatures=["OOM | Small", "OOM | Large"])
assert resp["total"] == 2
assert resp["hits"] == [
{
"first_build": "20180920131237",
"first_date": "2018-10-06T00:22:58.074859+00:00",
"signature": "OOM | Small",
},
{
"first_build": "20180920131237",
"first_date": "2018-10-06T00:22:58.074859+00:00",
"signature": "OOM | Large",
},
]
class TestVersionString(DjangoTestCase):
def setUp(self):
super().setUp()
cache.clear()
def test_bad_args_raise_error(self):
api = models.VersionString()
with pytest.raises(models.RequiredParameterError):
api.get()
with pytest.raises(models.RequiredParameterError):
api.get(product="Firefox", channel="beta")
def test_beta(self):
models.ProductVersion.objects.create(
product_name="Firefox",
release_channel="beta",
build_id="20161129164126",
version_string="51.0b5",
major_version=51,
)
api = models.VersionString()
resp = api.get(product="Firefox", channel="beta", build_id="20161129164126")
assert resp == {"hits": [{"version_string": "51.0b5"}], "total": 1}
def test_release_rc(self):
"""If the channel is beta, but there aren't versions with 'b' in them,
then these are release candidates for a final release, so return an rc one.
"""
models.ProductVersion.objects.create(
product_name="Firefox",
release_channel="beta",
build_id="20161104212021",
version_string="50.0rc2",
major_version=50,
)
api = models.VersionString()
resp = api.get(product="Firefox", channel="beta", build_id="20161104212021")
assert resp == {"hits": [{"version_string": "50.0rc2"}], "total": 1}
def test_beta_and_rc(self):
"""If there are multiple version strings for a given (product, channel,
build_id), and they have 'b' in them, then we want the non-rc one.
"""
models.ProductVersion.objects.create(
product_name="Firefox",
release_channel="beta",
build_id="20160920155715",
version_string="50.0b1rc2",
major_version=50,
)
models.ProductVersion.objects.create(
product_name="Firefox",
release_channel="beta",
build_id="20160920155715",
version_string="50.0b1rc1",
major_version=50,
)
models.ProductVersion.objects.create(
product_name="Firefox",
release_channel="beta",
build_id="20160920155715",
version_string="50.0b1",
major_version=50,
)
api = models.VersionString()
resp = api.get(product="Firefox", channel="beta", build_id="20160920155715")
assert resp == {"hits": [{"version_string": "50.0b1"}], "total": 1}
class TestMiddlewareModels(DjangoTestCase):
def setUp(self):
super().setUp()
cache.clear()
@mock.patch("requests.Session")
def test_bugzilla_api(self, rsession):
model = models.BugzillaBugInfo
api = model()
def mocked_get(url, **options):
assert url.startswith(settings.BZAPI_BASE_URL)
parsed = urlparse(url)
query = parse_qs(parsed.query)
assert query["include_fields"] == ["summary,status,id,resolution"]
return Response(
{
"bugs": [
{
"status": "NEW",
"resolution": "",
"id": 123456789,
"summary": "Some summary",
}
]
}
)
rsession().get.side_effect = mocked_get
info = api.get("123456789")
expected = [
{
"status": "NEW",
"resolution": "",
"id": 123456789,
"summary": "Some summary",
}
]
assert info["bugs"] == expected
# prove that it's cached
def new_mocked_get(**options):
return Response(
{
"bugs": [
{
"status": "RESOLVED",
"resolution": "",
"id": 123456789,
"summary": "Some summary",
}
]
}
)
rsession().get.side_effect = new_mocked_get
info = api.get("123456789")
expected = [
{
"status": "NEW",
"resolution": "",
"id": 123456789,
"summary": "Some summary",
}
]
assert info["bugs"] == expected
@mock.patch("requests.Session")
def test_bugzilla_api_bad_status_code(self, rsession):
model = models.BugzillaBugInfo
api = model()
def mocked_get(url, **options):
return Response("I'm a teapot", status_code=418)
rsession().get.side_effect = mocked_get
with pytest.raises(models.BugzillaRestHTTPUnexpectedError):
api.get("123456789")
def test_processed_crash(self):
model = models.ProcessedCrash
api = model()
def mocked_get(**params):
assert "datatype" in params
assert params["datatype"] == "processed"
return {
"product": "WaterWolf",
"uuid": "7c44ade2-fdeb-4d6c-830a-07d302120525",
"version": "13.0",
"build": "20120501201020",
"ReleaseChannel": "beta",
"os_name": "Windows NT",
"date_processed": "2012-05-25 11:35:57",
"success": True,
"signature": "CLocalEndpointEnumerator::OnMediaNotific",
"addons": [
["[email protected]", "1.2.1"],
["{972ce4c6-7e08-4474-a285-3208198ce6fd}", "13.0"],
],
}
model.implementation().get.side_effect = mocked_get
r = api.get(crash_id="7c44ade2-fdeb-4d6c-830a-07d302120525")
assert r["product"]
def test_unredacted_crash(self):
model = models.UnredactedCrash
api = model()
def mocked_get(**params):
assert "datatype" in params
assert params["datatype"] == "unredacted"
return {
"product": "WaterWolf",
"uuid": "7c44ade2-fdeb-4d6c-830a-07d302120525",
"version": "13.0",
"build": "20120501201020",
"ReleaseChannel": "beta",
"os_name": "Windows NT",
"date_processed": "2012-05-25 11:35:57",
"success": True,
"signature": "CLocalEndpointEnumerator::OnMediaNotific",
"exploitability": "Sensitive stuff",
"addons": [
["[email protected]", "1.2.1"],
["{972ce4c6-7e08-4474-a285-3208198ce6fd}", "13.0"],
],
}
model.implementation().get.side_effect = mocked_get
r = api.get(crash_id="7c44ade2-fdeb-4d6c-830a-07d302120525")
assert r["product"]
assert r["exploitability"]
def test_raw_crash(self):
model = models.RawCrash
api = model()
def mocked_get(**params):
return {
"InstallTime": "1339289895",
"FramePoisonSize": "4096",
"Theme": "classic/1.0",
"Version": "5.0a1",
"Email": "[email protected]",
"Vendor": "Mozilla",
}
model.implementation().get.side_effect = mocked_get
r = api.get(crash_id="some-crash-id")
assert r["Vendor"] == "Mozilla"
assert "Email" in r # no filtering at this level
def test_raw_crash_invalid_id(self):
# NOTE(alexisdeschamps): this undoes the mocking of the implementation so we can test
# the implementation code.
models.RawCrash.implementation = self._mockeries[models.RawCrash]
model = models.RawCrash
api = model()
with pytest.raises(BadArgumentError):
api.get(crash_id="821fcd0c-d925-4900-85b6-687250180607docker/as_me.sh")
def test_raw_crash_raw_data(self):
model = models.RawCrash
api = model()
mocked_calls = []
def mocked_get(**params):
mocked_calls.append(params)
assert params["datatype"] == "raw"
if params.get("name") == "other":
return "\xe0\xe0"
else:
return "\xe0"
model.implementation().get.side_effect = mocked_get
r = api.get(crash_id="some-crash-id", format="raw")
assert r == "\xe0"
r = api.get(crash_id="some-crash-id", format="raw", name="other")
assert r == "\xe0\xe0"
@mock.patch("requests.Session")
def test_massive_querystring_caching(self, rsession):
# doesn't actually matter so much what API model we use
# see https://bugzilla.mozilla.org/show_bug.cgi?id=803696
model = models.BugzillaBugInfo
api = model()
def mocked_get(url, **options):
assert url.startswith(settings.BZAPI_BASE_URL)
return Response(
{
"bugs": [
{
"id": 123456789,
"status": "NEW",
"resolution": "",
"summary": "Some Summary",
}
]
}
)
rsession().get.side_effect = mocked_get
bugnumbers = [str(random.randint(10000, 100000)) for __ in range(100)]
info = api.get(bugnumbers)
assert info
def test_Reprocessing(self):
# This test runs against the Pub/Sub emulator, so undo the mock to let
# that work.
self.undo_implementation_mock(models.Reprocessing)
config_manager = get_config_manager()
with config_manager.context() as config:
pubsub_helper = PubSubHelper(config)
api = models.Reprocessing()
with pubsub_helper as helper:
api.post(crash_ids="some-crash-id")
crash_ids = helper.get_crash_ids("reprocessing")
assert crash_ids == ["some-crash-id"]
def test_PriorityJob(self):
# This test runs against the Pub/Sub emulator, so undo the mock to let
# that work.
self.undo_implementation_mock(models.PriorityJob)
config_manager = get_config_manager()
with config_manager.context() as config:
pubsub_helper = PubSubHelper(config)
api = models.PriorityJob()
with pubsub_helper as helper:
api.post(crash_ids="some-crash-id")
crash_ids = helper.get_crash_ids("priority")
assert crash_ids == ["some-crash-id"]
| mpl-2.0 | -5,132,085,217,994,681,000 | 31.787194 | 93 | 0.522516 | false | 3.95143 | true | false | false |
SwissTPH/openhds-sim | submission.py | 1 | 13836 | #!/usr/bin/env python
"""Test form submission"""
__email__ = "[email protected]"
__status__ = "Alpha"
from lxml import etree
import urllib2
import uuid
import logging
DEVICE_ID = "8d:77:12:5b:c1:3c"
def submit_data(data, url):
"""Submit an instance to ODKAggregate"""
r = urllib2.Request(url, data=data, headers={'Content-Type': 'application/xml'})
try:
u = urllib2.urlopen(r)
response = u.read()
return response
except urllib2.HTTPError as e:
print(e.read())
print(e.code)
print(e.info())
print(data)
def submit_from_instance_file(filename, aggregate_url):
"""Read an instance from a file and submit to ODKAggregate"""
f = open(filename, 'r')
data = f.read()
f.close()
submit_data(data, aggregate_url)
def submit_from_dict(form_dict, aggregate_url):
"""Create an instance from a dict and submit to ODKAggregate"""
root = etree.Element(form_dict["id"], id=form_dict["id"])
#TODO: deviceid should be added here, but what spelling, Id or id?
dev_id = etree.SubElement(root, "deviceid")
dev_id.text = DEVICE_ID
meta = etree.SubElement(root, "meta")
inst_id = etree.SubElement(meta, "instanceID")
inst_id.text = str(uuid.uuid1())
p_b_m = etree.SubElement(root, "processedByMirth")
p_b_m.text = '0'
etree.SubElement(root, "start")
for field in form_dict["fields"]:
if type(field[1]) == list:
el_par = etree.SubElement(root, field[0])
for sub_field in field[1]:
el = etree.SubElement(el_par, sub_field[0])
el.text = sub_field[1]
else:
el = etree.SubElement(root, field[0])
el.text = field[1]
logging.debug(form_dict)
submit_data(etree.tostring(root), aggregate_url)
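# Rough sketch of the XML that submit_from_dict() builds (field names here are hypothetical):
# {"id": "visit", "fields": [["visitDate", "2014-09-02"]]} becomes
# <visit id="visit"><deviceid>...</deviceid><meta><instanceID>(uuid)</instanceID></meta>
# <processedByMirth>0</processedByMirth><start/><visitDate>2014-09-02</visitDate></visit>
# which is then POSTed to the aggregate URL by submit_data().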
def submit_baseline_individual(start, end, location_id, visit_id, fieldworker_id, individual_id, mother_id, father_id,
first_name, middle_name, last_name, gender, date_of_birth, partial_date,
date_of_visit, aggregate_url):
"""Register an individual during baseline"""
# dateOfMigration is date of visit by definition
form_dict = {"id": "baseline",
"fields": [["start", start], ["end", end],
["openhds", [["migrationType", "BASELINE"], ["locationId", location_id],
["visitId", visit_id], ["fieldWorkerId", fieldworker_id]]],
["individualInfo", [["individualId", individual_id], ["motherId", mother_id],
["fatherId", father_id], ["firstName", first_name],
["middleName", middle_name], ["lastName", last_name],
["gender", gender], ["religion", "unk"], ["dateOfBirth", date_of_birth],
["partialDate", partial_date]]],
["dateOfMigration", date_of_visit], ["warning", ""], ["visitDate", date_of_visit],
["majo4mo", "yes"], ["spelasni", "yes"]]}
return submit_from_dict(form_dict, aggregate_url)
def submit_in_migration(start, end, migration_type, location_id, visit_id, fieldworker_id, individual_id, mother_id,
father_id, first_name, middle_name, last_name, gender, date_of_birth, partial_date,
date_of_migration, aggregate_url):
"""Register an inmigration"""
form_dict = {"id": "in_migration",
"fields": [["start", start], ["end", end],
["openhds", [["visitId", visit_id], ["fieldWorkerId", fieldworker_id],
["migrationType", migration_type], ["locationId", location_id]]],
["individualInfo", [["individualId", individual_id], ["motherId", mother_id],
["fatherId", father_id], ["firstName", first_name],
["middleName", middle_name], ["lastName", last_name],
["gender", gender], ["dateOfBirth", date_of_birth],
["partialDate", partial_date]]],
["dateOfMigration", date_of_migration], ["warning", ""], ["origin", "other"],
["reason", "NA"], ["maritalChange", "NA"], ["reasonOther", "NA"], ["movedfrom", "NA"],
["shortorlongstay", "NA"]]}
return submit_from_dict(form_dict, aggregate_url)
def submit_death_registration(start, individual_id, first_name, last_name, field_worker_id, visit_id, date_of_death,
place_of_death, place_of_death_other, end, aggregate_url):
form_dict = {"id": "death_registration",
"fields": [["start", start], ["end", end],
["openhds", [["fieldWorkerId", field_worker_id], ["visitId", visit_id],
["individualId", individual_id], ["firstName", first_name],
["lastName", last_name]]],
["dateOfDeath", date_of_death], ["diagnoseddeath", ''], ["whom", ''],
["causeofdeathdiagnosed", ''], ["causofdeathnotdiagnosed", ''],
["placeOfDeath", place_of_death], ["placeOfDeathOther", place_of_death_other],
["causeOfDeath", '']
]}
return submit_from_dict(form_dict, aggregate_url)
def submit_death_of_hoh_registration(start, end, individual_id, household_id, new_hoh_id, field_worker_id, gender,
death_within_dss, death_village, have_death_certificate, visit_id, cause_of_death,
date_of_death, place_of_death, place_of_death_other, aggregate_url):
#TODO: update form fields to latest
form_dict = {"id": "DEATHTOHOH",
"fields": [["start", start], ["end", end],
["openhds", [["visitId", visit_id], ["fieldWorkerId", field_worker_id],
["householdId", household_id], ["individualId", individual_id],
["firstName", "first"], ["lastName", "last"], ["new_hoh_id", new_hoh_id]]],
["gender", gender], ["deathWithinDSS", death_within_dss], ["deathVillage", death_village],
["haveDeathCertificate", have_death_certificate],
["causeOfDeath", cause_of_death], ["dateOfDeath", date_of_death],
["placeOfDeath", place_of_death], ["placeOfDeathOther", place_of_death_other],
]}
return submit_from_dict(form_dict, aggregate_url)
def submit_location_registration(start, hierarchy_id, fieldworker_id, location_id, location_name, ten_cell_leader,
location_type, geopoint, end, aggregate_url):
form_dict = {"id": "location_registration",
"fields": [["start", start], ["end", end],
["openhds", [["fieldWorkerId", fieldworker_id], ["hierarchyId", hierarchy_id],
["locationId", location_id]]],
["locationName", location_name], ["tenCellLeader", ten_cell_leader],
["locationType", location_type], ["geopoint", geopoint]]}
return submit_from_dict(form_dict, aggregate_url)
def submit_membership(start, individual_id, household_id, fieldworker_id, relationship_to_group_head, start_date, end,
aggregate_url):
form_dict = {"id": "membership",
"fields": [["start", start], ["end", end],
["openhds", [["householdId", household_id], ["fieldWorkerId", fieldworker_id],
["individualId", individual_id]]],
["relationshipToGroupHead", relationship_to_group_head],
["startDate", start_date]]}
return submit_from_dict(form_dict, aggregate_url)
def submit_out_migration_registration(start, end, individual_id, fieldworker_id, visit_id, first_name, last_name,
date_of_migration, name_of_destination, reason_for_out_migration, marital_change,
aggregate_url):
form_dict = {"id": "out_migration_registration",
"fields": [["start", start], ["end", end],
["openhds", [["individualId", individual_id], ["fieldWorkerId", fieldworker_id],
["visitId", visit_id], ["firstName", first_name], ["lastName", last_name]]],
["dateOfMigration", date_of_migration], ["nameOfDestination", name_of_destination],
["reasonForOutMigration", reason_for_out_migration], ["maritalChange", marital_change]]}
return submit_from_dict(form_dict, aggregate_url)
def submit_pregnancy_observation(start, end, estimated_age_of_preg, individual_id, fieldworker_id, visit_id,
exptected_delivery_date, recorded_date, aggregate_url):
form_dict = {"id": "pregnancy_observation",
"fields": [["start", start], ["end", end],
["openhds", [["fieldWorkerId", fieldworker_id], ["visitId", visit_id],
["individualId", individual_id], ["recordedDate", recorded_date]]],
["estimatedAgeOfPreg", estimated_age_of_preg], ["pregNotes", "1"],
["ageOfPregFromPregNotes", estimated_age_of_preg],
["anteNatalClinic", "YES"], ["lastClinicVisitDate", recorded_date],
["healthfacility", "1"], ["medicineforpregnancy", "NO"],
["ttinjection", "YES"], ["othermedicine", "othermedicine"],
["pregnancyNumber", "1"], ["expectedDeliveryDate", exptected_delivery_date]]}
return submit_from_dict(form_dict, aggregate_url)
def submit_pregnancy_outcome(start, mother_id, father_id, visit_id, fieldworker_id, nboutcomes, partial_date,
birthingplace, birthing_assistant, hours_or_days_in_hospital, hours_in_hospital,
caesarian_or_natural, total_number_children_still_living, attended_anc,
number_of_attendances, recorded_date, end, aggregate_url):
form_dict = {"id": "pregnancy_outcome",
"fields": [["start", start], ["end", end],
["openhds", [["visitId", visit_id], ["fieldWorkerId", fieldworker_id],
["motherId", mother_id], ["fatherId", father_id]]],
["nboutcomes", nboutcomes], ["partialDate", partial_date], ["birthingPlace", birthingplace],
["birthingAssistant", birthing_assistant],
["hoursOrDaysInHospital", hours_or_days_in_hospital],
["hoursInHospital", hours_in_hospital], ["caesarianOrNatural", caesarian_or_natural],
["totalNumberChildrenStillLiving", total_number_children_still_living],
["attendedANC", attended_anc], ["numberOfANCAttendances", number_of_attendances],
["recordedDate", recorded_date]]}
return submit_from_dict(form_dict, aggregate_url)
def submit_relationship(start, individual_a, individual_b, fieldworker_id, relationship_type, start_date, end,
aggregate_url):
form_dict = {"id": "relationship",
"fields": [["start", start], ["end", end],
["openhds", [["fieldWorkerId", fieldworker_id], ["individualA", individual_a],
["individualB", individual_b]]],
["relationshipType", relationship_type], ["startDate", start_date]]}
return submit_from_dict(form_dict, aggregate_url)
def submit_social_group_registration(start, household_id, individual_id, field_worker_id, group_name, social_group_type,
end, aggregate_url):
form_dict = {"id": "social_group_registration",
"fields": [["start", start], ["end", end],
["openhds", [["fieldWorkerId", field_worker_id], ["householdId", household_id],
["individualId", individual_id]]],
["groupName", group_name], ["socialGroupType", social_group_type]]}
return submit_from_dict(form_dict, aggregate_url)
def submit_visit_registration(start, visit_id, field_worker_id, location_id, round_number, visit_date, interviewee_id,
correct_interviewee, farmhouse, coordinates, end, aggregate_url):
form_dict = {"id": "visit_registration",
"fields": [["start", start], ["end", end],
["openhds", [["visitId", visit_id], ["fieldWorkerId", field_worker_id],
["locationId", location_id], ["roundNumber", round_number]]],
["visitDate", visit_date], ["intervieweeId", interviewee_id],
["correctInterviewee", correct_interviewee], ["realVisit", "1"],
["farmhouse", farmhouse], ["coordinates", coordinates]]}
return submit_from_dict(form_dict, aggregate_url)
| gpl-2.0 | 2,658,511,324,205,245,000 | 59.419214 | 120 | 0.527681 | false | 4.09228 | false | false | false |
anqilu/dispel4py | dispel4py/test/workflow_graph_test.py | 2 | 2075 | # Copyright (c) The University of Edinburgh 2015
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Tests for simple sequential processing engine.
Using nose (https://nose.readthedocs.org/en/latest/) run as follows::
$ nosetests dispel4py/test/workflow_graph_test.py
'''
from nose import tools
from dispel4py.workflow_graph import WorkflowGraph
from dispel4py.workflow_graph import draw
from dispel4py.examples.graph_testing.testing_PEs \
import TestProducer, TestOneInOneOut
from dispel4py.base import create_iterative_chain
def test_types():
graph = WorkflowGraph()
prod = TestProducer()
cons = TestOneInOneOut()
graph.connect(prod, 'output', cons, 'input')
graph.propagate_types()
tools.eq_(prod.outputconnections['output']['type'],
cons.inputconnections['input']['type'])
def test_dot_pipeline():
graph = WorkflowGraph()
prod = TestProducer()
cons = TestOneInOneOut()
graph.connect(prod, 'output', cons, 'input')
draw(graph)
def test_dot_composite():
def inc(a):
return a+1
def dec(a):
return a-1
graph = WorkflowGraph()
prod = TestProducer()
comp = create_iterative_chain([inc, dec])
cons = TestOneInOneOut()
graph.connect(prod, 'output', comp, 'input')
graph.connect(comp, 'output', cons, 'input')
graph.inputmappings = {'input': (prod, 'input')}
root_prod = TestProducer()
root_graph = WorkflowGraph()
root_graph.connect(root_prod, 'output', graph, 'input')
dot = draw(root_graph)
tools.ok_('subgraph cluster_' in dot)
| apache-2.0 | 7,368,770,741,785,826,000 | 29.072464 | 74 | 0.698795 | false | 3.685613 | true | false | false |
dhruvesh13/Audio-Genre-Classification | plot-spectogram.py | 1 | 1043 | import sys
import os
import scipy.io.wavfile
import scipy.signal
import matplotlib.pyplot as plt
# from matplotlib.pyplot import specgram
os.chdir(sys.argv[1])
# Directory provided as a command line argument will be opened to visualize the files inside
wavfiles = []
for wavfile in os.listdir(sys.argv[1]):
if wavfile.endswith("wav"):
wavfiles.append(wavfile)
wavfiles.sort()
# Declare sampling rates and song arrays for each arg
sampling_rates = []
song_arrays = []
# Read wavfiles
for wavfile in wavfiles:
sampling_rate, song_array = scipy.io.wavfile.read(wavfile)
sampling_rates.append(sampling_rate)
song_arrays.append(song_array)
i = 1 # plot number
# Plot spectrogram for each wave_file
for song_id, song_array, sampling_rate in zip(wavfiles, song_arrays, sampling_rates):
# Create subplots
plt.subplot(10, 10, i)
i += 1
#plt.title(song_id)
plt.specgram(song_array[:30000], Fs=sampling_rate)
print("Plotting spectrogram of song_id: " + song_id)
plt.savefig('Spectrogram.png')
plt.show() | mit | 1,518,071,443,818,557,000 | 24.463415 | 92 | 0.725791 | false | 3.113433 | false | false | false |
warmlogic/happening | searchForGeo_twitter.py | 1 | 1624 | '''
search for geographic tweets
'''
import pandas as pd
import numpy as np
import cPickle
# # import MySQLdb as mdb
# import pymysql as mdb
import time
import twitter_tools
# from authent import dbauth as authsql
import pdb
# load beer names with >500 ratings
# sql='''
# SELECT beers.beername, beers.id
# FROM beers
# JOIN revstats ON beers.id=revstats.id
# WHERE revstats.nreviews>500;
# '''
# con=mdb.connect(**authsql)
# print 'Loading neighborhoods'
# df=pd.io.sql.read_frame(sql,con)
# beers=list(df['neighborhoods'])
# ids=list(df['id'])
# totalnum=len(beers)
# print 'Found %i beers'%totalnum
# # NB: tweets seem to come in from outside bounding box
# bayArea_bb_twit = [-122.75,36.8,-121.75,37.8] # from twitter's dev site
# bayArea_bb_me = [-122.53,36.94,-121.8,38.0] # I made this one
# searches twitter backwards in time
query = "since:2014-09-02 until:2014-09-03"
sf_center = "37.75,-122.44,4mi"
# count = 100
# results = twitter_tools.TwitSearchGeoOld(query,sf_center,count,twitter_tools.twitAPI)
count = 100
max_tweets = 1000
results = twitter_tools.TwitSearchGeo(query,sf_center,count,max_tweets,twitter_tools.twitAPI)
if len(results) > 0:
pdb.set_trace()
# # search twitter for beers and save out to dataframe
# count=0
# tweetholder=[]
# for bn in beers:
# searchstr='"'+bn+'"'
# print 'On %i of %i'%(count+1,totalnum)
# results = twittertools.TwitSearch(searchstr,twittertools.twitAPI)
# tweetholder.append(results)
# count+=1
print('Done.')
# save
# timeint = np.int(time.time())
# cPickle.dump(tweetholder,open('tweetsearch_%i.cpk'%timeint,'w'))
| gpl-3.0 | -7,280,097,865,136,772,000 | 24.375 | 93 | 0.693966 | false | 2.743243 | false | false | false |
tdautc19841202/wechatpy | wechatpy/client/api/customservice.py | 7 | 7735 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import hashlib
import time
import datetime
from six.moves.urllib.parse import quote
from optionaldict import optionaldict
from wechatpy.utils import to_binary
from wechatpy.client.api.base import BaseWeChatAPI
class WeChatCustomService(BaseWeChatAPI):
def add_account(self, account, nickname, password):
"""
Add a customer service account
For details see
http://mp.weixin.qq.com/wiki/1/70a29afed17f56d537c833f89be979c9.html
:param account: full customer service account, in the form account_prefix@official_account_wechat_id
:param nickname: nickname of the account, at most 6 Chinese characters or 12 latin characters
:param password: login password for the account
:return: the JSON data returned by the API
"""
password = to_binary(password)
password = hashlib.md5(password).hexdigest()
return self._post(
'https://api.weixin.qq.com/customservice/kfaccount/add',
data={
'kf_account': account,
'nickname': nickname,
'password': password
}
)
def update_account(self, account, nickname, password):
"""
Update a customer service account
For details see
http://mp.weixin.qq.com/wiki/1/70a29afed17f56d537c833f89be979c9.html
:param account: full customer service account, in the form account_prefix@official_account_wechat_id
:param nickname: nickname of the account, at most 6 Chinese characters or 12 latin characters
:param password: login password for the account
:return: the JSON data returned by the API
"""
password = to_binary(password)
password = hashlib.md5(password).hexdigest()
return self._post(
'https://api.weixin.qq.com/customservice/kfaccount/update',
data={
'kf_account': account,
'nickname': nickname,
'password': password
}
)
def delete_account(self, account):
"""
Delete a customer service account
For details see
http://mp.weixin.qq.com/wiki/1/70a29afed17f56d537c833f89be979c9.html
:param account: full customer service account, in the form account_prefix@official_account_wechat_id
:return: the JSON data returned by the API
"""
params_data = [
'access_token={0}'.format(quote(self.access_token)),
'kf_account={0}'.format(quote(to_binary(account), safe=b'/@')),
]
params = '&'.join(params_data)
return self._get(
'https://api.weixin.qq.com/customservice/kfaccount/del',
params=params
)
def get_accounts(self):
"""
Get the list of customer service accounts
For details see
http://mp.weixin.qq.com/wiki/1/70a29afed17f56d537c833f89be979c9.html
:return: list of customer service accounts
"""
res = self._get('customservice/getkflist')
return res['kf_list']
def upload_headimg(self, account, media_file):
"""
Upload an avatar for a customer service account
For details see
http://mp.weixin.qq.com/wiki/1/70a29afed17f56d537c833f89be979c9.html
:param account: full customer service account
:param media_file: the avatar file to upload, a file object
:return: the JSON data returned by the API
"""
return self._post(
'https://api.weixin.qq.com/customservice/kfaccount/uploadheadimg',
params={
'kf_account': account
},
files={
'media': media_file
}
)
def get_online_accounts(self):
"""
Get information on online customer service agents
For details see
http://mp.weixin.qq.com/wiki/9/6fff6f191ef92c126b043ada035cc935.html
:return: list of online customer service agent records
"""
res = self._get('customservice/getonlinekflist')
return res['kf_online_list']
def create_session(self, openid, account, text=None):
"""
Create a customer service session
For details see
http://mp.weixin.qq.com/wiki/2/6c20f3e323bdf5986cfcb33cbd3b829a.html
:param openid: openid of the customer
:param account: full customer service account
:param text: additional text, optional
:return: the JSON data returned by the API
"""
data = optionaldict(
openid=openid,
kf_account=account,
text=text
)
return self._post(
'https://api.weixin.qq.com/customservice/kfsession/create',
data=data
)
def close_session(self, openid, account, text=None):
"""
Close a customer service session
For details see
http://mp.weixin.qq.com/wiki/2/6c20f3e323bdf5986cfcb33cbd3b829a.html
:param openid: openid of the customer
:param account: full customer service account
:param text: additional text, optional
:return: the JSON data returned by the API
"""
data = optionaldict(
openid=openid,
kf_account=account,
text=text
)
return self._post(
'https://api.weixin.qq.com/customservice/kfsession/close',
data=data
)
def get_session(self, openid):
"""
Get the session state of a customer
For details see
http://mp.weixin.qq.com/wiki/2/6c20f3e323bdf5986cfcb33cbd3b829a.html
:param openid: openid of the customer
:return: the JSON data returned by the API
"""
return self._get(
'https://api.weixin.qq.com/customservice/kfsession/getsession',
params={'openid': openid}
)
def get_session_list(self, account):
"""
Get the session list of a customer service account
For details see
http://mp.weixin.qq.com/wiki/2/6c20f3e323bdf5986cfcb33cbd3b829a.html
:param account: full customer service account
:return: session list of the account
"""
res = self._get(
'https://api.weixin.qq.com/customservice/kfsession/getsessionlist',
params={'kf_account': account}
)
return res['sessionlist']
def get_wait_case(self):
"""
Get the list of sessions waiting to be accepted
For details see
http://mp.weixin.qq.com/wiki/2/6c20f3e323bdf5986cfcb33cbd3b829a.html
:return: the JSON data returned by the API
"""
return self._get(
'https://api.weixin.qq.com/customservice/kfsession/getwaitcase'
)
def get_records(self, start_time, end_time, page_index,
page_size=10, user_id=None):
"""
Get customer service chat records
For details see
http://mp.weixin.qq.com/wiki/19/7c129ec71ddfa60923ea9334557e8b23.html
:param start_time: query start time, UNIX timestamp
:param end_time: query end time, UNIX timestamp; a single query cannot span more than one day
:param page_index: page number to query, starting from 1
:param page_size: page size, at most 1000 records per page
:param user_id: identifier of the user, unique to the current official account
:return: the JSON data returned by the API
"""
if isinstance(start_time, datetime.datetime):
start_time = time.mktime(start_time.timetuple())
if isinstance(end_time, datetime.datetime):
end_time = time.mktime(end_time.timetuple())
record_data = {
'starttime': int(start_time),
'endtime': int(end_time),
'pageindex': page_index,
'pagesize': page_size
}
if user_id:
record_data['openid'] = user_id
res = self._post(
'https://api.weixin.qq.com/customservice/msgrecord/getrecord',
data=record_data
)
return res['recordlist']
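# Usage sketch (assumes a configured WeChatClient from this package exposes this API as
# client.customservice; the account/openid values below are placeholders):
#   client.customservice.add_account('kf2001@mp_account', 'Helper', 'password')
#   client.customservice.create_session('user_openid', 'kf2001@mp_account')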
| mit | 6,661,586,624,816,034,000 | 28.106838 | 79 | 0.559536 | false | 2.79483 | false | false | false |
Answeror/lit | pywingui/lib/scintilla.py | 1 | 4493 | from pywingui.windows import *
from pywingui.wtl import *
from ctypes import c_char
try:
LoadLibrary("SciLexer.DLL")
#except Exception, e:
except:# for compatibility with Python 3 version
MessageBox(0, "The Scintilla DLL could not be loaded.", "Error loading Scintilla", MB_OK | MB_ICONERROR)
#~ raise e
from .scintilla_constants import *
class SCNotification(Structure):
_fields_ = [("nmhdr", NMHDR),
("position", c_int),
("ch", c_int),
("modifiers", c_int),
("modificationType", c_int),
("text", c_wchar_p),
("length", c_int),
("linesAdded", c_int),
("message", c_int),
("wParam", WPARAM),
("lParam", LPARAM),
("line", c_int),
("foldLevelNow", c_int),
("foldLevelPrev", c_int),
("margin", c_int),
("listType", c_int),
("x", c_int),
("y", c_int)]
copyright = \
"""
Scintilla
Copyright 1998-2003 by Neil Hodgson <[email protected]>
All Rights Reserved
"""
class Scintilla(Window):
_window_class_ = "Scintilla"
_window_style_ = WS_VISIBLE | WS_CHILD
def __init__(self, *args, **kwargs):
Window.__init__(self, *args, **kwargs)
self.InterceptParent()
def GetNotification(self, event):
return SCNotification.from_address(int(event.lParam))
def SendScintillaMessage(self, msg, wParam, lParam):
#TODO use fast path,e.g. retreive direct message fn from
#scintilla as described in scintilla docs
return windll.user32.SendMessageA(self.handle, msg, wParam, lParam)
#~ return self.SendMessage(msg, wParam, lParam)
def SetText(self, txt):
self.SendScintillaMessage(SCI_SETTEXT, 0, txt)
def GetLexer(self):
return self.SendScintillaMessage(SCI_GETLEXER, 0, 0)
def SetLexerLanguage(self, lang):
self.SendScintillaMessage(SCI_SETLEXERLANGUAGE, 0, lang)
def SetStyleBits(self, key, value):
self.SendScintillaMessage(SCI_SETSTYLEBITS, key, value)
def SetMarginWidth(self, width = 0):
self.SendScintillaMessage(SCI_SETMARGINWIDTHN, 0, width)
def SetProperty(self, key, value):
self.SendScintillaMessage(SCI_SETPROPERTY, key, value)
def SetKeyWords(self, keyWordSet, keyWordList):
self.SendScintillaMessage(SCI_SETKEYWORDS, keyWordSet, " ".join(keyWordList))
def StyleSetFore(self, styleNumber, color):
self.SendScintillaMessage(SCI_STYLESETFORE, styleNumber, color)
def StyleSetBack(self, styleNumber, color):
self.SendScintillaMessage(SCI_STYLESETBACK, styleNumber, color)
def StyleSetSize(self, styleNumber, size):
self.SendScintillaMessage(SCI_STYLESETSIZE, styleNumber, size)
def StyleSetFont(self, styleNumber, face):
self.SendScintillaMessage(SCI_STYLESETFONT, styleNumber, face)
def StyleClearAll(self):
self.SendScintillaMessage(SCI_STYLECLEARALL, 0, 0)
def GetLength(self):
return self.SendScintillaMessage(SCI_GETLENGTH, 0, 0)
def GetText(self):
buff_length = self.GetLength() + 1
buff = create_string_buffer(buff_length)
self.SendScintillaMessage(SCI_GETTEXT, buff_length, byref(buff))
return str(buff.value)
def GetSelText(self):
start = self.SendScintillaMessage(SCI_GETSELECTIONSTART, 0, 0)
end = self.SendScintillaMessage(SCI_GETSELECTIONEND, 0, 0)
if start == end: return ""
buff = (c_char * (end - start + 1))()
self.SendScintillaMessage(SCI_GETSELTEXT, 0, byref(buff))
return str(buff.value)
def HasSelection(self):
start = self.SendScintillaMessage(SCI_GETSELECTIONSTART, 0, 0)
end = self.SendScintillaMessage(SCI_GETSELECTIONEND, 0, 0)
return (end - start) > 0
def AddText(self, text):
self.SendScintillaMessage(SCI_ADDTEXT, len(text), text)
def SetTabWidth(self, width):
self.SendScintillaMessage(SCI_SETTABWIDTH, width, 0)
def SetUseTabs(self, useTabs):
self.SendScintillaMessage(SCI_SETUSETABS, int(useTabs), 0)
def SetEolMode(self, eolMode):
self.SendScintillaMessage(SCI_SETEOLMODE, eolMode, 0)
def Undo(self):
self.SendScintillaMessage(SCI_UNDO, 0, 0)
def Redo(self):
self.SendScintillaMessage(SCI_REDO, 0, 0)
def CanUndo(self):
return self.SendScintillaMessage(SCI_CANUNDO, 0, 0)
def CanRedo(self):
return self.SendScintillaMessage(SCI_CANREDO, 0, 0)
def Cut(self):
self.SendScintillaMessage(SCI_CUT, 0, 0)
def Copy(self):
self.SendScintillaMessage(SCI_COPY, 0, 0)
def Clear(self):
self.SendScintillaMessage(SCI_CLEAR, 0, 0)
def Paste(self):
self.SendScintillaMessage(SCI_PASTE, 0, 0)
def CanPaste(self):
return self.SendScintillaMessage(SCI_CANPASTE, 0, 0)
def SelectAll(self):
self.SendScintillaMessage(SCI_SELECTALL, 0, 0)
| mit | -2,416,158,969,552,404,000 | 27.801282 | 105 | 0.719341 | false | 2.859962 | false | false | false |
jbrodin/glowswitch-test | glowswitch-test.py | 1 | 3010 | #Author: Jbrodin
#A simple program used to make a color-changing orb
#Requires the Panda3D game engine to run
import direct.directbase.DirectStart
from panda3d.core import AmbientLight,DirectionalLight
from panda3d.core import NodePath,TextNode
from panda3d.core import Camera,Vec3,Vec4
from direct.gui.OnscreenText import OnscreenText
from direct.interval.IntervalGlobal import *
import sys
from direct.showbase.DirectObject import DirectObject #enables sys.accept
class World(DirectObject):
def __init__(self):
#Load switch model
self.glowswitch = loader.loadModel("glowswitch")
self.sphere=self.glowswitch.find("**/sphere") #finds a subcomponent of the .egg model... sphere is the name of the sphere geometry in the .egg file
self.glowswitch.reparentTo(render)
base.disableMouse() #mouse-controlled camera cannot be moved within the program
camera.setPosHpr( 0, -6.5, 1.4, 0, -2, 0)
#Light up everything an equal amount
ambientLight = AmbientLight("ambientLight")
ambientLight.setColor(Vec4(.95, .95, 1.05, 1))
render.setLight(render.attachNewNode(ambientLight))
#Add lighting that only casts light on one side of everything in the scene
directionalLight = DirectionalLight("directionalLight")
directionalLight.setDirection(Vec3(-5, -5, -5))
directionalLight.setColor(Vec4(.2, .2, .2, .1)) #keepin it dim
directionalLight.setSpecularColor(Vec4(0.2, 0.2, 0.2, 0.2))
render.setLight(render.attachNewNode(directionalLight))
#initialize sequence variable
self.ChangeColorSeq = Sequence(Wait(.1))
#start with blue by default
self.changeOrbColor(.1,0,.6,.3,.2,1)
#^(R min, Gmin, Bmin, Rmax, Gmax, Bmax)
#user controls
#note that changing the color means it will "pulse" that color and therefore needs a range of color values
self.accept("1", self.changeOrbColor,[.6,.1,.1,1,.3,.3]) #change orb color to red
self.accept("2", self.changeOrbColor,[.1,.6,.1,.3,1,.3])#change orb color to green
self.accept("3", self.changeOrbColor,[.1,0,.6,.3,.2,1]) #change orb color to blue
self.accept("escape", sys.exit)
instructions = OnscreenText(text="1: Change to red \n2: Change to Green \n3: Change to Blue \nEsc: Exit",
fg=(1,1,1,1), pos = (-1.3, -.82), scale = .05, align = TextNode.ALeft)
def changeOrbColor(self,Ra,Ga,Ba,Rz,Gz,Bz):
self.ChangeColorSeq.finish() #end the last sequence
BrightenSwitch = self.sphere.colorScaleInterval(2, Vec4(Ra,Ga,Ba,1), Vec4(Rz,Gz,Bz,1)) #the first number inside the () gives the amount of time this takes to execute
DarkenSwitch = self.sphere.colorScaleInterval(2, Vec4(Rz,Gz,Bz,1), Vec4(Ra,Ga,Ba,1))
self.ChangeColorSeq = Sequence(BrightenSwitch,Wait(.1),DarkenSwitch,Wait(.1))
self.ChangeColorSeq.loop()
w = World()
run()
| mit | -4,604,273,806,460,519,400 | 45.307692 | 173 | 0.6701 | false | 3.296824 | false | false | false |
svp-dev/slcore | slc/tools/slc/mt/mtsparc/asmproc/opc/mtsparc.py | 1 | 38860 | # This file was generated by decode.py. Do not edit!
# For each instruction the information available is:'
# re_parser, input_regs, output_regs, double_regs, long_latency, delayed, extra_phy_inputs, extra_phy_outputs'
import re
class insn_metadata(object):
def __init__(self, info):
self.inputs, self.outputs, self.double_regs, self.long_latency, self.delayed, self.extra_inputs, self.extra_outputs, self.immediates, self.is_branch, self.is_condbranch = info
reg = r"""(\$[lgsd]?f?\d+|%(?:sp|fp|[ilog][0-7]|[rf]\d+))"""
imm = r"""([^%$]\S*|%(?:(?:hi|lo)x?|hh|hm|lm|h44|m44|uhi|ulo|(?:tgd|tldm|tie)_(?:hi22|lo10)|(?:tldo|tle)_(?:hix22|lox10))\([^)]+\))"""
re000 = re.compile(r'''\s*''' + imm + r'''\s*$''')
re001 = re.compile(r'''\s*''' + reg + r'''\s*,\s*''' + reg + r'''\s*,\s*''' + reg + r'''\s*$''')
re002 = re.compile(r'''\s*$''')
re003 = re.compile(r'''\s*''' + reg + r'''\s*,\s*''' + imm + r'''\s*,\s*''' + reg + r'''\s*$''')
re004 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*(\dx(\d|[a-f])+|\d+)\s*,\s*''' + reg + r'''\s*$''')
re005 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*\]\s*(\dx(\d|[a-f])+|\d+)\s*,\s*''' + reg + r'''\s*$''')
re006 = re.compile(r'''\s*(?:)\s*''' + imm + r'''\s*,\s*''' + reg + r'''\s*$''')
re007 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*,\s*''' + reg + r'''\s*$''')
re008 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*\]\s*,\s*''' + reg + r'''\s*$''')
re009 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + imm + r'''\s*\]\s*,\s*''' + reg + r'''\s*$''')
re010 = re.compile(r'''\s*\[\s*''' + imm + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*,\s*''' + reg + r'''\s*$''')
re011 = re.compile(r'''\s*\[\s*''' + imm + r'''\s*\]\s*,\s*''' + reg + r'''\s*$''')
re012 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*,\s*%c\d+\S*\s*$''')
re013 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*\]\s*,\s*%c\d+\S*\s*$''')
re014 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + imm + r'''\s*\]\s*,\s*%c\d+\S*\s*$''')
re015 = re.compile(r'''\s*\[\s*''' + imm + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*,\s*%c\d+\S*\s*$''')
re016 = re.compile(r'''\s*\[\s*''' + imm + r'''\s*\]\s*,\s*%c\d+\S*\s*$''')
re017 = re.compile(r'''\s*''' + reg + r'''\s*,\s*''' + reg + r'''\s*$''')
re018 = re.compile(r'''\s*''' + reg + r'''\s*$''')
re019 = re.compile(r'''\s*''' + imm + r'''\s*,\s*''' + reg + r'''\s*,\s*''' + reg + r'''\s*$''')
re020 = re.compile(r'''\s*''' + imm + r'''\s*,\s*''' + reg + r'''\s*$''')
re021 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*,\s*%fsr\S*\s*$''')
re022 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*\]\s*,\s*%fsr\S*\s*$''')
re023 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + imm + r'''\s*\]\s*,\s*%fsr\S*\s*$''')
re024 = re.compile(r'''\s*\[\s*''' + imm + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*,\s*%fsr\S*\s*$''')
re025 = re.compile(r'''\s*\[\s*''' + imm + r'''\s*\]\s*,\s*%fsr\S*\s*$''')
re026 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*,\s*%csr\S*\s*$''')
re027 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*\]\s*,\s*%csr\S*\s*$''')
re028 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + imm + r'''\s*\]\s*,\s*%csr\S*\s*$''')
re029 = re.compile(r'''\s*\[\s*''' + imm + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*,\s*%csr\S*\s*$''')
re030 = re.compile(r'''\s*\[\s*''' + imm + r'''\s*\]\s*,\s*%csr\S*\s*$''')
re031 = re.compile(r'''\s*''' + reg + r'''\s*,\s*''' + imm + r'''\s*$''')
re032 = re.compile(r'''\s*''' + reg + r'''\s*,\s*''' + reg + r'''\s*,\s*%asr\S*\s*$''')
re033 = re.compile(r'''\s*''' + reg + r'''\s*,\s*''' + imm + r'''\s*,\s*%asr\S*\s*$''')
re034 = re.compile(r'''\s*''' + reg + r'''\s*,\s*''' + reg + r'''\s*,\s*%y\S*\s*$''')
re035 = re.compile(r'''\s*''' + reg + r'''\s*,\s*''' + imm + r'''\s*,\s*%y\S*\s*$''')
re036 = re.compile(r'''\s*''' + reg + r'''\s*,\s*''' + reg + r'''\s*,\s*%psr\S*\s*$''')
re037 = re.compile(r'''\s*''' + reg + r'''\s*,\s*''' + imm + r'''\s*,\s*%psr\S*\s*$''')
re038 = re.compile(r'''\s*''' + reg + r'''\s*,\s*''' + reg + r'''\s*,\s*%wim\S*\s*$''')
re039 = re.compile(r'''\s*''' + reg + r'''\s*,\s*''' + imm + r'''\s*,\s*%wim\S*\s*$''')
re040 = re.compile(r'''\s*''' + reg + r'''\s*,\s*''' + reg + r'''\s*,\s*%tbr\S*\s*$''')
re041 = re.compile(r'''\s*''' + reg + r'''\s*,\s*''' + imm + r'''\s*,\s*%tbr\S*\s*$''')
re042 = re.compile(r'''\s*%asr\d+\S*\s*,\s*''' + reg + r'''\s*$''')
re043 = re.compile(r'''\s*%y\S*\s*,\s*''' + reg + r'''\s*$''')
re044 = re.compile(r'''\s*%psr\S*\s*,\s*''' + reg + r'''\s*$''')
re045 = re.compile(r'''\s*%wim\S*\s*,\s*''' + reg + r'''\s*$''')
re046 = re.compile(r'''\s*%tbr\S*\s*,\s*''' + reg + r'''\s*$''')
re047 = re.compile(r'''\s*''' + reg + r'''\s*,\s*%asr\S*\s*$''')
re048 = re.compile(r'''\s*''' + imm + r'''\s*,\s*%asr\S*\s*$''')
re049 = re.compile(r'''\s*''' + reg + r'''\s*,\s*%y\S*\s*$''')
re050 = re.compile(r'''\s*''' + imm + r'''\s*,\s*%y\S*\s*$''')
re051 = re.compile(r'''\s*''' + reg + r'''\s*,\s*%psr\S*\s*$''')
re052 = re.compile(r'''\s*''' + imm + r'''\s*,\s*%psr\S*\s*$''')
re053 = re.compile(r'''\s*''' + reg + r'''\s*,\s*%wim\S*\s*$''')
re054 = re.compile(r'''\s*''' + imm + r'''\s*,\s*%wim\S*\s*$''')
re055 = re.compile(r'''\s*''' + reg + r'''\s*,\s*%tbr\S*\s*$''')
re056 = re.compile(r'''\s*''' + imm + r'''\s*,\s*%tbr\S*\s*$''')
re057 = re.compile(r'''\s*''' + reg + r'''\s*,\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*$''')
re058 = re.compile(r'''\s*''' + reg + r'''\s*,\s*\[\s*''' + reg + r'''\s*\]\s*$''')
re059 = re.compile(r'''\s*''' + reg + r'''\s*,\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + imm + r'''\s*\]\s*$''')
re060 = re.compile(r'''\s*''' + reg + r'''\s*,\s*\[\s*''' + imm + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*$''')
re061 = re.compile(r'''\s*''' + reg + r'''\s*,\s*\[\s*''' + imm + r'''\s*\]\s*$''')
re062 = re.compile(r'''\s*%c\d+\S*\s*,\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*$''')
re063 = re.compile(r'''\s*%c\d+\S*\s*,\s*\[\s*''' + reg + r'''\s*\]\s*$''')
re064 = re.compile(r'''\s*%c\d+\S*\s*,\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + imm + r'''\s*\]\s*$''')
re065 = re.compile(r'''\s*%c\d+\S*\s*,\s*\[\s*''' + imm + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*$''')
re066 = re.compile(r'''\s*%c\d+\S*\s*,\s*\[\s*''' + imm + r'''\s*\]\s*$''')
re067 = re.compile(r'''\s*%csr\S*\s*,\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*$''')
re068 = re.compile(r'''\s*%csr\S*\s*,\s*\[\s*''' + reg + r'''\s*\]\s*$''')
re069 = re.compile(r'''\s*%csr\S*\s*,\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + imm + r'''\s*\]\s*$''')
re070 = re.compile(r'''\s*%csr\S*\s*,\s*\[\s*''' + imm + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*$''')
re071 = re.compile(r'''\s*%csr\S*\s*,\s*\[\s*''' + imm + r'''\s*\]\s*$''')
re072 = re.compile(r'''\s*%fsr\S*\s*,\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*$''')
re073 = re.compile(r'''\s*%fsr\S*\s*,\s*\[\s*''' + reg + r'''\s*\]\s*$''')
re074 = re.compile(r'''\s*%fsr\S*\s*,\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + imm + r'''\s*\]\s*$''')
re075 = re.compile(r'''\s*%fsr\S*\s*,\s*\[\s*''' + imm + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*$''')
re076 = re.compile(r'''\s*%fsr\S*\s*,\s*\[\s*''' + imm + r'''\s*\]\s*$''')
re077 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*$''')
re078 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*\]\s*$''')
re079 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + imm + r'''\s*\]\s*$''')
re080 = re.compile(r'''\s*\[\s*''' + imm + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*$''')
re081 = re.compile(r'''\s*\[\s*''' + imm + r'''\s*\]\s*$''')
re082 = re.compile(r'''\s*''' + reg + r'''\s*,\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*(\dx(\d|[a-f])+|\d+)\s*$''')
re083 = re.compile(r'''\s*''' + reg + r'''\s*,\s*\[\s*''' + reg + r'''\s*\]\s*(\dx(\d|[a-f])+|\d+)\s*$''')
re084 = re.compile(r'''\s*''' + reg + r'''\s*[+-]\s*''' + reg + r'''\s*,\s*''' + reg + r'''\s*$''')
re085 = re.compile(r'''\s*''' + reg + r'''\s*[+-]\s*''' + imm + r'''\s*,\s*''' + reg + r'''\s*$''')
re086 = re.compile(r'''\s*''' + imm + r'''\s*[+-]\s*''' + reg + r'''\s*,\s*''' + reg + r'''\s*$''')
re087 = re.compile(r'''\s*''' + reg + r'''\s*[+-]\s*''' + reg + r'''\s*$''')
re088 = re.compile(r'''\s*''' + reg + r'''\s*[+-]\s*''' + imm + r'''\s*$''')
re089 = re.compile(r'''\s*''' + imm + r'''\s*[+-]\s*''' + reg + r'''\s*$''')
re090 = re.compile(r'''\s*''' + reg + r'''\s*,\s*''' + reg + r'''\s*,\s*''' + imm + r'''\s*$''')
re091 = re.compile(r'''\s*''' + imm + r'''\s*,\s*\d+\s*$''')
re092 = re.compile(r'''\s*''' + reg + r'''\s*[+-]\s*''' + reg + r'''\s*,\s*\d+\s*$''')
re093 = re.compile(r'''\s*''' + reg + r'''\s*,\s*\d+\s*$''')
re094 = re.compile(r'''\s*''' + reg + r'''\s*[+-]\s*''' + imm + r'''\s*,\s*\d+\s*$''')
re095 = re.compile(r'''\s*''' + imm + r'''\s*[+-]\s*''' + reg + r'''\s*,\s*\d+\s*$''')
re096 = re.compile(r'''\s*%fq\S*\s*,\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*$''')
re097 = re.compile(r'''\s*%fq\S*\s*,\s*\[\s*''' + reg + r'''\s*\]\s*$''')
re098 = re.compile(r'''\s*%fq\S*\s*,\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + imm + r'''\s*\]\s*$''')
re099 = re.compile(r'''\s*%fq\S*\s*,\s*\[\s*''' + imm + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*$''')
re100 = re.compile(r'''\s*%fq\S*\s*,\s*\[\s*''' + imm + r'''\s*\]\s*$''')
re101 = re.compile(r'''\s*%cq\S*\s*,\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*$''')
re102 = re.compile(r'''\s*%cq\S*\s*,\s*\[\s*''' + reg + r'''\s*\]\s*$''')
re103 = re.compile(r'''\s*%cq\S*\s*,\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + imm + r'''\s*\]\s*$''')
re104 = re.compile(r'''\s*%cq\S*\s*,\s*\[\s*''' + imm + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*$''')
re105 = re.compile(r'''\s*%cq\S*\s*,\s*\[\s*''' + imm + r'''\s*\]\s*$''')
re106 = re.compile(r'''\s*%wim\S*\s*,\s*''' + imm + r'''\s*,\s*''' + reg + r'''\s*$''')
form000 = insn_metadata(([], [], [], False, True, [], [], [0], False, True))
form001 = insn_metadata(([0, 1], [2], [], False, False, [], [], [], False, False))
form002 = insn_metadata(([], [], [], False, False, [], [], [], False, False))
form003 = insn_metadata(([0], [2], [], False, False, [], [], [1], False, False))
form004 = insn_metadata(([0, 1], [2], [], True, False, [], [], [], False, False))
form005 = insn_metadata(([0], [1], [], True, False, [], [], [], False, False))
form006 = insn_metadata(([], [1], [], False, False, [], [], [0], False, False))
form007 = insn_metadata(([0, 1], [2], [2], True, False, [], [], [], False, False))
form008 = insn_metadata(([0], [1], [1], True, False, [], [], [], False, False))
form009 = insn_metadata(([0], [2], [2], True, False, [], [], [1], False, False))
form010 = insn_metadata(([1], [2], [2], True, False, [], [], [0], False, False))
form011 = insn_metadata(([], [1], [1], True, False, [], [], [0], False, False))
form012 = insn_metadata(([0, 1], [], [], True, False, [], [], [], False, False))
form013 = insn_metadata(([0], [], [], True, False, [], [], [], False, False))
form014 = insn_metadata(([0], [], [], True, False, [], [], [1], False, False))
form015 = insn_metadata(([1], [], [], True, False, [], [], [0], False, False))
form016 = insn_metadata(([], [], [], True, False, [], [], [0], False, False))
form017 = insn_metadata(([0], [1], [], False, False, [], [], [], False, False))
form018 = insn_metadata(([], [0], [], False, False, [], [], [], False, False))
form019 = insn_metadata(([0, 1], [2], [0, 1, 2], True, False, [], [], [], False, False))
form020 = insn_metadata(([1], [2], [], False, False, [], [], [0], False, False))
form021 = insn_metadata(([0, 1], [], [0], False, False, [], [], [], False, False))
form022 = insn_metadata(([0, 1], [1], [], False, False, [], [], [], False, False))
form023 = insn_metadata(([1], [1], [], False, False, [], [], [0], False, False))
form024 = insn_metadata(([0], [2], [], True, False, [], [], [1], False, False))
form025 = insn_metadata(([1], [2], [], True, False, [], [], [0], False, False))
form026 = insn_metadata(([], [1], [], True, False, [], [], [0], False, False))
form027 = insn_metadata(([0], [], [], False, False, [], [], [], False, False))
form028 = insn_metadata(([], [0], [], True, False, [], [], [], False, False))
form029 = insn_metadata(([0, 1], [2], [1, 2], True, False, [], [], [], False, False))
form030 = insn_metadata(([0, 1], [], [], False, False, [], [], [], False, False))
form031 = insn_metadata(([0], [], [], False, False, [], [], [1], False, False))
form032 = insn_metadata(([], [], [], False, False, [], [], [0], False, False))
form033 = insn_metadata(([0], [1], [0, 1], True, False, [], [], [], False, False))
form034 = insn_metadata(([0, 1], [2], [], True, False, [], ['y'], [], False, False))
form035 = insn_metadata(([0], [2], [], True, False, [], ['y'], [1], False, False))
form036 = insn_metadata(([1], [2], [], True, False, [], ['y'], [0], False, False))
form037 = insn_metadata(([0, 1, 2], [], [], False, False, [], [], [], False, False))
form038 = insn_metadata(([0, 1], [], [], False, False, [], [], [2], False, False))
form039 = insn_metadata(([0, 2], [], [], False, False, [], [], [1], False, False))
form040 = insn_metadata(([1], [], [], False, False, [], [], [0], False, False))
form041 = insn_metadata(([0], [0], [], False, False, [], [], [], False, False))
form042 = insn_metadata(([0, 1], [], [0, 1], True, False, [], [], [], False, False))
form043 = insn_metadata(([0, 1], [2], [], True, False, ['y'], [], [], False, False))
form044 = insn_metadata(([0], [2], [], True, False, ['y'], [], [1], False, False))
form045 = insn_metadata(([1], [2], [], True, False, ['y'], [], [0], False, False))
form046 = insn_metadata(([0, 1], [2], [], False, True, [], [], [], True, False))
form047 = insn_metadata(([0], [1], [], False, True, [], [], [], True, False))
form048 = insn_metadata(([], [1], [], False, True, [], [], [0], True, False))
form049 = insn_metadata(([0], [2], [], False, True, [], [], [1], True, False))
form050 = insn_metadata(([1], [2], [], False, True, [], [], [0], True, False))
form051 = insn_metadata(([0], [1], [1], False, False, [], [], [], False, False))
form052 = insn_metadata(([], [], [], False, True, [], [], [0], True, False))
form053 = insn_metadata(([0, 1, 2], [], [0], False, False, [], [], [], False, False))
form054 = insn_metadata(([0], [1], [0], False, False, [], [], [], False, False))
form055 = insn_metadata(([], [], [], False, True, [15], [], [], True, False))
form056 = insn_metadata(([], [], [], True, False, [], [], [], False, False))
form057 = insn_metadata(([0], [1], [0, 1], False, False, [], [], [], False, False))
form058 = insn_metadata(([], [], [], False, True, [31], [], [], True, False))
form059 = insn_metadata(([], [], [], False, True, [], [15], [0], True, False))
form060 = insn_metadata(([0, 1], [], [], False, True, [], [15], [], True, False))
form061 = insn_metadata(([0], [], [], False, True, [], [15], [], True, False))
form062 = insn_metadata(([0], [], [], False, True, [], [15], [1], True, False))
form063 = insn_metadata(([1], [], [], False, True, [], [15], [0], True, False))
form064 = insn_metadata(([0, 1], [], [], False, True, [], [], [], True, False))
form065 = insn_metadata(([0], [], [], False, True, [], [], [], True, False))
form066 = insn_metadata(([0], [], [], False, True, [], [], [1], True, False))
form067 = insn_metadata(([1], [], [], False, True, [], [], [0], True, False))
form068 = insn_metadata(([0, 1], [], [1], True, False, [], [], [], False, False))
form069 = insn_metadata(([0, 1], [], [0], False, False, [], [], [2], False, False))
form070 = insn_metadata(([0, 2], [], [0], False, False, [], [], [1], False, False))
form071 = insn_metadata(([0], [], [0], False, False, [], [], [1], False, False))
form072 = insn_metadata(([0], [], [], False, False, [], [], [], True, False))
insninfo = {
'add' : [
(re001, form001),
(re003, form003),
(re019, form020),
],
'addcc' : [
(re001, form001),
(re003, form003),
(re019, form020),
],
'addx' : [
(re001, form001),
(re003, form003),
(re019, form020),
],
'addxcc' : [
(re001, form001),
(re003, form003),
(re019, form020),
],
'allocate' : [
(re018, form028),
(re017, form005),
(re020, form026),
],
'allocates' : [
(re018, form028),
(re017, form005),
(re020, form026),
],
'allocatex' : [
(re018, form028),
(re017, form005),
(re020, form026),
],
'and' : [
(re001, form001),
(re003, form003),
(re019, form020),
],
'andcc' : [
(re001, form001),
(re003, form003),
(re019, form020),
],
'andn' : [
(re001, form001),
(re003, form003),
],
'andncc' : [
(re001, form001),
(re003, form003),
],
'b' : [
(re000, form052),
],
'b,a' : [
(re000, form052),
],
'ba' : [
(re000, form000),
],
'ba,a' : [
(re000, form000),
],
'bcc' : [
(re000, form000),
],
'bcc,a' : [
(re000, form000),
],
'bclr' : [
(re017, form022),
(re020, form023),
],
'bcs' : [
(re000, form000),
],
'bcs,a' : [
(re000, form000),
],
'be' : [
(re000, form000),
],
'be,a' : [
(re000, form000),
],
'beq' : [
(re000, form000),
],
'beq,a' : [
(re000, form000),
],
'bg' : [
(re000, form000),
],
'bg,a' : [
(re000, form000),
],
'bge' : [
(re000, form000),
],
'bge,a' : [
(re000, form000),
],
'bgeu' : [
(re000, form000),
],
'bgeu,a' : [
(re000, form000),
],
'bgt' : [
(re000, form000),
],
'bgt,a' : [
(re000, form000),
],
'bgu' : [
(re000, form000),
],
'bgu,a' : [
(re000, form000),
],
'bl' : [
(re000, form000),
],
'bl,a' : [
(re000, form000),
],
'ble' : [
(re000, form000),
],
'ble,a' : [
(re000, form000),
],
'bleu' : [
(re000, form000),
],
'bleu,a' : [
(re000, form000),
],
'blt' : [
(re000, form000),
],
'blt,a' : [
(re000, form000),
],
'blu' : [
(re000, form000),
],
'blu,a' : [
(re000, form000),
],
'bn' : [
(re000, form000),
],
'bn,a' : [
(re000, form000),
],
'bne' : [
(re000, form000),
],
'bne,a' : [
(re000, form000),
],
'bneg' : [
(re000, form000),
],
'bneg,a' : [
(re000, form000),
],
'bnz' : [
(re000, form000),
],
'bnz,a' : [
(re000, form000),
],
'bpos' : [
(re000, form000),
],
'bpos,a' : [
(re000, form000),
],
'break' : [
(re018, form027),
(re000, form032),
],
'bset' : [
(re017, form022),
(re020, form023),
],
'btog' : [
(re017, form022),
(re020, form023),
],
'btst' : [
(re017, form030),
(re020, form040),
],
'bvc' : [
(re000, form000),
],
'bvc,a' : [
(re000, form000),
],
'bvs' : [
(re000, form000),
],
'bvs,a' : [
(re000, form000),
],
'bz' : [
(re000, form000),
],
'bz,a' : [
(re000, form000),
],
'call' : [
(re000, form059),
(re091, form059),
(re087, form060),
(re092, form060),
(re018, form061),
(re093, form061),
(re088, form062),
(re094, form062),
(re089, form063),
(re095, form063),
(re000, form059),
(re091, form059),
(re018, form061),
(re093, form061),
],
'clr' : [
(re018, form018),
(re018, form018),
(re077, form030),
(re078, form027),
(re079, form031),
(re080, form040),
(re081, form032),
(re078, form027),
],
'clrb' : [
(re077, form030),
(re078, form027),
(re079, form031),
(re080, form040),
(re081, form032),
(re078, form027),
],
'clrh' : [
(re077, form030),
(re078, form027),
(re079, form031),
(re080, form040),
(re081, form032),
(re078, form027),
],
'cmp' : [
(re017, form030),
(re031, form031),
],
'cpop1' : [
(re007, form001),
],
'cpop2' : [
(re007, form001),
],
'create' : [
(re017, form005),
],
'cred' : [
(re020, form015),
],
'crei' : [
(re017, form012),
],
'dec' : [
(re018, form041),
(re020, form023),
],
'deccc' : [
(re018, form041),
(re020, form023),
],
'detach' : [
(re018, form027),
],
'f_alloc' : [
(re018, form018),
],
'f_break' : [
(re002, form002),
(re018, form027),
],
'f_create' : [
(re017, form012),
(re001, form004),
(re031, form014),
(re003, form024),
],
'f_fence' : [
(re017, form030),
(re001, form001),
(re031, form031),
(re003, form003),
(re018, form027),
(re000, form032),
],
'f_freesrb' : [
(re018, form027),
],
'f_get_blockindex' : [
(re017, form017),
(re018, form018),
],
'f_get_blocksize' : [
(re017, form017),
(re018, form018),
],
'f_get_gridsize' : [
(re017, form017),
(re018, form018),
],
'f_mapg' : [
(re001, form001),
(re003, form003),
(re017, form030),
(re031, form031),
],
'f_maphtg' : [
(re001, form001),
(re003, form003),
(re017, form030),
(re031, form031),
],
'f_set_blocksize' : [
(re017, form030),
(re031, form031),
],
'f_set_gridsize' : [
(re017, form030),
(re031, form031),
],
'fabss' : [
(re017, form017),
],
'faddd' : [
(re001, form029),
],
'faddq' : [
(re001, form019),
],
'fadds' : [
(re001, form004),
],
'faddx' : [
(re001, form019),
],
'fb' : [
(re000, form000),
],
'fb,a' : [
(re000, form000),
],
'fba' : [
(re000, form000),
],
'fba,a' : [
(re000, form000),
],
'fbe' : [
(re000, form000),
],
'fbe,a' : [
(re000, form000),
],
'fbg' : [
(re000, form000),
],
'fbg,a' : [
(re000, form000),
],
'fbge' : [
(re000, form000),
],
'fbge,a' : [
(re000, form000),
],
'fbl' : [
(re000, form000),
],
'fbl,a' : [
(re000, form000),
],
'fble' : [
(re000, form000),
],
'fble,a' : [
(re000, form000),
],
'fblg' : [
(re000, form000),
],
'fblg,a' : [
(re000, form000),
],
'fbn' : [
(re000, form000),
],
'fbn,a' : [
(re000, form000),
],
'fbne' : [
(re000, form000),
],
'fbne,a' : [
(re000, form000),
],
'fbnz' : [
(re000, form000),
],
'fbnz,a' : [
(re000, form000),
],
'fbo' : [
(re000, form000),
],
'fbo,a' : [
(re000, form000),
],
'fbu' : [
(re000, form000),
],
'fbu,a' : [
(re000, form000),
],
'fbue' : [
(re000, form000),
],
'fbue,a' : [
(re000, form000),
],
'fbug' : [
(re000, form000),
],
'fbug,a' : [
(re000, form000),
],
'fbuge' : [
(re000, form000),
],
'fbuge,a' : [
(re000, form000),
],
'fbul' : [
(re000, form000),
],
'fbul,a' : [
(re000, form000),
],
'fbule' : [
(re000, form000),
],
'fbule,a' : [
(re000, form000),
],
'fbz' : [
(re000, form000),
],
'fbz,a' : [
(re000, form000),
],
'fcmpd' : [
(re017, form068),
],
'fcmped' : [
(re017, form068),
],
'fcmpeq' : [
(re017, form042),
],
'fcmpes' : [
(re017, form012),
],
'fcmpex' : [
(re017, form042),
],
'fcmpq' : [
(re017, form042),
],
'fcmps' : [
(re017, form012),
],
'fcmpx' : [
(re017, form042),
],
'fdivd' : [
(re001, form029),
],
'fdivq' : [
(re001, form019),
],
'fdivs' : [
(re001, form004),
],
'fdivx' : [
(re001, form019),
],
'fdmulq' : [
(re001, form029),
],
'fdmulx' : [
(re001, form029),
],
'fdtoi' : [
(re017, form054),
],
'fdtoq' : [
(re017, form057),
],
'fdtos' : [
(re017, form054),
],
'fgets' : [
(re003, form024),
],
'fitod' : [
(re017, form051),
],
'fitoq' : [
(re017, form051),
],
'fitos' : [
(re017, form017),
],
'flush' : [
(re087, form012),
(re018, form013),
(re018, form013),
(re000, form016),
(re088, form014),
(re089, form015),
],
'fmovs' : [
(re017, form017),
],
'fmuld' : [
(re001, form029),
],
'fmulq' : [
(re001, form019),
],
'fmuls' : [
(re001, form004),
],
'fmulx' : [
(re001, form019),
],
'fnegs' : [
(re017, form017),
],
'fprintd' : [
(re017, form030),
],
'fprintq' : [
(re017, form021),
],
'fprints' : [
(re017, form030),
],
'fputg' : [
(re090, form038),
],
'fputs' : [
(re090, form038),
],
'fqtod' : [
(re017, form057),
],
'fqtoi' : [
(re017, form054),
],
'fqtos' : [
(re017, form054),
],
'fsmuld' : [
(re001, form007),
],
'fsqrtd' : [
(re017, form033),
],
'fsqrtq' : [
(re017, form033),
],
'fsqrts' : [
(re017, form005),
],
'fsqrtx' : [
(re017, form033),
],
'fstod' : [
(re017, form051),
],
'fstoi' : [
(re017, form017),
],
'fstoq' : [
(re017, form051),
],
'fsubd' : [
(re001, form029),
],
'fsubq' : [
(re001, form019),
],
'fsubs' : [
(re001, form004),
],
'fsubx' : [
(re001, form019),
],
'getcid' : [
(re018, form018),
],
'getfid' : [
(re018, form018),
],
'getpid' : [
(re018, form018),
],
'gets' : [
(re003, form024),
],
'gettid' : [
(re018, form018),
],
'iflush' : [
(re087, form012),
(re018, form013),
(re018, form013),
(re000, form016),
(re088, form014),
(re089, form015),
],
'inc' : [
(re018, form041),
(re020, form023),
],
'inccc' : [
(re018, form041),
(re020, form023),
],
'jmp' : [
(re087, form064),
(re018, form065),
(re088, form066),
(re089, form067),
(re000, form052),
(re018, form065),
],
'jmpl' : [
(re084, form046),
(re017, form047),
(re017, form047),
(re020, form048),
(re085, form049),
(re086, form050),
],
'launch' : [
(re018, form072),
],
'ld' : [
(re007, form004),
(re008, form005),
(re009, form024),
(re010, form025),
(re011, form026),
(re008, form005),
(re007, form004),
(re008, form005),
(re009, form024),
(re010, form025),
(re011, form026),
(re008, form005),
(re021, form012),
(re022, form013),
(re023, form014),
(re024, form015),
(re025, form016),
(re022, form013),
(re012, form012),
(re013, form013),
(re014, form014),
(re015, form015),
(re016, form016),
(re013, form013),
(re026, form012),
(re027, form013),
(re028, form014),
(re029, form015),
(re030, form016),
(re027, form013),
],
'lda' : [
(re004, form004),
(re005, form005),
],
'ldbp' : [
(re018, form018),
],
'ldd' : [
(re007, form007),
(re008, form008),
(re009, form009),
(re010, form010),
(re011, form011),
(re008, form008),
(re007, form007),
(re008, form008),
(re009, form009),
(re010, form010),
(re011, form011),
(re008, form008),
(re012, form012),
(re013, form013),
(re014, form014),
(re015, form015),
(re016, form016),
(re013, form013),
],
'ldda' : [
(re004, form004),
(re005, form005),
],
'ldfp' : [
(re018, form018),
],
'ldsb' : [
(re007, form004),
(re008, form005),
(re009, form024),
(re010, form025),
(re011, form026),
(re008, form005),
],
'ldsba' : [
(re004, form004),
(re005, form005),
],
'ldsh' : [
(re008, form005),
(re007, form004),
(re009, form024),
(re010, form025),
(re011, form026),
(re008, form005),
],
'ldsha' : [
(re004, form004),
(re005, form005),
],
'ldstub' : [
(re007, form004),
(re008, form005),
(re009, form024),
(re010, form025),
(re011, form026),
(re008, form005),
],
'ldstuba' : [
(re004, form004),
(re005, form005),
],
'ldub' : [
(re007, form004),
(re008, form005),
(re009, form024),
(re010, form025),
(re011, form026),
(re008, form005),
],
'lduba' : [
(re004, form004),
(re005, form005),
],
'lduh' : [
(re007, form004),
(re008, form005),
(re009, form024),
(re010, form025),
(re011, form026),
(re008, form005),
],
'lduha' : [
(re004, form004),
(re005, form005),
],
'mov' : [
(re032, form030),
(re033, form031),
(re034, form030),
(re035, form031),
(re036, form030),
(re037, form031),
(re038, form030),
(re039, form031),
(re040, form030),
(re041, form031),
(re042, form018),
(re043, form018),
(re044, form018),
(re045, form018),
(re046, form018),
(re047, form027),
(re048, form032),
(re047, form027),
(re049, form027),
(re050, form032),
(re049, form027),
(re051, form027),
(re052, form032),
(re051, form027),
(re053, form027),
(re054, form032),
(re053, form027),
(re055, form027),
(re056, form032),
(re055, form027),
(re017, form017),
(re020, form006),
(re017, form017),
(re017, form017),
],
'mulscc' : [
(re001, form043),
(re003, form044),
],
'neg' : [
(re017, form017),
(re018, form041),
],
'nop' : [
(re002, form002),
],
'not' : [
(re017, form017),
(re018, form041),
],
'or' : [
(re001, form001),
(re003, form003),
(re019, form020),
],
'orcc' : [
(re001, form001),
(re003, form003),
(re019, form020),
],
'orn' : [
(re001, form001),
(re003, form003),
],
'orncc' : [
(re001, form001),
(re003, form003),
],
'print' : [
(re017, form030),
(re031, form031),
],
'putg' : [
(re090, form038),
],
'puts' : [
(re090, form038),
],
'r_allocsrb' : [
(re017, form017),
(re020, form006),
],
'r_read' : [
(re017, form005),
],
'r_write' : [
(re017, form012),
(re001, form004),
(re031, form014),
(re003, form024),
],
'rd' : [
(re042, form018),
(re043, form018),
(re044, form018),
(re045, form018),
(re106, form006),
(re046, form018),
],
'release' : [
(re018, form027),
],
'restore' : [
(re001, form001),
(re002, form002),
(re003, form003),
(re002, form002),
],
'ret' : [
(re002, form055),
],
'retl' : [
(re002, form058),
],
'rett' : [
(re087, form064),
(re018, form065),
(re088, form066),
(re089, form067),
(re000, form052),
(re000, form052),
(re018, form065),
],
'save' : [
(re001, form001),
(re003, form003),
(re002, form002),
],
'sdiv' : [
(re001, form043),
(re003, form044),
(re019, form045),
],
'sdivcc' : [
(re001, form043),
(re003, form044),
(re019, form045),
],
'set' : [
(re006, form006),
],
'setblock' : [
(re017, form030),
(re031, form031),
],
'sethi' : [
(re020, form006),
],
'setarg' : [
(re017, form030),
(re031, form031),
],
'setlimit' : [
(re017, form030),
(re031, form031),
],
'setstart' : [
(re017, form030),
(re031, form031),
],
'setstep' : [
(re017, form030),
(re031, form031),
],
'setthread' : [
(re017, form030),
(re031, form031),
],
'sll' : [
(re001, form001),
(re003, form003),
],
'smul' : [
(re001, form034),
(re003, form035),
(re019, form036),
],
'smulcc' : [
(re001, form034),
(re003, form035),
(re019, form036),
],
'spill' : [
(re057, form037),
(re058, form030),
(re059, form038),
(re060, form039),
(re061, form031),
(re058, form030),
],
'spilld' : [
(re057, form053),
(re058, form021),
(re059, form069),
(re060, form070),
(re061, form071),
(re058, form021),
],
'sra' : [
(re001, form001),
(re003, form003),
],
'srl' : [
(re001, form001),
(re003, form003),
],
'st' : [
(re057, form037),
(re058, form030),
(re059, form038),
(re060, form039),
(re061, form031),
(re058, form030),
(re057, form037),
(re058, form030),
(re059, form038),
(re060, form039),
(re061, form031),
(re058, form030),
(re062, form030),
(re063, form027),
(re064, form031),
(re065, form040),
(re066, form032),
(re063, form027),
(re067, form030),
(re068, form027),
(re069, form031),
(re070, form040),
(re071, form032),
(re068, form027),
(re072, form030),
(re073, form027),
(re074, form031),
(re075, form040),
(re076, form032),
(re073, form027),
],
'sta' : [
(re082, form037),
(re083, form030),
],
'stb' : [
(re057, form037),
(re058, form030),
(re059, form038),
(re060, form039),
(re061, form031),
(re058, form030),
],
'stba' : [
(re082, form037),
(re083, form030),
],
'stbar' : [
(re002, form056),
],
'std' : [
(re057, form053),
(re058, form021),
(re059, form069),
(re060, form070),
(re061, form071),
(re058, form021),
(re096, form030),
(re097, form027),
(re098, form031),
(re099, form040),
(re100, form032),
(re097, form027),
(re057, form053),
(re058, form021),
(re059, form069),
(re060, form070),
(re061, form071),
(re058, form021),
(re101, form030),
(re102, form027),
(re103, form031),
(re104, form040),
(re105, form032),
(re102, form027),
(re062, form030),
(re063, form027),
(re064, form031),
(re065, form040),
(re066, form032),
(re063, form027),
],
'stda' : [
(re082, form053),
(re083, form021),
],
'sth' : [
(re057, form037),
(re058, form030),
(re059, form038),
(re060, form039),
(re061, form031),
(re058, form030),
],
'stha' : [
(re082, form037),
(re083, form030),
],
'stsb' : [
(re057, form037),
(re058, form030),
(re059, form038),
(re060, form039),
(re061, form031),
(re058, form030),
],
'stsba' : [
(re082, form037),
(re083, form030),
],
'stsh' : [
(re057, form037),
(re058, form030),
(re059, form038),
(re060, form039),
(re061, form031),
(re058, form030),
],
'stsha' : [
(re082, form037),
(re083, form030),
],
'stub' : [
(re057, form037),
(re058, form030),
(re059, form038),
(re060, form039),
(re061, form031),
(re058, form030),
],
'stuba' : [
(re082, form037),
(re083, form030),
],
'stuh' : [
(re057, form037),
(re058, form030),
(re059, form038),
(re060, form039),
(re061, form031),
(re058, form030),
],
'stuha' : [
(re082, form037),
(re083, form030),
],
'sub' : [
(re001, form001),
(re003, form003),
],
'subcc' : [
(re001, form001),
(re003, form003),
],
'subx' : [
(re001, form001),
(re003, form003),
],
'subxcc' : [
(re001, form001),
(re003, form003),
],
'swap' : [
(re007, form004),
(re008, form005),
(re009, form024),
(re010, form025),
(re011, form026),
(re008, form005),
],
'swapa' : [
(re004, form004),
(re005, form005),
],
'sync' : [
(re017, form005),
],
't_allochtg' : [
(re001, form001),
(re003, form003),
(re001, form004),
(re003, form024),
],
't_end' : [
(re002, form002),
],
't_freehtg' : [
(re018, form027),
],
't_get_fid' : [
(re018, form018),
],
't_get_pindex' : [
(re018, form018),
],
't_get_tid' : [
(re018, form018),
],
't_wait' : [
(re002, form002),
(re018, form027),
],
'taddcc' : [
(re001, form001),
(re003, form003),
(re019, form020),
],
'taddcctv' : [
(re001, form001),
(re003, form003),
(re019, form020),
],
'tst' : [
(re018, form027),
(re018, form027),
(re018, form027),
],
'tsubcc' : [
(re001, form001),
(re003, form003),
],
'tsubcctv' : [
(re001, form001),
(re003, form003),
],
'udiv' : [
(re001, form043),
(re003, form044),
(re019, form045),
],
'udivcc' : [
(re001, form043),
(re003, form044),
(re019, form045),
],
'umul' : [
(re001, form034),
(re003, form035),
(re019, form036),
],
'umulcc' : [
(re001, form034),
(re003, form035),
(re019, form036),
],
'unimp' : [
(re000, form032),
],
'wr' : [
(re032, form030),
(re033, form031),
(re047, form027),
(re034, form030),
(re035, form031),
(re049, form027),
(re036, form030),
(re037, form031),
(re051, form027),
(re038, form030),
(re039, form031),
(re053, form027),
(re040, form030),
(re041, form031),
(re055, form027),
],
'xnor' : [
(re001, form001),
(re003, form003),
(re019, form020),
],
'xnorcc' : [
(re001, form001),
(re003, form003),
(re019, form020),
],
'xor' : [
(re001, form001),
(re003, form003),
(re019, form020),
],
'xorcc' : [
(re001, form001),
(re003, form003),
(re019, form020),
],
}
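# --- Illustrative usage (a sketch, not part of the generated tables) ---
# Each insninfo entry maps an opcode name to (compiled operand regex,
# insn_metadata) pairs; a helper like the one below could classify an operand
# string.  The opcode/operand values in the commented call are hypothetical.
def lookup_insn(opcode, operands):
    """Return (match, metadata) for the first pattern matching operands, else None."""
    for pattern, meta in insninfo.get(opcode, []):
        m = pattern.match(operands)
        if m is not None:
            return m, meta
    return None
# m_meta = lookup_insn('add', ' %i0, 4, %o0')
# if m_meta is not None:
#     m, meta = m_meta  # meta.inputs/outputs/immediates list operand positions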
| gpl-3.0 | -8,466,637,602,393,314,000 | 25.185984 | 183 | 0.426608 | false | 2.525509 | false | false | false |
Pinafore/qb | qanta/wikipedia/cached_wikipedia.py | 2 | 7744 | from typing import Dict
import os
import json
import csv
import pickle
import re
from collections import namedtuple
import nltk
from unidecode import unidecode
from qanta import qlogging
from qanta.datasets.quiz_bowl import QantaDatabase
from qanta.util.constants import (
COUNTRY_LIST_PATH,
WIKI_DUMP_REDIRECT_PICKLE,
WIKI_LOOKUP_PATH,
)
log = qlogging.get(__name__)
COUNTRY_SUB = ["History_of_", "Geography_of_"]
WikipediaPage = namedtuple("WikipediaPage", ["id", "title", "text", "url"])
def normalize_wikipedia_title(title):
return title.replace(" ", "_")
def create_wikipedia_title_pickle(dump_path, disambiguation_pages_path, output_path):
from qanta.spark import create_spark_session
with open(disambiguation_pages_path) as f:
disambiguation_pages = set(json.load(f))
spark = create_spark_session()
wiki_df = spark.read.json(dump_path)
rows = wiki_df.select("title", "id").distinct().collect()
content_pages = [r for r in rows if int(r.id) not in disambiguation_pages]
clean_titles = {normalize_wikipedia_title(r.title) for r in content_pages}
with open(output_path, "wb") as f:
pickle.dump(clean_titles, f)
spark.stop()
def create_wikipedia_cache(
parsed_wiki_path="data/external/wikipedia/parsed-wiki", output_path=WIKI_LOOKUP_PATH
):
from qanta.spark import create_spark_context
sc = create_spark_context()
db = QantaDatabase()
train_questions = db.train_questions
answers = {q.page for q in train_questions}
b_answers = sc.broadcast(answers)
# Paths used in spark need to be absolute and it needs to exist
page_path = os.path.abspath(parsed_wiki_path)
page_pattern = os.path.join(page_path, "*", "*")
def parse_page(json_text):
page = json.loads(json_text)
return {
"id": int(page["id"]),
"title": page["title"].replace(" ", "_"),
"text": page["text"],
"url": page["url"],
}
wiki_pages = (
sc.textFile(page_pattern)
.map(parse_page)
.filter(lambda p: p["title"] in b_answers.value)
.collect()
)
wiki_lookup = {p["title"]: p for p in wiki_pages}
with open(output_path, "w") as f:
json.dump(wiki_lookup, f)
return wiki_lookup
def create_wikipedia_redirect_pickle(redirect_csv, output_pickle):
countries = {}
with open(COUNTRY_LIST_PATH) as f:
for line in f:
k, v = line.split("\t")
countries[k] = v.strip()
db = QantaDatabase()
pages = {q.page for q in db.train_questions}
with open(redirect_csv) as redirect_f:
redirects = {}
n_total = 0
n_selected = 0
for row in csv.reader(redirect_f, quotechar='"', escapechar="\\"):
n_total += 1
source = row[0]
target = row[1]
if (
target not in pages
or source in countries
or target.startswith("WikiProject")
or target.endswith("_topics")
or target.endswith("_(overview)")
):
continue
else:
redirects[source] = target
n_selected += 1
log.info(
"Filtered {} raw wikipedia redirects to {} matching redirects".format(
n_total, n_selected
)
)
with open(output_pickle, "wb") as output_f:
pickle.dump(redirects, output_f)
def extract_wiki_sentences(title, text, n_sentences, replace_title_mentions=""):
"""
    Extracts the first n_sentences from the text of a wikipedia page corresponding to the title.
    replace_title_mentions controls how references to the title within the text are handled.
    Oftentimes QA models learn *not* to answer entities mentioned in the question so this helps deal with this
    in the domain adaptation case.
    :param title: title of page
    :param text: text of page
    :param n_sentences: number of sentences to keep from the start of the page
    :param replace_title_mentions: Replace title mentions with the provided string token, by default removing them
    :return: list of the first n_sentences sentences with title mentions replaced
"""
# Get simplest representation of title and text
title = unidecode(title).replace("_", " ")
text = unidecode(text)
# Split on non-alphanumeric
title_words = re.split("[^a-zA-Z0-9]", title)
title_word_pattern = "|".join(re.escape(w.lower()) for w in title_words)
    # Breaking by newline yields paragraphs. Ignore the first since it's always just the title
paragraphs = [p for p in text.split("\n") if len(p) != 0][1:]
sentences = []
for p in paragraphs:
formatted_text = re.sub(
title_word_pattern, replace_title_mentions, p, flags=re.IGNORECASE
)
# Cleanup whitespace
        formatted_text = re.sub(r"\s+", " ", formatted_text).strip()
sentences.extend(nltk.sent_tokenize(formatted_text))
return sentences[:n_sentences]
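# Illustrative call (hypothetical title/text): the first newline-separated block
# is dropped as the title, title words are replaced by the given token, and the
# first n_sentences sentences of what remains are returned.
# extract_wiki_sentences(
#     "Albert Einstein",
#     "Albert Einstein\nAlbert Einstein was a physicist. He developed relativity.",
#     n_sentences=1,
#     replace_title_mentions="[TITLE]",
# )  # -> ['[TITLE] [TITLE] was a physicist.']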
class Wikipedia:
def __init__(
self, lookup_path=WIKI_LOOKUP_PATH, dump_redirect_path=WIKI_DUMP_REDIRECT_PICKLE
):
"""
        This class provides a unified and easy way to access Wikipedia pages. Its design is motivated by:
1) Getting a wikipedia page should function as a simple python dictionary access
2) It should support access to pages using non-canonical names by resolving them to canonical names
The following sections explain how the different levels of caching work as well as how redirects work
Redirects
To support some flexibility in requesting pages that are very close matches we have two sources of redirects.
The first is based on wikipedia database dumps which is the most reliable. On top
of this we do the very light preprocessing step of replacing whitespace with underscores since the canonical
        page names in the wikipedia database dumps contain an underscore instead of whitespace (a difference from the
HTTP package which defaults to the opposite)
"""
self.countries = {}
self.redirects = {}
self.lookup_path = lookup_path
self.dump_redirect_path = dump_redirect_path
with open(lookup_path, "rb") as f:
raw_lookup: Dict[str, Dict] = json.load(f)
self.lookup: Dict[str, WikipediaPage] = {
title: WikipediaPage(
page["id"], page["title"], page["text"], page["url"]
)
for title, page in raw_lookup.items()
}
if COUNTRY_LIST_PATH:
with open(COUNTRY_LIST_PATH) as f:
for line in f:
k, v = line.split("\t")
self.countries[k] = v.replace(" ", "_").strip()
if os.path.exists(self.dump_redirect_path):
with open(self.dump_redirect_path, "rb") as f:
self.redirects = pickle.load(f)
else:
raise ValueError(
f"{self.dump_redirect_path} missing, run: luigi --module qanta.pipeline.preprocess "
f"WikipediaRedirectPickle"
)
def load_country(self, key: str):
        # Build the returned page from the country page itself plus the text of
        # the related "History_of_" and "Geography_of_" pages when present.
        content = self.lookup[key]
        text = content.text
        for page in [f"{prefix}{self.countries[key]}" for prefix in COUNTRY_SUB]:
            if page in self.lookup:
                text = text + " " + self.lookup[page].text
        return content._replace(text=text)
def __getitem__(self, key: str) -> WikipediaPage:
if key in self.countries:
return self.load_country(key)
else:
return self.lookup[key]
def __contains__(self, item):
return item in self.lookup
def __len__(self):
return len(self.lookup)
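# Illustrative usage (titles are hypothetical; paths default to the constants
# above): the lookup behaves like a read-only mapping keyed by canonical,
# underscore-separated page titles.
# wiki = Wikipedia()
# if "Albert_Einstein" in wiki:
#     page = wiki["Albert_Einstein"]
#     print(page.title, page.url, len(page.text))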
| mit | 3,321,599,547,326,466,600 | 33.726457 | 118 | 0.615444 | false | 3.917046 | false | false | false |
chrisjsewell/ipymd | ipymd/plotting/animation_examples/3d_contour.py | 1 | 1105 | # -*- coding: utf-8 -*-
"""
Created on Sun Jul 17 21:56:26 2016
@author: cjs14
http://scicomp.stackexchange.com/questions/7030/plotting-a-2d-animated-data-surface-on-matplotlib
"""
from mpl_toolkits.mplot3d import axes3d
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from IPython.display import HTML
from ipymd.plotting.JSAnimation.IPython_display import display_animation
def generate(X, Y, phi):
R = 1 - np.sqrt(X**2 + Y**2)
return np.cos(2 * np.pi * X + phi) * R
fig = plt.figure()
ax = axes3d.Axes3D(fig)
#plt.close()
xs = np.linspace(-1, 1, 50)
ys = np.linspace(-1, 1, 50)
X, Y = np.meshgrid(xs, ys)
Z = generate(X, Y, 0.0)
wframe = ax.plot_wireframe(X, Y, Z, rstride=2, cstride=2)
ax.set_zlim(-1,1)
def update(i, ax, fig):
ax.cla()
phi = i * 360 / 2 / np.pi / 100
Z = generate(X, Y, phi)
wframe = ax.plot_wireframe(X, Y, Z, rstride=2, cstride=2)
ax.set_zlim(-1,1)
return wframe,
ani = animation.FuncAnimation(fig, update,
frames=xrange(100),
fargs=(ax, fig), interval=100)
display_animation(ani) | gpl-3.0 | 6,210,049,458,574,649,000 | 24.72093 | 97 | 0.663348 | false | 2.612293 | false | false | false |
jamesliu96/renren | bin/renren.py | 1 | 3619 | #-*- coding:UTF-8 -*-
#! /usr/bin/python
from sgmllib import SGMLParser
import sys,urllib,urllib2,cookielib
import datetime
import time
import getpass
class spider(SGMLParser):
def __init__(self,email,password):
SGMLParser.__init__(self)
self.h3=False
self.h3_is_ready=False
self.div=False
self.h3_and_div=False
self.a=False
self.depth=0
self.names=""
self.dic={}
self.email=email
self.password=password
self.domain='renren.com'
try:
cookie=cookielib.CookieJar()
cookieProc=urllib2.HTTPCookieProcessor(cookie)
except:
raise
else:
opener=urllib2.build_opener(cookieProc)
urllib2.install_opener(opener)
def login(self):
        print '[%s] Logging in' % datetime.datetime.now()
url='http://www.renren.com/PLogin.do'
postdata={
'email':self.email,
'password':self.password,
'domain':self.domain
}
try:
req=urllib2.Request(url,urllib.urlencode(postdata))
self.file=urllib2.urlopen(req).read()
idPos = self.file.index("'id':'")
self.id=self.file[idPos+6:idPos+15]
tokPos=self.file.index("get_check:'")
self.tok=self.file[tokPos+11:tokPos+21]
rtkPos=self.file.index("get_check_x:'")
self.rtk=self.file[rtkPos+13:rtkPos+21]
            print '[%s] Login successful' % datetime.datetime.now()
except:
            print '[%s] Login failed' % datetime.datetime.now()
sys.exit()
def publish(self,content):
url='http://shell.renren.com/'+self.id+'/status'
postdata={
'content':content,
'hostid':self.id,
'requestToken':self.tok,
'_rtk':self.rtk,
'channel':'renren',
}
req=urllib2.Request(url,urllib.urlencode(postdata))
self.file=urllib2.urlopen(req).read()
        print '[%s] Account %s just posted a status: %s' % (datetime.datetime.now(),self.email,postdata.get('content',''))
def visit(self,content):
url='http://www.renren.com/'+content+'/profile'
self.file=urllib2.urlopen(url).read()
        print '[%s] Account %s just visited profile ID %s' % (datetime.datetime.now(),self.email,content)
def start_h3(self,attrs):
self.h3 = True
def end_h3(self):
self.h3=False
self.h3_is_ready=True
def start_a(self,attrs):
if self.h3 or self.div:
self.a=True
def end_a(self):
self.a=False
def start_div(self,attrs):
if self.h3_is_ready == False:
return
if self.div==True:
self.depth += 1
for k,v in attrs:
if k == 'class' and v == 'content':
self.div=True;
self.h3_and_div=True
def end_div(self):
if self.depth == 0:
self.div=False
self.h3_and_div=False
self.h3_is_ready=False
self.names=""
if self.div == True:
self.depth-=1
def handle_data(self,text):
if self.h3 and self.a:
self.names+=text
if self.h3 and (self.a==False):
if not text:pass
else: self.dic.setdefault(self.names,[]).append(text)
return
if self.h3_and_div:
self.dic.setdefault(self.names,[]).append(text)
def show(self):
type = sys.getfilesystemencoding()
for key in self.dic:
print ( (''.join(key)).replace(' ','')).decode('utf-8').encode(type), \
( (''.join(self.dic[key])).replace(' ','')).decode('utf-8').encode(type)
email=raw_input('Enter username: ')
password=getpass.getpass('Enter password: ')
renrenspider=spider(email,password)
renrenspider.login()
mode=999
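# Operation codes read below: 120 = post a status update, 200 = visit a
# profile by ID, 100 = parse the last fetched page and print the extracted
# fields, 000 = quit.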
while(mode!='000'):
    mode=raw_input('Enter operation code: ')
if(mode=='120'):
        content=raw_input('Enter status text: ')
renrenspider.publish(content)
if(mode=='200'):
        content=raw_input('Enter the profile ID to visit: ')
renrenspider.visit(content)
if(mode=='100'):
renrenspider.feed(renrenspider.file)
renrenspider.show()
sys.exit()
| mit | -524,980,596,590,272,100 | 23.5 | 100 | 0.660822 | false | 2.506484 | false | false | false |
GEANT/met | met/metadataparser/templatetags/metadataparsertags.py | 2 | 13270 | #################################################################
# MET v2 Metadata Explorer Tool
#
# This Software is Open Source. See License: https://github.com/TERENA/met/blob/master/LICENSE.md
# Copyright (c) 2012, TERENA All rights reserved.
#
# This Software is based on MET v1 developed for TERENA by Yaco Sistemas, http://www.yaco.es/
# MET v2 was developed for TERENA by Tamim Ziai, DAASI International GmbH, http://www.daasi.de
# Current version of MET has been revised for performance improvements by Andrea Biancini,
# Consortium GARR, http://www.garr.it
##########################################################################
from django import template
from django.template.base import Node, TemplateSyntaxError
from django.template.defaultfilters import stringfilter
from django.utils.safestring import mark_safe, SafeData
from met.metadataparser.models import Federation
from met.metadataparser.xmlparser import DESCRIPTOR_TYPES, DESCRIPTOR_TYPES_DISPLAY
from met.metadataparser.query_export import export_modes
from met.metadataparser.summary_export import export_summary_modes
from urllib import urlencode
register = template.Library()
class AddGetParameter(Node):
def __init__(self, values):
self.values = values
def render(self, context):
req = template.resolve_variable('request', context)
params = req.GET.copy()
for key, value in self.values.items():
params[key] = value.resolve(context)
return '?%s' % params.urlencode()
@register.tag()
def add_get(parser, token):
pairs = token.split_contents()[1:]
values = {}
for pair in pairs:
s = pair.split('=', 1)
values[s[0]] = parser.compile_filter(s[1])
return AddGetParameter(values)
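# Illustrative template usage (parameter names are hypothetical):
#   {% add_get page=2 lang="en" %}
# renders the current request's query string with those keys overridden,
# e.g. "?entity_type=SP&page=2&lang=en".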
@register.inclusion_tag('metadataparser/bootstrap_form.html')
def bootstrap_form(form, cancel_link='..', delete_link=True):
return {'form': form,
'cancel_link': cancel_link,
'delete_link': delete_link}
@register.inclusion_tag('metadataparser/bootstrap_searchform.html')
def bootstrap_searchform(form):
return {'form': form}
@register.inclusion_tag('metadataparser/federations_summary_tag.html', takes_context=True)
def federations_summary(context, queryname, counts, federations=None):
if not federations:
federations = Federation.objects.all()
user = context.get('user', None)
add_federation = user and user.has_perm('metadataparser.add_federation')
return {'federations': federations,
'add_federation': add_federation,
'queryname': queryname,
'counts': counts,
'entity_types': DESCRIPTOR_TYPES}
@register.inclusion_tag('metadataparser/interfederations_summary_tag.html', takes_context=True)
def interfederations_summary(context, queryname, counts, federations=None):
if not federations:
federations = Federation.objects.all()
user = context.get('user', None)
add_federation = user and user.has_perm('metadataparser.add_federation')
return {'federations': federations,
'add_federation': add_federation,
'queryname': queryname,
'counts': counts,
'entity_types': DESCRIPTOR_TYPES}
@register.inclusion_tag('metadataparser/tag_entity_list.html', takes_context=True)
def entity_list(context, entities, categories=None, pagination=None, curfed=None, show_total=True, append_query=None, onclick_page=None, onclick_export=None):
request = context.get('request', None)
lang = 'en'
if request:
lang = request.GET.get('lang', 'en')
return {'request': request,
'entities': entities,
'categories': categories,
'curfed': curfed,
'show_filters': context.get('show_filters'),
'append_query': append_query,
'show_total': show_total,
'lang': lang,
'pagination': pagination,
'onclick_page': onclick_page,
'onclick_export': onclick_export,
'entity_types': DESCRIPTOR_TYPES}
@register.inclusion_tag('metadataparser/most_fed_entities_summary.html', takes_context=True)
def most_fed_entity_list(context, entities, categories=None, pagination=None, curfed=None, show_total=True, append_query=None, onclick_page=None, onclick_export=None):
request = context.get('request', None)
lang = 'en'
if request:
lang = request.GET.get('lang', 'en')
return {'request': request,
'entities': entities,
'categories': categories,
'curfed': curfed,
'show_filters': context.get('show_filters'),
'append_query': append_query,
'show_total': show_total,
'lang': lang,
'pagination': pagination,
'onclick_page': onclick_page,
'onclick_export': onclick_export,
'entity_types': DESCRIPTOR_TYPES}
@register.inclusion_tag('metadataparser/service_search_summary.html', takes_context=True)
def service_search_result(context, entities, categories=None, pagination=None, curfed=None, show_total=True, append_query=None, onclick_page=None, onclick_export=None):
request = context.get('request', None)
lang = 'en'
if request:
lang = request.GET.get('lang', 'en')
return {'request': request,
'entities': entities,
'categories': categories,
'curfed': curfed,
'show_filters': context.get('show_filters'),
'append_query': append_query,
'show_total': show_total,
'lang': lang,
'pagination': pagination,
'onclick_page': onclick_page,
'onclick_export': onclick_export,
'entity_types': DESCRIPTOR_TYPES}
@register.inclusion_tag('metadataparser/tag_entity_filters.html', takes_context=True)
def entity_filters(context, entities, categories):
entity_types = ('All', ) + DESCRIPTOR_TYPES
request = context.get('request')
entity_type = request.GET.get('entity_type', '')
entity_category = request.GET.get('entity_category', '')
rquery = request.GET.copy()
for filt in 'entity_type', 'entity_category', 'page':
if filt in rquery:
rquery.pop(filt)
if not entity_type:
entity_type = 'All'
if not entity_category:
entity_category = 'All'
query = urlencode(rquery)
filter_base_path = request.path
return {'filter_base_path': filter_base_path,
'otherparams': query,
'entity_types': entity_types,
'entity_type': entity_type,
'entity_category': entity_category,
'entities': entities,
'categories': categories}
@register.simple_tag()
def entity_filter_url(base_path, filt, otherparams=None):
url = base_path
if filt != 'All':
url += '?entity_type=%s' % filt
if otherparams:
url += '&%s' % otherparams
elif otherparams:
url += '?%s' % otherparams
return url
@register.simple_tag()
def entitycategory_filter_url(base_path, filt, otherparams=None):
url = base_path
if filt != 'All':
url += '?entity_category=%s' % filt
if otherparams:
url += '&%s' % otherparams
elif otherparams:
url += '?%s' % otherparams
return url
@register.inclusion_tag('metadataparser/export-menu.html', takes_context=True)
def export_menu(context, entities, append_query=None, onclick=None):
request = context.get('request')
copy_query = request.GET.copy()
if 'page' in copy_query:
copy_query.pop('page')
query = copy_query.urlencode()
base_path = request.path
formats = []
for mode in export_modes.keys():
url = base_path
if query:
url += '?%s&format=%s' % (query, mode)
else:
url += '?format=%s' % (mode)
if append_query:
url += "&%s" % (append_query)
formats.append({'url': url, 'label': mode, 'onclick': onclick})
return {'formats': formats}
@register.inclusion_tag('metadataparser/export-menu.html')
def export_summary_menu(query, onclick=None):
formats = []
for mode in export_summary_modes.keys():
urlquery = {'format': mode,
'export': query}
url = "./?%(query)s" % {'query': urlencode(urlquery)}
formats.append({'url': url, 'label': mode, 'onclick': onclick})
return {'formats': formats}
@register.simple_tag()
def entities_count(entity_qs, entity_type=None):
if entity_type and entity_type != 'All':
return entity_qs.filter(types__xmlname=entity_type).count()
else:
return entity_qs.count()
@register.simple_tag()
def get_fed_total(totals, entity_type='All'):
tot_count = 0
for curtotal in totals:
if entity_type == 'All' or curtotal['types__xmlname'] == entity_type:
tot_count += curtotal['types__xmlname__count']
return tot_count
@register.simple_tag()
def get_fed_count(counts, federation='All', entity_type='All'):
count = counts[entity_type]
fed_count = 0
for curcount in count:
if federation == 'All' or curcount['federations__id'] == federation:
fed_count += curcount['federations__id__count']
return fed_count
@register.simple_tag()
def get_fed_count_by_country(count, country='All'):
fed_count = 0
for curcount in count:
if country == 'All' or curcount['federations__country'] == country:
fed_count += curcount['federations__country__count']
return fed_count
@register.simple_tag(takes_context=True)
def l10n_property(context, prop, lang):
if isinstance(prop, dict) and len(prop) > 0:
if not lang:
lang = context.get('LANGUAGE_CODE', None)
if lang and lang in prop:
return prop.get(lang)
else:
return prop[prop.keys()[0]]
return prop
@register.simple_tag(takes_context=True)
def organization_property(context, organizations, prop, lang):
if not isinstance(organizations, list):
return prop
lang = lang or context.get('LANGUAGE_CODE', None)
val = None
for organization in organizations:
if prop in organization:
if val is None: val = organization[prop]
if organization['lang'] == lang:
val = organization[prop]
return val
@register.simple_tag()
def get_property(obj, prop=None):
uprop = unicode(prop)
if not uprop:
return '<a href="%(link)s">%(name)s</a>' % {"link": obj.get_absolute_url(),
"name": unicode(obj)}
if isinstance(obj, dict):
return obj.get(prop, None)
if getattr(getattr(obj, uprop, None), 'all', None):
return '. '.join(['<a href="%(link)s">%(name)s</a>' % {"link": item.get_absolute_url(),
"name": unicode(item)}
for item in getattr(obj, uprop).all()])
if isinstance(getattr(obj, uprop, ''), list):
return ', '.join(getattr(obj, uprop, []))
return getattr(obj, uprop, '')
@register.simple_tag(takes_context=True)
def active_url(context, pattern):
request = context.get('request')
if request.path == pattern:
return 'active'
return ''
@register.filter(name='display_etype')
def display_etype(value, separator=', '):
if isinstance(value, list):
return separator.join(value)
elif hasattr(value, 'all'):
return separator.join([unicode(item) for item in value.all()])
else:
if value in DESCRIPTOR_TYPES_DISPLAY:
return DESCRIPTOR_TYPES_DISPLAY.get(value)
else:
return value
@register.filter(name='mailto')
def mailto(value):
if value.startswith('mailto:'):
return value
else:
return 'mailto:%s' % value
@register.filter(name='wrap')
def wrap(value, length):
value = unicode(value)
if len(value) > length:
return "%s..." % value[:length]
return value
class CanEdit(Node):
child_nodelists = 'nodelist'
def __init__(self, obj, nodelist):
self.obj = obj
self.nodelist = nodelist
@classmethod
def __repr__(cls):
return "<CanEdit>"
def render(self, context):
obj = self.obj.resolve(context, True)
user = context.get('user')
if obj.can_edit(user, False):
return self.nodelist.render(context)
else:
return ''
def do_canedit(parser, token):
bits = list(token.split_contents())
if len(bits) != 2:
raise TemplateSyntaxError("%r takes 1 argument" % bits[0])
end_tag = 'end' + bits[0]
nodelist = parser.parse((end_tag,))
obj = parser.compile_filter(bits[1])
token = parser.next_token()
return CanEdit(obj, nodelist)
@register.tag
def canedit(parser, token):
"""
    Outputs the contents of the block if the user has edit permission
Examples::
{% canedit obj %}
...
{% endcanedit %}
"""
return do_canedit(parser, token)
@register.filter
@stringfilter
def split(value, splitter='|'):
if not isinstance(value, SafeData):
value = mark_safe(value)
return value.split(splitter)
| bsd-2-clause | 6,972,299,513,368,247,000 | 31.604423 | 168 | 0.616202 | false | 3.781704 | false | false | false |
pybee/briefcase | src/briefcase/linux.py | 1 | 1657 | import os
import subprocess
import sys
from .app import app
class linux(app):
description = "Create a Linux installer to wrap this project"
def finalize_options(self):
# Copy over all the options from the base 'app' command
finalized = self.get_finalized_command('app')
for attr in ('formal_name', 'organization_name', 'bundle', 'icon', 'guid', 'splash', 'download_dir'):
if getattr(self, attr) is None:
setattr(self, attr, getattr(finalized, attr))
super(linux, self).finalize_options()
# Set platform-specific options
self.platform = 'Linux'
if self.dir is None:
self.dir = 'linux'
self.resource_dir = self.dir
def install_icon(self):
raise RuntimeError("Linux doesn't support icons screens.")
def install_splash(self):
raise RuntimeError("Linux doesn't support splash screens.")
def install_support_package(self):
# No support package; we just use the system python
pass
@property
def launcher_header(self):
"""
Override the shebang line for launcher scripts
"""
return "#!python{}.{}\n".format(sys.version_info.major, sys.version_info.minor)
@property
def launcher_script_location(self):
return self.resource_dir
def build_app(self):
return True
def post_build(self):
pass
def start_app(self):
print("Starting {}".format(self.formal_name))
subprocess.Popen([
'./{}'.format(self.formal_name)
],
cwd=os.path.abspath(self.dir)
).wait()
| bsd-3-clause | -7,958,137,519,832,249,000 | 26.163934 | 109 | 0.602897 | false | 4.173804 | false | false | false |
luotao1/Paddle | python/paddle/fluid/contrib/slim/tests/quant_int8_image_classification_comparison.py | 1 | 12626 | # copyright (c) 2019 paddlepaddle authors. all rights reserved.
#
# licensed under the apache license, version 2.0 (the "license");
# you may not use this file except in compliance with the license.
# you may obtain a copy of the license at
#
# http://www.apache.org/licenses/license-2.0
#
# unless required by applicable law or agreed to in writing, software
# distributed under the license is distributed on an "as is" basis,
# without warranties or conditions of any kind, either express or implied.
# see the license for the specific language governing permissions and
# limitations under the license.
import unittest
import os
import sys
import argparse
import logging
import struct
import six
import numpy as np
import time
import paddle
import paddle.fluid as fluid
from paddle.fluid.framework import IrGraph
from paddle.fluid.contrib.slim.quantization import QuantInt8MkldnnPass
from paddle.fluid import core
paddle.enable_static()
logging.basicConfig(format='%(asctime)s-%(levelname)s: %(message)s')
_logger = logging.getLogger(__name__)
_logger.setLevel(logging.INFO)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', type=int, default=1, help='Batch size.')
parser.add_argument(
'--skip_batch_num',
type=int,
default=0,
help='Number of the first minibatches to skip in performance statistics.'
)
parser.add_argument(
'--debug',
action='store_true',
help='If used, the graph of Quant model is drawn.')
parser.add_argument(
'--quant_model', type=str, default='', help='A path to a Quant model.')
parser.add_argument('--infer_data', type=str, default='', help='Data file.')
parser.add_argument(
'--batch_num',
type=int,
default=0,
help='Number of batches to process. 0 or less means whole dataset. Default: 0.'
)
parser.add_argument(
'--acc_diff_threshold',
type=float,
default=0.01,
help='Accepted accuracy difference threshold.')
test_args, args = parser.parse_known_args(namespace=unittest)
return test_args, sys.argv[:1] + args
class QuantInt8ImageClassificationComparisonTest(unittest.TestCase):
"""
Test for accuracy comparison of Quant FP32 and INT8 Image Classification inference.
"""
def _reader_creator(self, data_file='data.bin'):
def reader():
with open(data_file, 'rb') as fp:
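                # Binary file layout: int64 sample count, then all images (3*224*224 float32 each), then int64 labels.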
num = fp.read(8)
num = struct.unpack('q', num)[0]
imgs_offset = 8
img_ch = 3
img_w = 224
img_h = 224
img_pixel_size = 4
img_size = img_ch * img_h * img_w * img_pixel_size
label_size = 8
labels_offset = imgs_offset + num * img_size
step = 0
while step < num:
fp.seek(imgs_offset + img_size * step)
img = fp.read(img_size)
img = struct.unpack_from(
'{}f'.format(img_ch * img_w * img_h), img)
img = np.array(img)
img.shape = (img_ch, img_w, img_h)
fp.seek(labels_offset + label_size * step)
label = fp.read(label_size)
label = struct.unpack('q', label)[0]
yield img, int(label)
step += 1
return reader
def _get_batch_accuracy(self, batch_output=None, labels=None):
total = 0
correct = 0
correct_5 = 0
for n, result in enumerate(batch_output):
index = result.argsort()
top_1_index = index[-1]
top_5_index = index[-5:]
total += 1
if top_1_index == labels[n]:
correct += 1
if labels[n] in top_5_index:
correct_5 += 1
acc1 = float(correct) / float(total)
acc5 = float(correct_5) / float(total)
return acc1, acc5
def _prepare_for_fp32_mkldnn(self, graph):
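        # Rewrite depthwise_conv2d ops as plain conv2d nodes (same inputs/attrs) so the FP32 graph runs on MKL-DNN kernels.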
ops = graph.all_op_nodes()
for op_node in ops:
name = op_node.name()
if name in ['depthwise_conv2d']:
input_var_node = graph._find_node_by_name(
op_node.inputs, op_node.input("Input")[0])
weight_var_node = graph._find_node_by_name(
op_node.inputs, op_node.input("Filter")[0])
output_var_node = graph._find_node_by_name(
graph.all_var_nodes(), op_node.output("Output")[0])
attrs = {
name: op_node.op().attr(name)
for name in op_node.op().attr_names()
}
conv_op_node = graph.create_op_node(
op_type='conv2d',
attrs=attrs,
inputs={
'Input': input_var_node,
'Filter': weight_var_node
},
outputs={'Output': output_var_node})
graph.link_to(input_var_node, conv_op_node)
graph.link_to(weight_var_node, conv_op_node)
graph.link_to(conv_op_node, output_var_node)
graph.safe_remove_nodes(op_node)
return graph
def _predict(self,
test_reader=None,
model_path=None,
batch_size=1,
batch_num=1,
skip_batch_num=0,
transform_to_int8=False):
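        # Load the Quant model, optionally convert it to INT8 with QuantInt8MkldnnPass, then benchmark accuracy and latency.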
place = fluid.CPUPlace()
exe = fluid.Executor(place)
inference_scope = fluid.executor.global_scope()
with fluid.scope_guard(inference_scope):
if os.path.exists(os.path.join(model_path, '__model__')):
[inference_program, feed_target_names,
fetch_targets] = fluid.io.load_inference_model(model_path, exe)
else:
[inference_program, feed_target_names,
fetch_targets] = fluid.io.load_inference_model(
model_path, exe, 'model', 'params')
graph = IrGraph(core.Graph(inference_program.desc), for_test=True)
if (self._debug):
graph.draw('.', 'quant_orig', graph.all_op_nodes())
if (transform_to_int8):
mkldnn_int8_pass = QuantInt8MkldnnPass(
_scope=inference_scope, _place=place)
graph = mkldnn_int8_pass.apply(graph)
else:
graph = self._prepare_for_fp32_mkldnn(graph)
inference_program = graph.to_program()
dshape = [3, 224, 224]
outputs = []
infer_accs1 = []
infer_accs5 = []
fpses = []
batch_times = []
total_samples = 0
iters = 0
infer_start_time = time.time()
for data in test_reader():
if batch_num > 0 and iters >= batch_num:
break
if iters == skip_batch_num:
total_samples = 0
infer_start_time = time.time()
if six.PY2:
images = map(lambda x: x[0].reshape(dshape), data)
if six.PY3:
images = list(map(lambda x: x[0].reshape(dshape), data))
images = np.array(images).astype('float32')
labels = np.array([x[1] for x in data]).astype('int64')
start = time.time()
out = exe.run(inference_program,
feed={feed_target_names[0]: images},
fetch_list=fetch_targets)
                batch_time = (time.time() - start) * 1000  # in milliseconds
outputs.append(out[0])
batch_acc1, batch_acc5 = self._get_batch_accuracy(out[0],
labels)
infer_accs1.append(batch_acc1)
infer_accs5.append(batch_acc5)
samples = len(data)
total_samples += samples
batch_times.append(batch_time)
fps = samples / batch_time * 1000
fpses.append(fps)
iters += 1
appx = ' (warm-up)' if iters <= skip_batch_num else ''
_logger.info('batch {0}{5}, acc1: {1:.4f}, acc5: {2:.4f}, '
'latency: {3:.4f} ms, fps: {4:.2f}'.format(
iters, batch_acc1, batch_acc5, batch_time /
batch_size, fps, appx))
# Postprocess benchmark data
batch_latencies = batch_times[skip_batch_num:]
batch_latency_avg = np.average(batch_latencies)
latency_avg = batch_latency_avg / batch_size
fpses = fpses[skip_batch_num:]
fps_avg = np.average(fpses)
infer_total_time = time.time() - infer_start_time
acc1_avg = np.mean(infer_accs1)
acc5_avg = np.mean(infer_accs5)
_logger.info('Total inference run time: {:.2f} s'.format(
infer_total_time))
return outputs, acc1_avg, acc5_avg, fps_avg, latency_avg
def _summarize_performance(self, fp32_fps, fp32_lat, int8_fps, int8_lat):
_logger.info('--- Performance summary ---')
_logger.info('FP32: avg fps: {0:.2f}, avg latency: {1:.4f} ms'.format(
fp32_fps, fp32_lat))
_logger.info('INT8: avg fps: {0:.2f}, avg latency: {1:.4f} ms'.format(
int8_fps, int8_lat))
def _compare_accuracy(self, fp32_acc1, fp32_acc5, int8_acc1, int8_acc5,
threshold):
_logger.info('--- Accuracy summary ---')
_logger.info(
            'Accepted top1 accuracy drop threshold: {0}. (condition: (FP32_top1_acc - INT8_top1_acc) <= threshold)'
.format(threshold))
_logger.info(
'FP32: avg top1 accuracy: {0:.4f}, avg top5 accuracy: {1:.4f}'.
format(fp32_acc1, fp32_acc5))
_logger.info(
'INT8: avg top1 accuracy: {0:.4f}, avg top5 accuracy: {1:.4f}'.
format(int8_acc1, int8_acc5))
assert fp32_acc1 > 0.0
assert int8_acc1 > 0.0
assert fp32_acc1 - int8_acc1 <= threshold
def test_graph_transformation(self):
if not fluid.core.is_compiled_with_mkldnn():
return
quant_model_path = test_case_args.quant_model
assert quant_model_path, 'The Quant model path cannot be empty. Please, use the --quant_model option.'
data_path = test_case_args.infer_data
assert data_path, 'The dataset path cannot be empty. Please, use the --infer_data option.'
batch_size = test_case_args.batch_size
batch_num = test_case_args.batch_num
skip_batch_num = test_case_args.skip_batch_num
acc_diff_threshold = test_case_args.acc_diff_threshold
self._debug = test_case_args.debug
_logger.info('Quant FP32 & INT8 prediction run.')
_logger.info('Quant model: {0}'.format(quant_model_path))
_logger.info('Dataset: {0}'.format(data_path))
_logger.info('Batch size: {0}'.format(batch_size))
_logger.info('Batch number: {0}'.format(batch_num))
_logger.info('Accuracy drop threshold: {0}.'.format(acc_diff_threshold))
_logger.info('--- Quant FP32 prediction start ---')
val_reader = paddle.batch(
self._reader_creator(data_path), batch_size=batch_size)
fp32_output, fp32_acc1, fp32_acc5, fp32_fps, fp32_lat = self._predict(
val_reader,
quant_model_path,
batch_size,
batch_num,
skip_batch_num,
transform_to_int8=False)
_logger.info('--- Quant INT8 prediction start ---')
val_reader = paddle.batch(
self._reader_creator(data_path), batch_size=batch_size)
int8_output, int8_acc1, int8_acc5, int8_fps, int8_lat = self._predict(
val_reader,
quant_model_path,
batch_size,
batch_num,
skip_batch_num,
transform_to_int8=True)
self._summarize_performance(fp32_fps, fp32_lat, int8_fps, int8_lat)
self._compare_accuracy(fp32_acc1, fp32_acc5, int8_acc1, int8_acc5,
acc_diff_threshold)
if __name__ == '__main__':
global test_case_args
test_case_args, remaining_args = parse_args()
unittest.main(argv=remaining_args)
| apache-2.0 | -8,799,431,174,079,440,000 | 39.08254 | 114 | 0.535878 | false | 3.773461 | true | false | false |
p0123n/mygate | mygate.py | 1 | 2806 | #! /usr/bin/env python
#-*- coding:utf-8 -*-
__author__ = 'p0123n'
import MySQLdb
from MySQLdb import cursors
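# Class decorator: lazily create and cache a single instance of the decorated class.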
def keepSingleConn(cls):
instances = dict()
def getInstance():
if cls not in instances:
instances[cls] = cls()
return instances[cls]
return getInstance
@keepSingleConn
class Connection():
def __init__(self):
self.connection = None
self.cursor = None
def connect(self, params):
connection = MySQLdb.connect(
host = params['addr'],
port = params['port'],
user = params['user'],
passwd = params['pass'],
db = params['name'],
cursorclass = params['curs']
)
self.cursor = connection.cursor()
self.cursor.execute('set names utf8')
self.cursor.execute('set session time_zone="%s"' % params['tmzn'])
return connection, self.cursor
class Query():
def __init__(self, params):
params['curs'] = cursors.SSDictCursor
self.connection, self.cursor = Connection().connect(params)
def query(self, query):
self.cursor.execute(query)
self.connection.commit()
def __iter__(self):
        for row in self.cursor:
yield row
def __enter__(self):
return self.cursor
    def __exit__(self, exc_type, exc_value, traceback):
        if isinstance(exc_value, Exception):
            self.connection.rollback()
        else:
            self.connection.commit()
        self.connection.close()
class Dump():
def __init__(self, params):
params['curs'] = cursors.SSCursor
self.connection, self.cursor = Connection().connect(params)
def dump(self, query, filn=None, dirn='.', sep=';', pref='mygate'):
if not filn:
from datetime import datetime
filn = datetime.today().strftime('%Y-%m-%d(%H%M%S).csv')
filn = '%s/%s_%s' % (dirn, pref, filn)
else:
filn = '%s/%s%s' % (dirn, pref, filn)
fn = open(filn, 'w')
self.cursor.execute(query)
rows = 0
for row in self.cursor:
fn.write(sep.join( str(field) for field in row ) + '\n')
rows += 1
fn.close()
return filn, rows
class MyGate():
def __init__(self, params):
self._query = None
self._dump = None
self._params= params
def query(self, *args, **kwargs):
self._query = self._query or Query(self._params)
return self._query.query(*args, **kwargs)
def dump(self, *args, **kwargs):
self._dump = self._dump or Dump(self._params)
return self._dump.dump(*args, **kwargs)
if __name__ == '__main__':
print 'Hi.'
| apache-2.0 | 5,312,227,526,525,010,000 | 26.509804 | 74 | 0.543122 | false | 3.854396 | false | false | false |
DonaldMcRonald/SketchRecognitionWithTensorFlow | src/main/python/recognition/custom/recognizer.py | 1 | 5154 | import math
import tensorflow as tf
from recognition import sketch_utils as utils
import custom_recogntion_utilities as training_helpers
from generated_proto import sketch_pb2 as Sketch
from recognition.generation import feature_generation as features
X = 0
Y = 1
ID = 2
class Recognizer:
num_points = 32
classifier = None
training_bundle_features = None
training_bundle_targets = None
training_bundle_amount = 1000
training_bundle_counter = 0
X_placeholder = None
Y_placeholder = None
num_classes = 2
session = None
def __init__(self, label):
self.label = label
self.graph = tf.Graph()
with self.graph.as_default() as g:
with g.name_scope(label) as scope:
self.points_placeholder = tf.placeholder(tf.float32, shape=[None, 2], name="points")
feature_list = features.generate_features(self.points_placeholder)
#feature_list = tf.Print(feature_list, [feature_list], "Features for recognition", summarize=500)
column_list = features.generate_columns()
mapping = features.match_features_columns(feature_list, column_list)
first_layer = tf.contrib.layers.input_from_feature_columns(columns_to_tensors=mapping,
feature_columns=column_list)
with g.name_scope('layer2') as scope1:
layer2 = tf.contrib.layers.fully_connected(first_layer, 50, scope=scope1)
with g.name_scope('hidden1') as scope2:
hidden = tf.contrib.layers.fully_connected(layer2, 20, scope=scope2)
with g.name_scope('hidden2') as scope3:
output = tf.contrib.layers.fully_connected(hidden, self.num_classes, scope=scope3)
output = tf.sigmoid(output)
print output
self.class_index = tf.argmax(output, 0)
output = tf.Print(output, [output, self.class_index], "Raw output of training data")
self.output = output
self.target = training_helpers.create_target_classes(self.num_classes)
lossTarget = tf.Print(self.target, [self.target], "Raw target data")
self.loss = training_helpers.create_loss_function(output, lossTarget)
self.train_step = training_helpers.create_training(self.loss, .01)
self.init = tf.initialize_all_variables()
self.graph.finalize()
def create_features(self, point_list):
utils.strip_ids_from_points(point_list)
return point_list
def create_target(self, label):
# big punishment to show difference between 0 and 1
true_class = 1.0 if label == self.label else 0.0
null_class = 1.0 if label != self.label else 0.0
return [[true_class, null_class]]
def train(self, label, points):
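        # Buffer (points, target) pairs and run a batched training pass once training_bundle_amount samples are queued.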
target = self.create_target(label)
if self.training_bundle_features is None:
self.training_bundle_features = [points]
else:
self.training_bundle_features.append(points)
if self.training_bundle_targets is None:
self.training_bundle_targets = [target]
else:
self.training_bundle_targets.append(target)
if self.training_bundle_counter >= self.training_bundle_amount:
self.execute_train_bundle()
else:
self.training_bundle_counter += 1
# TODO: change back to this when the code is fixed
def single_train(self, label, features):
target = self.create_target(label)
self.classifier.fit(x=features, y=target, steps=1)
def execute_train_bundle(self):
print 'batch training: ' + self.label
with tf.Session(graph=self.graph) as sess:
sess.run(self.init)
for i in range(self.training_bundle_counter):
feed = {self.points_placeholder: self.training_bundle_features[i],
self.target: self.training_bundle_targets[i]}
result = sess.run(self.train_step, feed_dict=feed)
print result
self.training_bundle_features = None
self.training_bundle_targets = None
self.training_bundle_counter = 0
def finish_training(self):
if self.training_bundle_counter > 0:
self.execute_train_bundle()
def recognize(self, features):
interpretation = Sketch.SrlInterpretation()
with tf.Session(graph=self.graph) as sess:
sess.run(self.init)
feed = {self.points_placeholder: features}
raw_output, class_index = sess.run([self.output, self.class_index], feed)
print class_index
print 'result: ' + str(self.label if class_index == 0 else None)
print raw_output
interpretation.label = self.label
interpretation.confidence = raw_output[class_index]
return interpretation
| mit | -6,903,016,166,910,179,000 | 40.595041 | 113 | 0.5974 | false | 4.190244 | false | false | false |
dpazel/music_rep | search/melodicsearch/melodic_search_analysis.py | 1 | 6914 | from structure.LineGrammar.core.line_grammar_executor import LineGrammarExecutor
from enum import Enum
from tonalmodel.interval import Interval
class MelodicSearchAnalysis(object):
def __init__(self, pattern_line, pattern_hct):
self.__pattern_line = pattern_line
self.__pattern_hct = pattern_hct
self.__note_annotation = self.prepare_note_search_parameters()
self.__note_pair_annotation = self.prepare_note_pair_search_parameters()
self.__hct_annotation = self.prepare_hct_search_parameters()
@staticmethod
def create(pattern_string):
line, hct = LineGrammarExecutor().parse(pattern_string)
return MelodicSearchAnalysis(line, hct)
@property
def pattern_line(self):
return self.__pattern_line
@property
def pattern_hct(self):
return self.__pattern_hct
@property
def note_annotation(self):
return self.__note_annotation
@property
def note_pair_annotation(self):
return self.__note_pair_annotation
@property
def hct_annotation(self):
return self.__hct_annotation
def prepare_note_search_parameters(self):
annotation_list = list()
for note in self.pattern_line.get_all_notes():
hc = self.pattern_hct[note.get_absolute_position()]
annotation_list.append(NoteInformation(note, hc))
return annotation_list
def prepare_note_pair_search_parameters(self):
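        # Pair each pitched note with the next pitched note, skipping rests (notes without a diatonic pitch).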
pair_annotation_list = list()
note_list = self.pattern_line.get_all_notes()
for i in range(0, len(note_list) - 1):
first = note_list[i]
if first.diatonic_pitch is None:
continue
second = None
for j in range(i + 1, len(note_list)):
second = note_list[j]
if second.diatonic_pitch is not None:
break
if second.diatonic_pitch is None:
continue
pair_annotation_list.append(NotePairInformation(first, second))
return pair_annotation_list
def prepare_hct_search_parameters(self):
hct_annotation = list()
hc_list = self.pattern_hct.hc_list()
for i in range(0, len(hc_list)):
hc = hc_list[i]
hct_annotation.append(HCInformation(hc))
return hct_annotation
class NoteInformation(object):
def __init__(self, note, hc):
self.__note = note
self.__hc = hc
self.__scale_degree = self.compute_scale_degree()
self.__chord_interval = self.compute_chord_interval()
self.__root_based_interval = self.compute_root_based_interval()
self.__duration = note.duration
@property
def note(self):
return self.__note
@property
def hc(self):
return self.__hc
@property
def scale_degree(self):
return self.__scale_degree
@property
def chord_interval(self):
return self.__chord_interval
@property
def is_scalar(self):
return self.scale_degree is not None
@property
def is_chordal(self):
return self.chord_interval is not None
@property
def duration(self):
return self.__duration
@property
def root_based_interval(self):
return self.__root_based_interval
def compute_scale_degree(self):
annotation = self.hc.tonality.annotation
if self.note.diatonic_pitch is None: # Rest
return None
if self.note.diatonic_pitch.diatonic_tone in annotation:
return annotation.index(self.note.diatonic_pitch.diatonic_tone)
return None
def compute_root_based_interval(self):
if self.note.diatonic_pitch is None:
return None
return Interval.calculate_tone_interval(self.hc.tonality.root_tone, self.note.diatonic_pitch.diatonic_tone)
def compute_chord_interval(self):
tones = self.hc.chord.tones
if self.note.diatonic_pitch is None:
return None
for tone in tones:
if tone[0] == self.note.diatonic_pitch.diatonic_tone:
return tone[1]
return None
def __str__(self):
return '{0} hc={1} scale_degree={2} interval={3} is_scalar={4} is_chordal={5} duration={6}'.\
format(self.note, self. hc, self.scale_degree, self.chord_interval, self.is_scalar,
self.is_chordal, self.duration)
class NotePairInformation(object):
class Relationship(Enum):
LT = -1
EQ = 0
GT = 1
def __init__(self, first_note, second_note):
self.__first_note = first_note
self.__second_note = second_note
self.__time_difference = self.second_note.get_absolute_position() - self.first_note.get_absolute_position()
self.__forward_interval = Interval.create_interval(self.first_note.diatonic_pitch,
self.second_note.diatonic_pitch)
cd = self.forward_interval.chromatic_distance
self.__relationship = NotePairInformation.Relationship.GT if cd < 0 else NotePairInformation.Relationship.LT \
if cd > 0 else NotePairInformation.Relationship.EQ
@property
def first_note(self):
return self.__first_note
@property
def second_note(self):
return self.__second_note
@property
def time_difference(self):
return self.__time_difference
@property
def forward_interval(self):
return self.__forward_interval
@property
def relationship(self):
return self.__relationship
@staticmethod
def rel_pair_symbol(relationship):
return '>' if relationship == NotePairInformation.Relationship.GT else \
'<' if relationship == NotePairInformation.Relationship.LT else '=='
def __str__(self):
return '{0} {1} {2}'.format(self.first_note, NotePairInformation.rel_pair_symbol(self.relationship),
self.second_note)
class HCInformation(object):
def __init__(self, hc):
self.__hc = hc
self.__span = hc.duration
if self.hc.chord.chord_template.diatonic_basis is None:
self.__relative_chord_degree = self.hc.chord.chord_template.scale_degree
else:
interval = Interval.calculate_tone_interval(self.hc.tonality.diatonic_tone,
self.hc.chord.chord_template.diatonic_basis)
self.__relative_chord_degree = interval.diatonic_distance + 1
@property
def hc(self):
return self.__hc
@property
def span(self):
return self.__span
@property
def relative_chord_degree(self):
return self.__relative_chord_degree
def __str__(self):
return '{0} span={1} chord_degree={2}'.format(self.hc, self.span, self.relative_chord_degree)
| mit | 3,146,836,538,345,960,000 | 29.866071 | 118 | 0.613827 | false | 3.893018 | false | false | false |
Lukasa/cryptography | tests/hazmat/primitives/test_block.py | 1 | 5776 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import binascii
import pytest
from cryptography import utils
from cryptography.exceptions import (
AlreadyFinalized, _Reasons
)
from cryptography.hazmat.primitives import interfaces
from cryptography.hazmat.primitives.ciphers import (
Cipher, algorithms, modes
)
from .utils import (
generate_aead_exception_test, generate_aead_tag_exception_test
)
from ...utils import raises_unsupported_algorithm
@utils.register_interface(interfaces.Mode)
class DummyMode(object):
name = "dummy-mode"
def validate_for_algorithm(self, algorithm):
pass
@utils.register_interface(interfaces.CipherAlgorithm)
class DummyCipher(object):
name = "dummy-cipher"
@pytest.mark.cipher
class TestCipher(object):
def test_creates_encryptor(self, backend):
cipher = Cipher(
algorithms.AES(binascii.unhexlify(b"0" * 32)),
modes.CBC(binascii.unhexlify(b"0" * 32)),
backend
)
assert isinstance(cipher.encryptor(), interfaces.CipherContext)
def test_creates_decryptor(self, backend):
cipher = Cipher(
algorithms.AES(binascii.unhexlify(b"0" * 32)),
modes.CBC(binascii.unhexlify(b"0" * 32)),
backend
)
assert isinstance(cipher.decryptor(), interfaces.CipherContext)
def test_instantiate_with_non_algorithm(self, backend):
algorithm = object()
with pytest.raises(TypeError):
Cipher(algorithm, mode=None, backend=backend)
@pytest.mark.cipher
class TestCipherContext(object):
def test_use_after_finalize(self, backend):
cipher = Cipher(
algorithms.AES(binascii.unhexlify(b"0" * 32)),
modes.CBC(binascii.unhexlify(b"0" * 32)),
backend
)
encryptor = cipher.encryptor()
encryptor.update(b"a" * 16)
encryptor.finalize()
with pytest.raises(AlreadyFinalized):
encryptor.update(b"b" * 16)
with pytest.raises(AlreadyFinalized):
encryptor.finalize()
decryptor = cipher.decryptor()
decryptor.update(b"a" * 16)
decryptor.finalize()
with pytest.raises(AlreadyFinalized):
decryptor.update(b"b" * 16)
with pytest.raises(AlreadyFinalized):
decryptor.finalize()
def test_unaligned_block_encryption(self, backend):
cipher = Cipher(
algorithms.AES(binascii.unhexlify(b"0" * 32)),
modes.ECB(),
backend
)
encryptor = cipher.encryptor()
ct = encryptor.update(b"a" * 15)
assert ct == b""
ct += encryptor.update(b"a" * 65)
assert len(ct) == 80
ct += encryptor.finalize()
decryptor = cipher.decryptor()
pt = decryptor.update(ct[:3])
assert pt == b""
pt += decryptor.update(ct[3:])
assert len(pt) == 80
assert pt == b"a" * 80
decryptor.finalize()
@pytest.mark.parametrize("mode", [DummyMode(), None])
def test_nonexistent_cipher(self, backend, mode):
cipher = Cipher(
DummyCipher(), mode, backend
)
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_CIPHER):
cipher.encryptor()
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_CIPHER):
cipher.decryptor()
def test_incorrectly_padded(self, backend):
cipher = Cipher(
algorithms.AES(b"\x00" * 16),
modes.CBC(b"\x00" * 16),
backend
)
encryptor = cipher.encryptor()
encryptor.update(b"1")
with pytest.raises(ValueError):
encryptor.finalize()
decryptor = cipher.decryptor()
decryptor.update(b"1")
with pytest.raises(ValueError):
decryptor.finalize()
@pytest.mark.supported(
only_if=lambda backend: backend.cipher_supported(
algorithms.AES("\x00" * 16), modes.GCM("\x00" * 12)
),
skip_message="Does not support AES GCM",
)
@pytest.mark.cipher
class TestAEADCipherContext(object):
test_aead_exceptions = generate_aead_exception_test(
algorithms.AES,
modes.GCM,
)
test_aead_tag_exceptions = generate_aead_tag_exception_test(
algorithms.AES,
modes.GCM,
)
class TestModeValidation(object):
def test_cbc(self, backend):
with pytest.raises(ValueError):
Cipher(
algorithms.AES(b"\x00" * 16),
modes.CBC(b"abc"),
backend,
)
def test_ofb(self, backend):
with pytest.raises(ValueError):
Cipher(
algorithms.AES(b"\x00" * 16),
modes.OFB(b"abc"),
backend,
)
def test_cfb(self, backend):
with pytest.raises(ValueError):
Cipher(
algorithms.AES(b"\x00" * 16),
modes.CFB(b"abc"),
backend,
)
def test_ctr(self, backend):
with pytest.raises(ValueError):
Cipher(
algorithms.AES(b"\x00" * 16),
modes.CTR(b"abc"),
backend,
)
| apache-2.0 | -7,514,326,614,195,163,000 | 29.083333 | 71 | 0.605263 | false | 3.994467 | true | false | false |
crossroadchurch/paul | openlp/core/ui/servicenoteform.py | 1 | 3146 | # -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2015 OpenLP Developers #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
The :mod:`~openlp.core.ui.servicenoteform` module contains the `ServiceNoteForm` class.
"""
from PyQt4 import QtGui
from openlp.core.common import Registry, RegistryProperties, translate
from openlp.core.lib import SpellTextEdit
from openlp.core.lib.ui import create_button_box
class ServiceNoteForm(QtGui.QDialog, RegistryProperties):
"""
This is the form that is used to edit the verses of the song.
"""
def __init__(self):
"""
Constructor
"""
super(ServiceNoteForm, self).__init__(Registry().get('main_window'))
self.setupUi()
self.retranslateUi()
def exec_(self):
"""
Execute the form and return the result.
"""
self.text_edit.setFocus()
return QtGui.QDialog.exec_(self)
def setupUi(self):
"""
Set up the UI of the dialog
"""
self.setObjectName('serviceNoteEdit')
self.dialog_layout = QtGui.QVBoxLayout(self)
self.dialog_layout.setContentsMargins(8, 8, 8, 8)
self.dialog_layout.setSpacing(8)
self.dialog_layout.setObjectName('vertical_layout')
self.text_edit = SpellTextEdit(self, False)
self.text_edit.setObjectName('textEdit')
self.dialog_layout.addWidget(self.text_edit)
self.button_box = create_button_box(self, 'button_box', ['cancel', 'save'])
self.dialog_layout.addWidget(self.button_box)
def retranslateUi(self):
"""
Translate the UI on the fly
"""
self.setWindowTitle(translate('OpenLP.ServiceNoteForm', 'Service Item Notes'))
| gpl-2.0 | -3,048,598,671,742,118,400 | 43.942857 | 87 | 0.529561 | false | 4.781155 | false | false | false |
CoderDuan/mantaflow | tools/tests/test_0030_gridop.py | 2 | 2839 | #
# Basic test, grid operators
#
import sys
print ("Running python "+sys.version)
from manta import *
from helperInclude import *
# solver params
gs = vec3(10, 20, 30)
s = Solver(name='main', gridSize = gs, dim=3)
# prepare grids
rlg1 = s.create(RealGrid)
rlg2 = s.create(RealGrid)
rlg3 = s.create(RealGrid)
vcg1 = s.create(MACGrid)
vcg2 = s.create(MACGrid)
vcg3 = s.create(MACGrid)
int1 = s.create(IntGrid)
int2 = s.create(IntGrid)
int3 = s.create(IntGrid)
vcgTmp= s.create(MACGrid)
genRefFiles = getGenRefFileSetting()
if (genRefFiles==1):
# manually init result
rlg1.setConst( 1.1 )
rlg2.setConst( 1.2 )
rlg3.setConst( 2.9 )
#vcg1.setConst( vec3(1.2, 1.2, 1.2) )
#vcg2.setConst( vec3(0.5, 0.5, 0.5) )
#vcg3.setConst( vec3(1.95, 1.95, 1.95) )
vcg1.setConst( vec3(1.25, 1.25, 1.25) )
vcg2.setConst( vec3(0.5, 0.5, 0.5) )
vcg3.setConst( vec3(1.95, 1.95, 1.95) )
int1.setConst( 125 )
int2.setConst( 6 )
int3.setConst( 143 )
else:
# real test run, perform basic calculations
rlg1.setConst( 1.0 )
rlg2.setConst( 2.4 )
rlg3.setConst( 9.6 )
rlg1.addConst (0.1) # 1.1
rlg2.multConst(0.5) # 1.2
rlg3.copyFrom( rlg1 ) # 1.1
rlg3.add(rlg2) # 2.3
rlg3.addScaled(rlg2, 0.5) # 2.9
#print "r1 %f , r2 %f , r3 %f " % ( rlg1.getMaxAbs() , rlg2.getMaxAbs() , rlg3.getMaxAbs() )
vcg1.setConst( vec3(1.0, 1.0, 1.0) )
vcg2.setConst( vec3(1.0, 1.0, 1.0) )
vcg3.setConst( vec3(9.0, 9.0, 9.0) )
vcg1.addConst ( vec3(0.25,0.25,0.25) ) # 1.25
vcg2.multConst( vec3(0.5,0.5,0.5) ) # 0.5
vcgTmp.setConst( vec3(1.2, 1.2, 1.2) )
vcg3.copyFrom( vcgTmp ) # 1.2
vcg3.add(vcg2) # 1.7
vcg3.addScaled(vcg2, vec3(0.5, 0.5, 0.5) ) # 1.95
#print "v1 %s , v2 %s , v3 %s " % ( vcg1.getMaxAbs() , vcg2.getMaxAbs(), vcg3.getMaxAbs() )
int1.setConst( 123 )
int2.setConst( 2 )
int3.setConst( 9 )
int1.addConst ( 2 ) # 125
int2.multConst( 3 ) # 6
int3.copyFrom( int1 ) # 125
int3.add(int2) # 131
int3.addScaled(int2, 2) # 143
#print "i1 %s , i2 %s , i3 %s " % ( int1.getMaxAbs() , int2.getMaxAbs() , int3.getMaxAbs() )
# verify
doTestGrid( sys.argv[0], "rlg1", s, rlg1 , threshold=1e-07 , thresholdStrict=1e-14 )
doTestGrid( sys.argv[0], "rlg2", s, rlg2 , threshold=1e-07 , thresholdStrict=1e-14 )
doTestGrid( sys.argv[0], "rlg3", s, rlg3 , threshold=1e-07 , thresholdStrict=1e-14 )
doTestGrid( sys.argv[0], "vcg1", s, vcg1 , threshold=5e-07 , thresholdStrict=1e-14 )
doTestGrid( sys.argv[0], "vcg2", s, vcg2 , threshold=5e-07 , thresholdStrict=1e-14 )
doTestGrid( sys.argv[0], "vcg3", s, vcg3 , threshold=5e-07 , thresholdStrict=1e-14 )
doTestGrid( sys.argv[0], "int1", s, int1 , threshold=1e-14 , thresholdStrict=1e-14 )
doTestGrid( sys.argv[0], "int2", s, int2 , threshold=1e-14 , thresholdStrict=1e-14 )
doTestGrid( sys.argv[0], "int3", s, int3 , threshold=1e-14 , thresholdStrict=1e-14 )
| gpl-3.0 | -1,891,309,112,479,031,000 | 28.268041 | 93 | 0.644593 | false | 2.0092 | true | true | false |
stucchio/Django-Media-Generator | mediagenerator/filters/media_url.py | 10 | 1369 | from django.utils.encoding import smart_str
from django.utils.simplejson import dumps
from hashlib import sha1
from mediagenerator.generators.bundles.base import Filter
from mediagenerator.utils import get_media_url_mapping
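# JavaScript template that exposes the media URL mapping to the browser as media_url()/media_urls().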
_CODE = """
_$MEDIA_URLS = %s;
media_urls = function(key) {
var urls = _$MEDIA_URLS[key];
if (!urls)
throw 'Could not resolve media url ' + key;
return urls;
};
media_url = function(key) {
var urls = media_urls(key);
if (urls.length == 1)
return urls[0];
throw 'media_url() only works with keys that point to a single entry (e.g. an image), but not bundles. Use media_urls() instead.';
};
""".lstrip()
class MediaURL(Filter):
takes_input = False
def __init__(self, **kwargs):
super(MediaURL, self).__init__(**kwargs)
assert self.filetype == 'js', (
'MediaURL only supports JS output. '
'The parent filter expects "%s".' % self.filetype)
def get_output(self, variation):
yield self._compile()
def get_dev_output(self, name, variation):
assert name == '.media_url.js'
return self._compile()
def get_dev_output_names(self, variation):
content = self._compile()
hash = sha1(smart_str(content)).hexdigest()
yield '.media_url.js', hash
def _compile(self):
return _CODE % dumps(get_media_url_mapping())
| bsd-3-clause | -4,843,638,403,972,205,000 | 28.12766 | 132 | 0.638422 | false | 3.574413 | false | false | false |
bparzella/secsgem | secsgem/secs/functions/s06f06.py | 1 | 1595 | #####################################################################
# s06f06.py
#
# (c) Copyright 2021, Benjamin Parzella. All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#####################################################################
"""Class for stream 06 function 06."""
from secsgem.secs.functions.base import SecsStreamFunction
from secsgem.secs.data_items import GRANT6
class SecsS06F06(SecsStreamFunction):
"""
multi block data grant.
**Data Items**
- :class:`GRANT6 <secsgem.secs.data_items.GRANT6>`
**Structure**::
>>> import secsgem.secs
>>> secsgem.secs.functions.SecsS06F06
GRANT6: B[1]
**Example**::
>>> import secsgem.secs
>>> secsgem.secs.functions.SecsS06F06(secsgem.secs.data_items.GRANT6.BUSY)
S6F6
<B 0x1> .
:param value: parameters for this function (see example)
:type value: byte
"""
_stream = 6
_function = 6
_data_format = GRANT6
_to_host = False
_to_equipment = True
_has_reply = False
_is_reply_required = False
_is_multi_block = False
| lgpl-2.1 | -2,935,853,949,612,352,500 | 26.5 | 82 | 0.618182 | false | 3.797619 | false | false | false |
nolram/NewsReader-Django | NewsReaderDjango/settings.py | 1 | 4456 | """
Django settings for NewsReaderDjango project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from django.conf import global_settings
from datetime import timedelta
from celery import Celery
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
ENV_PATH = os.path.abspath(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'iudhm*b^7!8ea5nrjgwz@m1(pkjq60acj0+9*h1_d6!!c(&yr3'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
NO_SERVIDOR = True
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.sites',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
#'djcelery',
'Site',
'Crawler',
'sorl.thumbnail',
#'kronos',
)
#HAYSTACK_SIGNAL_PROCESSOR = 'celery_haystack.signals.CelerySignalProcessor'
BROKER_TRANSPORT = "memory"
CELERY_ALWAYS_EAGER = True
CELERY_IGNORE_RESULT = True
CELERY_ACCEPT_CONTENT = ['pickle', 'json', 'msgpack', 'yaml']
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
#HAYSTACK_CONNECTIONS = {
# 'default': {
# 'ENGINE': 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine',
# 'URL': 'http://127.0.0.1:9200/',
# 'INDEX_NAME': 'haystack',
# },
#}
ROOT_URLCONF = 'NewsReaderDjango.urls'
WSGI_APPLICATION = 'NewsReaderDjango.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'postgres',
'USER': 'postgres',
'PASSWORD': 'docker',
'HOST': 'db',
'PORT': '5432',
'TEST': {
'NAME': 'test_crawler',
},
},
}
TIMEOUT = 10000
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'pt-BR'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
if NO_SERVIDOR:
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
#
#if NO_SERVIDOR:
# STATICFILES_DIRS = (
# os.path.join(BASE_DIR, "static_root"),
# )
#else:
# STATICFILES_DIRS = (
# os.path.join(BASE_DIR, "Site/static"),
# )
MEDIA_URL = "/media/"
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
# Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
# list if you haven't customized them:
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.template.context_processors.request',
'django.contrib.messages.context_processors.messages',
],
},
},
]
LOGIN_URL = '/login/'
#BOOTSTRAP_ADMIN_SIDEBAR_MENU = True
#KRONOS_PYTHONPATH = "/home/nolram/Virtualenv/py3_django/bin/python3"
#if NO_SERVIDOR:
# KRONOS_POSTFIX = "> /opt/flyn_django/log_thread.log 2>&1 "
#else:
# KRONOS_PREFIX = "source /home/nolram/Virtualenv/py3_django/bin/activate &&"
# KRONOS_POSTFIX = "> /home/nolram/log_thread.log 2>&1 " | mit | -6,919,374,225,255,333,000 | 26.343558 | 87 | 0.654399 | false | 3.271659 | false | false | false |
dmpayton/sphinx-granite | setup.py | 1 | 1263 | from granite import __version__
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup, find_packages
setup(
name='sphinx-granite',
version=__version__,
license='MIT',
url='https://github.com/dmpayton/sphinx-granite/',
description='A theme for Sphinx.',
long_description=open('./README.rst', 'r').read(),
    keywords='sphinx theme',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Documentation',
'Topic :: Software Development :: Documentation',
],
author='Derek Payton',
author_email='[email protected]',
install_requires=open('requirements.txt').read().splitlines(),
include_package_data=True,
packages=['granite'],
package_data={'granite': [
'theme.conf',
'*.html',
'static/css/*.css',
'static/js/*.js',
'static/font/*.*'
]},
zip_safe=False,
)
| mit | -3,708,856,599,919,096,300 | 27.704545 | 66 | 0.599367 | false | 4.168317 | false | false | false |
jimsrc/seatos | sheaths/src/forbush/tt2_o7o6.py | 2 | 3169 | import os
from rebineo_o7o6 import *
import matplotlib.patches as patches
import matplotlib.transforms as transforms
import console_colors as ccl
def makefig(medVAR, avrVAR, stdVAR, nVAR, tnorm,
dTday, SUBTITLE, YLIMS, YLAB, fname_fig):
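    # Plot the mean and median profiles with a standard-error band; the [0,1] span of the normalized time axis is shaded.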
fig = figure(1, figsize=(13, 6))
ax = fig.add_subplot(111)
ax.plot(tnorm, avrVAR, 'o-', c='black', markersize=5, label='mean')
ax.plot(tnorm, medVAR, 'o-', c='red', alpha=.5, markersize=5, markeredgecolor='none', label='median')
inf = avrVAR + stdVAR/sqrt(nVAR)
sup = avrVAR - stdVAR/sqrt(nVAR)
ax.fill_between(tnorm, inf, sup, facecolor='gray', alpha=0.5)
trans = transforms.blended_transform_factory(
ax.transData, ax.transAxes)
rect1 = patches.Rectangle((0., 0.), width=1.0, height=1,
transform=trans, color='blue',
alpha=0.3)
ax.add_patch(rect1)
ax.legend(loc='upper right')
ax.grid()
ax.set_ylim(YLIMS)
TITLE = SUBTITLE
ax.set_title(TITLE)
ax.set_xlabel('time normalized to MC passage time [1]')
ax.set_ylabel(YLAB)
savefig(fname_fig, format='png', dpi=180, bbox_inches='tight')
close()
def wangflag(ThetaThres):
if ThetaThres<0:
return 'NaN'
else:
return str(ThetaThres)
#-------------------- for figures:
Nsh = dVARS[0][0]
WangFlag = 'NaN'#wangflag(ThetaThres)
SUBTITLE = 'number of sheaths: %d \n\
%dbins per time unit \n\
sheaths w/ dT>%gdays \n\
MCflags: %s \n\
WangFlag: %s' % (Nsh, nbin/(1+nbefore+nafter), dTday, MCwant['alias'], WangFlag)
# general prefix for the plot file names:
if CorrShift:
prexShift = 'wShiftCorr'
else:
prexShift = 'woShiftCorr'
DIR_FIGS = '../plots/MCflag%s/%s' % (MCwant['alias'], prexShift)
DIR_ASCII = '../ascii/MCflag%s/%s' % (MCwant['alias'], prexShift)
try:
os.system('mkdir -p %s' % DIR_FIGS)
os.system('mkdir -p %s' % DIR_ASCII)
print ccl.On + " -------> creando: %s" % DIR_FIGS + ccl.W
print ccl.On + " -------> creando: %s" % DIR_ASCII + ccl.W
except:
print ccl.On + " Ya existe: %s" %DIR_FIGS + ccl.W
print ccl.On + " Ya existe: %s" %DIR_ASCII + ccl.W
FNAMEs = 'MCflag%s_%dbefore.%dafter_Wang%s_fgap%1.1f' % (MCwant['alias'], nbefore, nafter, WangFlag, fgap)
FNAME_ASCII = '%s/%s' % (DIR_ASCII, FNAMEs)
FNAME_FIGS = '%s/%s' % (DIR_FIGS, FNAMEs)
#----------------------------------------------------------------------------------------------------
for i in range(nvars):
fname_fig = '%s_%s.png' % (FNAME_FIGS, VARS[i][1])
print ccl.Rn+ " ------> %s" % fname_fig
ylims = VARS[i][2]
ylabel = VARS[i][3]
mediana = dVARS[i][4]
average = dVARS[i][3]
std_err = dVARS[i][5]
    nValues = dVARS[i][6] # number of good values contributing data
binsPerTimeUnit = nbin/(1+nbefore+nafter)
SUBTITLE = '# of selected events: %d \n\
events w/80%% of data: %d \n\
bins per time unit: %d \n\
MCflag: %s \n\
WangFlag: %s' % (dVARS[i][0], nEnough[i], binsPerTimeUnit, MCwant['alias'], WangFlag)
makefig(mediana, average, std_err, nValues, tnorm,
dTday, SUBTITLE, ylims, ylabel, fname_fig)
fdataout = '%s_%s.txt' % (FNAME_ASCII, VARS[i][1])
dataout = array([tnorm, mediana, average, std_err, nValues])
print " ------> %s\n" % fdataout + ccl.W
savetxt(fdataout, dataout.T, fmt='%12.5f')
##
| mit | 4,850,216,339,005,660,000 | 33.075269 | 106 | 0.623856 | false | 2.366692 | false | false | false |