{
"source": "jesjimher/onstar",
"score": 3
}
#### File: onstar/onstar/demo.py
```python
import aiohttp
import asyncio
import json
from collections import namedtuple
import getpass
print("This demo will connect to: https://gsp.eur.onstar.com/\n")
print("Before trying - ensure you have access by login in above site\n")
print("\nProvide credentials\n")
username = input("Username/email: ")
password = getpass.getpass("Password: ")
gm_pin = getpass.getpass("PIN for location queries: ")
def _json_object_hook(d): return namedtuple('X', d.keys())(*d.values())
def dumpJson(parsed):
print(json.dumps(parsed, sort_keys=True, indent=4))
@asyncio.coroutine
def fetch(loop):
payload = {'username': username, 'password': password, 'roleCode': 'driver', 'place': ''}
session = aiohttp.ClientSession(loop=loop)
response = yield from session.post('https://gsp.eur.onstar.com/gspserver/services/admin/login.json', data=payload)
ret = yield from response.text()
    return json.loads(ret, object_hook=_json_object_hook), session
@asyncio.coroutine
def getLoginInfo(token, session):
header = {'X-GM-token': token}
ret = yield from session.get('https://gsp.eur.onstar.com/gspserver/services/admin/getLoginInfo.json', headers=header)
return ret
@asyncio.coroutine
def getDiagnostics(token, session, vehicleId):
header = {'X-GM-token': token}
payload = {'vehicleId': vehicleId}
    ret = yield from session.get('https://gsp.eur.onstar.com/gspserver/services/vehicle/getDiagnosticsReport.json', params=payload, headers=header)
return ret
@asyncio.coroutine
def locateCar(token, pin, session, vehicleId):
header = {'X-GM-token': token, 'X-GM-pincode': pin}
payload = {'vehicleId': vehicleId}
    ret = yield from session.post('https://gsp.eur.onstar.com/gspserver/services/vehicle/performLocationHistoryQuery.json', data=payload, headers=header)
return ret
@asyncio.coroutine
def main(loop):
    data, session = yield from fetch(loop)
print("Data from fetch - looking for token\n")
print(data)
token = data.results[0].token
print("Found token: ")
print(token)
print("\n\n")
data = (yield from getLoginInfo(token,session))
object = yield from data.text()
print("Getting login info data, looking for first vehicleId:\n")
dumpJson(json.loads(object) )
object = json.loads(object, object_hook=lambda d: namedtuple('X', list(map(lambda x:x.replace('$','_'),d.keys())))(*d.values()))
vehicleId = object.results[0].vehicles[0].vehicle.vehicleId
print("Found vehicleId: ")
print(vehicleId)
print ("\n")
diag = (yield from getDiagnostics(token, session, vehicleId))
vehDiag = yield from diag.text()
print("Getting diagnostics information:\n")
dumpJson(json.loads(vehDiag))
print("Getting localization:\n")
locate = (yield from locateCar(token, gm_pin, session, vehicleId) )
locate = yield from locate.text()
dumpJson(json.loads(locate))
print("Done for now")
session.close()
loop = asyncio.get_event_loop()
loop.run_until_complete(main(loop))
```
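
The demo above uses the legacy generator-based coroutine style (`@asyncio.coroutine` with `yield from`), which was deprecated in Python 3.8 and removed in 3.11. As a rough sketch, the login call translates to modern async/await as below; the endpoint and form fields are copied from the demo and whether the live service still accepts them is an assumption:

```python
import asyncio
import aiohttp

async def login(username, password):
    # Same form fields the demo posts above.
    payload = {'username': username, 'password': password,
               'roleCode': 'driver', 'place': ''}
    async with aiohttp.ClientSession() as session:
        async with session.post(
                'https://gsp.eur.onstar.com/gspserver/services/admin/login.json',
                data=payload) as response:
            # content_type=None skips the strict application/json check
            return await response.json(content_type=None)

# Example: asyncio.run(login('user@example.com', 'secret'))
```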
{
"source": "jesk3r/manganelo",
"score": 3
}
#### File: manganelo/api/apibase.py
```python
import requests
import threading
import urllib.parse
class APIBase:
def __init__(self, threaded):
if threaded:
# Create and start a new thread to send the request.
self._thread: threading.Thread = threading.Thread(target=self._start)
self._thread.start()
else:
# Single-threaded - We call the start method on the main thread
self._start()
def _join_thread(self):
""" Handles the extra thread by joining it onto the main thread. """
# If a thread object exists and it is still active, wait for it to finish.
if hasattr(self, "_thread") and self._thread.is_alive():
self._thread.join()
def _start(self) -> None:
raise NotImplementedError()
@staticmethod
def send_request(url: str) -> requests.Response:
"""
Send a request to the URL provided
:param str url: The URL which we are sending a GET request to.
:raise: Will raise exceptions from the requests module
        :return: The response object
"""
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:76.0) Gecko/20100101 Firefox/76.0'}
r = requests.get(url, stream=True, timeout=5, headers=headers)
return r
@staticmethod
def send_request_image(url: str) -> requests.Response:
"""
Send a request to the URL provided
:param str url: The URL which we are sending a GET request to.
:raise: Will raise exceptions from the requests module
        :return: The response object
"""
        parsed_url = urllib.parse.urlparse(url)
        domain = parsed_url.netloc
        header = {
            'Accept': 'image/png,image/svg+xml,image/*;q=0.8,video/*;q=0.8,*/*;q=0.5',
            'Accept-Encoding': 'gzip, deflate, br',
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.1.2 Safari/605.1.15',
            'Host': domain,  # the image CDN rejects requests whose Host header does not match
            'Accept-Language': 'en-ca',
            'Referer': 'https://manganelo.com/',
            'Connection': 'keep-alive'
        }
        r = requests.get(url, stream=True, timeout=5, headers=header)
return r
```
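
`APIBase` leaves the actual work to `_start()`, which subclasses must implement; the constructor either runs it inline or on a worker thread. A hypothetical subclass sketch (the class and attribute names here are illustrative, not part of the package):

```python
class PageFetcher(APIBase):
    """Hypothetical APIBase subclass: fetches one URL via send_request()."""

    def __init__(self, url: str, threaded: bool = False):
        self.url = url
        self.response = None
        # Must come last: APIBase.__init__ may start _start() immediately.
        super().__init__(threaded)

    def _start(self) -> None:
        self.response = self.send_request(self.url)

fetcher = PageFetcher("https://manganelo.com/", threaded=True)
fetcher._join_thread()  # block until the worker thread finishes
print(fetcher.response.status_code)
```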
{
"source": "jeske/csla",
"score": 3
}
#### File: pysrc/archiver/mmarchiver.py
```python
import sys, string, os, getopt, pwd, signal, time
import fcntl
import glob
import nstart
from clearsilver import handle_error
from log import *
import re
import message_db
import index
from mimelib import Parser,Message
DISCUSS_DATA_ROOT = "/home/discuss/data"
#DISCUSS_MAILDIR = "/home/discuss/Maildir"
DISCUSS_MAILDIR = "/var/lib/mailman/archives/private/*.mbox/*.mbox"
def archive_dirs():
base_dir = DISCUSS_MAILDIR
mboxes = glob.glob(base_dir)
for mboxpath in mboxes:
_p, fn = os.path.split(mboxpath)
listname, ext = os.path.splitext(fn)
archive_dir(listname, mboxpath)
def archive_dir(listname, mboxpath):
# process files...
global DONE
listpath = os.path.join(DISCUSS_DATA_ROOT, listname)
if not os.path.exists(listpath):
print "list doesn't exists", listpath
return
mboxpos_fn = os.path.join(listpath, "mbox.pos")
if not os.path.exists(mboxpos_fn):
# create the position file
open(mboxpos_fn, "w").write('0')
else:
if os.stat(mboxpath).st_ctime < os.stat(mboxpos_fn).st_ctime:
#print "nothing new: ", listname, os.stat(mboxpath).st_ctime, os.stat(mboxpos_fn).st_ctime
return
pos = int(open(mboxpos_fn, "r").read())
fp = open(mboxpath, "r")
index.index_mbox(fp, listname, mboxpath, pos)
DONE = 0
def handleSignal(*arg):
global DONE
DONE = 1
LockFailed = "LockFailed"
def do_lock(path, filename):
if not os.path.exists(path):
os.makedirs(path, 0777)
lockfp = open(os.path.join(path, filename),"wb")
try:
fcntl.lockf(lockfp.fileno(),fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError, reason:
log ("Unable to lock %s: %s" % (filename, str(reason)))
raise LockFailed, reason
lockfp.write("%d" % os.getpid())
lockfp.truncate()
return lockfp
def usage(progname):
print __doc__ % vars()
def main(argv, stdout, environ):
progname = argv[0]
optlist, args = getopt.getopt(argv[1:], "", ["help", "test", "debug"])
testflag = 0
if len(args) != 0:
usage(progname)
return
lock = do_lock(DISCUSS_DATA_ROOT, "archiver.lock")
global DONE
#signal.signal(signal.SIGTERM, handleSignal)
log("archiver: start")
try:
while not DONE:
try:
archive_dirs()
except:
handle_error.handleException("Archiver Error")
if DONE: break
# tlog("sleeping")
time.sleep(10)
finally:
os.unlink(os.path.join(DISCUSS_DATA_ROOT, "archiver.lock"))
if __name__ == "__main__":
main(sys.argv, sys.stdout, os.environ)
```
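
`do_lock()` relies on a non-blocking exclusive `fcntl` lock so that only one archiver instance runs at a time. A minimal Python 3 sketch of the same pattern (Unix-only, since `fcntl` is not available on Windows):

```python
import fcntl
import os

def acquire_lock(lockfile):
    """Take a non-blocking exclusive lock; raise if another process holds it."""
    fp = open(lockfile, "wb")
    try:
        fcntl.lockf(fp.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
    except OSError:
        fp.close()
        raise RuntimeError("another instance holds %s" % lockfile)
    fp.write(b"%d" % os.getpid())  # record our pid for debugging
    fp.truncate()
    return fp  # caller must keep this object alive to hold the lock
```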
#### File: pysrc/base/CSPage.py
```python
import neo_cgi
import sys, os, string
import time
import profiler
from clearsilver.log import *
# errors thrown...
class NoPageName(Exception): pass
class NoDisplayMethod(Exception): pass
# errors signaled back to here
class Redirected(Exception): pass
class DisplayDone(Exception): pass
class DisplayError(Exception): pass
class Context:
def __init__ (self):
self.argv = sys.argv
self.stdin = sys.stdin
self.stdout = sys.stdout
self.stderr = sys.stderr
self.environ = os.environ
class CSPage:
def __init__(self, context, pagename=0,readDefaultHDF=1,israwpage=0):
if pagename == 0:
raise NoPageName, "missing pagename"
self.pagename = pagename
self.readDefaultHDF = readDefaultHDF
self._israwpage = israwpage
self.context = context
self._error_template = None
self.page_start_time = time.time()
neo_cgi.cgiWrap(context.stdin, context.stdout, context.environ)
neo_cgi.IgnoreEmptyFormVars(1)
self.ncgi = neo_cgi.CGI()
self.ncgi.parse()
self._path_num = 0
domain = self.ncgi.hdf.getValue("CGI.ServerName","")
domain = self.ncgi.hdf.getValue("HTTP.Host", domain)
self.domain = domain
self.debugEnabled = self.ncgi.hdf.getIntValue("Query.debug",self.ncgi.hdf.getIntValue("Cookie.debug",0))
if self.debugEnabled:
profiler.enable()
self.subclassinit()
self.setPaths([self.ncgi.hdf.getValue("CGI.DocumentRoot","")])
def __del__ (self):
profiler.disable()
def subclassinit(self):
pass
def setPaths(self, paths):
if (type(paths) != type([])):
paths = [paths]
for path in paths:
self.ncgi.hdf.setValue("hdf.loadpaths.%d" % self._path_num, path)
self._path_num = self._path_num + 1
def redirectUri(self,redirectTo):
ncgi = self.ncgi
if ncgi.hdf.getIntValue("Cookie.debug",0) == 1:
ncgi.hdf.setValue("CGI.REDIRECT_TO",redirectTo)
ncgi.display("dbg/redirect.cs")
print "<PRE>"
print neo_cgi.htmlEscape(ncgi.hdf.dump())
print "</PRE>"
raise DisplayDone
self.ncgi.redirectUri(redirectTo)
raise Redirected, "redirected To: %s" % redirectTo
## ----------------------------------
## methods to be overridden in subclass when necessary:
def setup(self):
pass
def display(self):
raise NoDisplayMethod, "no display method present in %s" % repr(self)
def main(self):
self.setup()
self.handle_actions()
self.display()
## ----------------------------------
def handle_actions(self):
hdf = self.ncgi.hdf
hdfobj = hdf.getObj("Query.Action")
if hdfobj:
firstchild = hdfobj.child()
if firstchild:
action = firstchild.name()
if firstchild.next():
raise "multiple actions present!!!"
method_name = "Action_%s" % action
method = getattr(self,method_name)
                method()
def dumpDebug(self, etime):
log("dumpDebug")
print "<HR>\n"
if etime: print "Execution Time: %5.3f<BR><HR>" % (etime)
print '<table align=center width=90% style="font-size:10pt">'
bgcolor = "#eeeeee"
import math
for p in profiler.PROFILER_DATA:
if bgcolor == "#dddddd": bgcolor = "#eeeeee"
else: bgcolor = "#dddddd"
print "<tr bgcolor=%s><td NOWRAP>%02d:%04d<td>%5.3fs<td>%s<td>%s</tr>" % (bgcolor, math.floor(p.when),(p.when - math.floor(p.when)) * 10000, p.length, p.klass, p.what)
print "</table>"
def start(self):
SHOULD_DISPLAY = 1
if self._israwpage:
SHOULD_DISPLAY = 0
ncgi = self.ncgi
if self.readDefaultHDF:
try:
                if self.pagename is not None:
ncgi.hdf.readFile("%s.hdf" % self.pagename)
except:
log("Error reading HDF file: %s.hdf" % (self.pagename))
DISPLAY_ERROR = 0
ERROR_MESSAGE = ""
# call page main function!
try:
self.main()
except DisplayDone:
SHOULD_DISPLAY = 0
except Redirected:
# catch redirect exceptions
SHOULD_DISPLAY = 0
except DisplayError, num:
ncgi.hdf.setValue("Query.error", str(num))
if self._error_template:
ncgi.hdf.setValue("Content", self._error_template)
else:
DISPLAY_ERROR = 1
except:
SHOULD_DISPLAY = 0
DISPLAY_ERROR = 1
from clearsilver import handle_error
handle_error.handleException("Display Failed!")
ERROR_MESSAGE = handle_error.exceptionString()
if DISPLAY_ERROR:
print "Content-Type: text/html\n\n"
# print the page
print "<H1> Error in Page </H1>"
print "A copy of this error report has been submitted to the developers. "
print "The details of the error report are below."
print "<PRE>"
print neo_cgi.htmlEscape(ERROR_MESSAGE)
print "</PRE>\n"
# print debug info always on page error...
print "<HR>\n"
print "<PRE>"
print neo_cgi.htmlEscape(ncgi.hdf.dump())
print "</PRE>"
etime = time.time() - self.page_start_time
ncgi.hdf.setValue("CGI.debug.execute_time","%f" % (etime))
if SHOULD_DISPLAY and self.pagename:
debug_output = self.debugEnabled or ncgi.hdf.getIntValue("page.debug",ncgi.hdf.getIntValue("Cookie.debug",0))
if not debug_output:
ncgi.hdf.setValue("Config.CompressionEnabled","1")
# default display
template_name = ncgi.hdf.getValue("Content","%s.cs" % self.pagename)
# ncgi.hdf.setValue ("cgiout.charset", "utf-8");
if self.debugEnabled: p = profiler.Profiler("CS", "display %s" % template_name)
if debug_output:
try:
ncgi.display(template_name)
except:
print "Content-Type: text/html\n\n"
from clearsilver import handle_error
print "<PRE>"
print handle_error.exceptionString()
print "</PRE>"
else:
ncgi.display(template_name)
if self.debugEnabled: p.end()
# debug output
if debug_output:
self.dumpDebug(etime)
print "<HR>\n"
print "<PRE>"
print neo_cgi.htmlEscape(ncgi.hdf.dump())
print "</PRE>"
script_name = ncgi.hdf.getValue("CGI.ScriptName","")
if script_name:
script_name = string.split(script_name,"/")[-1]
log ("[%s] etime/dtime: %5.3f/%5.3f %s (%s)" % (self.domain, etime, time.time() - etime - self.page_start_time, script_name, self.pagename))
# a protected output function to catch the output errors that occur when
# the server is either restarted or the user pushes the stop button on the
# browser
def output(self, str):
try:
self.context.stdout.write(str)
except IOError, reason:
log("IOError: %s" % (repr(reason)))
raise DisplayDone
def allQuery (self, s):
l = []
if self.ncgi.hdf.getValue ("Query.%s.0" % s, ""):
obj = self.ncgi.hdf.getChild ("Query.%s" % s)
while obj:
l.append(obj.value())
obj = obj.next()
else:
t = self.ncgi.hdf.getValue ("Query.%s" % s, "")
if t: l.append(t)
return l
```
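
For orientation: a subclass provides `display()` (and optional `Action_*` handlers, which `handle_actions()` dispatches from `Query.Action.<name>` form variables), then calls `start()`. A hypothetical Python 2 sketch, assuming a working neo_cgi/ClearSilver CGI environment and a `hello.cs` template:

```python
class HelloPage(CSPage):
    """Hypothetical CSPage subclass; hello.cs and hello.hdf are assumed."""

    def display(self):
        self.ncgi.hdf.setValue("Hello.Name", "world")
        self.ncgi.display("hello.cs")

    def Action_Save(self):
        pass  # runs when the form posts an Action.Save variable

page = HelloPage(Context(), pagename="hello")
page.start()
```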
#### File: pysrc/base/htmlhelp.py
```python
import string
import re
import neo_cgi
# --- For these ---
# & -> &amp;
# < -> &lt;
# > -> &gt;
# ? -> %3f
#
# use neo_cgi.htmlEscape(string)
replace_dict = {}
for a_char in ['&','<','>','?', ' ', '=']:
replace_dict[a_char] = "%%%X" % ord(a_char)
def urlEscape(str):
global replace_dict
new_str = ""
for a_char in str:
if replace_dict.has_key(a_char):
new_str = new_str + replace_dict[a_char]
else:
new_str = new_str + a_char
return new_str
NON_BREAKING_HYPHEN = "‑"
def emailEscape(addr):
return neo_cgi.htmlEscape(addr)
# this was causing problems with delivery to these email addresses
# - jeske
# return string.replace(neo_cgi.htmlEscape(addr),"-",NON_BREAKING_HYPHEN)
#################
# jsEscape(str)
#
# escape a string for use inside a javascript string
js_replace = {}
for a_char in ["'",'"',"\n","\r","\t","\\",">",">","&"]:
js_replace[a_char] = "\\x%02X" % ord(a_char)
def jsEscape(str):
global js_replace
new_str = ""
for a_char in str:
if js_replace.has_key(a_char):
new_str = new_str + js_replace[a_char]
else:
new_str = new_str + a_char
return new_str
def name_case(a_str):
if len(a_str):
a_str = string.upper(a_str[0]) + string.lower(a_str[1:])
return a_str
def split_name(a_name):
if not a_name:
return ""
last_name = ""
comma_sep_parts = string.split(string.strip(a_name),",")
if len(comma_sep_parts) > 1:
try:
first_name = string.split(comma_sep_parts[1])[0]
except IndexError:
first_name = comma_sep_parts[1]
last_name = string.strip(comma_sep_parts[0])
else:
parts = string.split(string.strip(a_name)," ")
first_name = parts[0]
if len(parts) > 1:
last_name = parts[-1]
return name_case(first_name),name_case(last_name)
```
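
`urlEscape()` above hand-rolls percent-encoding for a fixed character set. For comparison, the standard library's `quote()` (in `urllib` on Python 2, `urllib.parse` on Python 3) does the same job when told to treat nothing as safe:

```python
from urllib.parse import quote  # Python 3; on Python 2: from urllib import quote

# safe='' forces '&', '=', '?', ' ' and friends to be escaped, matching
# the replace_dict above.
assert quote("a&b=c?d e", safe='') == "a%26b%3Dc%3Fd%20e"
```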
#### File: pysrc/base/RandomWords.py
```python
import whrandom
import os, sys, getopt, time
gDict = "/usr/dict/words";
gWords = None
class RandomWords:
def __init__ (self):
global gWords
if not gWords:
fp = open (gDict)
gWords = fp.readlines()
fp.close()
self._words = gWords
def word (self):
word = whrandom.choice(self._words)
if word[-1] == "\n":
word = word[:-1]
return word
def words (self, number):
words = self.word()
for x in range (number-1):
words = words + " " + self.word()
return words
def message (self, maxlines = 1000, max = None):
if not max:
max = whrandom.randint(100, 5000)
slen = 0
lines = 0
results = ""
for x in range (max):
word = self.word()
if (len (word) + slen) > 72:
lines = lines + 1
if (lines > maxlines):
return results
results = results + "\n"
slen = 0
slen = slen + len (word)
results = results + word + " "
return results
def usage(progname):
print "usage: %s [--help] [--lines <num>] [--num <num>] [--fork <num>]" % progname
print __doc__
def main(argc, argv):
import sendmail
progname = argv[0]
alist, args = getopt.getopt(argv[1:], "", ["help", "lines=", "num=", "fork=", ])
maxlines = 100
num = 1
fork = 0
random_from = 1
if len (args) < 1:
usage (progname)
return
for (field, val) in alist:
if field == "--help":
usage(progname)
return
if field == "--lines":
maxlines = int(val)
if field == "--num":
num = int (val)
if field == "--fork":
fork = int (val)
mailto = args[0]
email = ""
author = "RandomWords"
if len (args) > 1:
email = args[1]
random_from = 0
if len (args) > 2:
author = args[2]
print "Creating %d processes" % fork
while (fork):
pid = os.fork()
if pid == 0:
# In child
whrandom.seed (int(time.time()) % 256, fork % 256, os.getpid() % 256)
fork = 0
print "Created Child Process"
else:
# In parent
fork = fork - 1
for x in range (num):
rw = RandomWords()
body = rw.message(maxlines)
body = body + "\n-- \n This is a test message!\n"
subject = rw.words(5)
now = time.time()
def date_header (time_t):
sec_offset = 0
tup = time.gmtime(time_t + sec_offset)
datestr = time.strftime("%a, %d %b %Y %H:%M:%S", tup)
if sec_offset <= 0: sign = '-'
else: sign = '+'
return "%s %s%02d00" % (datestr, sign, abs(sec_offset / 3600))
date = date_header(now)
if random_from:
first_name = rw.word()
last_name = rw.word()
email = '<EMAIL>' % last_name
author = "%s %s" % (first_name, last_name)
print "Message sent to %s from \"%s\" <%s>\n Subject: %s" % ( mailto, author, email, subject)
msg = 'To: %s\nFrom: "%s" <%s>\nSubject: %s\nDate: %s\n\n%s' % (mailto, author, email, subject, date, body)
sendmail.sendmail(email, [mailto], msg)
if __name__ == "__main__":
main(len(sys.argv), sys.argv)
```
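
The hand-written `date_header()` helper builds an RFC 2822 `Date:` value; the standard library has an equivalent (`email.Utils.formatdate` on Python 2, `email.utils.formatdate` on Python 3) that also computes the real local UTC offset:

```python
import time
from email.utils import formatdate

print(formatdate(time.time(), localtime=True))
# e.g. "Tue, 03 Jun 2003 09:39:41 -0700"
```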
#### File: pysrc/base/send_dada.py
```python
import whrandom
import os, sys, getopt, time, string
def usage(progname):
print "usage: %s [--help] [--num <num>] [--fork <num>] dada.pb" % progname
print __doc__
def main(argc, argv):
import sendmail
progname = argv[0]
alist, args = getopt.getopt(argv[1:], "", ["help", "num=", "fork=", ])
num = 1
fork = 0
random_from = 1
if len (args) < 1:
usage (progname)
return
for (field, val) in alist:
if field == "--help":
usage(progname)
return
if field == "--num":
num = int (val)
if field == "--fork":
fork = int (val)
mailto = args[0]
if len (args) > 1:
email = args[1]
random_from = 0
if len (args) > 2:
author = args[2]
print "Creating %d processes" % fork
while (fork):
pid = os.fork()
if pid == 0:
# In child
whrandom.seed (int(time.time()) % 256, fork % 256, os.getpid() % 256)
fork = 0
print "Created Child Process"
else:
# In parent
fork = fork - 1
for x in range (num):
now = time.time()
def date_header (time_t):
sec_offset = 0
tup = time.gmtime(time_t + sec_offset)
datestr = time.strftime("%a, %d %b %Y %H:%M:%S", tup)
if sec_offset <= 0: sign = '-'
else: sign = '+'
return "%s %s%02d00" % (datestr, sign, abs(sec_offset / 3600))
date = date_header(now)
rseed = int(now * 1000 % 10000000)
cmd = "/usr/local/bin/dada -w 68 -r %d %s" % (rseed, args[1])
print cmd
msg = os.popen(cmd, "r").read()
lines = string.split(msg, '\n')
email = lines[0]
lines[0] = "Date: %s" % date
msg = string.join(lines, '\n')
print "Message sent to %s from %s\n" % ( mailto, email)
sendmail.sendmail(email, [mailto], msg)
#time.sleep(1)
if __name__ == "__main__":
main(len(sys.argv), sys.argv)
```
#### File: pysrc/clearsilver/CSPage.py
```python
import neo_cgi, neo_cs
import sys, os, string
import time
from log import *
# errors thrown...
NoPageName = "NoPageName"
NoDisplayMethod = "NoDisplayMethod"
# errors signaled back to here
Redirected = "Redirected"
DisplayDone = "DisplayDone"
DisplayError = "DisplayError"
class Context:
def __init__ (self):
self.argv = sys.argv
self.stdin = sys.stdin
self.stdout = sys.stdout
self.stderr = sys.stderr
self.environ = os.environ
class CSPage:
def __init__(self, context, pagename=0,readDefaultHDF=1,israwpage=0,**parms):
if pagename == 0:
raise NoPageName, "missing pagename"
self.pagename = pagename
self.readDefaultHDF = readDefaultHDF
self._israwpage = israwpage
self.context = context
self._pageparms = parms
self._error_template = None
self.page_start_time = time.time()
neo_cgi.cgiWrap(context.stdin, context.stdout, context.environ)
neo_cgi.IgnoreEmptyFormVars(1)
self.ncgi = neo_cgi.CGI()
self.ncgi.parse()
self._path_num = 0
domain = self.ncgi.hdf.getValue("CGI.ServerName","")
domain = self.ncgi.hdf.getValue("HTTP.Host", domain)
self.domain = domain
self.subclassinit()
self.setPaths([self.ncgi.hdf.getValue("CGI.DocumentRoot","")])
self._sent_headers = 0
self._reply_headers = {}
self._reply_code = 200
if self.ncgi.hdf.getValue("CGI.HTTPS", ""):
self.http = "https://"
else:
self.http = "http://"
def __setitem__(self, key, value):
self._reply_headers[string.lower(key)] = value
self.ncgi.hdf.setValue("cgiout.other.%s" % key, "%s: %s" % (key, value))
def __getitem__(self, key):
return self._reply_headers[string.lower(key)]
def has_key(self, key):
return self._reply_headers.has_key(string.lower(key))
def subclassinit(self):
pass
def setPaths(self, paths):
for path in paths:
self.ncgi.hdf.setValue("hdf.loadpaths.%d" % self._path_num, path)
self._path_num = self._path_num + 1
def redirectUri(self,redirectTo):
ncgi = self.ncgi
if ncgi.hdf.getIntValue("Cookie.debug",0) == 1:
ncgi.hdf.setValue("CGI.REDIRECT_TO",redirectTo)
# ncgi.display("dbg/redirect.cs")
cs = neo_cs.CS(ncgi.hdf)
self['Content-Type'] = "text/html"
template = """
Redirect
<br><br>
<a href="<?cs var:CGI.REDIRECT_TO ?>"><?cs var:CGI.REDIRECT_TO ?></a>
"""
cs.parseStr(template)
page = cs.render()
self.push(page)
self.push("<PRE>\n")
self.push(neo_cgi.htmlEscape(ncgi.hdf.dump()) + "\n")
self.push("</PRE>\n")
raise DisplayDone
self.ncgi.redirectUri(redirectTo)
raise Redirected, "redirected To: %s" % redirectTo
## ----------------------------------
## methods to be overridden in subclass when necessary:
def setup(self):
pass
def display(self):
raise NoDisplayMethod, "no display method present in %s" % repr(self)
def main(self):
self.setup()
self.handle_actions()
self.display()
## ----------------------------------
def handle_actions(self):
hdf = self.ncgi.hdf
hdfobj = hdf.getObj("Query.Action")
if hdfobj:
firstchild = hdfobj.child()
if firstchild:
action = firstchild.name()
if firstchild.next():
raise "multiple actions present!!!"
method_name = "Action_%s" % action
method = getattr(self,method_name)
                method()
def start(self):
SHOULD_DISPLAY = 1
if self._israwpage:
SHOULD_DISPLAY = 0
ncgi = self.ncgi
if self.readDefaultHDF:
try:
                if self.pagename is not None:
ncgi.hdf.readFile("%s.hdf" % self.pagename)
except:
debug("Error reading HDF file: %s.hdf" % (self.pagename))
DISPLAY_ERROR = 0
ERROR_MESSAGE = ""
# call page main function!
try:
self.main()
except DisplayDone:
SHOULD_DISPLAY = 0
except Redirected:
# catch redirect exceptions
SHOULD_DISPLAY = 0
except DisplayError, num:
ncgi.hdf.setValue("Query.error", str(num))
if self._error_template:
ncgi.hdf.setValue("Content", self._error_template)
else:
DISPLAY_ERROR = 1
except:
SHOULD_DISPLAY = 0
DISPLAY_ERROR = 1
import handle_error
handle_error.handleException("Display Failed!")
ERROR_MESSAGE = handle_error.exceptionString()
if DISPLAY_ERROR:
#print "Content-Type: text/html\n\n"
# print the page
self['Content-Type'] = "text/html"
# print the page
self.push("<H1> Error in Page </H1>\n")
self.push("A copy of this error report has been submitted to the developers. ")
self.push("The details of the error report are below.")
self.push("<PRE>")
self.push(handle_error.exceptionString())
self.push("</PRE>\n")
# print debug info always on page error...
self.push("<HR>\n")
self.push("<PRE>")
self.push(neo_cgi.htmlEscape(ncgi.hdf.dump()))
self.push("</PRE>")
etime = time.time() - self.page_start_time
ncgi.hdf.setValue("CGI.debug.execute_time","%f" % (etime))
if SHOULD_DISPLAY and self.pagename:
debug_output = ncgi.hdf.getIntValue("page.debug",ncgi.hdf.getIntValue("Cookie.debug",0))
# hijack the built-in debug output method...
if ncgi.hdf.getValue("Query.debug","") == ncgi.hdf.getValue("Config.DebugPassword","1"):
ncgi.hdf.setValue("Config.DebugPassword","<PASSWORD> (%s)" %
ncgi.hdf.getValue("Config.DebugPassword",""))
debug_output = 1
if not debug_output:
ncgi.hdf.setValue("Config.CompressionEnabled","1")
# default display
template_name = ncgi.hdf.getValue("Content","%s.cs" % self.pagename)
# ncgi.hdf.setValue ("cgiout.charset", "utf-8");
try:
ncgi.display(template_name)
self._sent_headers = 1
except:
self['Content-Type'] = 'text/html'
self.push("CSPage: Error occured\n")
import handle_error
self.push("<pre>" + handle_error.exceptionString() + "</pre>")
                debug_output = 1
# debug output
if debug_output:
self.push("<HR>\n")
self.push("Execution Time: %5.3f<BR><HR>" % (etime))
self.push("<PRE>")
self.push(neo_cgi.htmlEscape(ncgi.hdf.dump()))
self.push("</PRE>")
# ncgi.hdf.setValue("hdf.DEBUG",ncgi.hdf.dump())
# ncgi.display("debug.cs")
script_name = ncgi.hdf.getValue("CGI.ScriptName","")
if script_name:
script_name = string.split(script_name,"/")[-1]
log ("[%s] etime/dtime: %5.3f/%5.3f %s (%s)" % (self.domain, etime, time.time() - etime - self.page_start_time, script_name, self.pagename))
return self._reply_code
# a protected output function to catch the output errors that occur when
# the server is either restarted or the user pushes the stop button on the
# browser
def output(self, str):
try:
if len(str) > 8196:
import cStringIO
fp = cStringIO.StringIO(str)
while 1:
data = fp.read(8196*8)
if not data: break
self.context.stdout.write(data)
else:
self.context.stdout.write(str)
except IOError, reason:
log("IOError: %s" % (repr(reason)))
raise DisplayDone
def done(self):
if not self._sent_headers: self.error(500)
self._sent_headers = 0
raise DisplayDone
def push(self, data):
if not self._sent_headers:
headerdata = self.send_headers(dont_send=1)
self.output(headerdata + data)
else:
self.output(data)
def send_headers(self, dont_send=0):
self._sent_headers = 1
message = gHTTPResponses[self._reply_code]
if self._reply_code != 200:
self['status'] = "%s %s" % (self._reply_code, message)
self['connection'] = 'close'
headers = []
#headers.append(self.response(self._reply_code))
for (key, value) in self._reply_headers.items():
headers.append('%s: %s' % (key, value))
headers.append('\r\n')
if dont_send == 0:
self.push(string.join(headers, '\r\n'))
else:
return string.join(headers, '\r\n')
def allQuery (self, s):
l = []
if self.ncgi.hdf.getValue ("Query.%s.0" % s, ""):
obj = self.ncgi.hdf.getChild ("Query.%s" % s)
while obj:
l.append(obj.value())
obj = obj.next()
else:
t = self.ncgi.hdf.getValue ("Query.%s" % s, "")
if t: l.append(t)
return l
def error(self, code, reason=None):
self._reply_code = code
message = gHTTPResponses[code]
s = DEFAULT_ERROR_MESSAGE % {
'code': code, 'message': message, 'reason': reason
}
# self['Content-Length'] = len(s)
# self['Content-Type'] = 'text/html'
# self.push(s)
self.context.stdout.write("Content-Type: text/html\n")
self.context.stdout.write("Status: %s\n" % code)
self.context.stdout.write(s)
# self.done()
raise DisplayDone
gHTTPResponses = {
100: "Continue",
101: "Switching Protocols",
200: "OK",
201: "Created",
202: "Accepted",
203: "Non-Authoritative Information",
204: "No Content",
205: "Reset Content",
206: "Partial Content",
300: "Multiple Choices",
301: "Moved Permanently",
302: "Moved Temporarily",
303: "See Other",
304: "Not Modified",
305: "Use Proxy",
400: "Bad Request",
401: "Unauthorized",
402: "Payment Required",
403: "Forbidden",
404: "Not Found",
405: "Method Not Allowed",
406: "Not Acceptable",
407: "Proxy Authentication Required",
408: "Request Time-out",
409: "Conflict",
410: "Gone",
411: "Length Required",
412: "Precondition Failed",
413: "Request Entity Too Large",
414: "Request-URI Too Large",
415: "Unsupported Media Type",
500: "Internal Server Error",
501: "Not Implemented",
502: "Bad Gateway",
503: "Service Unavailable",
504: "Gateway Time-out",
505: "HTTP Version not supported"
}
# Default error message
DEFAULT_ERROR_MESSAGE = string.join(
['',
'<head>',
'<title>%(code)d %(message)s</title>',
'</head>',
'<body>',
'<h1>%(message)s</h1>',
'<p>Error code %(code)d.',
'<p>Message: %(message)s.',
'<p>Reason:\n <pre>%(reason)s</pre>',
'</body>',
''
],
'\r\n'
)
```
#### File: pysrc/clearsilver/odb_postgres.py
```python
import os, sys, string, time, getopt
from log import *
import odb
from pyPgSQL import PgSQL
class Cursor(odb.Cursor):
def insert_id(self, tablename, colname):
self.execute("select last_value from %s_%s_seq" % (tablename, colname))
row = self.fetchone()
return row[0]
class Connection(odb.Connection):
def __init__(self, *args, **kwargs):
odb.Connection.__init__(self)
        self._conn = PgSQL.connect(*args, **kwargs)
self.SQLError = PgSQL.OperationalError
def getConnType(self): return "postgres"
def cursor(self):
return Cursor(self._conn.cursor())
def escape(self,str):
if str is None:
return None
elif type(str) == type(""):
return string.replace(str,"'","''")
elif type(str) == type(1):
return str
else:
raise "unknown column data type: %s" % type(str)
def listTables(self, cursor):
cursor.execute("select tablename from pg_catalog.pg_tables")
rows = cursor.fetchall()
tables = []
for row in rows:
tables.append(row[0])
return tables
def listIndices(self, tableName, cursor):
sql = "select indexname from pg_catalog.pg_indexes where tablename='%s'" % tableName
cursor.execute(sql)
rows = cursor.fetchall()
tables = map(lambda row: row[0], rows)
return tables
def listFieldsDict(self, table_name, cursor):
sql = "SELECT c.oid, n.nspname, c.relname FROM pg_catalog.pg_class c LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace WHERE pg_catalog.pg_table_is_visible(c.oid) AND c.relname = '%s' ORDER BY 2, 3;" % table_name
cursor.execute(sql)
row = cursor.fetchone()
oid = row[0]
sql = "SELECT a.attname, pg_catalog.format_type(a.atttypid, a.atttypmod), (SELECT substring(d.adsrc for 128) FROM pg_catalog.pg_attrdef d WHERE d.adrelid = a.attrelid AND d.adnum = a.attnum AND a.atthasdef), a.attnotnull, a.attnum FROM pg_catalog.pg_attribute a WHERE a.attrelid = '%s' AND a.attnum > 0 AND NOT a.attisdropped ORDER BY a.attnum" % oid
cursor.execute(sql)
rows = cursor.fetchall()
columns = {}
for row in rows:
colname = row[0]
columns[colname] = row
return columns
def alterTableToMatch(self, table, cursor):
invalidAppCols, invalidDBCols = table.checkTable()
if not invalidAppCols: return
        # PostgreSQL accepts only one ADD COLUMN per ALTER TABLE statement
        # (the parenthesized multi-column form is MySQL syntax), so issue
        # one statement per missing column.
        for colname in invalidAppCols.keys():
            col = table.getColumnDef(colname)
            colname = col[0]
            coltype = col[1]
            options = col[2]
            coldef = table._colTypeToSQLType(colname, coltype, options)
            sql = "alter table %s add column %s" % (table.getTableName(), coldef)
            print sql
            cursor.execute(sql)
def auto_increment(self, coltype):
return "SERIAL", None
def supportsTriggers(self): return False
```
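
`escape()` above doubles single quotes so values can be spliced into SQL strings. Worth noting as a design alternative: DB-API parameter binding makes that manual quoting unnecessary, since the driver quotes values itself. A sketch using sqlite3, which ships with Python (pyPgSQL uses `%s` placeholders rather than `?`):

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("create table users (name text)")
# The driver handles the embedded quote; no escape() call needed.
conn.execute("insert into users values (?)", ("O'Brien",))
print(conn.execute("select name from users").fetchone()[0])  # O'Brien
```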
#### File: pysrc/clearsilver/odb_sqlite3.py
```python
import os, sys, string, time, getopt
from log import *
import types
import odb
#import sqliterep as sqlite
import sqlite3
sqlite = sqlite3
# The DB-API exception classes are re-exported by the public sqlite3
# module; importing the private _sqlite3 extension is unnecessary.
odb.OperationalError = sqlite3.OperationalError
odb.DatabaseError = sqlite3.DatabaseError
odb.DataError = sqlite3.DataError
odb.IntegrityError = sqlite3.IntegrityError
odb.NotSupportedError = sqlite3.NotSupportedError
class Cursor(odb.Cursor):
def insert_id(self, tablename, colname):
return self.cursor.lastrowid
def execute(self, sql):
try:
return self.cursor.execute(sql)
        except sqlite3.DatabaseError, reason:
            reason = str(reason) + " (%s)" % sql
            raise sqlite3.DatabaseError, reason
def begin(self):
pass
class Connection(odb.Connection):
def __init__(self, *args, **kwargs):
odb.Connection.__init__(self)
        self._conn = sqlite.connect(*args, **kwargs)
self._conn.text_factory = str
self.SQLError = sqlite.Error
def getConnType(self): return "sqlite"
def cursor(self):
return Cursor(self._conn.cursor())
    def encode(self, s):
        # pysqlite1's sqlite.encode()/decode() helpers do not exist in the
        # sqlite3 module; sqlite3 stores str values as-is, so pass through.
        return s
    def decode(self, s):
        return s
def escape(self,s):
if s is None:
return None
elif type(s) == types.StringType:
return string.replace(s,"'","''")
elif type(s) in (types.IntType, types.FloatType):
return s
elif type(s) == types.UnicodeType:
return str(s)
else:
warn("unknown column data type: <%s> value=%s" % (type(s), s[:100]))
return str(s)
def listTables(self, cursor):
cursor.execute("select name from sqlite_master where type='table'")
rows = cursor.fetchall()
tables = []
for row in rows: tables.append(row[0])
return tables
def supportsTriggers(self): return True
def listTriggers(self, cursor):
cursor.execute("select name from sqlite_master where type='trigger'")
rows = cursor.fetchall()
tables = []
for row in rows: tables.append(row[0])
return tables
def listIndices(self, tableName, cursor):
cursor.execute("select name from sqlite_master where type='index'")
rows = cursor.fetchall()
tables = []
for row in rows:
if row[0].find("sqlite_autoindex_") != -1:
continue
tables.append(row[0])
return tables
def listFieldsDict(self, table_name, cursor):
sql = "pragma table_info(%s)" % table_name
cursor.execute(sql)
rows = cursor.fetchall()
columns = {}
for row in rows:
colname = row[1]
columns[colname] = row
return columns
def _tableCreateStatement(self, table_name, cursor):
sql = "select sql from sqlite_master where type='table' and name='%s'" % table_name
print sql
cursor.execute(sql)
row = cursor.fetchone()
sqlstatement = row[0]
return sqlstatement
def alterTableToMatch(self, table, cursor):
tableName = table.getTableName()
debug("alterTableToMatch", tableName)
tmpTableName = tableName + "_" + str(os.getpid())
invalidAppCols, invalidDBCols = table.checkTable(warnflag=0)
# warn(invalidAppCols, invalidDBCols)
## if invalidAppCols or invalidDBCols:
## return
if not invalidAppCols and not invalidDBCols:
return
oldcols = self.listFieldsDict(tableName, cursor)
# tmpcols = oldcols.keys()
tmpcols = []
newcols = table.getAppColumnList()
for colname, coltype, options in newcols:
if oldcols.has_key(colname): tmpcols.append(colname)
tmpcolnames = string.join(tmpcols, ",")
count = table.fetchRowCount()
warn("count for %s=" % table.getTableName(), count)
statements = []
#sql = "begin transaction"
#statements.append(sql)
# sql = "create temporary table %s (%s)" % (tmpTableName, tmpcolnames)
sql = "create table %s (%s)" % (tmpTableName, tmpcolnames)
statements.append(sql)
sql = "insert into %s select %s from %s" % (tmpTableName, tmpcolnames, tableName)
statements.append(sql)
sql = "drop table %s" % tableName
statements.append(sql)
sql = table._createTableSQL()
statements.append(sql)
sql = "insert into %s(%s) select %s from %s" % (tableName, tmpcolnames, tmpcolnames, tmpTableName)
statements.append(sql)
sql = "drop table %s" % tmpTableName
statements.append(sql)
#sql = "commit"
#statements.append(sql)
self.begin()
for statement in statements:
print statement
cursor.execute(statement)
self.commit()
def auto_increment(self, coltype):
return coltype, ""
def create_fullTextSearchTable(self, tableName, column_list):
defs = []
for colname, coltype, options in column_list:
if colname in ("rowid", "docid"): continue
defs.append(colname)
defs = string.join(defs, ", ")
return "CREATE virtual TABLE %s using FTS3(%s)" % (tableName, defs)
def test():
pass
def usage(progname):
print __doc__ % vars()
def main(argv, stdout, environ):
progname = argv[0]
optlist, args = getopt.getopt(argv[1:], "", ["help", "test", "debug"])
testflag = 0
if len(args) == 0:
usage(progname)
return
for (field, val) in optlist:
if field == "--help":
usage(progname)
return
elif field == "--debug":
debugfull()
elif field == "--test":
testflag = 1
if testflag:
test()
return
if __name__ == "__main__":
main(sys.argv, sys.stdout, os.environ)
```
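
`alterTableToMatch()` implements the classic SQLite schema-migration dance: older SQLite cannot drop or retype columns, so surviving data is copied through a temporary table and the table is rebuilt with the new schema. A condensed, self-contained sketch of the same pattern (table and column names are illustrative):

```python
import sqlite3

conn = sqlite3.connect(":memory:")
c = conn.cursor()
c.execute("create table t (a integer, b text)")
c.execute("insert into t values (1, 'x')")
# Rebuild t keeping only column a and adding a new column c:
c.execute("create table t_tmp (a integer)")
c.execute("insert into t_tmp select a from t")       # save surviving data
c.execute("drop table t")
c.execute("create table t (a integer, c text default '')")
c.execute("insert into t(a) select a from t_tmp")    # restore it
c.execute("drop table t_tmp")
conn.commit()
print(c.execute("select * from t").fetchall())       # [(1, '')]
```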
#### File: pysrc/clearsilver/test.py
```python
import os, sys, string, time, getopt
from log import *
import odb
import odb_sqlite3
def test(name, email):
print dir()
def usage(progname):
print __doc__ % vars()
def main(argv, stdout, environ):
progname = argv[0]
optlist, args = getopt.getopt(argv[1:], "", ["help", "test", "debug"])
testflag = 0
for (field, val) in optlist:
if field == "--help":
usage(progname)
return
elif field == "--debug":
debugfull()
elif field == "--test":
testflag = 1
test("scott", "<EMAIL>")
if __name__ == "__main__":
main(sys.argv, sys.stdout, os.environ)
```
#### File: pysrc/mimelib/address.py
```python
from rfc822 import unquote, quote, parseaddr
from rfc822 import dump_address_pair
from rfc822 import AddrlistClass as _AddrlistClass
COMMASPACE = ', '
def getaddresses(fieldvalues):
all = COMMASPACE.join(fieldvalues)
a = _AddrlistClass(all)
return a.getaddrlist()
```
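
`rfc822` was removed in Python 3; the same helper now lives in `email.utils` with identical behavior:

```python
from email.utils import getaddresses

pairs = getaddresses(['Alice <alice@example.org>', 'bob@example.org'])
print(pairs)  # [('Alice', 'alice@example.org'), ('', 'bob@example.org')]
```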
#### File: pysrc/mimelib/Errors.py
```python
class MessageError(Exception):
"""Base class for errors in this module."""
class MessageParseError(MessageError):
"""Base class for message parsing errors."""
def __init__(self, msg, lineno):
self._msg = msg
self._lineno = lineno
def __str__(self):
return self._msg + ' (line %d)' % self._lineno
def __add__(self, other):
return self._lineno + other
def __sub__(self, other):
return self._lineno - other
    def __iadd__(self, other):
        self._lineno += other
        return self  # augmented assignment must return the instance
    def __isub__(self, other):
        self._lineno -= other
        return self
class HeaderParseError(MessageParseError):
"""Error while parsing headers."""
class BoundaryError(MessageParseError):
"""Couldn't find terminating boundary."""
class MultipartConversionError(MessageError, TypeError):
"""Conversion to a multipart is prohibited."""
```
#### File: pysrc/mimelib/MIMEBase.py
```python
import Message
class MIMEBase(Message.Message):
"""Base class for MIME specializations."""
def __init__(self, _major, _minor, **_params):
"""This constructor adds a Content-Type: and a MIME-Version: header.
The Content-Type: header is taken from the _major and _minor
arguments. Additional parameters for this header are taken from the
keyword arguments.
"""
Message.Message.__init__(self)
ctype = '%s/%s' % (_major, _minor)
self['MIME-Version'] = '1.0'
self.addheader('Content-Type', ctype, **_params)
```
#### File: pysrc/mimelib/Text.py
```python
import MIMEBase
import base64, string, MimeQPEnc
def base64_encoder(msg):
"""Base64 is the standard encoder for image data."""
orig = msg.get_payload()
encdata = base64.encodestring(orig)
msg.set_payload(encdata)
msg['Content-Transfer-Encoding'] = 'base64'
def qp_encoder(msg):
orig = msg.get_payload()
encdata = MimeQPEnc.encodeQPrfc2045(orig)
msg.set_payload(encdata)
msg['Content-Transfer-Encoding'] = 'quoted-printable'
def count_nonascii (str, check = 0):
    # Counts bytes outside printable ASCII. The check flag had no effect in
    # the original (both branches returned count); kept for compatibility.
    count = 0
    for x in range (len (str)):
        if ord(str[x]) > 127 or ord(str[x]) < 32:
            count = count + 1
    return count
class Text(MIMEBase.MIMEBase):
"""Class for generating text/* type MIME documents."""
def __init__(self, _text, _minor='plain', _charset="us-ascii"):
"""Create a text/* type MIME document.
_text is the string for this message object. If the text does not end
in a newline, one is added.
_minor is the minor content type, defaulting to "plain".
_charset is the character set parameter added to the Content-Type:
header. This defaults to "us-ascii".
"""
_charset = string.lower(_charset)
enc = None
cnt8 = count_nonascii (_text)
# upgrade to iso-8859-1 if there are 8bit characters
if cnt8 and _charset == "us-ascii":
_charset = "iso-8859-1"
# Which is shorter, base64 or qp?
if cnt8:
# the russians all have old clients which can't do mime decoding
# apparently, Outlook for JP doesn't do line breaks correctly
# if we base64 encode the message. Technically, jis should be
# 7bit, but it will contain \027 <esc>... so we'll say 8bit
if _charset in ['koi8-r', 'iso-2022-jp', 'iso-2022-kr']:
enc = "8bit"
elif len (_text) + cnt8 * 3 < len (_text) * 4 / 3:
enc = "qp"
else:
enc = "base64"
MIMEBase.MIMEBase.__init__(self, 'text', _minor,
**{'charset': _charset})
if _text and _text[-1] <> '\n':
_text += '\n'
self.set_payload(_text)
if enc == "qp":
qp_encoder(self)
elif enc == "base64":
base64_encoder(self)
elif enc == "8bit":
self['Content-Transfer-Encoding'] = '8bit'
```
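
A quick sanity check of the encoder heuristic above: quoted-printable turns each escaped byte into three bytes, so a body of length L with c non-ASCII bytes costs roughly L + 2c bytes (the code budgets L + 3c, slightly conservative), while base64 always costs about 4L/3. The condition L + 3c < 4L/3 therefore picks quoted-printable whenever c < L/9, e.g. under about 133 escaped bytes in a 1200-byte body.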
#### File: csla/pysrc/which_read.py
```python
import os, sys, string, re, time
import marshal
from log import *
import hashlib
import neo_cgi, neo_util
from clearsilver import odb, hdfhelp, odb_sqlite3
class WhichReadDB(odb.Database):
def __init__ (self, conn):
odb.Database.__init__(self, conn)
self.addTable("whichread", "wr_whichread", WhichReadTable)
def get(self, readerid):
row = self.whichread.lookup(readerid=readerid)
if not row: row = ''
return row
class WhichReadTable(odb.Table):
def _defineRows(self):
self.d_addColumn("readerid", odb.kVarString, primarykey=1)
self.d_addColumn("wrlist", odb.kVarString)
def createTables(path):
dbpath = "%s/whichread.db3" % path
# conn = odb_sqlite3.Connection(dbpath, autocommit=0)
conn = odb_sqlite3.Connection(dbpath)
db = WhichReadDB(conn)
db.createTables()
db.synchronizeSchema()
db.createIndices()
class WhichRead:
def __init__ (self, listname,path,ncgi):
self.listname = listname
self._path = path
self.ncgi = ncgi
self.__db = None
self._whichReadID = self.getWhichReadID()
def getWhichReadID(self):
wrid = self.ncgi.hdf.getValue("Cookie.WRID","")
if not wrid:
m = hashlib.md5()
m.update("%s-%s" % (self.ncgi.hdf.getValue("CGI.RemoteAddress","ADDR"),
time.time()))
wrid = m.hexdigest()
log("issued new WhichReadID: %s" % wrid)
self.ncgi.cookieSet("WRID",wrid,persist=1)
# self.ncgi.hdf.setValue("Cookie.WRID",wrid)
return wrid
def _db(self):
if self.__db is None:
dbpath = "%s/whichread.db3" % self._path
# conn = odb_sqlite3.Connection(dbpath, autocommit=0)
conn = odb_sqlite3.Connection(dbpath)
self.__db = WhichReadDB(conn)
return self.__db
def markMsgRead(self, message_num):
# unpack the seen cookie
seencookiename = "%s.WR" % self.listname
seencookie = self.ncgi.hdf.getValue("Cookie.%s" % seencookiename, "")
if seencookie:
c_parts = string.split(seencookie,",")
else:
c_parts = []
mnum_str = "%s" % message_num
try:
index = c_parts.remove(mnum_str)
log("already seen in cookie: %s" % message_num)
except ValueError:
log("markread: %s" % message_num)
# yes, it's new!
# make a new seen cookie! (only 200 entries)
c_parts.insert(0,mnum_str)
new_seencookie = string.join(c_parts[:200],",")
self.ncgi.cookieSet(seencookiename,new_seencookie,persist=1)
# add to whichread DB
self.addToDB(message_num)
# append to whichread log
fp = open("%s/whichreadchanges.log" % self._path,"ab+")
fp.write("%s %s\n" % (self._whichReadID,mnum_str))
fp.close()
def getWRList(self):
# read whichread from disk
wdb = self._db()
whichread = ""
whichread = wdb.whichread.lookup(readerid=self._whichReadID)
if whichread is None:
wrlist = ''
else:
wrlist = whichread.wrlist
wrl = WRList(wrlist)
return wrl
def addToDB(self,mnum):
wdb = self._db()
whichread = ""
whichread = wdb.whichread.lookup(readerid=self._whichReadID)
if whichread is None:
wrlist = ''
else:
wrlist = whichread.wrlist
wr_list = WRList(wrlist)
wr_list.markRead(mnum)
row = wdb.whichread.lookupCreate(readerid=self._whichReadID)
row.wrlist = wr_list.dump()
row.save()
def __del__ (self):
if self.__db:
self.__db.close()
class WRList:
def __init__(self,val):
self._val = val
self._parts = string.split(val,",")
self._dict = {}
dict = self._dict
for a_part in self._parts:
dict[a_part] = 1
    def markRead(self,mnum):
        mnum = "%s" % mnum
        try:
            index = self._parts.index(mnum)
        except ValueError:
            self._parts.insert(0,mnum)
            self._dict[mnum] = 1  # keep isRead()'s index in sync with _parts
def dump(self):
# log("WRLIST: %s" % self._parts)
return string.join(self._parts,",")
def isRead(self,mnum):
mnum = "%s" % mnum
# log("isRead %s = %s" % (mnum,self._dict.has_key(mnum)))
return self._dict.has_key(mnum)
```
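
A short usage sketch for `WRList` (Python 2, matching the module): message numbers round-trip through a single comma-joined string, newest first, so the whole read-set fits in one cookie or database column:

```python
wrl = WRList("12,7,3")
wrl.markRead(42)         # new IDs go to the front
print wrl.dump()         # "42,12,7,3"
print wrl.isRead("7")    # True
```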
{
"source": "JeskeG/final",
"score": 3
}
#### File: final/meetings/flask_main.py
```python
import hashlib
import flask
import requests
import sys
from flask import render_template
from flask import request
from flask import url_for
import uuid
import string
import random
from bson.objectid import ObjectId
import json
import logging
# Date handling
import arrow # Replacement for datetime, based on moment.js
# import datetime # But we still need time
from dateutil import tz # For interpreting local times
# OAuth2 - Google library implementation for convenience
from oauth2client import client
import httplib2 # used in oauth2 flow
# Google API for services
from apiclient import discovery
###
# Globals
###
import config
from pymongo import MongoClient
if __name__ == "__main__":
CONFIG = config.configuration()
else:
CONFIG = config.configuration(proxied=True)
MONGO_CLIENT_URL = "mongodb://{}:{}@{}.{}/{}".format(
CONFIG.DB_USER,
CONFIG.DB_USER_PW,
CONFIG.DB_HOST,
CONFIG.DB_PORT,
CONFIG.DB)
try:
dbclient = MongoClient(MONGO_CLIENT_URL)
db = getattr(dbclient, CONFIG.DB)
collection = db.meetings
except:
print("Failure opening database. Is Mongo running? Correct password?")
sys.exit(1)
print("Using URL '{}'".format(MONGO_CLIENT_URL))
app = flask.Flask(__name__)
app.debug = CONFIG.DEBUG
app.logger.setLevel(logging.DEBUG)
app.secret_key = CONFIG.SECRET_KEY
SCOPES = 'https://www.googleapis.com/auth/calendar.readonly'
CLIENT_SECRET_FILE = CONFIG.GOOGLE_KEY_FILE ## You'll need this
APPLICATION_NAME = 'MeetMe class project'
#############################
#
# Pages (routed from URLs)
#
#############################
@app.route("/")
@app.route("/index")
def index():
app.logger.debug("Entering index")
if 'begin_date' not in flask.session:
init_session_values()
return render_template('request.html')
@app.route("/choose")
def choose():
return render_template('request.html')
@app.route("/login", methods=["POST"])
def login():
app.logger.debug("Checking credentials for Google calendar access")
credentials = valid_credentials()
if not credentials:
app.logger.debug("Redirecting to authorization")
return flask.redirect(flask.url_for('oauth2callback'))
gcal_service = get_gcal_service(credentials)
app.logger.debug("Returned from get_gcal_service")
flask.g.calendars = list_calendars(gcal_service)
return flask.render_template('attendee.html')
@app.route("/respond/<meeting>/<ID>")
def respond(meeting, ID):
flask.session['meeting'] = meeting
flask.session['ID'] = ID
db = collection.find_one({"_id": ObjectId(meeting)})
flask.g.name = db["meeting_name"]
beg = arrow.get(db['begin_date']).format("YYYY-MM-DD")
end = arrow.get(db['end_date']).format("YYYY-MM-DD")
flask.g.date = beg + " - " + end
flask.g.time = db['begin_time'] + " - " + db['end_time']
return flask.render_template("attendee.html")
@app.route("/schedule/<meet_id>")
def schedule(meet_id):
meet_id = flask.session['meeting']
return flask.render_template("schedule.html")
@app.route("/create_meeting", methods=["POST"])
def create_meeting():
name = request.form["name"]
emails = request.form.getlist("emails[]")
app.logger.debug("meeting =" + name)
app.logger.debug("emails = "+ str(emails))
beg_time = flask.session['start_time']
end_time = flask.session['stop_time']
beg_date = flask.session['begin_date']
end_date = flask.session['end_date']
create_db(name, beg_date, end_date, beg_time, end_time, emails)
app.logger.debug("DB created, redirecting to URLS")
app.logger.debug("URL_list = " + str(flask.session['url_list']))
return flask.jsonify(result=flask.url_for("add"))
@app.route("/add")
def add():
flask.g.url_list = flask.session['url_list']
return flask.render_template("add.html")
@app.route("/calculate")
def calculate():
db = collection.find_one({"_id": ObjectId(flask.session['meeting'])})
users = db['attendees']
app.logger.debug("Starting to calculate free times")
freetime = []
busytime= []
no_response=[]
beg = arrow.get(db['begin_date']).format("YYYY-MM-DD")
end = arrow.get(db['end_date']).format("YYYY-MM-DD")
start_time = db['begin_time']
end_time = db['end_time']
start_hr = time_to_num(str(start_time))[0]
start_min = time_to_num(str(start_time))[1]
end_hr = time_to_num(str(end_time))[0]
end_min = time_to_num(str(end_time))[1]
date = arrow.get(beg)
stop = arrow.get(end)
app.logger.debug("Start hour to shift = " + str(start_hr))
app.logger.debug("End hour to shift = " + str(end_hr))
while date <= stop:
s_date = date.shift(hours=start_hr, minutes=start_min)
e_date = date.shift(hours=end_hr, minutes=end_min)
freetime.append({"name": 'Free', "start": s_date, "end": e_date})
date = date.shift(days=+1)
for user in users:
if not user['responded']:
no_response.append(user['email'])
continue
if user['busy_times']:
for event in user['busy_times']:
busytime.append(event)
app.logger.debug("A busy time = " + str(event))
meet_time = calc_free_time(freetime, busytime)
app.logger.debug("meet_times = " + str(meet_time))
app.logger.debug("hasn't responded = " + str(no_response))
meeting_info = {"meet_times": meet_time, "no_response": no_response}
return flask.jsonify(result=meeting_info)
@app.route("/busy", methods=["POST"])
def busy():
app.logger.debug("calculating busy times for {}".format(flask.session["ID"]))
db = collection.find_one({"_id": ObjectId(flask.session["meeting"])})
start_time = db['begin_time']
end_time = db['end_time']
start_hr = time_to_num(str(start_time))[0]
start_min = time_to_num(str(start_time))[1]
end_hr = time_to_num(str(end_time))[0]
end_min = time_to_num(str(end_time))[1]
beg = arrow.get(db['begin_date']).shift(hours=start_hr, minutes=start_min)
app.logger.debug("Begin Time = " + str(beg))
end = arrow.get(db['end_date']).shift(hours=end_hr, minutes=end_min)
busy = calc_busy_time(beg, end)
app.logger.debug("busy times = {}".format(str(busy)))
update_busy_times(busy, flask.session['meeting'], flask.session["ID"])
update_responded(flask.session['meeting'], flask.session["ID"])
app.logger.debug("DB is updated")
return flask.jsonify(result=flask.url_for("schedule", meet_id=flask.session['meeting']))
@app.route('/oauth2callback')
def oauth2callback():
"""
The 'flow' has this one place to call back to. We'll enter here
more than once as steps in the flow are completed, and need to keep
track of how far we've gotten. The first time we'll do the first
step, the second time we'll skip the first step and do the second,
and so on.
"""
app.logger.debug("Entering oauth2callback")
flow = client.flow_from_clientsecrets(
CLIENT_SECRET_FILE,
scope= SCOPES,
redirect_uri=flask.url_for('oauth2callback', _external=True))
app.logger.debug("Got flow")
if 'code' not in flask.request.args:
app.logger.debug("Code not in flask.request.args")
auth_uri = flow.step1_get_authorize_url()
return flask.redirect(auth_uri)
else:
app.logger.debug("Code was in flask.request.args")
auth_code = flask.request.args.get('code')
credentials = flow.step2_exchange(auth_code)
flask.session['credentials'] = credentials.to_json()
app.logger.debug("Got credentials")
return flask.redirect(flask.url_for('respond', meeting=flask.session['meeting'], ID=flask.session['ID']))
@app.route('/setrange', methods=['POST'])
def setrange():
"""
User chose a date range with the bootstrap daterange
widget.
"""
app.logger.debug("Entering setrange")
flask.flash("Setrange gave us '{}'".format(
request.form.get('daterange')))
daterange = request.form.get('daterange')
flask.session['start_time'] = request.form.get("begin_time")
flask.session['stop_time'] = request.form.get('end_time')
flask.session['daterange'] = daterange
daterange_parts = daterange.split()
flask.session['begin_date'] = interpret_date(daterange_parts[0])
flask.session['end_date'] = interpret_date(daterange_parts[2])
app.logger.debug("Setrange parsed {} - {} dates as {} - {} and {} - {} times as {} - {}".format(
daterange_parts[0], daterange_parts[1],
flask.session['begin_date'], flask.session['end_date'],
flask.session['start_time'], flask.session['stop_time'],
flask.session['begin_time'],flask.session['end_time']))
return flask.redirect(flask.url_for("choose"))
#######
#FUNCTIONS USED
def update_busy_times(busy, meeting, user):
collection.update_one({'_id': ObjectId(meeting), "attendees.ID": str(user)},
{"$set": {'attendees.$.busy_times': busy}}, upsert=True)
def update_responded(meeting, user):
collection.update_one({'_id': ObjectId(meeting), "attendees.ID": str(user)},
{"$set": {"attendees.$.responded": True}}, upsert=True)
def calc_busy_time(beg, end):
events = []
cals = request.form.getlist('list[]')
service = get_gcal_service(valid_credentials())
for cal in cals:
try:
results = service.events().list(calendarId=cal, timeMin=beg, timeMax=end, singleEvents=True,
orderBy="startTime").execute()
real = results.get('items', [])
for elem in real:
                if 'dateTime' in elem['start']:  # all-day events carry only a 'date' key
events.append({"name": elem['summary'],
"start": elem['start']['dateTime'],
"end": elem['end']['dateTime']})
else: # all day event!
                    start = str(elem['start']['date'])+"T00:00:00-08:00"
                    # Google gives all-day events an exclusive end date, so
                    # midnight of that date is the true end (hour 24 would
                    # not parse as a valid time).
                    end = str(elem['end']['date'])+"T00:00:00-08:00"
events.append({"name": elem['summary'], "start": start, "end": end})
except:
app.logger.debug("Something failed")
app.logger.debug("events = " + str(events))
return events
def calc_free_time(freetime, busytime):
for free in freetime:
for busy in busytime:
free_start = arrow.get(free['start'])
fs = free_start.time()
free_end = arrow.get(free['end'])
extra_free = free['end']
fe = free_end.time()
busy_start = arrow.get(busy['start'])
bs = busy_start.time()
busy_end = arrow.get(busy['end'])
be = busy_end.time()
if bs >= be:
busytime.remove(busy)
break
if bs < fs:
if be < fs:
busytime.remove(busy)
break
if bs > fe:
busytime.remove(busy)
break
if free_start.date() == busy_start.date():
if free_start.date() == busy_end.date(): # single day event
if bs <= fs:
if be >= fe: # busy throughout free
freetime.remove(free)
app.logger.debug("Free time completely removed")
break
else:
if be > fs:
free['start'] = busy['end'] # busy front overlaps
app.logger.debug("Free start = " + str(free_start) + " changed to " + str(busy['end']))
continue
if bs > fs:
if be < fe:
free['end'] = busy['start'] # busy cuts up free into two
app.logger.debug("Free end = " + str(free_end) + " changed to " + str(busy['start']))
freetime.append({"name": 'Free', "start": busy['end'],
"end": extra_free})
app.logger.debug("New time created from "+str(busy['end'])+" to "+str(extra_free))
continue
elif be >= fe:
if bs < fe:
free['end'] = busy['start'] # busy back overlaps
app.logger.debug("Free end = " + str(free_end) + " changed to " + str(busy['start']))
continue
elif busy_end.date() > free_end.date(): # multiday event
if bs <= fs:
freetime.remove(free) # multiday event completely kills this free time
app.logger.debug("Free time completely removed")
break
if bs < fe:
free['end'] = busy['start'] # multiday event starts after free
app.logger.debug("Free end = " + str(free_end) + " changed to " + str(busy['start']))
continue
elif free_start.date() == busy_end.date():
if be > fs: # wrap around from prev day busy event
if be < fe:
free["start"] = busy['end']
app.logger.debug("Free start = " + str(free_start) + " changed to " + str(busy['end']))
continue
if be >= fe:
freetime.remove(free)
app.logger.debug("Free time completely removed")
break
times = []
for event in freetime:
times.append({"event": event['name'], "start": arrow.get(event['start']).isoformat(),
"end": arrow.get(event['end']).isoformat()})
times = sorted(times, key=lambda k: arrow.get(k['start']))
return times
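# Worked example of the subtraction above: a free block 09:00-17:00 minus a
# busy event 12:00-13:00 hits the "busy cuts up free into two" branch and
# yields two free blocks, 09:00-12:00 and 13:00-17:00; a busy event spanning
# the whole window removes the free block entirely.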
def valid_credentials():
"""
Returns OAuth2 credentials if we have valid
credentials in the session. This is a 'truthy' value.
Return None if we don't have credentials, or if they
have expired or are otherwise invalid. This is a 'falsy' value.
"""
if 'credentials' not in flask.session:
return None
credentials = client.OAuth2Credentials.from_json(
flask.session['credentials'])
if (credentials.invalid or
credentials.access_token_expired):
return None
return credentials
def get_gcal_service(credentials):
"""
We need a Google calendar 'service' object to obtain
list of calendars, busy times, etc. This requires
authorization. If authorization is already in effect,
we'll just return with the authorization. Otherwise,
control flow will be interrupted by authorization, and we'll
end up redirected back to /choose *without a service object*.
Then the second call will succeed without additional authorization.
"""
app.logger.debug("Entering get_gcal_service")
http_auth = credentials.authorize(httplib2.Http())
service = discovery.build('calendar', 'v3', http=http_auth)
app.logger.debug("Returning service")
return service
def create_db(meet_name, beg_date, end_date, beg_time, end_time, attendees):
attendee_list = []
url_list = []
for name in attendees:
attendee_list.append({"email": name,
"responded": False,
"busy_times": None,
"ID": hashlib.md5(name.encode()).hexdigest()})
meeting = {"meeting_name": meet_name,
"begin_date": beg_date,
"end_date": end_date,
"begin_time": beg_time,
"end_time": end_time,
"attendees": attendee_list}
flask.session['meet_id'] = str(collection.insert_one(meeting).inserted_id)
for person in attendee_list:
url_dict = {"name": person['email'],
"url": str(flask.url_for("respond",
meeting=flask.session['meet_id'], ID=person["ID"], _external=True))}
url_list.append(url_dict)
flask.session['url_list'] = url_list
return
def time_to_num(time_str):
hh, mm = map(int, time_str.split(':'))
return [hh, mm]
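# Example: time_to_num("13:30") -> [13, 30]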
####
#
# Initialize session variables
#
####
def init_session_values():
"""
Start with some reasonable defaults for date and time ranges.
Note this must be run in app context ... can't call from main.
"""
# Default date span = tomorrow to 1 week from now
now = arrow.now('local') # We really should be using tz from browser
tomorrow = now.replace(days=+1)
nextweek = now.replace(days=+7)
flask.session["begin_date"] = tomorrow.floor('day').isoformat()
flask.session["end_date"] = nextweek.ceil('day').isoformat()
flask.session["daterange"] = "{} - {}".format(
tomorrow.format("MM/DD/YYYY"),
nextweek.format("MM/DD/YYYY"))
# Default time span each day, 9 to 5
flask.session["begin_time"] = interpret_time("9am")
flask.session["end_time"] = interpret_time("5pm")
def interpret_time( text ):
"""
Read time in a human-compatible format and
interpret as ISO format with local timezone.
May throw exception if time can't be interpreted. In that
case it will also flash a message explaining accepted formats.
"""
app.logger.debug("Decoding time '{}'".format(text))
time_formats = ["ha", "h:mma", "h:mm a", "H:mm"]
try:
as_arrow = arrow.get(text, time_formats).replace(tzinfo=tz.tzlocal())
as_arrow = as_arrow.replace(year=2016) #HACK see below
app.logger.debug("Succeeded interpreting time")
except:
app.logger.debug("Failed to interpret time")
flask.flash("Time '{}' didn't match accepted formats 13:30 or 1:30pm"
.format(text))
raise
return as_arrow.isoformat()
def interpret_date( text ):
"""
Convert text of date to ISO format used internally,
with the local time zone.
"""
try:
as_arrow = arrow.get(text, "MM/DD/YYYY").replace(
tzinfo=tz.tzlocal())
except:
flask.flash("Date '{}' didn't fit expected format 12/31/2001")
raise
return as_arrow.isoformat()
def next_day(isotext):
"""
ISO date + 1 day (used in query to Google calendar)
"""
as_arrow = arrow.get(isotext)
return as_arrow.replace(days=+1).isoformat()
####
#
# Functions (NOT pages) that return some information
#
####
def list_calendars(service):
"""
Given a google 'service' object, return a list of
calendars. Each calendar is represented by a dict.
The returned list is sorted to have
the primary calendar first, and selected (that is, displayed in
Google Calendars web app) calendars before unselected calendars.
"""
app.logger.debug("Entering list_calendars")
calendar_list = service.calendarList().list().execute()["items"]
result = [ ]
for cal in calendar_list:
kind = cal["kind"]
id = cal["id"]
if "description" in cal:
desc = cal["description"]
else:
desc = "(no description)"
summary = cal["summary"]
# Optional binary attributes with False as default
selected = ("selected" in cal) and cal["selected"]
primary = ("primary" in cal) and cal["primary"]
result.append(
{ "kind": kind,
"id": id,
"summary": summary,
"selected": selected,
"primary": primary
})
return sorted(result, key=cal_sort_key)
def cal_sort_key( cal ):
"""
Sort key for the list of calendars: primary calendar first,
then other selected calendars, then unselected calendars.
(" " sorts before "X", and tuples are compared piecewise)
"""
if cal["selected"]:
selected_key = " "
else:
selected_key = "X"
if cal["primary"]:
primary_key = " "
else:
primary_key = "X"
return (primary_key, selected_key, cal["summary"])
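# Example: a selected primary calendar sorts with key (" ", " ", summary),
# an unselected non-primary one with ("X", "X", summary), so primaries come first.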
#################
#
# Functions used within the templates
#
#################
@app.template_filter( 'fmtdate' )
def format_arrow_date( date ):
try:
normal = arrow.get( date )
return normal.format("ddd MM/DD/YYYY")
except:
return "(bad date)"
@app.template_filter( 'fmttime' )
def format_arrow_time( time ):
try:
normal = arrow.get( time )
return normal.format("HH:mm")
except:
return "(bad time)"
#############
if __name__ == "__main__":
# App is created above so that it will
# exist whether this is 'main' or not
# (e.g., if we are running under green unicorn)
app.run(port=CONFIG.PORT,host="0.0.0.0")
```
|
{
"source": "jeskesen/kobayashi_aru",
"score": 2
}
|
#### File: kobayashi_maru_control/scripts/random_path.py
```python
import rospy
from std_msgs.msg import String
from nav_msgs.msg import Path
from geometry_msgs.msg import PoseStamped,Point
import random
POINTS = 10
SPREAD = 5
def random_path_publisher():
pub = rospy.Publisher('path_to_follow', Path, queue_size=10)
rospy.init_node('random_planner', anonymous=True)
rate = rospy.Rate(1) # 1 Hz
rate.sleep()
#hello_str = "hello world %s" % rospy.get_time()
myPath = Path()
myPath.header.frame_id="odom"
myPath.header.stamp = rospy.Time.now()
myPoses = []
for i in range(POINTS):
newPose = PoseStamped()
newPose.header.frame_id="odom"
newPose.header.stamp = rospy.Time.now()
newPose.pose.position = Point(random.uniform(-SPREAD,SPREAD), random.uniform(-SPREAD,SPREAD), 0)
myPoses.append(newPose)
myPath.poses = myPoses
#pub2.publish(myPoses[0])
rospy.loginfo(myPath)
pub.publish(myPath)
rate.sleep()
rospy.spin()
if __name__ == '__main__':
try:
random_path_publisher()
except rospy.ROSInterruptException:
pass
```
|
{
"source": "jeskol/hibernation-dashboard",
"score": 3
}
|
#### File: jeskol/hibernation-dashboard/list_hibernate.py
```python
import boto.ec2
regions = ['us-east-1']
#region_list = [x.name for x in conn.get_all_regions()]
def main():
for region in regions:
conn = boto.ec2.connect_to_region(region)
instances = conn.get_only_instances(
filters={"tag:InstanceHibernate":"*"})
for inst in instances:
hibernationTag = inst.tags.get('InstanceHibernate', "NONE!")
print "{:22} Tag:{:10} {}".format(inst.id, hibernationTag, inst.state)
if __name__ == '__main__':
main()
```
|
{
"source": "jeslinmx/advent2020",
"score": 3
}
|
#### File: jeslinmx/advent2020/18.py
```python
from sys import stdin
expressions = [line.strip() for line in stdin]
# Part 1
from collections import deque
from operator import add, mul
def p1evaluate(expression):
ctx = deque([{}])
tokens = deque(
expression
.replace("(", "( ")
.replace(")", " )")
.split()
)
while len(tokens) > 0:
token = tokens.popleft()
if token == "+":
ctx[-1]["operator"] = add
elif token == "*":
ctx[-1]["operator"] = mul
elif token == "(":
# add nest level in ctx
ctx.append({})
elif token == ")":
# pop nest level and push its value onto stack
tokens.appendleft(ctx.pop()["value"])
else:
ctx[-1]["value"] = ctx[-1]["operator"](ctx[-1]["value"], int(token))if "value" in ctx[-1] else int(token)
return ctx[-1]["value"]
print(sum(map(p1evaluate, expressions)))
# Part 2
from math import prod
def p2evaluate(expression):
if isinstance(expression, str):
tokens = deque(
expression
.replace("(", "( ")
.replace(")", " )")
.split()
)
else:
tokens = expression
# resolve all brackets
tokens_no_brackets = deque()
while len(tokens) > 0:
token = tokens.popleft()
if token == "(":
# use recursion to evaluate to the closing bracket
result, tokens = p2evaluate(tokens)
tokens.appendleft(result)
elif token == ")":
# stop evaluation at this point; remaining tokens become remnant
remnant = tokens
break
else:
tokens_no_brackets.append(token)
# resolve addition
tokens = tokens_no_brackets
tokens_addition_done = deque()
while len(tokens) > 0:
token = tokens.popleft()
if token == "+":
# pop off last transferred value, pop off next value, add them
# together, then put it back
tokens_addition_done.append(
int(tokens_addition_done.pop()) + int(tokens.popleft())
)
else:
tokens_addition_done.append(token)
# resolve multiplication
result = prod(map(int, filter(lambda x: x != "*", tokens_addition_done)))
try:
return result, remnant
except UnboundLocalError:
# if remnant is undefined, that's because this is the outermost call
# (and hence no closing bracket was ever seen)
return result
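# Sanity check (added illustration, not part of the original solution): under
# Part 2 rules, addition binds tighter than multiplication, so
# "2 * 3 + 4" is 2 * (3 + 4) = 14.
assert p2evaluate("2 * 3 + 4") == 14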
print(sum(map(p2evaluate, expressions)))
```
#### File: jeslinmx/advent2020/8.py
```python
from sys import stdin
instructions = [line.split() for line in stdin]
# Part 1
def execute(instructions):
visited = [False] * len(instructions)
acc, cur = 0, 0
while cur < len(instructions) and not visited[cur]:
visited[cur] = True
if instructions[cur][0] == "acc":
acc += int(instructions[cur][1])
cur += 1
elif instructions[cur][0] == "jmp":
cur += int(instructions[cur][1])
else:
cur += 1
return acc, cur >= len(instructions)
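# Sanity check (added illustration, not part of the original solution): this
# three-line program runs "acc +1" once, then "jmp -1" loops back to an
# already-visited instruction, so it never terminates: (acc=1, False).
assert execute([["nop", "+0"], ["acc", "+1"], ["jmp", "-1"]]) == (1, False)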
print(execute(instructions))
# Part 2
def modify_line(number):
# return a copy of instructions in which the instruction at
# the given line number is substituted between jmp and nop
return (
instructions[:number]
+ [
("jmp" if instructions[number][0] == "nop" else "nop",
instructions[number][1])
]
+ instructions[number+1:]
)
# brute force through substituting each jmp/nop to find a terminating program
for modified_instructions in (modify_line(number) for number, instruction in enumerate(instructions) if instruction[0] in ("jmp", "nop")):
result = execute(modified_instructions)
if result[1]:
print(result)
```
|
{
"source": "jeslinmx/dailytelegram",
"score": 3
}
|
#### File: jeslinmx/dailytelegram/fpwrapper.py
```python
import sys
import time
from concurrent import futures
import feedparser
class Feed(object):
def __init__(self, feed_url: str):
self.url = feed_url
self.previous_entries = []
self.etag = ""
self.modified = ""
# grab feed metadata and populate previous_entries
self.get_new_entries()
def get_new_entries(self):
"""Downloads and parses the RSS feed, returning new entries (by timestamp)."""
try:
# xml/rss parsing and feedparser are complex beasts
d = feedparser.parse(self.url, etag=self.etag, modified=self.modified)
except Exception:
# so we just ignore anything that goes wrong with it
# and worry about it later.
self.pause_updates()
self._nullupdate()
return sys.exc_info()
if d.get("status", None) == 301:
# if the feed is permanently redirected, update the feed url
self.url = d.get("href", self.url)
if d.get("status", None) == 304:
# if the server returns a Not Modified, return no entries
return []
if d.get("status", None) == 410:
# if the feed is Gone, disable future feedparser calls
self.get_new_entries = self._nullupdate
return self._nullupdate()
# update feed metadata
self.metadata = {
"title": d.feed.get("title", f"Untitled feed - {self.url}"),
"subtitle": d.feed.get("subtitle", ""),
"link": d.feed.get("link", self.url),
"description": d.feed.get("description", "")
}
self.etag = d.get("etag", None)
self.modified = d.get("modified", None)
# cherry-pick only entries which do not match URLs from previous update
# this approach works for feeds which contain all posts ever published
# as well as feeds which maintain a rolling window of latest entries.
if d.entries:
entries = [
entry for entry in d.entries
if entry.get("link", "") not in self.previous_entries
]
self.previous_entries = [entry.get("link", "") for entry in d.entries]
else:
entries = []
return entries
def pause_updates(self, duration: float = 24 * 60 * 60):
"""Rewrites get_new_updates to not download the feed, and return no entries, until duration is elapsed."""
self._get_new_entries = self.get_new_entries
self.get_new_entries = self._deferupdate
self.delay_until = time.time() + duration
def _deferupdate(self):
if time.time() >= self.delay_until:
# restore get_new_entries, and let it take over
self.get_new_entries = self._get_new_entries
del self._get_new_entries
return self.get_new_entries()
else:
return []
def _nullupdate(self):
self.metadata = {
"title": f"Feed not found - {self.url}",
"link": self.url,
}
return []
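# Usage sketch (hypothetical URL; performs a network fetch on construction):
#     feed = Feed("https://example.com/rss")  # primes previous_entries
#     new_entries = feed.get_new_entries()    # only entries with unseen links
#     feed.pause_updates(3600)                # back off for an hour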
class FeedCollection(object):
def __init__(self, feed_urls: list, max_workers:int=5):
self.feeds = { url: Feed(url) for url in feed_urls }
self.workers = max_workers
def get_new_entries(self):
with futures.ThreadPoolExecutor(max_workers=self.workers) as ex:
fs = { url: ex.submit(feed.get_new_entries) for url, feed in self.feeds.items() }
return { url: f.result() for url, f in fs.items() }
def add_feed(self, feed_url: str):
if feed_url in self.feeds:
raise FeedCollectionError(feed_url, "The provided url has already previously been added")
self.feeds[feed_url] = Feed(feed_url)
def remove_feed(self, feed_url: str):
if feed_url not in self.feeds:
raise FeedCollectionError(feed_url, "The provided url does not exist in this FeedCollection")
del self.feeds[feed_url]
class FeedCollectionError(Exception):
def __init__(self, feed_url, message):
self.feed_url = feed_url
self.message = message
```
|
{
"source": "jeslyvarghese/pyxmp",
"score": 2
}
|
#### File: pyxmp/pyxmp/xmp.py
```python
import xml.etree.ElementTree as ET
from .__keysearch import keysearch
from .__attribute import Attribute
class XMP(object):
def __init__(self, filepath, **namespaces):
self.filepath = filepath
with open(self.filepath, 'rb') as f:
data = f.read()
xmp_start = data.find(b'<x:xmpmeta')
xmp_end = data.find(b'</x:xmpmeta')
self.__namespaces = namespaces
self.__xmp_string = data[xmp_start:xmp_end+12]
try:
self.__root = ET.fromstring(self.__xmp_string)
self.__rdf_el = self.__root[0][0]
self.__attrib_dict = self.__rdf_el.attrib
except ET.ParseError:
self.__attrib_dict = {}
self.__namespaced_dict = {}
self.__update_namespaced_dict()
self.__create_namespace_attributes()
def __update_namespaced_dict(self):
for k, v in self.__attrib_dict.items():
nk = k
for ns, url in self.__namespaces.items():
nk = k.replace('{'+ url +'}', ns+':')
if k != nk:
break
self.__namespaced_dict[nk] = v
def __create_namespace_attributes(self):
for k in self.__namespaces.keys():
setattr(self, k, Attribute())
obj = getattr(self, k)
for key in keysearch(self.__namespaced_dict, k):
attr_name = key.replace(k+':', '')
setattr(obj, attr_name, self.__namespaced_dict[key])
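# Usage sketch (hypothetical file and namespace mapping):
#     xmp = XMP("photo.jpg", dc="http://purl.org/dc/elements/1.1/")
#     print(xmp.dc.title)  # attributes under the "dc" namespace, if present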
```
|
{
"source": "jesman/hue",
"score": 2
}
|
#### File: src/about/views.py
```python
import json
from django.conf import settings
from django.http import HttpResponse
from django.utils.translation import ugettext as _
from desktop.lib.django_util import render
from desktop.models import Settings
from desktop import appmanager
def admin_wizard(request):
apps = appmanager.get_apps(request.user)
app_names = [app.name for app in sorted(apps, key=lambda app: app.menu_index)]
tours_and_tutorials = Settings.get_settings().tours_and_tutorials
return render('admin_wizard.mako', request, {
'version': settings.HUE_DESKTOP_VERSION,
'apps': dict([(app.name, app) for app in apps]),
'app_names': app_names,
'tours_and_tutorials': tours_and_tutorials,
})
def update_preferences(request):
response = {'status': -1, 'data': ''}
if request.method == 'POST':
try:
settings = Settings.get_settings()
settings.tours_and_tutorials = request.POST.get('tours_and_tutorials', False)
settings.save()
response['status'] = 0
response['tours_and_tutorials'] = settings.tours_and_tutorials
except Exception, e:
response['data'] = str(e)
else:
response['data'] = _('POST request required.')
return HttpResponse(json.dumps(response), mimetype="application/json")
```
#### File: src/beeswax/data_export.py
```python
import logging
import time
from django.http import HttpResponse
from desktop.lib.export_csvxls import CSVformatter, TooBigToDownloadException
from beeswax import common
LOG = logging.getLogger(__name__)
_DATA_WAIT_SLEEP = 0.1 # Sleep 0.1 sec before checking for data availability
FETCH_ROWS = 100000
def download(handle, format, db):
"""
download(query_model, format) -> HttpResponse
Retrieve the query result in the format specified. Return an HttpResponse object.
"""
if format not in common.DL_FORMATS:
LOG.error('Unknown download format "%s"' % (format,))
return
if format == 'csv':
formatter = CSVformatter()
mimetype = 'application/csv'
elif format == 'xls':
# We 'fool' the user by sending back CSV as XSL as it supports streaming and won't freeze Hue
formatter = CSVformatter()
mimetype = 'application/xls'
gen = data_generator(handle, formatter, db)
resp = HttpResponse(gen, mimetype=mimetype)
resp['Content-Disposition'] = 'attachment; filename=query_result.%s' % (format,)
return resp
def data_generator(handle, formatter, db):
"""
data_generator(query_model, formatter) -> generator object
Return a generator object for a csv. The first line is the column names.
This is similar to export_csvxls.generator, but has
one or two extra complexities.
"""
_DATA_WAIT_SLEEP
is_first_row = True
yield formatter.init_doc()
results = db.fetch(handle, start_over=is_first_row, rows=FETCH_ROWS)
while results is not None:
while not results.ready: # For Beeswax
time.sleep(_DATA_WAIT_SLEEP)
results = db.fetch(handle, start_over=is_first_row, rows=FETCH_ROWS)
# TODO Check for concurrent reading when HS2 supports start_row
if is_first_row:
is_first_row = False
yield formatter.format_header(results.cols())
for row in results.rows():
try:
yield formatter.format_row(row)
except TooBigToDownloadException, ex:
LOG.error(ex)
if results.has_more:
results = db.fetch(handle, start_over=is_first_row, rows=FETCH_ROWS)
else:
results = None
yield formatter.fini_doc()
```
#### File: src/search/models.py
```python
try:
import json
except ImportError:
import simplejson as json
from datetime import datetime
from lxml import etree
import re
import logging
from django.db import models
from django.utils.translation import ugettext as _, ugettext_lazy as _t
from django.core.urlresolvers import reverse
from mako.template import Template
from search.api import SolrApi
from search.conf import SOLR_URL
LOG = logging.getLogger(__name__)
class Facet(models.Model):
_ATTRIBUTES = ['properties', 'fields', 'ranges', 'dates', 'charts', 'order']
enabled = models.BooleanField(default=True)
data = models.TextField()
def get_data(self):
return json.loads(self.data)
def update_from_post(self, post_data):
data_dict = json.loads(self.data)
for attr in Facet._ATTRIBUTES:
if post_data.get(attr) is not None:
data_dict[attr] = json.loads(post_data[attr])
self.data = json.dumps(data_dict)
def get_query_params(self):
data_dict = json.loads(self.data)
properties = data_dict.get('properties')
params = (
('facet', properties.get('isEnabled') and 'true' or 'false'),
('facet.mincount', properties.get('mincount')),
('facet.limit', 100),
('facet.sort', properties.get('sort')),
)
if data_dict.get('fields'):
field_facets = tuple([('facet.field', field_facet['field']) for field_facet in data_dict['fields']])
params += field_facets
if data_dict.get('charts'):
for field_facet in data_dict['charts']:
if field_facet['start'] and field_facet['end'] and field_facet['gap']:
range_facets = tuple([
('facet.range', field_facet['field']),
('f.%s.facet.limit' % field_facet['field'], -1),
('f.%s.facet.range.start' % field_facet['field'], field_facet['start']),
('f.%s.facet.range.end' % field_facet['field'], field_facet['end']),
('f.%s.facet.range.gap' % field_facet['field'], field_facet['gap']),]
)
params += range_facets
else:
field_facets = tuple([
('facet.field', field_facet['field']),
('f.%s.facet.limit' % field_facet['field'], -1),
])
params += field_facets
if data_dict.get('ranges'):
for field_facet in data_dict['ranges']:
range_facets = tuple([
('facet.range', field_facet['field']),
('f.%s.facet.range.start' % field_facet['field'], field_facet['start']),
('f.%s.facet.range.end' % field_facet['field'], field_facet['end']),
('f.%s.facet.range.gap' % field_facet['field'], field_facet['gap']),]
)
params += range_facets
if data_dict.get('dates'):
for field_facet in data_dict['dates']:
start = field_facet['start']
end = field_facet['end']
gap = field_facet['gap']
date_facets = tuple([
('facet.date', field_facet['field']),
('f.%s.facet.date.start' % field_facet['field'], 'NOW-%(frequency)s%(unit)s/%(rounder)s' % {"frequency": start["frequency"], "unit": start["unit"], "rounder": gap["unit"]}),
('f.%s.facet.date.end' % field_facet['field'], 'NOW-%(frequency)s%(unit)s' % end),
('f.%s.facet.date.gap' % field_facet['field'], '+%(frequency)s%(unit)s' % gap),]
)
params += date_facets
return params
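# Illustrative result for a single enabled field facet (actual values depend
# on the stored json): (('facet', 'true'), ('facet.mincount', 1),
# ('facet.limit', 100), ('facet.sort', 'count'), ('facet.field', 'region'))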
class Result(models.Model):
_ATTRIBUTES = ['properties', 'template', 'highlighting', 'extracode']
data = models.TextField()
def update_from_post(self, post_data):
data_dict = json.loads(self.data)
for attr in Result._ATTRIBUTES:
if post_data.get(attr) is not None:
data_dict[attr] = json.loads(post_data[attr])
self.data = json.dumps(data_dict)
def get_template(self, with_highlighting=False):
data_dict = json.loads(self.data)
template = data_dict.get('template')
if with_highlighting:
for field in data_dict.get('highlighting', []):
template = re.sub('\{\{%s\}\}' % field, '{{{%s}}}' % field, template)
return template
def get_extracode(self):
data_dict = json.loads(self.data)
return data_dict.get('extracode')
def get_highlighting(self):
data_dict = json.loads(self.data)
return json.dumps(data_dict.get('highlighting'))
def get_properties(self):
data_dict = json.loads(self.data)
return json.dumps(data_dict.get('properties'))
def get_query_params(self):
data_dict = json.loads(self.data)
params = ()
if data_dict.get('highlighting'):
params += (
('hl', data_dict.get('properties', {}).get('highlighting_enabled') and 'true' or 'false'),
('hl.fl', ','.join(data_dict.get('highlighting'))),
)
return params
class Sorting(models.Model):
_ATTRIBUTES = ['properties', 'fields']
data = models.TextField()
def update_from_post(self, post_data):
data_dict = json.loads(self.data)
for attr in Sorting._ATTRIBUTES:
if post_data.get(attr) is not None:
data_dict[attr] = json.loads(post_data[attr])
self.data = json.dumps(data_dict)
def get_query_params(self, client_query=None):
params = ()
data_dict = json.loads(self.data)
fields = []
if data_dict.get('properties', {}).get('is_enabled'):
if client_query is not None and client_query.get('sort'):
params += (
('sort', client_query.get('sort')),
)
elif data_dict.get('fields'):
fields = []
for field in data_dict.get('fields'):
if field['include']:
fields.append('%s %s' % (field['field'], field['asc'] and 'asc' or 'desc'))
params += (
('sort', ','.join(fields)),
)
return params
class CollectionManager(models.Manager):
def get_or_create(self, name, solr_properties, is_core_only=False, is_enabled=True, user=None):
try:
return self.get(name=name), False
except Collection.DoesNotExist:
facets = Facet.objects.create(data=json.dumps({
'properties': {'isEnabled': False, 'limit': 10, 'mincount': 1, 'sort': 'count'},
'ranges': [],
'fields': [],
'dates': []
}))
result = Result.objects.create(data=json.dumps({
'template': '',
'highlighting': [],
'properties': {'highlighting_enabled': False},
'extracode':
"""
<style>
em {
color: red;
}
</style>
<script>
</script>
"""
}))
sorting = Sorting.objects.create(data=json.dumps({'properties': {'is_enabled': False}, 'fields': []}))
cores = json.dumps(solr_properties)
collection = Collection.objects.create(
name=name,
label=name,
enabled=is_enabled,
cores=cores,
is_core_only=is_core_only,
facets=facets,
result=result,
sorting=sorting
)
template = """
<div class="row-fluid">
<div class="row-fluid">
<div class="span12">%s</div>
</div>
<br/>
</div>""" % ' '.join(['{{%s}}' % field for field in collection.fields(user)])
result.update_from_post({'template': json.dumps(template)})
result.save()
return collection, True
class Collection(models.Model):
# Perms coming with https://issues.cloudera.org/browse/HUE-950
enabled = models.BooleanField(default=True)
name = models.CharField(max_length=40, verbose_name=_t('Solr index name pointing to'))
label = models.CharField(max_length=100, verbose_name=_t('Friendlier name in UI'))
is_core_only = models.BooleanField(default=False)
cores = models.TextField(default=json.dumps({}), verbose_name=_t('Collection with cores data'), help_text=_t('Solr json'))
properties = models.TextField(
default=json.dumps({}), verbose_name=_t('Properties'),
help_text=_t('Hue properties (e.g. results by pages number)'))
facets = models.ForeignKey(Facet)
result = models.ForeignKey(Result)
sorting = models.ForeignKey(Sorting)
objects = CollectionManager()
def get_query(self, client_query=None):
return self.facets.get_query_params() + self.result.get_query_params() + self.sorting.get_query_params(client_query)
def get_absolute_url(self):
return reverse('search:admin_collection', kwargs={'collection_id': self.id})
def fields(self, user):
return sorted([field.get('name') for field in self.fields_data(user)])
def fields_data(self, user):
solr_schema = SolrApi(SOLR_URL.get(), user).schema(self.name)
schema = etree.fromstring(solr_schema)
return sorted([{'name': field.get('name'), 'type': field.get('type')}
for fields in schema.iter('fields') for field in fields.iter('field')])
def get_facet_field_format(field, type, facets):
format = ""
try:
if type == 'field':
for fld in facets['fields']:
if fld['field'] == field:
format = fld['format']
elif type == 'range':
for fld in facets['ranges']:
if fld['field'] == field:
format = fld['format']
elif type == 'date':
for fld in facets['dates']:
if fld['field'] == field:
format = fld['format']
except:
pass
return format
def get_facet_field_label(field, type, facets):
label = field
if type == 'field':
for fld in facets['fields']:
if fld['field'] == field:
label = fld['label']
elif type == 'range':
for fld in facets['ranges']:
if fld['field'] == field:
label = fld['label']
elif type == 'date':
for fld in facets['dates']:
if fld['field'] == field:
label = fld['label']
elif type == 'chart':
for fld in facets['charts']:
if fld['field'] == field:
label = fld['label']
return label
def get_facet_field_uuid(field, type, facets):
uuid = ''
if type == 'field':
for fld in facets['fields']:
if fld['field'] == field:
uuid = fld['uuid']
elif type == 'range':
for fld in facets['ranges']:
if fld['field'] == field:
uuid = fld['uuid']
elif type == 'date':
for fld in facets['dates']:
if fld['field'] == field:
uuid = fld['uuid']
return uuid
def is_chart_field(field, charts):
found = False
for fld in charts:
if field == fld['field']:
found = True
return found
def augment_solr_response(response, facets):
augmented = response
augmented['normalized_facets'] = []
normalized_facets = {}
default_facets = []
chart_facets = facets.get('charts', [])
if response and response.get('facet_counts'):
if response['facet_counts']['facet_fields']:
for cat in response['facet_counts']['facet_fields']:
facet = {
'field': cat,
'type': 'chart' if is_chart_field(cat, chart_facets) else 'field',
'label': get_facet_field_label(cat, is_chart_field(cat, chart_facets) and 'chart' or 'field', facets),
'counts': response['facet_counts']['facet_fields'][cat],
}
uuid = get_facet_field_uuid(cat, 'field', facets)
if uuid == '':
default_facets.append(facet)
else:
normalized_facets[uuid] = facet
if response['facet_counts']['facet_ranges']:
for cat in response['facet_counts']['facet_ranges']:
facet = {
'field': cat,
'type': 'chart' if is_chart_field(cat, chart_facets) else 'range',
'label': get_facet_field_label(cat, 'range', facets),
'counts': response['facet_counts']['facet_ranges'][cat]['counts'],
'start': response['facet_counts']['facet_ranges'][cat]['start'],
'end': response['facet_counts']['facet_ranges'][cat]['end'],
'gap': response['facet_counts']['facet_ranges'][cat]['gap'],
}
uuid = get_facet_field_uuid(cat, 'range', facets)
if uuid == '':
default_facets.append(facet)
else:
normalized_facets[uuid] = facet
if response['facet_counts']['facet_dates']:
for cat in response['facet_counts']['facet_dates']:
facet = {
'field': cat,
'type': 'date',
'label': get_facet_field_label(cat, 'date', facets),
'format': get_facet_field_format(cat, 'date', facets),
'start': response['facet_counts']['facet_dates'][cat]['start'],
'end': response['facet_counts']['facet_dates'][cat]['end'],
'gap': response['facet_counts']['facet_dates'][cat]['gap'],
}
counts = []
for date, count in response['facet_counts']['facet_dates'][cat].iteritems():
if date not in ('start', 'end', 'gap'):
counts.append(date)
counts.append(count)
facet['counts'] = counts
uuid = get_facet_field_uuid(cat, 'date', facets)
if uuid == '':
default_facets.append(facet)
else:
normalized_facets[uuid] = facet
for ordered_uuid in facets.get('order', []):
try:
augmented['normalized_facets'].append(normalized_facets[ordered_uuid])
except:
pass
if default_facets:
augmented['normalized_facets'].extend(default_facets)
return augmented
```
#### File: src/spark/conf.py
```python
from django.utils.translation import ugettext as _, ugettext_lazy as _t
from desktop.lib.conf import Config, validate_path
SPARK_MASTER = Config(
key="spark_master",
help=_t("Address of the Spark master, e.g spark://localhost:7077. If empty use the current configuration. "
"Can be overriden in the script too."),
default=""
)
SPARK_HOME = Config(
key="spark_home",
help=_t("Local path to Spark Home on all the nodes of the cluster."),
default="/usr/lib/spark"
)
def config_validator(user):
res = []
res.extend(validate_path(SPARK_HOME, is_dir=True))
return res
```
#### File: src/spark/tests.py
```python
import json
import time
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from nose.tools import assert_true, assert_equal
from desktop.lib.django_test_util import make_logged_in_client
from desktop.lib.test_utils import grant_access
from spark.models import create_or_update_script, SparkScript
from spark.api import OozieSparkApi, get
class TestSparkBase(object):
SCRIPT_ATTRS = {
'id': 1000,
'name': 'Test',
'script': 'print "spark"',
'parameters': [],
'resources': [],
'hadoopProperties': [],
'language': 'python'
}
def setUp(self):
self.c = make_logged_in_client(is_superuser=False)
grant_access("test", "test", "spark")
self.user = User.objects.get(username='test')
def create_script(self):
return create_script(self.user)
def create_script(user, xattrs=None):
attrs = {'user': user}
attrs.update(TestSparkBase.SCRIPT_ATTRS)
if xattrs is not None:
attrs.update(xattrs)
return create_or_update_script(**attrs)
class TestMock(TestSparkBase):
def test_create_script(self):
spark_script = self.create_script()
assert_equal('Test', spark_script.dict['name'])
def test_save(self):
attrs = {'user': self.user,}
attrs.update(TestSparkBase.SCRIPT_ATTRS)
attrs['language'] = json.dumps(TestSparkBase.SCRIPT_ATTRS['language'])
attrs['parameters'] = json.dumps(TestSparkBase.SCRIPT_ATTRS['parameters'])
attrs['resources'] = json.dumps(TestSparkBase.SCRIPT_ATTRS['resources'])
attrs['hadoopProperties'] = json.dumps(TestSparkBase.SCRIPT_ATTRS['hadoopProperties'])
# Save
self.c.post(reverse('spark:save'), data=attrs, follow=True)
# Update
self.c.post(reverse('spark:save'), data=attrs, follow=True)
def test_parse_oozie_logs(self):
api = get(None, None, self.user)
assert_equal('''Stdoutput aaa''', api._match_logs({'logs': [None, OOZIE_LOGS]}))
OOZIE_LOGS =""" Log Type: stdout
Log Length: 58465
Oozie Launcher starts
Heart beat
Starting the execution of prepare actions
Completed the execution of prepare actions successfully
Files in current dir:/var/lib/hadoop-yarn/cache/yarn/nm-local-dir/usercache/romain/appcache/application_1383078934625_0050/container_1383078934625_0050_01_000002/.
======================
File: .launch_container.sh.crc
File: oozie-sharelib-oozie-3.3.2-cdh4.4.0-SNAPSHOT.jar
Oozie Java/Map-Reduce/Pig action launcher-job configuration
=================================================================
Workflow job id : 0000011-131105103808962-oozie-oozi-W
Workflow action id: 0000011-131105103808962-oozie-oozi-W@spark
Classpath :
------------------------
/var/lib/hadoop-yarn/cache/yarn/nm-local-dir/usercache/romain/appcache/application_1383078934625_0050/container_1383078934625_0050_01_000002
/etc/hadoop/conf
/usr/lib/hadoop/hadoop-nfs-2.1.0-cdh5.0.0-SNAPSHOT.jar
/usr/lib/hadoop/hadoop-common-2.1.0-cdh5.0.0-SNAPSHOT.jar
------------------------
Main class : org.apache.oozie.action.hadoop.ShellMain
Maximum output : 2048
Arguments :
Java System Properties:
------------------------
#
#Tue Nov 05 14:02:13 ICT 2013
java.runtime.name=Java(TM) SE Runtime Environment
oozie.action.externalChildIDs.properties=/var/lib/hadoop-yarn/cache/yarn/nm-local-dir/usercache/romain/appcache/application_1383078934625_0050/container_1383078934625_0050_01_000002/externalChildIds.properties
sun.boot.library.path=/usr/lib/jvm/java-7-oracle/jre/lib/amd64
------------------------
=================================================================
>>> Invoking Main class now >>>
Oozie Shell action configuration
=================================================================
Shell configuration:
--------------------
dfs.datanode.data.dir : file:///var/lib/hadoop-hdfs/cache/${user.name}/dfs/data
dfs.namenode.checkpoint.txns : 1000000
s3.replication : 3
--------------------
Current working dir /var/lib/hadoop-yarn/cache/yarn/nm-local-dir/usercache/romain/appcache/application_1383078934625_0050/container_1383078934625_0050_01_000002
Full Command ..
-------------------------
0:spark.sh:
List of passing environment
-------------------------
TERM=xterm:
JSVC_HOME=/usr/lib/bigtop-utils:
HADOOP_PREFIX=/usr/lib/hadoop:
HADOOP_MAPRED_HOME=/usr/lib/hadoop-mapreduce:
YARN_NICENESS=0:
=================================================================
>>> Invoking Shell command line now >>
Stdoutput aaa
Exit code of the Shell command 0
<<< Invocation of Shell command completed <<<
<<< Invocation of Main class completed <<<
Oozie Launcher ends
"""
```
#### File: desktop/auth/views_test.py
```python
from nose.tools import assert_true, assert_false, assert_equal
from django.contrib.auth.models import User
from django.test.client import Client
from desktop import conf
from desktop.lib.django_test_util import make_logged_in_client
from hadoop.test_base import PseudoHdfsTestBase
from hadoop import pseudo_hdfs4
class TestLoginWithHadoop(PseudoHdfsTestBase):
def setUp(self):
# Simulate first login ever
User.objects.all().delete()
self.c = Client()
def test_login(self):
response = self.c.get('/accounts/login/')
assert_equal(200, response.status_code, "Expected ok status.")
assert_true(response.context['first_login_ever'])
response = self.c.post('/accounts/login/', dict(username="foo", password="<PASSWORD>"))
assert_equal(302, response.status_code, "Expected ok redirect status.")
assert_true(self.fs.exists("/user/foo"))
response = self.c.get('/accounts/login/')
assert_equal(200, response.status_code, "Expected ok status.")
assert_false(response.context['first_login_ever'])
def test_login_home_creation_failure(self):
response = self.c.get('/accounts/login/')
assert_equal(200, response.status_code, "Expected ok status.")
assert_true(response.context['first_login_ever'])
# Create home directory as a file in order to fail in the home creation later
cluster = pseudo_hdfs4.shared_cluster()
fs = cluster.fs
assert_false(cluster.fs.exists("/user/foo2"))
fs.do_as_superuser(fs.create, "/user/foo2")
response = self.c.post('/accounts/login/', dict(username="foo2", password="<PASSWORD>"), follow=True)
assert_equal(200, response.status_code, "Expected ok status.")
assert_true('/beeswax' in response.content, response.content)
# Custom login process should not do 'http-equiv="refresh"' but call the correct view
# 'Could not create home directory.' won't show up because the messages are consumed before
class TestLogin(object):
def setUp(self):
# Simulate first login ever
User.objects.all().delete()
self.c = Client()
def test_bad_first_user(self):
finish = conf.AUTH.BACKEND.set_for_testing("desktop.auth.backend.AllowFirstUserDjangoBackend")
response = self.c.get('/accounts/login/')
assert_equal(200, response.status_code, "Expected ok status.")
assert_true(response.context['first_login_ever'])
response = self.c.post('/accounts/login/', dict(username="foo 1", password="<PASSWORD>"))
assert_equal(200, response.status_code, "Expected ok status.")
assert_true('This value may contain only letters, numbers and @/./+/-/_ characters.' in response.content, response)
finish()
def test_non_jframe_login(self):
client = make_logged_in_client(username="test", password="<PASSWORD>")
# Logout first
client.get('/accounts/logout')
# Login
response = client.post('/accounts/login/', dict(username="test", password="<PASSWORD>"), follow=True)
assert_true(any(["admin_wizard.mako" in _template.filename for _template in response.template]), response.template) # Go to superuser wizard
```
|
{
"source": "jes-moore/hockey_scraper",
"score": 2
}
|
#### File: hockey_scraper/hockey_scraper/game_scraper.py
```python
import hockey_scraper.json_pbp as json_pbp
import hockey_scraper.html_pbp as html_pbp
import hockey_scraper.espn_pbp as espn_pbp
import hockey_scraper.json_shifts as json_shifts
import hockey_scraper.html_shifts as html_shifts
import hockey_scraper.playing_roster as playing_roster
import hockey_scraper.shared as shared
import pandas as pd
broken_shifts_games = []
broken_pbp_games = []
players_missing_ids = []
missing_coords = []
pbp_columns = [
'Game_Id', 'Date', 'Period', 'Event', 'Description', 'Time_Elapsed',
'Seconds_Elapsed', 'Strength', 'Ev_Zone', 'Type', 'Ev_Team', 'Home_Zone',
'Away_Team', 'Home_Team', 'p1_name', 'p1_ID', 'p2_name', 'p2_ID',
'p3_name', 'p3_ID', 'awayPlayer1', 'awayPlayer1_id', 'awayPlayer2',
'awayPlayer2_id', 'awayPlayer3', 'awayPlayer3_id', 'awayPlayer4',
'awayPlayer4_id', 'awayPlayer5', 'awayPlayer5_id', 'awayPlayer6',
'awayPlayer6_id', 'homePlayer1', 'homePlayer1_id', 'homePlayer2',
'homePlayer2_id', 'homePlayer3', 'homePlayer3_id', 'homePlayer4',
'homePlayer4_id', 'homePlayer5', 'homePlayer5_id', 'homePlayer6',
'homePlayer6_id', 'Away_Players', 'Home_Players', 'Away_Score',
'Home_Score', 'Away_Goalie', 'Away_Goalie_Id', 'Home_Goalie',
'Home_Goalie_Id', 'xC', 'yC', 'Home_Coach', 'Away_Coach']
def check_goalie(row):
"""
Checks for bad goalie names (you can tell by them having no player id)
:param row: df row
:return: None
"""
if row['Away_Goalie'] != '' and row['Away_Goalie_Id'] == 'NA':
if [row['Away_Goalie'], row['Game_Id']] not in players_missing_ids:
players_missing_ids.extend([[row['Away_Goalie'], row['Game_Id']]])
if row['Home_Goalie'] != '' and row['Home_Goalie_Id'] == 'NA':
if [row['Home_Goalie'], row['Game_Id']] not in players_missing_ids:
players_missing_ids.extend([[row['Home_Goalie'], row['Game_Id']]])
def get_sebastian_aho(player):
"""
This checks which Sebastian Aho it is based on the position. I have the player id's hardcoded here.
This function is needed because "get_players_json" doesn't control for when there are two Sebastian Aho's (it just
writes over the first one).
:param player: player info
:return: Player ID for specific Aho
"""
return 8480222 if player[1] == 'D' else 8478427
def get_players_json(players_json):
"""
Return dict of players for that game
:param players_json: players section of json
:return: dict of players->keys are the name (in uppercase)
"""
players = dict()
for key in players_json.keys():
name = shared.fix_name(players_json[key]['fullName'].upper())
players[name] = {'id': ' ', 'last_name': players_json[key]['lastName'].upper()}
try:
players[name]['id'] = players_json[key]['id']
except KeyError:
shared.print_warning('{name} is missing an ID number in the pbp json'.format(name=name))
players[name]['id'] = 'NA'
return players
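# Illustrative shape of the returned dict (hypothetical player):
# {'JOHN DOE': {'id': 8470000, 'last_name': 'DOE'}}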
def combine_players_lists(json_players, roster_players, game_id):
"""
Combine the json list of players (which contains id's) with the list in the roster html
:param json_players: dict of all players with id's
:param roster_players: dict with home and and away keys for players
:param game_id: id of game
:return: dict containing home and away keys -> which contains list of info on each player
"""
players = {'Home': dict(), 'Away': dict()}
for venue in players.keys():
for player in roster_players[venue]:
try:
name = shared.fix_name(player[2])
player_id = json_players[name]['id'] if name != '<NAME>' else get_sebastian_aho(player)
players[venue][name] = {'id': player_id, 'number': player[0], 'last_name': json_players[name]['last_name']}
except KeyError:
# If he was listed as a scratch and not a goalie (check_goalie deals with goalies)
# As a whole the scratch list shouldn't be trusted but if a player is missing an id # and is on the
# scratch list I'm willing to assume that he didn't play
if not player[3] and player[1] != 'G':
player.extend([game_id])
players_missing_ids.extend([[player[2], player[4]]])
players[venue][name] = {'id': 'NA', 'number': player[0], 'last_name': ''}
return players
def get_teams_and_players(game_json, roster, game_id):
"""
Get list of players and teams for game
:param game_json: json pbp for game
:param roster: players from roster html
:param game_id: id for game
:return: dict for both - players and teams
"""
try:
teams = json_pbp.get_teams(game_json)
player_ids = get_players_json(game_json['gameData']['players'])
players = combine_players_lists(player_ids, roster['players'], game_id)
except Exception as e:
shared.print_warning('Problem with getting the teams or players')
return None, None
return players, teams
def combine_html_json_pbp(json_df, html_df, game_id, date):
"""
Join both data sources. First try merging on event id (which is the DataFrame index) if both DataFrames have the
same number of rows. If they don't have the same number of rows, merge on: Period', Event, Seconds_Elapsed, p1_ID.
:param json_df: json pbp DataFrame
:param html_df: html pbp DataFrame
:param game_id: id of game
:param date: date of game
:return: finished pbp
"""
# Don't need those columns to merge in
json_df = json_df.drop(['p1_name', 'p2_name', 'p2_ID', 'p3_name', 'p3_ID'], axis=1)
try:
html_df.Period = html_df.Period.astype(int)
# If they aren't equal it's usually due to the HTML containing a challenge event
if html_df.shape[0] == json_df.shape[0]:
json_df = json_df[['period', 'event', 'seconds_elapsed', 'xC', 'yC']]
game_df = pd.merge(html_df, json_df, left_index=True, right_index=True, how='left')
else:
# We always merge if they aren't equal but we check if it's due to a challenge so we can print out a better
# warning message for the user.
# NOTE: May be slightly incorrect. It's possible for there to be a challenge and another issue for one game.
if 'CHL' in list(html_df.Event):
shared.print_warning("The number of columns in the Html and Json pbp are different because the"
" Json pbp, for some reason, does not include challenges. Will instead merge on "
"Period, Event, Time, and p1_id.")
else:
shared.print_warning("The number of columns in the Html and json pbp are different because "
"someone fucked up. Will instead merge on Period, Event, Time, and p1_id.")
# Actual Merging
game_df = pd.merge(html_df, json_df, left_on=['Period', 'Event', 'Seconds_Elapsed', 'p1_ID'],
right_on=['period', 'event', 'seconds_elapsed', 'p1_ID'], how='left')
# This is always done - because merge doesn't work well with shootouts
game_df = game_df.drop_duplicates(subset=['Period', 'Event', 'Description', 'Seconds_Elapsed'])
except Exception as e:
shared.print_warning('Problem combining Html Json pbp for game {}: {}'.format(game_id, e))
return
game_df['Game_Id'] = game_id[-5:]
game_df['Date'] = date
return pd.DataFrame(game_df, columns=pbp_columns)
def combine_espn_html_pbp(html_df, espn_df, game_id, date, away_team, home_team):
"""
Merge the coordinate from the espn feed into the html DataFrame
Can't join here on event_id because the plays are often out of order and pre-2009 are often missing events.
:param html_df: DataFrame with info from html pbp
:param espn_df: DataFrame with info from espn pbp
:param game_id: json game id
:param date: ex: 2016-10-24
:param away_team: away team
:param home_team: home team
:return: merged DataFrame
"""
if espn_df is not None:
try:
espn_df.period = espn_df.period.astype(int)
game_df = pd.merge(html_df, espn_df, left_on=['Period', 'Seconds_Elapsed', 'Event'],
right_on=['period', 'time_elapsed', 'event'], how='left')
# Shit happens
game_df = game_df.drop_duplicates(subset=['Period', 'Event', 'Description', 'Seconds_Elapsed'])
df = game_df.drop(['period', 'time_elapsed', 'event'], axis=1)
except Exception as e:
shared.print_warning('Error for combining espn and html pbp for game {}'.format(game_id))
return None
else:
df = html_df
df['Game_Id'] = game_id[-5:]
df['Date'] = date
df['Away_Team'] = away_team
df['Home_Team'] = home_team
return pd.DataFrame(df, columns=pbp_columns)
def scrape_pbp_live(game_id, date, roster, game_json, players, teams, espn_id=None):
"""
Scrape the live pbp
:param game_id: json game id
:param date: date of game
:param roster: list of players in pre game roster
:param game_json: json pbp for game
:param players: dict of players
:param teams: dict of teams
:param espn_id: Game Id for the espn game. Only provided when live scraping
:return: Tuple - pbp & status
"""
html_df, status = html_pbp.scrape_game_live(game_id, players, teams)
game_df = scrape_pbp(game_id, date, roster, game_json, players, teams, espn_id=espn_id, html_df=html_df)
return game_df, status
def scrape_pbp(game_id, date, roster, game_json, players, teams, espn_id=None, html_df=None):
"""
Automatically scrapes the json and html, if the json is empty the html picks up some of the slack and the espn
xml is also scraped for coordinates.
:param game_id: json game id
:param date: date of game
:param roster: list of players in pre game roster
:param game_json: json pbp for game
:param players: dict of players
:param teams: dict of teams
:param espn_id: Game Id for the espn game. Only provided when live scraping
:param html_df: Can provide DataFrame for html. Only done for live-scraping
:return: DataFrame with info or None if it fails
"""
# Coordinates are only available in json from 2010 onwards
# Note: This doesn't work as intended for second half of 2009 season...it still works just takes slightly longer
if int(str(game_id)[:4]) >= 2010:
json_df = json_pbp.parse_json(game_json, game_id)
if json_df is None:
return None # Means there was an error parsing
if_json = True if len(game_json['liveData']['plays']['allPlays']) > 0 else False
else:
if_json = False
# Only scrape if nothing provided
if not isinstance(html_df, pd.DataFrame):
html_df = html_pbp.scrape_game(game_id, players, teams)
# Got nothing if it isn't there
if html_df is None:
return None
# Check if the json is missing the plays...if it is scrape ESPN for the coordinates
if not if_json:
espn_df = espn_pbp.scrape_game(date, teams['Home'], teams['Away'], game_id=espn_id)
game_df = combine_espn_html_pbp(html_df, espn_df, str(game_id), date, teams['Away'], teams['Home'])
# Sometimes espn is corrupted so can't get coordinates
if espn_df is None or espn_df.empty:
missing_coords.extend([[game_id, date]])
else:
game_df = combine_html_json_pbp(json_df, html_df, str(game_id), date)
if game_df is not None:
game_df['Home_Coach'] = roster['head_coaches']['Home']
game_df['Away_Coach'] = roster['head_coaches']['Away']
game_df.insert(2, 'play_num', game_df.reset_index().index.values)
return game_df
def scrape_shifts(game_id, players, date):
"""
Scrape the Shift charts (or TOI tables)
:param game_id: json game id
:param players: dict of players with numbers and id's
:param date: date of game
:return: DataFrame with info or None if it fails
"""
shifts_df = None
# Control for fact that shift json is only available from 2010 onwards
if shared.get_season(date) >= 2010:
shifts_df = json_shifts.scrape_game(game_id)
if shifts_df is None:
shifts_df = html_shifts.scrape_game(game_id, players)
if shifts_df is None:
shared.print_warning("Unable to scrape shifts for game" + game_id)
broken_shifts_games.extend([[game_id, date]])
return None # Both failed so just return nothing
shifts_df['Date'] = date
return shifts_df
def scrape_game(game_id, date, if_scrape_shifts):
"""
This scrapes the info for the game.
The pbp is automatically scraped, and the whether or not to scrape the shifts is left up to the user.
:param game_id: game to scrap
:param date: ex: 2016-10-24
:param if_scrape_shifts: Boolean indicating whether to also scrape shifts
:return: DataFrame of pbp info
(optional) DataFrame with shift info otherwise just None
"""
print(' '.join(['Scraping Game ', game_id, date]))
shifts_df = None
roster = playing_roster.scrape_roster(game_id)
game_json = json_pbp.get_pbp(game_id) # Contains both player info (id's) and plays
players, teams = get_teams_and_players(game_json, roster, game_id)
# Game fails without any of these
if not roster or not game_json or not teams or not players:
broken_pbp_games.extend([[game_id, date]])
if if_scrape_shifts:
broken_shifts_games.extend([[game_id, date]])
return None, None
pbp_df = scrape_pbp(game_id, date, roster, game_json, players, teams)
# Only scrape shifts if asked and pbp is good
if if_scrape_shifts and pbp_df is not None:
shifts_df = scrape_shifts(game_id, players, date)
if pbp_df is None:
broken_pbp_games.extend([[game_id, date]])
return pbp_df, shifts_df
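# Usage sketch (hypothetical game; hits the NHL endpoints):
#     pbp_df, shifts_df = scrape_game("2016020001", "2016-10-12", True)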
```
#### File: hockey_scraper/hockey_scraper/scrape_functions.py
```python
import hockey_scraper.json_schedule as json_schedule
import hockey_scraper.game_scraper as game_scraper
import hockey_scraper.shared as shared
import pandas as pd
import time
import random
# This hold the scraping errors in a string format.
# This may seem pointless but I have a personal reason for it (I think...)
errors = ''
def print_errors():
"""
Print errors with scraping.
Also puts errors in the "error" string (would just print the string but it would look like shit on one line. I
could store it as I "should" print it but that isn't how I want it).
:return: None
"""
global errors
# Log File
if game_scraper.broken_pbp_games:
print('\nBroken pbp:')
errors += 'Broken pbp:'
for x in game_scraper.broken_pbp_games:
print(x[0], x[1])
errors = ' '.join([errors, str(x[0]), ","])
if game_scraper.broken_shifts_games:
print('\nBroken shifts:')
errors += 'Broken shifts:'
for x in game_scraper.broken_shifts_games:
print(x[0], x[1])
errors = ' '.join([errors, str(x[0]), ","])
if game_scraper.players_missing_ids:
print("\nPlayers missing ID's:")
errors += "Players missing ID's:"
for x in game_scraper.players_missing_ids:
print(x[0], x[1])
errors = ' '.join([errors, str(x[0]), ","])
if game_scraper.missing_coords:
print('\nGames missing coordinates:')
errors += 'Games missing coordinates:'
for x in game_scraper.missing_coords:
print(x[0], x[1])
errors = ' '.join([errors, str(x[0]), ","])
# print('\n')
# Clear them all out for the next call
game_scraper.broken_shifts_games = []
game_scraper.broken_pbp_games = []
game_scraper.players_missing_ids = []
game_scraper.missing_coords = []
def check_data_format(data_format):
"""
Checks if data_format specified (if it is at all) is either None, 'Csv', or 'pandas'.
It exits program with error message if input isn't good.
:param data_format: data_format provided
:return: Boolean - True if good
"""
if not data_format or data_format.lower() not in ['csv', 'pandas']:
raise shared.HaltException('{} is an unspecified data format. The two options are Csv and Pandas '
'(Csv is default)\n'.format(data_format))
def check_valid_dates(from_date, to_date):
"""
Check if it's a valid date range
:param from_date: date should scrape from
:param to_date: date should scrape to
:return: None
"""
try:
if time.strptime(to_date, "%Y-%m-%d") < time.strptime(from_date, "%Y-%m-%d"):
raise shared.HaltException("Error: The second date input is earlier than the first one")
except ValueError:
raise shared.HaltException("Error: Incorrect format given for dates. They must be given like 'yyyy-mm-dd' "
"(ex: '2016-10-01').")
def to_csv(file_name, pbp_df, shifts_df, out_loc):
"""
Write DataFrame(s) to csv file(s)
:param file_name: name of file
:param pbp_df: pbp DataFrame
:param shifts_df: shifts DataFrame
:return: None
"""
# Rename
event_types = {
'PSTR': 'Period_Start',
'FAC': 'Faceoff',
'STOP': 'Stoppage',
'TAKE': 'Takeaway',
'BLOCK': 'Blocked Shot',
'SHOT': 'Shot',
'MISS': 'Missed Shot',
'HIT': 'Hit',
'GIVE': 'Giveaway',
'GOAL': 'Goal',
'PENL': 'Penalty',
'PEND': 'Period End',
'GEND': 'Game End'
}
output_cols = [
'game_id', 'date', 'play_num', 'period', 'event', 'description', 'periodTimeM',
'periodTime', 'strength', 'ev_zone', 'type', 'ev_team', 'home_zone',
'away_team', 'home_team', 'p1_name', 'p1_id', 'p2_name', 'p2_id',
'p3_name', 'p3_id', 'awayPlayer1', 'awayPlayer1_id', 'awayPlayer2',
'awayPlayer2_id', 'awayPlayer3', 'awayPlayer3_id', 'awayPlayer4',
'awayPlayer4_id', 'awayPlayer5', 'awayPlayer5_id', 'awayPlayer6',
'awayPlayer6_id', 'homePlayer1', 'homePlayer1_id', 'homePlayer2',
'homePlayer2_id', 'homePlayer3', 'homePlayer3_id', 'homePlayer4',
'homePlayer4_id', 'homePlayer5', 'homePlayer5_id', 'homePlayer6',
'homePlayer6_id', 'away_players', 'home_players', 'away_score',
'home_score', 'away_goalie', 'away_goalie_id', 'home_goalie',
'home_goalie_id', 'x', 'y', 'home_coach', 'away_coach', 'game_type']
if pbp_df is not None:
# Rename Columns and Event Types
pbp_df.columns = output_cols
pbp_df['event'] = pbp_df['event'].map(event_types)
# Add Season
pbp_df.insert(
0,
'season',
pbp_df['date'].apply(
lambda x: str(int(x[0:4]) - int(x[5:] < '08-01')) +
str(int(x[0:4]) - int(x[5:] < '08-01') + 1)))
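# e.g. date '2016-10-24' -> season '20162017'; a date before Aug 1
# (like '2017-04-05') still maps to the season '20162017'.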
# Add Game ID
pbp_df['game_id'] = pbp_df['season'].apply(
lambda x: x[0:4]) + '0' + pbp_df['game_id'].astype(str)
# Insert Play_id
pbp_df.insert(
0,
'play_id',
pbp_df['game_id'].astype(str)+'_'+pbp_df['play_num'].astype(str))
print("\nPBP Saved To- " + 'nhl_pbp{}.csv'.format(file_name))
pbp_df.to_csv(
out_loc + 'nhl_pbp{}.csv'.format(file_name),
sep=',',
encoding='utf-8',
index=False)
if shifts_df is not None:
print("Shifts Saved To- " + 'nhl_shifts{}.csv'.format(file_name))
shifts_df.to_csv(
out_loc + 'nhl_shifts{}.csv'.format(file_name),
sep=',',
encoding='utf-8',
index=False)
def scrape_list_of_games_par(games, if_scrape_shifts):
import multiprocessing as mp
from functools import partial
# Fan the games out one per worker; partial pins the shifts flag so it is
# actually honored by each child process.
unique_games = [[game] for game in games]
pool = mp.Pool(processes=(mp.cpu_count() - 1))
results = pool.map(partial(scrape_list_of_games, if_scrape_shifts=if_scrape_shifts), unique_games)
pool.close()
pool.join()
pbp_df = pd.concat([res[0] for res in results])
shifts_df = pd.concat([res[1] for res in results])
return pbp_df, shifts_df
def scrape_list_of_games(games, if_scrape_shifts=True):
"""
Given a list of game_id's (and a date for each game) it scrapes them
:param games: list of [game_id, date]
:param if_scrape_shifts: Boolean indicating whether to also scrape shifts
:return: DataFrame of pbp info, also shifts if specified
"""
pbp_dfs = []
shifts_dfs = []
for game in games:
pbp_df, shifts_df = game_scraper.scrape_game(str(game["game_id"]), game["date"], if_scrape_shifts)
try:
pbp_df['game_type'] = str(game['game_type'])
except Exception:
pass
if pbp_df is not None:
pbp_dfs.extend([pbp_df])
if shifts_df is not None:
shifts_dfs.extend([shifts_df])
# Check if any games were scraped...if not let's get out of here
if len(pbp_dfs) == 0:
return None, None
pbp_df = pd.concat(pbp_dfs)
pbp_df = pbp_df.reset_index(drop=True)
pbp_df.apply(lambda row: game_scraper.check_goalie(row), axis=1)
# Only build the shifts frame when shifts were requested and at least one game
# produced them; returning early here would needlessly discard a good pbp.
if if_scrape_shifts and shifts_dfs:
shifts_df = pd.concat(shifts_dfs)
shifts_df = shifts_df.reset_index(drop=True)
else:
shifts_df = None
# Print all errors associated with scrape call
print_errors()
return pbp_df, shifts_df
def scrape_date_range(from_date, to_date, out_loc, if_scrape_shifts = True, data_format='csv', preseason=False, rescrape=False, docs_dir=None):
"""
Scrape games in given date range
:param from_date: date you want to scrape from
:param to_date: date you want to scrape to
:param if_scrape_shifts: Boolean indicating whether to also scrape shifts
:param data_format: format you want data in - csv or pandas (csv is default)
:param preseason: Boolean indicating whether to include preseason games (default is False)
This may or may not work!!! I don't give a shit.
:param rescrape: If you want to rescrape pages already scraped. Only applies if you supply a docs dir. (def. = None)
:param docs_dir: Directory that either contains previously scraped docs or one that you want them to be deposited
in after scraping. (default is None)
:return: Dictionary with DataFrames and errors or None
"""
# First check if the inputs are good
check_data_format(data_format)
check_valid_dates(from_date, to_date)
# Check on the docs_dir and re_scrape
shared.add_dir(docs_dir)
shared.if_rescrape(rescrape)
games = json_schedule.scrape_schedule(from_date, to_date, preseason)
pbp_df, shifts_df = scrape_list_of_games_par(games, if_scrape_shifts)
if data_format.lower() == 'csv':
to_csv(from_date+'--'+to_date, pbp_df, shifts_df, out_loc)
else:
return {"pbp": pbp_df, "shifts": shifts_df, "errors": errors} if if_scrape_shifts else {"pbp": pbp_df,
"errors": errors}
def scrape_seasons(seasons, out_loc, if_scrape_shifts=True, data_format='csv', preseason=False, rescrape=False, docs_dir=None):
"""
Given list of seasons it scrapes all the seasons
:param seasons: list of seasons
:param if_scrape_shifts: Boolean indicating whether to also scrape shifts
:param data_format: format you want data in - csv or pandas (csv is default)
:param preseason: Boolean indicating whether to include preseason games (default is False)
This may or may not work!!! I don't give a shit.
:param rescrape: If you want to rescrape pages already scraped. Only applies if you supply a docs dir.
:param docs_dir: Directory that either contains previously scraped docs or one that you want them to be deposited
in after scraping
:return: Dictionary with DataFrames and errors or None
"""
# First check if the inputs are good
check_data_format(data_format)
# Check on the docs_dir and re_scrape
shared.add_dir(docs_dir)
shared.if_rescrape(rescrape)
# Holds all seasons scraped (if not csv)
master_pbps, master_shifts = [], []
for season in seasons:
from_date = '-'.join([str(season), '9', '1'])
to_date = '-'.join([str(season + 1), '7', '1'])
games = json_schedule.scrape_schedule(from_date, to_date, preseason)
# games = games[-3:] # Debugging Only
pbp_df, shifts_df = scrape_list_of_games_par(games, if_scrape_shifts)
if data_format.lower() == 'csv':
to_csv(str(season) + str(season + 1), pbp_df, shifts_df, out_loc)
else:
master_pbps.append(pbp_df)
master_shifts.append(shifts_df)
if data_format.lower() == 'pandas':
if if_scrape_shifts:
return {"pbp": pd.concat(master_pbps), "shifts": pd.concat(master_shifts), "errors": errors}
else:
return {"pbp": pd.concat(master_pbps), "errors": errors}
def scrape_games(games, out_loc, if_scrape_shifts=True, data_format='csv', rescrape=False, docs_dir=None):
"""
Scrape a list of games
:param games: list of game_ids
:param out_loc: location to write csv output
:param if_scrape_shifts: Boolean indicating whether to also scrape shifts
:param data_format: format you want data in - csv or pandas (csv is default)
:param rescrape: If you want to rescrape pages already scraped. Only applies if you supply a docs dir.
:param docs_dir: Directory that either contains previously scraped docs or one that you want them to be deposited
in after scraping
:return: Dictionary with DataFrames and errors or None
"""
# First check if the inputs are good
check_data_format(data_format)
# Check on the docs_dir and re_scrape
shared.add_dir(docs_dir)
shared.if_rescrape(rescrape)
# Create List of game_id's and dates
games_list = json_schedule.get_dates(games)
# Scrape pbp and shifts
pbp_df, shifts_df = scrape_list_of_games_par(games_list, if_scrape_shifts)
if data_format.lower() == 'csv':
to_csv(str(random.randint(1, 101)), pbp_df, shifts_df, out_loc)
else:
return {"pbp": pbp_df, "shifts": shifts_df, "errors": errors} if if_scrape_shifts else {"pbp": pbp_df,
"errors": errors}
```
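The parallel wrapper above imports `functools.partial` for exactly this purpose: binding a keyword flag once so `Pool.map`, which only passes a single positional argument, can still deliver it to every worker. A minimal runnable sketch of the pattern, with `scrape_one` as a hypothetical stand-in for `scrape_list_of_games`:
```python
import multiprocessing as mp
from functools import partial

def scrape_one(games, if_scrape_shifts=True):
    # pretend work: echo each id with the flag the worker received
    return [(g, if_scrape_shifts) for g in games]

if __name__ == '__main__':
    chunks = [[1], [2], [3]]
    worker = partial(scrape_one, if_scrape_shifts=False)  # bind the flag once
    with mp.Pool(processes=max(mp.cpu_count() - 1, 1)) as pool:
        results = pool.map(worker, chunks)
    print(results)  # [[(1, False)], [(2, False)], [(3, False)]]
```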
|
{
"source": "jes-moore/nhl_analytics",
"score": 3
}
|
#### File: jes-moore/nhl_analytics/nhl_dash_app.py
```python
from plotting import *
# Dashboard Libraries
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State  # Event was removed in Dash 1.0; hover data arrives via Input('id', 'hoverData')
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
## App Data
shooting_df = load_shooting_df()
shot_boxes = create_shotbox_df(shooting_df)
## Main App Layout and Graphing
app.layout = html.Div([
# html.H1('NHL Analytics'),
html.Div(
[
html.Div(
[
# html.H3('Column 1'),
dcc.Graph(
id='eg1',
figure=plot_goal_ratio(shot_boxes),
config={"displaylogo": False})
],
className="six columns"),
html.Div(
[
# html.H3('Column 2'),
dcc.Graph(
id='eg2',
figure=plot_goal_ratio(shot_boxes),
config={"displaylogo": False})
],
className="six columns"),
],
className="row"),
html.Div(id='container')
])
## Callbacks
# @app.callback(
# Output('eg1', 'figure'),
# [],
# [
# State('eg1', 'figure'),
# State('eg1', 'hoverData'),
# ],
# [
# Event('eg1', 'hover')
# ]
# )
# def update_graph(eg1, data):
# if data is not None:
# # get the information about the hover point
# hover_curve_idx = data['points'][0]['curveNumber']
# hover_pt_idx = data['points'][0]['pointIndex']
# data_to_highlight = eg1['data'][hover_curve_idx]
# # change the last curve which is reserved for highlight
# eg1['data'][-1]['x'] = [data_to_highlight['x'][hover_pt_idx]]
# eg1['data'][-1]['y'] = [data_to_highlight['y'][hover_pt_idx]]
# return eg1
if __name__ == '__main__':
app.run_server(debug=True)
```
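The commented-out callback above relies on the `Event` class, which was dropped in Dash 1.0. A hedged sketch of the modern equivalent, assuming the `app`, `eg1`, and `container` ids from the layout above; the returned text is illustrative only:
```python
from dash.dependencies import Input, Output

@app.callback(Output('container', 'children'),
              [Input('eg1', 'hoverData')])
def show_hover(hover_data):
    # hoverData fires on hover in Dash >= 1.0; no Event object is needed
    if hover_data is None:
        return 'Hover over a point'
    point = hover_data['points'][0]
    return 'curve {}, index {}'.format(point['curveNumber'], point['pointIndex'])
```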
|
{
"source": "jesnyder/allogenic",
"score": 3
}
|
#### File: python/archive/c0200_chart_patents.py
```python
import os
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from c0101_retrieve_clinical import retrieve_clinical
from c0201_query_patents import query_patents
def chart_patents():
"""
"""
query_patents()
# clinical_gov_url = 'https://clinicaltrials.gov/ct2/results?cond=&term=&type=&rslt=&age_v=&gndr=&intr=allogenic+AND+msc&titles=&outc=&spons=&lead=&id=&cntry=&state=&city=&dist=&locn=&rsub=&strd_s=&strd_e=&prcd_s=&prcd_e=&sfpd_s=&sfpd_e=&rfpd_s=&rfpd_e=&lupd_s=&lupd_e=&sort='
# retrieve_clinical(clinical_gov_url)
ref_path = os.path.join( 'metadata')
alloFile = 'allogenicANDmesencymalClinicalGov.csv'
autoFile = 'autologousANDmesencymalClinicalGov.csv'
fig = plt.figure()
ax = plt.subplot(111)
df_return = count_per_year(alloFile)
plt.scatter(df_return['year'], df_return['count'], color = [1,0,0], label = 'allogenic')
plt.plot(df_return['year'], df_return['count'], color = [1,0,0], label = 'allogenic')
df_return = count_per_year(autoFile)
plt.scatter(df_return['year'], df_return['count'], color = [0,0,1], label = 'autologous')
plt.plot(df_return['year'], df_return['count'], color = [0,0,1], label = 'autologous')
ax.legend(loc = 'center left')
plt.title('Clinical Trials of MSC')
plt.savefig('patents.png', bbox_inches='tight')
def count_per_year(refFile):
"""
"""
ref_path = os.path.join( 'metadata')
ref_file = os.path.join(ref_path, refFile)
dfAllo = pd.read_csv(ref_file)
startAllo = list(dfAllo["Start Date"])
years = []
for start in startAllo:
start = str(start)
fullDate = start.split(' ')
year = fullDate[-1]
years.append(year)
dfAllo['Start Year'] = years
# print(years)
unique_years, unique_counts = [], []
for year in np.arange(2000, 2025, 1):
year = str(year)
df = dfAllo[dfAllo['Start Year'] == year]
unique_years.append(year)
unique_counts.append(len(list(df['Start Year'])))
df_return = pd.DataFrame()
df_return['year'] = unique_years
df_return['count'] = unique_counts
print(df_return)
return(df_return)
```
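The per-year counting loop above can be condensed with pandas string methods and `value_counts`; a sketch assuming the same CSV layout, where the year is the last token of the "Start Date" column:
```python
import pandas as pd

def count_per_year_groupby(ref_file):
    df = pd.read_csv(ref_file)
    # take the trailing token of each date string as the year
    years = df['Start Date'].astype(str).str.split(' ').str[-1]
    counts = years.value_counts().sort_index()
    return counts.rename_axis('year').reset_index(name='count')
```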
#### File: python/archive/c0300_chart_trials.py
```python
import os
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup
import pprint
from c0101_retrieve_clinical import retrieve_clinical
from c0201_query_patents import query_patents
def chart_trials():
"""
"""
tasks = [1, 2]
# collect metadata for each clinical trial
if 1 in tasks: collect_metadata_clinical()
if 2 in tasks: refine_trials()
def refine_trials():
"""
Remove Terminated trials
"""
ref_path = os.path.join( 'data', 'meta')
ref_file = os.path.join(ref_path, 'clinical' + 'All' + '.csv')
df = pd.read_csv(ref_file)
# drop duplicates using url
df = df.drop_duplicates(subset=['url'])
# remove terminated trials
print('len(overall_status) = ' + str(len(list(df['Overall_status']))))
df = df[(df['Overall_status'] != "Terminated")]
df = df[(df['Overall_status'] != "Withdrawn")]
df = df[(df['Overall_status'] != "Suspended")]
uniqueOverallStatus = np.unique(list(df['Overall_status']))
print('uniqueOverallStatus = ')
print(uniqueOverallStatus)
print('len(overall_status) = ' + str(len(list(df['Overall_status']))))
ref_file = os.path.join(ref_path, 'clinical' + 'All_withoutTerminated' + '.csv')
df.to_csv(ref_file)
Start_date = list(df['Start_date'])
# print('Start_date = ')
# print(Start_date)
dfAll = pd.DataFrame()
years = ['2021', '2022', '2023']
for year in years:
starts = list(df['Start_date'].str.contains(year))
# print(starts)
print('len(starts) = ' + str(len(starts)))
df['starts'] = starts
dfnew = df[(df['starts'] == True)]
print(dfnew)
dfAll = dfAll.append(dfnew)
print(dfAll)
# df = dfAll
#print('len(overall_status) = ' + str(len(list(df['Overall_status']))))
print('dfAll = ')
print(dfAll)
searchTerm = list(dfAll['searchTerm'])
print('len(searchTerm) = ' + str(len(searchTerm)))
uniqueSearchTerms = np.unique(searchTerm)
print('uniqueSearchTerms = ')
print(uniqueSearchTerms)
for term in uniqueSearchTerms:
dfTerm = dfAll[(dfAll['searchTerm'] == term)]
listTerm = list(dfTerm['searchTerm'])
print(term + 'listTerm = ' + str(len(listTerm)))
ref_file = os.path.join(ref_path, 'clinical' + 'All_withoutTerminated' + '_Recent' + '.csv')
dfAll.to_csv(ref_file)
def collect_metadata_clinical():
"""
"""
df_allTerms = pd.DataFrame()
search_terms = []
search_terms.append('iPSC')
search_terms.append('GeneticEngineering')
search_terms.append('MesenchymalExosome')
search_terms.append('MesenchymalAllogenic')
search_terms.append('MesenchymalAutologous')
search_terms.append('Mesenchymal')
for term in search_terms:
# retrieve clinical trial data
ref_path = os.path.join( 'data', 'source')
ref_file = os.path.join(ref_path, 'clinical' + term + '.csv')
df = pd.read_csv(ref_file)
df_all = pd.DataFrame()
for nctid in list(df['NCT Number']):
subset, subsubset = define_subset()
# pprint.pprint(clinicalTrialsGov(nctid))
tag_dict = clinicalTrialsGov(nctid)
url = "https://clinicaltrials.gov/ct2/show/" + nctid
urlXML = "https://clinicaltrials.gov/ct2/show/" + nctid + "?displayxml=true"
# pprint.pprint(tag_dict)
df = pd.DataFrame(tag_dict.items())
df = df.transpose()
new_header = df.iloc[0]
df = df[1:]
df.columns = new_header
"""
df['Brief_summary'] = linebreak_removal(df['Brief_summary'])
df['Detailed_description'] = linebreak_removal(df['Detailed_description'])
df['Eligibility'] = linebreak_removal(df['Eligibility'])
df['Primary_outcome'] = linebreak_removal(df['Primary_outcome'])
df['Arm_group'] = linebreak_removal(df['Arm_group'])
df['title'] = list(df['Official_title'])
df['status'] = list(df['Overall_status'])
# df['date'] = list(df['Start_date'])
"""
df['source'] = ['https://clinicaltrials.gov/']
df['searchTerm'] = [term]
df['NCT'] = [nctid]
df['url'] = [url]
df['urlXML'] = [urlXML]
"""
if '@' in str(list(df['Overall_official'])[0]):
df['contact'] = list(df['Overall_official'])
elif '@' in str(list(df['Overall_contact'])[0]):
df['contact'] = list(df['Overall_contact'])
elif '@' in str(list(df['Overall_contact_backup'])[0]):
df['contact'] = list(df['Overall_contact_backup'])
elif len(str(list(df['Overall_official'])[0])) > 0:
df['contact'] = list(df['Overall_official'])
elif len(str(list(df['Overall_contact'])[0])) > 0:
df['contact'] = list(df['Overall_contact'])
elif len(str(list(df['Overall_contact_backup'])[0])) > 0:
df['contact'] = list(df['Overall_contact_backup'])
else:
df['contact'] = [' ']
"""
df_all = df_all.append(df)
print(df_all)
df_allTerms = df_allTerms.append(df)
ref_path = os.path.join( 'data', 'meta')
ref_file = os.path.join(ref_path, 'clinical' + term + '.csv')
df_all.to_csv(ref_file)
ref_path = os.path.join( 'data', 'meta')
ref_file = os.path.join(ref_path, 'clinical' + 'All' + '.csv')
df_allTerms.to_csv(ref_file)
def linebreak_removal(source_list):
"""
Remove line breaks from a block of text
"""
source_str = str(' '.join(source_list))
source_str = source_str.replace('\n', '')
single_str = source_str.replace('\r', '').replace('\n', '')
return(single_str)
def clinicalTrialsGov (nctid):
"""
Scrape the XML record for one NCT id and collect the named fields into a dict
"""
data = BeautifulSoup(requests.get("https://clinicaltrials.gov/ct2/show/" + nctid + "?displayxml=true").text, "xml")
subset, subsubset = define_subset()
tag_matches = data.find_all(subsubset)
tag_dict = {'' + current_tag.name.capitalize(): current_tag.text for current_tag in tag_matches}
for sub in subset:
tag_dict = multipleFields(data, sub, tag_dict)
# return removeEmptyKeys(tag_dict)
return tag_dict
def multipleFields (data, subset, tagDict):
"""
"""
fields = data.find_all(subset)
field = [each.text for each in fields]
# tagDict['ct' + subset.capitalize()] = ", ".join(field)
tagDict['' + subset.capitalize()] = ", ".join(field)
return tagDict
def removeEmptyKeys (dict1):
newDict = {k:v for (k, v) in dict1.items() if v}
return newDict
def define_subset():
"""
Use the study structure to identify the names of each field
# https://clinicaltrials.gov/api/info/study_structure
"""
subset = []
subset.append('study_type')
fileName = os.path.join( 'data', 'ref', 'studyFields.txt')
with open(fileName, 'r') as f:
for line in f:
if '="' in line:
target = line.split('="')
target = target[1]
if '"' in target:
target = target.split('"')
target = target[0]
subset.append(target)
subset.append('brief_title')
subset.append('official_title')
# headers
subset.append('id_info')
subset.append('sponsors')
subset.append('lead_sponsor')
subset.append('oversight_info')
subset.append('brief_summary')
subset.append('detailed_description')
subset.append('why_stopped')
subset.append('study_design_info')
subset.append('primary_outcome')
subset.append('secondary_outcome')
subset.append('intervention')
subset.append('eligibility')
subset.append('location')
subset.append('location_countries')
subset.append('responsible_party')
subset.append('overall_official')
subset.append('overall_contact')
subset.append('overall_contact_backup')
subset.append('responsible_party')
# point of contact
subset.append('lead_sponsor')
subset.append('sponsors_and_collaborators')
subset.append('investigators')
subset.append('study_chair')
subset.append('responsible_party')
subset.append('contacts')
subset.append('locations')
subset.append('sponsored')
subset.append('collaborator')
subset.append('information_provided_by')
subset.append('overall_official')
subset.append('overall_contact')
subset.append('overall_contact_email')
subset.append('overall_contact_backup')
subset.append('overall_contact_backup_email')
subset.append('overall_contact')
subset.append('locations')
# required info
subset.append('required_header')
subset.append('brief_summary')
subset.append('detailed_description')
# description
subset.append('clinicaltrials.gov_identifier')
subset.append('recruitment_status')
subset.append('brief_summary')
subset.append('recruitment_status')
subset.append('estimated_enrollment')
subset.append('allocation')
subset.append('intervention_model')
subset.append('intervention_model_description')
subset.append('primary_purpose')
subset.append('masking')
subset.append('enrollment')
subset.append('official_title')
subset.append('condition')
subset.append('minimum_age')
subset.append('maximum_age')
subset.append('gender')
subset.append('healthy_volunteers')
subset.append('phase')
subset.append('primary_outcome')
subset.append('secondary_outcome')
subset.append('arm_group')
subset.append('number_of_arms')
# logistics
subset.append('actual_study_start_date')
subset.append('estimated_primary_completion_date')
subset.append('estimated_study_completion_date')
subset.append('last_verified')
subset.append('keywords_provided_by')
subset.append('additional_relevant_mesh_terms')
subset.append('oversight_info')
subsubset = []
subsubset.append('overall_status')
subsubset.append('brief_title')
subsubset.append('official_title')
subsubset.append('study_type')
subsubset.append('verification_date')
subsubset.append('start_date')
subsubset.append('completion_date')
subsubset.append('primary_completion_date')
subsubset.append('study_first_submitted')
subsubset.append('study_first_submitted_qc')
subsubset.append('last_update_submitted')
subsubset.append('last_update_submitted_qc')
subsubset.append('last_update_posted')
subsubset.append('is_fda_regulated_drug')
subsubset.append('is_fda_regulated_device')
subsubset.append('has_dmc')
subsubset.append('biospec_retention')
subsubset.append('biospec_descr')
subsubset = subset  # note: this overrides the shorter list built above, so both queries use the full field set
# print('subset = ')
# print(subset)
return (subset, subsubset)
```
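`clinicalTrialsGov()` above generalizes one simple pattern over many tags; a minimal sketch of that pattern for a single field, assuming the legacy ct2 XML endpoint is still reachable (`get_field` is an illustrative name):
```python
import requests
from bs4 import BeautifulSoup

def get_field(nctid, tag='overall_status'):
    url = 'https://clinicaltrials.gov/ct2/show/' + nctid + '?displayxml=true'
    soup = BeautifulSoup(requests.get(url).text, 'xml')
    match = soup.find(tag)
    return match.text if match else None
```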
#### File: code/python/c0100_scrape_trials.py
```python
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import datetime
import requests
from c0001_retrieve_meta import retrieve_path
def scrape_trials():
"""
Objective: Query NIH Clinical Trials repository to database relevant trials
Query terms taken from a text file saved in user_provided folder
Tasks:
(1) Scrape clinicaltrials.gov using terms in a saved file
"""
print("running retrieve_trials")
tasks = [1]
if 1 in tasks: scrape_clinical_trials()
print("completed retrieve_trials")
def scrape_clinical_trials():
"""
"""
# Pull query terms from a clinicalTrials.gov
# query_terms = retrieve_path('search_terms_nih_clinical_trials')
df = pd.read_csv(retrieve_path('search_terms_nih_clinical_trials'))
query_terms = list(df['search_terms'])
stop_term = '<NStudiesReturned>0</NStudiesReturned>'
stop_term_json = '"NStudiesReturned":0'
# select file_extension: either json or xml
file_type = 'json'
for term in query_terms:
print('term = ' + term)
trials_path = retrieve_path('trials_path')
trials_path = os.path.join(trials_path, term + '.' + file_type)
print('trials_path = ' + trials_path)
f = open(trials_path, "w")
f.write('{')
f.close()
for i in range(3000):
if ' ' in term: term = term.replace(' ', '+')
url = 'https://clinicaltrials.gov/api/query/full_studies?expr='
url = url + str(term)
url = url + str('&min_rnk=') + str(i)
url = url + str('&max_rnk=' + str(i+1) + '&fmt=')
url = url + file_type
print('url = ' + str(url))
# request contents from link
r = requests.get(url)
# j = r.json()
text = r.text
# print(text)
if stop_term in str(text) or stop_term_json in str(text):
save_count(term, i)
print('end found at i = ' + str(i))
break
trials_path = retrieve_path('trials_path')
trials_path = os.path.join(trials_path, term + '.' + file_type)
f = open(trials_path, "a")
f.write('"Trial":' )
f.write('{')
f.write('"URL":' + url + ',' )
f.write('"SearchTerm":' + term + ',' )
f.write('"Rank":' + str(i) + ',' )
f.write(text)
f.write('}')
f.close()
trials_path = retrieve_path('trials_path')
trials_path = os.path.join(trials_path, term + '.' + file_type)
f = open(trials_path, "a")
f.write('}')
f.close()
def save_count(description, count):
"""
Save the number of queries found for the term
"""
now = datetime.datetime.now()
dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
print("date and time =", dt_string)
trial_count = retrieve_path('trial_count')
f = open(trial_count, "a")
f.write("\n")
f.write(str(dt_string) + ' , ' + description + ' , ' + str(count))
f.close()
if __name__ == "__main__":
scrape_trials()
```
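The hand-built string concatenation above still produces output that is not strictly valid JSON (repeated keys, bare trailing braces). A sketch of the safer route, accumulating parsed pages in a list and serializing once with the standard json module; `save_trials` and its arguments are illustrative names:
```python
import json

def save_trials(term, pages, path):
    # pages: list of dicts already parsed from r.json()
    payload = {'SearchTerm': term,
               'Trials': [{'Rank': i, 'Study': page} for i, page in enumerate(pages)]}
    with open(path, 'w') as f:
        json.dump(payload, f, indent=2)
```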
|
{
"source": "jesnyder/bioMaking",
"score": 2
}
|
#### File: code/python/c0200_html_build.py
```python
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import shutil, os
from c0101_search_patent import search_patent
from c0102_search_clinic import search_clinic
from c0103_search_pubs import search_pubs
from c0002_html_table import html_table
def html_build():
"""
"""
print("running html_build")
html_table()
html_path = os.path.join('code', 'html')
html_file = os.path.join(html_path, 'table_' + 'patents' + '.html')
old_file = html_file
new_file = 'index.html'
shutil.copy(old_file, new_file)
print("completed search_resources")
```
|
{
"source": "jesnyder/MeasuredStress",
"score": 3
}
|
#### File: python/archive/c0101_find_record_duration.py
```python
from datetime import datetime
from dateutil import tz
import glob
import os
import pandas as pd
import sys
import time
def find_record_duration():
"""
"""
print("running find_record_duration")
path_folders = os.path.join('..', '..', 'source_measurements', 'PMR', 'ref' )
save_file = os.path.join(path_folders, 'source_list_01' + '.csv' )
df = pd.read_csv(save_file)
del df['Unnamed: 0']
print(df)
record_begin = []
record_end = []
record_duration = []
for record in df['record']:
df_record = df[(df['record']==record)]
starts = []
ends = []
for path in df_record['path_long']:
source_path = os.path.join(path, 'EDA.csv')
df_source = pd.read_csv(source_path)
header = list(df_source.columns.values)
record_start = header[-1]
print('record start = ' + str(record_start))
freq = df_source[record_start][0]
print('frequency = ' + str(freq))
measurements = df_source[record_start][1:]
print('measurement number = ' + str(len(measurements)))
seconds = len(measurements)/freq
minutes = seconds/60
print('minutes = ' + str(minutes))
end = float(record_start) + seconds
starts.append(float(record_start))
ends.append(end)
record_begin.append(max(starts))
record_end.append(min(ends))
record_duration.append((min(ends)-max(starts))/60)
df['starts'] = record_begin
df['ends'] = record_end
df['duration'] = record_duration
print('record_end = ' )
print(record_end)
print('len(record_end) = ' + str(len(record_end)))
del df['begin_unix']
del df['shared_begin']
print(df)
path_folders = os.path.join('..', '..', 'source_measurements', 'PMR', 'ref' )
if not os.path.isdir(path_folders): os.mkdir(path_folders)
save_file = os.path.join(path_folders, 'source_list_02' + '.csv' )
df.to_csv(save_file)
print("completed find_record_duration")
if __name__ == "__main__":
find_record_duration()
```
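The duration arithmetic above reduces to one idea: the shared window of several recordings runs from the latest start to the earliest end. A tiny standalone sketch of the same calculation:
```python
def shared_window(starts, ends):
    # starts/ends in seconds (e.g. unix time); duration returned in minutes
    begin, finish = max(starts), min(ends)
    return begin, finish, (finish - begin) / 60

print(shared_window([0, 30], [600, 540]))  # (30, 540, 8.5)
```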
#### File: python/archive/c0102_format_source.py
```python
from datetime import datetime
from dateutil import tz
import glob
import os
import pandas as pd
import sys
import time
def format_source():
"""
"""
print("running format_source")
sensors = ['EDA', 'HR', 'TEMP']
path_folders = os.path.join('..', '..', 'source_measurements', 'PMR', 'ref' )
save_file = os.path.join(path_folders, 'source_list_02' + '.csv' )
df = pd.read_csv(save_file)
del df['Unnamed: 0']
print(df)
for sensor in sensors:
for record in df['record']:
df_records = pd.DataFrame()
df_record = df[(df['record']==record)]
print('df_record =')
print(df_record)
shared_start = max(list(df_record['starts']))
shared_end = min(list(df_record['ends']))
for path in df_record['path_long']:
wearable = list(df[(df['path_long']==path)]['wearable'])[0]
source_path = os.path.join(path, sensor + '.csv')
df_source = pd.read_csv(source_path)
print('df_source = ')
print(df_source)
header = list(df_source.columns.values)[0]
print('header = ')
print(header)
information = list(df_source[header])
freq = information[0]
print('frequency = ' + str(freq))
information = information[1:]
record_start = float(header)
record_length = len(information)/freq
record_end = record_start + record_length
print('record start = ' + str(record_start) + ' record end = ' + str(record_end) + ' length = ' + str(record_length/60) )
time_unix = []
for info in information:
time_unix.append(record_start + len(time_unix)/freq)
df3 = pd.DataFrame()
df3['time_unix'] = time_unix
df3[wearable] = information
df3 = df3[(df3['time_unix']>shared_start+30) & (df3['time_unix']<shared_end-30)]
df_records[str(wearable + '_time_unix')] = df3['time_unix']
df_records[wearable] = df3[wearable]
time = []
for item in list(df_records[wearable]):
time.append(len(time)*1/freq/60)
df_records['time'] = time
path_folders = os.path.join('..', '..', 'source_measurements', 'PMR', 'formatted', sensor )
print('path folders = ' + str(path_folders))
if not os.path.exists(path_folders):
os.mkdir(path_folders)
save_file = os.path.join(path_folders, str(record).zfill(2) + '.csv')
df_records.to_csv(save_file)
print('df_records =')
print(df_records)
print("completed format_source")
if __name__ == "__main__":
format_source()
```
#### File: python/archive/c0103_timestamp_records.py
```python
from datetime import datetime
from dateutil import tz
import glob
import os
import pandas as pd
import sys
import time
def timestamp_records():
"""
"""
print("running format_source")
sensors = ['EDA', 'HR', 'TEMP']
path_folders = os.path.join('..', '..', 'source_measurements', 'PMR', 'ref' )
save_file = os.path.join(path_folders, 'source_list_02' + '.csv' )
df = pd.read_csv(save_file)
del df['Unnamed: 0']
print(df)
timestamped_path = []
for sensor in sensors:
for record in df['record']:
df_record = df[(df['record']==record)]
print('df_record =')
print(df_record)
shared_start = max(list(df_record['starts']))
shared_end = min(list(df_record['ends']))
for path in df_record['path_long']:
wearable = list(df[(df['path_long']==path)]['wearable'])[0]
source_path = os.path.join(path, sensor + '.csv')
df_source = pd.read_csv(source_path)
print('df_source = ')
print(df_source)
header = list(df_source.columns.values)[0]
print('header = ')
print(header)
information = list(df_source[header])
freq = information[0]
print('frequency = ' + str(freq))
information = information[1:]
record_start = float(header)
record_length = len(information)/freq
record_end = record_start + record_length
print('record start = ' + str(record_start) + ' record end = ' + str(record_end) + ' length = ' + str(record_length/60) )
time_unix = []
for info in information:
time_unix.append(record_start + len(time_unix)/freq)
df_timestamped = pd.DataFrame()
df_timestamped[str(str(wearable) + '_time_unix')] = time_unix
df_timestamped[str(str(wearable) + '_measurements')] = information
path_folders = os.path.join('..', '..', 'source_measurements', 'PMR', 'timestamped', sensor )
print('path folders = ' + str(path_folders))
if not os.path.exists(path_folders):
os.mkdir(path_folders)
save_file = os.path.join(path_folders, str(str(wearable) + ' ' + str(record).zfill(2) + '.csv'))
# os.mkdir(save_file)
df_timestamped.to_csv(save_file)
timestamped_path.append(save_file)
if __name__ == "__main__":
timestamp_records()
```
#### File: python/archive/c0103_trim_record_to_max.py
```python
from c0101_retrieve_ref import retrieve_ref
from c0102_timestamp import timestamp_source
from c0104_plot_timestamp import plot_timestamp
from c0105_find_records import find_records
from c0106_record_to_summary import record_to_summary
from c0108_save_meta import save_meta
from c0109_retrieve_meta import retrieve_meta
import glob
import os
import pandas as pd
def trim_record_to_max():
"""
Cap each record length at the configured maximum and store it in the meta file
"""
print("finding the end of the record")
study_list = retrieve_ref('study_list')
sensor_list = retrieve_ref('sensor_list')
max_record_time = retrieve_ref('max_record_time')
sensor = 'TEMP'
for study in study_list:
df_meta = retrieve_meta(study)
source_path = list(df_meta['source_path'])
df_meta['recordLength'] = [None] * len(source_path)
for record in source_path:
# timestamped_file = os.path.join(study, 'timestamp', record, sensor + ".csv")
timestamped_file = os.path.join(study, 'formatted', 'source', record, 'All' , sensor + ".csv")
df_timestamped = pd.read_csv(timestamped_file)
record_length = max(list(df_timestamped['timeMinutes']))
if record_length > max_record_time:
record_length = max_record_time
record_length = round(record_length, 4)
i = df_meta[ df_meta['source_path'] == record].index.values[0]
df_meta.loc[i, 'recordLength' ] = record_length
# save the record length to meta file
save_meta(study, df_meta)
```
#### File: python/archive/c0107_decide_inclusion.py
```python
from c0101_retrieve_ref import retrieve_ref
from c0102_timestamp import timestamp_source
from c0103_trim_record_to_max import trim_record_to_max
from c0104_plot_timestamp import plot_timestamp
from c0105_find_records import find_records
from c0106_record_to_summary import record_to_summary
from c0108_save_meta import save_meta
from c0109_retrieve_meta import retrieve_meta
import os
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def decide_inclusion():
"""
Determine inclusion based on length of the record
"""
print("begin decide inclusion")
study_list = retrieve_ref('study_list')
sensor_list = retrieve_ref('sensor_list')
max_record_time = retrieve_ref('max_record_time')
min_record_time = retrieve_ref('min_record_time')
for study in study_list:
df_meta = retrieve_meta(study)
df_meta = df_meta.sort_values(by=['recordLength'])
records_found = list(df_meta['source_path'])
recordLength = list(df_meta['recordLength'])
inclusionList = []
for i in range(len(recordLength)):
if recordLength[i] < min_record_time:
inclusionList.append('excluded')
else:
inclusionList.append('included')
# save the record length to meta file
df_meta['included'] = inclusionList
save_meta(study, df_meta)
df_meta = df_meta.drop(df_meta[df_meta['included'] == 'excluded'].index)
df_meta = df_meta.sort_values(by=['source_path'])
save_meta(study, df_meta)
print("completed decide inclusion")
```
#### File: python/archive/c0113_plot_acc.py
```python
from c0101_retrieve_ref import retrieve_ref
from c0101_retrieve_ref import retrieve_ref_color
from c0101_retrieve_ref import retrieve_sensor_unit
from c0102_timestamp import timestamp_source
from c0109_retrieve_meta import retrieve_meta
from c0111_retrieve_analyzed import retrieve_analyzed
import os
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def plot_acc():
"""
compare the curves to verify the end of the record was properly found
plot the source measurements for temperature
plot the timestamped data for the temperature
plot the truncated data
plot the timestamped and truncated on the same plot
"""
print("begin plotting acc data")
study_list = retrieve_ref('study_list')
sensor_list = retrieve_ref('sensor_list')
analysis_list = retrieve_ref('analysis_list')
for study in study_list:
metadata_path = os.path.join(study, 'meta')
metadata_file = os.path.join(metadata_path, 'metadata.csv')
df_meta = pd.read_csv(metadata_file)
# print(df_meta)
# timestamp ACC
sensor = 'ACC'
for study in study_list:
df_meta = retrieve_meta(study)
source_path = list(df_meta['source_path'])
for record in source_path:
row_num, col_num, plot_num = 5, 1, 0
row_width_mulp, col_width_mulp = 20, 5
plot_width, plot_height = col_num*row_width_mulp, row_num*col_width_mulp
plt.figure(figsize=(plot_width, plot_height))
analysis_type = 'truncate'
df = retrieve_analyzed(study, analysis_type, record, sensor)
for name in list(df.columns):
if 'time' not in name:
plot_num += 1
plt.subplot(row_num, col_num, plot_num)
colorScatter = retrieve_ref_color(str('color_' + str(sensor) + '_' + str(name)))
plt.scatter(df['timeMinutes'], df[name], color = colorScatter, label = str(name))
# plt.scatter(df['timeMinutes'], df['measurement'], label = str('vector'))
plt.title( analysis_type + ' ' + record + ' ' + sensor)
plt.xlabel('Time (Minutes)')
plt.ylabel(str(sensor + ' ' + name))
plt.xlim([0, 1.02*max(list(df['timeMinutes']))])
plt.legend(bbox_to_anchor=(1, 0.5, 0.3, 0.2), loc='upper left')
plot_num += 1
plt.subplot(row_num, col_num, plot_num)
for name in list(df.columns):
if 'time' not in name:
colorScatter = retrieve_ref_color(str('color_' + str(sensor) + '_' + str(name)))
plt.scatter(df['timeMinutes'], df[name], color = colorScatter, label = str(name))
plt.title( analysis_type + ' ' + record + ' ' + sensor + ' ' + name)
plt.xlabel('Time (Minutes)')
sensor_unit = retrieve_sensor_unit(sensor)
plt.ylabel(str(sensor + ' ' + name + ' ( ' + str(sensor_unit) + ' )'))
plt.xlim([0, 1.02*max(list(df['timeMinutes']))])
plt.legend(bbox_to_anchor=(1, 0.5, 0.3, 0.2), loc='upper left')
# save the plot
plot_path = os.path.join(study, 'plot')
if not os.path.isdir(plot_path): os.mkdir(plot_path)
plot_path = os.path.join(study, 'plot', 'timestamp')
if not os.path.isdir(plot_path): os.mkdir(plot_path)
plot_path = os.path.join(study, 'plot', 'timestamp', record)
if not os.path.isdir(plot_path): os.mkdir(plot_path)
plot_file = os.path.join(plot_path, sensor + '.png')
plt.savefig(plot_file, bbox_inches='tight')
print('saved plotted acc figure - ' + str(plot_file))
print("completed plotting acc data")
```
#### File: python/archive/c0118_find_paired_duration.py
```python
from c0101_retrieve_ref import retrieve_ref
from c0101_retrieve_ref import retrieve_ref_color
from c0101_retrieve_ref import retrieve_sensor_unit
from c0102_timestamp import timestamp_source
from c0103_trim_record_to_max import trim_record_to_max
from c0104_plot_timestamp import plot_timestamp
from c0105_find_records import find_records
from c0106_record_to_summary import record_to_summary
from c0107_decide_inclusion import decide_inclusion
from c0108_save_meta import save_meta
from c0109_retrieve_meta import retrieve_meta
from c0110_find_temp_end import find_temp_end
from c0111_retrieve_analyzed import retrieve_analyzed
from c0112_plot_truncate import plot_truncate
from c0113_plot_acc import plot_acc
import os
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def find_paired_duration():
"""
Find the duration of the record
Add the end of the coregistered record in the meta file
"""
print("begin find_paired_duration")
study_list = retrieve_ref('study_list')
for study in study_list:
df_meta = retrieve_meta(study)
# print(df_meta)
source_path = list(df_meta['source_path'])
# add empty column
df_meta['recordDuration'] = [None] * len(source_path)
for record in source_path:
# save that value in the dataframe
i = df_meta[ df_meta['source_path'] == record].index.values[0]
print('i = ' + str(i))
recordBegin = int(df_meta.loc[i, 'recordBegin' ] )
print('recordBegin = ' + str(recordBegin))
recordEnd = int(df_meta.loc[i, 'recordEnd' ] )
print('recordEnd = ' + str(recordEnd))
recordDuration = round((recordEnd - recordBegin)/60 , 4)
df_meta.loc[i, 'recordDuration' ] = recordDuration
print('recordDuration = ' + str(recordDuration))
save_meta(study, df_meta)
print('df_meta = ')
print(df_meta)
```
#### File: code/python/c0100_clean_data.py
```python
from c0106_find_records import find_records
from c0112_define_records import define_records
from c0113_format_source import format_source
from c0114_format_truncate import format_truncate
from c0116_coregister_formatted import coregister_formatted
from c0117_clean_save import clean_save
from c0118_segment_formatted import segment_formatted
import os
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def clean_data():
"""
Clean the data so each record only includes information measured from the participant.
If the wearable is taken off and then turned off - the extra time off-wrist is truncated
If the wearable is left on between participants, so two records are combined in one - the record is broken into two
If two wearables were used together (one on the left and one on the right wrist) - the data is coregistered.
Timestamps are assigned to the measurements.
Records are broken into segments for comparison - preStudy, Study, postStudy
Here are some definitions:
1. source - a description of data, unedited measurements downloaded from the cloud client
2. truncate - a description of data, source data only when on the participant's wrist
3. timestamp - action taken on the data, create the list of unix and time in minutes corresponding to the measurements
4. coregister - action taken on the data, pair up records, for some studies two wearables were used for the same patient
5. segment - action taken on the data, break the entire truncated record into a preStudy, Study, and postStudy period
6. define record - processing step, establish the beginning and end of the record in unix time
7. embedded record - type of record, a record that contains two separate recorded sessions from what could be two people
embedded records need to be broken up into two separate records, which is handled in this cleaning process
How is the end of the record found?
Use the temperature sensor. The room is significantly colder than the participant's wrist.
If the temperature drops more than 2 deg C in 3 seconds - the wearable is assumed removed.
If the temperature rises more than 2 deg C in 3 seconds - the wearable is assumed put on - only used for embedded records.
"""
print("begin cleaning data")
# find and list all records
find_records()
# define record - find record begin, end, length
define_records()
# timestamp the source
format_source()
# timestamp the source
format_truncate()
# coregister wearables
coregister_formatted()
# save processed data as clean
clean_save()
# segment clean records
format_type = 'clean'
segment_formatted(format_type)
print("completed cleaning data")
```
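The docstring above spells out the removal rule: a drop of more than 2 deg C within 3 seconds means the wearable came off the wrist. A hedged sketch of that rule, assuming a 4 Hz TEMP stream (so 3 seconds is a 12-sample lookback); the function name and defaults are illustrative:
```python
def find_removal_index(temps, freq=4.0, drop_c=2.0, window_s=3.0):
    lookback = int(window_s * freq)
    for i in range(lookback, len(temps)):
        # compare each sample against the one 3 seconds earlier
        if temps[i - lookback] - temps[i] > drop_c:
            return i  # wearable assumed removed here
    return None  # never removed within the record

print(find_removal_index([33.0] * 20 + [30.5] * 5))  # 20
```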
#### File: code/python/c0101_retrieve_ref.py
```python
import os
import pandas as pd
def retrieve_ref(valueName):
"""
Input: value name
Output: value saved in reference file
"""
# reference file located
# cwd = os.getcwd()
# print("cwd = ")
# print(str(cwd))
# specifiy path to reference file
ref_path = os.path.join( 'ref')
ref_file = os.path.join(ref_path, 'reference' + '.csv' )
# print('ref path: ' + str(ref_file))
# read in all reference values as a dataframe
df = pd.read_csv(ref_file)
# del df['Unnamed: 0']
# print('ref = ')
# print(df)
# print(df['valueName'])
# print(valueName)
# search the dataframe using string matching
# looking for the same string as passed into the function
valueRow = df[df['valueName'].str.match(valueName)]
# if no match is found
# then write the value into the reference file to be specified
if len(valueRow) == 0:
file = open(ref_file, "a")
file.write('\n')
file.write(valueName)
file.write(' , unspecified')
file.write('\n')
file.close()
print('item missing from reference file - ' + str(valueName))
# print("valueRow = ")
# print(valueRow)
#
valueValue = valueRow.iloc[0,1]
valueValue = str(valueValue)
valueValue = valueValue.split(' ')
valueValue = list(valueValue)
# drop empty tokens without mutating the list during iteration
valueValue = [i for i in valueValue if len(i) >= 1]
if len(valueValue) == 1:
valueValue = float(valueValue[0])
# print(str(valueName) + ' = ')
# print(valueValue)
return(valueValue)
def retrieve_ref_color(valueName):
"""
for a named variable
return the color used in scatter plots
"""
segment_list = retrieve_ref('segment_list')
if valueName in segment_list:
valueName = str('color_' + valueName)
# print('valueName = ' + str(valueName))
valueValue = retrieve_ref(valueName)
valueColor = []
for item in valueValue:
valueColor.append(float(item))
valueColor = list(valueColor)
return(valueColor)
def retrieve_sensor_unit(sensor):
"""
for a sensor
return the associated unit
"""
sensor_list = retrieve_ref('sensor_list')
sensor_unit_list = retrieve_ref('sensor_unit_list')
for i in range(len(sensor_list)):
if sensor == sensor_list[i]:
sensor_unit = sensor_unit_list[i]
return(sensor_unit)
def retrieve_ref_color_wearable_segment(wearable_num, segment):
"""
"""
colorWearable = retrieve_ref_color(segment)
refName = str('color_modifier_wearable_' + str(wearable_num))
# print('refName = ' + str(refName))
modifier = retrieve_ref(refName)
colorWearableSegment = []
for item in colorWearable:
colorWearableSegment.append(modifier * item)
# print('colorWearableSegment = ')
# print(colorWearableSegment)
if max(colorWearableSegment) >= 1:
for i in range(len(colorWearableSegment)):
colorWearableSegment[i] = colorWearableSegment[i]/(1.2*max(colorWearableSegment))
# print('colorWearableSegment = ')
# print(colorWearableSegment)
return(colorWearableSegment)
```
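retrieve_ref() above mixes lookup, self-repair of the reference file, and token parsing. A compact lookup-only equivalent, assuming the same two-column reference.csv (valueName, value); `lookup_ref` is an illustrative name:
```python
import os
import pandas as pd

def lookup_ref(value_name, ref_file=os.path.join('ref', 'reference.csv')):
    df = pd.read_csv(ref_file)
    match = df.loc[df['valueName'] == value_name]
    if match.empty:
        raise KeyError(value_name + ' missing from ' + ref_file)
    tokens = str(match.iloc[0, 1]).split()
    # a single token is treated as a number, several tokens as a list
    return float(tokens[0]) if len(tokens) == 1 else tokens
```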
#### File: code/python/c0104_retrieve_meta.py
```python
import glob
import os
import pandas as pd
def retrieve_meta(study):
"""
retrieve metadata
"""
metadata_path = os.path.join('studies', study, 'meta')
metadata_file = os.path.join(metadata_path, 'metadata.csv')
# print('metadata_file = ' + str(metadata_file))
df = pd.read_csv(metadata_file)
# remove unnamed columns created from reading in the csv
col_names = df.head()
for name in col_names:
if 'Unnamed' in name:
del df[name]
return(df)
```
#### File: code/python/c0105_record_to_summary.py
```python
from c0103_save_meta import save_meta
from datetime import date
from datetime import datetime
import os
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def record_to_summary(study, name, value):
"""
Append a name/value pair to the study summary file
"""
metadata_path = os.path.join('studies', study, 'meta')
summary_file = os.path.join(metadata_path, 'summary.txt')
if name == "Records found":
file = open(summary_file, "w")
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
file.write('File made on ' + str(date.today()) + ' ' + str(current_time) )
file.write('\n')
else:
file = open(summary_file, "a")
file.write('\n' + str(name) + ' = ' + str(value) )
file.close()
```
#### File: code/python/c0107_timestamp_source.py
```python
from c0101_retrieve_ref import retrieve_ref
from c0102_build_path import build_path
from c0103_save_meta import save_meta
from c0104_retrieve_meta import retrieve_meta
from c0105_record_to_summary import record_to_summary
import math
import numpy as np
import os
import pandas as pd
def timestamp_source(study, format_type, segment, record, sensor):
"""
Read one sensor's source csv, build timestamps, save and return the timestamped dataframe
"""
# read in the source
source = os.path.join('studies', study, 'source', record, sensor + '.csv')
df_source = pd.read_csv(source)
# print('df_source = ')
# print(df_source)
df_timestamped = build_timestamps(df_source, sensor)
path = ['studies', study, 'formatted', str(format_type), str(record), str(segment)]
path = build_path(path)
file = os.path.join(path, sensor + ".csv")
# print('timestamped_file = ' + str(timestamped_file))
df_timestamped.to_csv(file)
# print('timestamped saved: ' + str(file))
return(df_timestamped)
def build_timestamps(df_source, sensor):
"""
from the source dataframe
build the timestamped lists
"""
# find the beginning time
timeStart = list(df_source.columns)
timeStart = float(timeStart[0])
# print('timeStart = ' + str(timeStart))
freq = list(df_source.iloc[0])
freq = float(freq[0])
# print('freq = ' + str(freq))
measurementList = list(df_source.iloc[1:,0])
# print('measurementList = ')
# print(measurementList)
# the ACC sensor takes measurements in the x- , y- , and z- axis
# calculate the magnitude of the acceleration from the three sensors
if sensor == 'ACC':
xMeas = list(df_source.iloc[1:,0])
yMeas = list(df_source.iloc[1:,1])
zMeas = list(df_source.iloc[1:,2])
# take absolute value of acceleration
xMeas = np.abs(xMeas)
yMeas = np.abs(yMeas)
zMeas = np.abs(zMeas)
measurementList = []
for i in range(len(xMeas)):
magACCsquared = math.pow(xMeas[i],2) + math.pow(yMeas[i],2) + math.pow(zMeas[i],2)
magACC = math.sqrt(magACCsquared)
measurementList.append(magACC)
timeUnix, timeMinutes = [], []
for i in range(len(measurementList)):
timeLapsed = i/freq
timeUnix.append(float(round(timeStart + timeLapsed, 3)))
timeMinutes.append(round(timeLapsed/60, 5 ))
# build dataframe
df_timestamped = pd.DataFrame()
df_timestamped['timeUnix'] = timeUnix
df_timestamped['timeMinutes'] = timeMinutes
df_timestamped['measurement'] = measurementList
if sensor == 'ACC':
df_timestamped['xMeas'] = xMeas
df_timestamped['yMeas'] = yMeas
df_timestamped['zMeas'] = zMeas
return(df_timestamped)
```
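The per-sample loop in build_timestamps() computes the Euclidean magnitude of the three ACC axes; numpy can do the same in one vectorized call. A sketch, assuming x/y/z arrive as equal-length numeric sequences:
```python
import numpy as np

def acc_magnitude(x, y, z):
    xyz = np.column_stack([np.abs(x), np.abs(y), np.abs(z)])
    return np.linalg.norm(xyz, axis=1)  # sqrt(x^2 + y^2 + z^2) per sample

print(acc_magnitude([3], [4], [0]))  # [5.]
```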
#### File: code/python/c0200_plot_clean.py
```python
from c0101_retrieve_ref import retrieve_ref
from c0101_retrieve_ref import retrieve_ref_color
from c0101_retrieve_ref import retrieve_sensor_unit
from c0102_build_path import build_path
from c0201_plot_source import plot_source
from c0202_plot_acc import plot_acc
from c0203_plot_coregister import plot_coregister
from c0204_plot_segment import plot_segment
import os
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def plot_cleaned():
"""
check the quality of the clean data by plotting
compare source / truncate / coregister / clean
"""
print("begin plotting the clean data ")
study_list = retrieve_ref('study_list')
for study in study_list:
plot_source(study)
plot_acc(study)
plot_coregister(study)
plot_segment(study)
print("completed plotting the clean data")
```
#### File: code/python/c0301_retrieve_analysis.py
```python
from c0101_retrieve_ref import retrieve_ref
from c0101_retrieve_ref import retrieve_ref_color
from c0101_retrieve_ref import retrieve_ref_color_wearable_segment
from c0102_build_path import build_path
from c0104_retrieve_meta import retrieve_meta
import os
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def retrieve_analysis(study, analysis_type, segment, sensor):
"""
"""
if 'statistics_mean' == analysis_type:
file = os.path.join('studies', study, 'analyzed', 'statistics', 'mean', segment, sensor + '.csv')
file = os.path.join('studies', study, 'analyzed', 'statistics', 'mean', segment, sensor + '.csv')
df = pd.read_csv(file)
colNames = list(df.head())
for colName in colNames:
if 'Unnamed' in colName:
del df[colName]
return(df)
```
#### File: code/python/c0403_plot_regression.py
```python
from c0101_retrieve_ref import retrieve_ref
from c0101_retrieve_ref import retrieve_ref_color
from c0101_retrieve_ref import retrieve_ref_color_wearable_segment
from c0101_retrieve_ref import retrieve_sensor_unit
from c0102_build_path import build_path
from c0104_retrieve_meta import retrieve_meta
from c0402_retrieve_regression import retrieve_regression
import math
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
def plot_regression():
"""
"""
print('plotting regression')
study_list = retrieve_ref('study_list')
sensor_list = retrieve_ref('sensor_list')
segment_list = retrieve_ref('segment_list')
degree_list = retrieve_ref('degree_list')
degree_list = [int(x) for x in degree_list]
for study in study_list:
format_type = 'clean'
clean_path = os.path.join('studies', study, 'formatted', format_type)
recordNames = os.listdir(clean_path)
for sensor in sensor_list:
for degree in degree_list:
for record in recordNames:
row_num, col_num, plot_num = len(segment_list)+2, 1, 0
row_width_mulp, col_width_mulp = 14, 5
plot_width, plot_height = col_num*row_width_mulp, row_num*col_width_mulp
plt.figure(figsize=(plot_width, plot_height))
for segment in segment_list:
plot_num += 1
plt.subplot(row_num, col_num, plot_num)
complete = plot_regression_segment(study, record, segment, sensor, degree)
plot_num += 1
plt.subplot(row_num, col_num, plot_num)
for segment in segment_list[:-1]:
complete = plot_regression_segment(study, record, segment, sensor, degree)
plt.title(' ')
plot_num += 1
plt.subplot(row_num, col_num, plot_num)
complete = plot_coefficient_bar(study, record, sensor, degree)
plt.title(' ')
path = ['studies', study, 'plotted', 'regression', str(degree), record]
path = build_path(path)
file = os.path.join(path, sensor + ".png")
plt.savefig(file, bbox_inches='tight')
print('plotted regression for ' + file)
def plot_regression_segment(study, record, segment, sensor, degree):
"""
"""
df_coef = retrieve_regression(study, segment, sensor, degree)
colNamesCoef = list(df_coef.head())
format_type = 'clean'
source = os.path.join('studies', study, 'formatted', format_type, record, segment, sensor + '.csv')
print('source = ' + source)
df = pd.read_csv(source)
colNames = list(df.head())
print('colNames = ')
print(colNames)
xx = list(df['timeMinutes'])
if len(xx) == 0:
return(0)
yyReal = list(df[colNames[-1]])
i = df_coef[df_coef['recordName'] == record].index.values[0]
coeff = df_coef.loc[i, colNamesCoef[-1]]
print('coeff = ')
print(coeff)
try:
float(coeff)
coeff = [coeff]
except:
coeff = coeff.split(' ')
print('coeff = ')
print(coeff)
# print('coeff =')
# print(coeff)
yy = []
for x in xx:
# print('x = ' + str(x))
# print('len(coeff) = ' + str(len(coeff)))
y = 0
coef_equation = []
for i in range(len(coeff)):
y = y + float(coeff[i])*math.pow(x, len(coeff) - i -1)
# y = y + float(coeff[i])*math.pow(x, i)
coef_equation.append(str(str(round(float(coeff[i]), 4)) + '*x^' + str(len(coeff) - i -1)))
yy.append(y)
# explain polyval https://numpy.org/devdocs/reference/generated/numpy.polyval.html
wearable_num = 1
colorWearableSegment = retrieve_ref_color_wearable_segment(wearable_num, segment)
plt.scatter(xx, yyReal, color = colorWearableSegment, label = 'measured')
wearable_num = 2
colorWearableSegment = retrieve_ref_color_wearable_segment(wearable_num, segment)
labelPolyfit = str('polyfit degree = ' + str(degree))
plt.plot(xx, yy, '--', color = colorWearableSegment, label = labelPolyfit)
plt.xlabel('Time (Minutes)')
sensor_unit = retrieve_sensor_unit(sensor)
plt.ylabel(sensor + ' ' + sensor_unit )
plt.title(segment + ' polyfit for degree ' + str(degree))
coef_str = [str(x) for x in coef_equation]
coef_str = ' '.join(coef_str)
print('coef_str = ')
print(coef_str)
plt.legend(bbox_to_anchor=(1, 0.5, 0.3, 0.2), loc='upper left')
plt.title(segment + ' polyfit for degree ' + str(degree) + ' ' + str(coef_str))
return(0)
def plot_coefficient_bar(study, record, sensor, degree):
"""
"""
segment_list = retrieve_ref('segment_list')
for segment in segment_list:
print('bar chart for segment = ' + str(segment))
df_coef = retrieve_regression(study, segment, sensor, degree)
print('df_coef = ')
print(df_coef)
i = df_coef[df_coef['recordName'] == record].index.values[0]
print('i = ' + str(i))
colNames = list(df_coef.head())
coeff = df_coef.loc[i, colNames[-1]]
print('coeff = ' + str(coeff))
a = pd.isnull(df_coef.loc[i, colNames[-1]])
print('a = ' + str(a))
# pd.isnull returns a bool, so compare directly rather than to the string 'True'
if a or str(df_coef.loc[i, colNames[-1]]) == 'None':
print('cell empty a = ' + str(a) + ' coeff = ')
print(coeff)
continue
print('cell not empty a = ' + str(a) + ' coeff = ')
print(coeff)
try:
float(coeff)
coeff = [float(coeff)]
print('try found coeff = ')
print(coeff)
except:
coeff = coeff.split(' ')
coeff = [float(x) for x in coeff]
print('except found coeff = ')
print(coeff)
xx = [segment_list.index(segment)]
yy = [coeff[0]]
wearable_num = 1
colorSegment = retrieve_ref_color_wearable_segment(wearable_num, segment)
plt.bar(xx, yy, color = colorSegment )
plt.xticks(range(len(segment_list)), segment_list)
```
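The coefficient loop in plot_regression_segment() reimplements polynomial evaluation by hand (and the code already links to numpy's polyval docs). A sketch using np.polyval directly, assuming coefficients ordered highest degree first, as np.polyfit returns them:
```python
import numpy as np

coeff = [2.0, -1.0, 3.0]        # 2x^2 - x + 3
xx = np.linspace(0, 10, 5)
yy = np.polyval(coeff, xx)      # evaluate the polynomial at each x
print(yy)
```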
|
{
"source": "jesnyder/MeasuringStress",
"score": 3
}
|
#### File: code/python/c8000_openscad_modeling.py
```python
from c0101_retrieve_ref import retrieve_ref
from c8001_modeling_test import modeling_test
from c8002_modeling_mean import modeling_mean
from datetime import date
from datetime import datetime
import math
import os
import pandas as pd
from pytz import reference
from shutil import copyfile
def openscad_modeling():
"""
Write code for openscad to model parameters of the analysis
"""
print("openSCAD modeling begin")
modeling_test()
modeling_mean()
"""
study_list = retrieve_ref('study_list')
sensor_list = retrieve_ref('sensor_list')
for study in study_list:
metadata_file = os.path.join(study, 'meta', 'metadata.csv')
df_meta = pd.read_csv(metadata_file)
df_meta = df_meta.sort_values(by=['recordLength'])
records_found = list(df_meta['source_path'])
recordLength = list(df_meta['recordLength'])
openscad_path = os.path.join(study, 'openSCAD')
if not os.path.isdir(openscad_path ): os.mkdir(openscad_path)
openscad_file = os.path.join(openscad_path, str(study) + '_' + 'cleaning_data.scad')
file = open(openscad_file, "w")
file = open(openscad_file, "w")
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
file.write('// File made on ' + str(date.today()) + ' ' + str(current_time) )
file.write('\n' + '// records found = ' + str(len(records_found)))
# file.write('\n' + 'd = ' + str(10) + ' ; ' + '\n')
# file.write('\n' + 'sphere( size = ' + str(d) + ') ;')
count_xaxis = math.sqrt(len(records_found))
spacing = round(max(recordLength)*2, 3)
file.write('\n' + '// spacing = ' + str(spacing))
for i in range(len(records_found)):
# print('index = ' + str(i))
x_num = int((i+1)/count_xaxis)
y_num = int((i+1)%count_xaxis)
z_num = 0
length = round(recordLength[i], 3)
# print('x_num, y_num = ' + str(x_num) + ' , ' + str(y_num))
file.write('\n')
file.write('\n' + 'translate([ ' + str(spacing*x_num) + ' , ' + str(spacing*y_num) + ' , ' + str(spacing*z_num) + '])')
file.write('\n' + 'union() {')
file.write(' ' + 'color([ ' + str(1) + ' , ' + str(0) +' , ' + str(1) + ' ])')
file.write(' ' + 'sphere(' + str(length) + ' , $fn=60);')
file.write(' ' + 'color([ ' + str(0.5) + ' , ' + str(0.5) +' , ' + str(1) + ' ])')
file.write(' ' + 'cylinder( r= ' + str(length/2) + ', h= ' + str(2*length) + ' , $fn=60);')
file.write(' } ')
file.write('\n')
file.write('\n')
file.close()
"""
print("openSCAD modeling complete")
```
|
{
"source": "jesnyder/plastic_bio",
"score": 3
}
|
#### File: code/python/a0010_analyze_general.py
```python
import csv
import codecs
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import re
import statistics
from a0001_retrieve_meta import retrieve_path
from a0001_retrieve_meta import clean_dataframe
from find_color import find_color
def analyze_general(name_article):
"""
Objective: compare cell sources in the database
Tasks:
1. Aggregate
2. Add a standardized column for year
3. Count the unique values
4. Plot the unique values
5. Count the articles per year
"""
print('begin analyze_general')
tasks = [0]
# name paths to files using article name
# name_article = 'nih_awards'
name_src, name_dst, name_summary, name_unique, plot_unique = name_paths(name_article)
# tasks to complete for program
if 0 in tasks: tasks = np.arange(1, 100, 1)
if 1 in tasks: aggregate_articles(name_src, name_dst, name_summary)
if 2 in tasks: add_year(name_dst, name_summary)
if 3 in tasks: unique_values(name_dst, name_unique)
if 4 in tasks: unique_plot(name_unique, plot_unique)
if 5 in tasks: articles_per_year(name_article, name_dst)
if 6 in tasks: plot_articles_per_year(name_article)
print('completed analyze_general')
def articles_per_year(name_article, name_src):
"""
count number of articles per year
save as dataframe
"""
# retrieve list of all articles with metadata
df = pd.read_csv(os.path.join(retrieve_path(name_src), 'agg_with_year' + '.csv'))
df = clean_dataframe(df)
print(df.columns)
df_annual = pd.DataFrame()
df_annual['years'] = np.arange(min(list(df['ref_year'])) , max(list(df['ref_year']))+1, 1)
df_annual['pdf'] = [0]*len(list(df_annual['years']))
df_annual['cdf'] = [0]*len(list(df_annual['years']))
#df_annual = df_annual.sort_values('years', ascending=True)
print(df_annual)
for i in range(len(list(df_annual['years']))):
year = float(df_annual.loc[i, 'years'])
df_yearly = df[(df.ref_year == year)]
df_annual.loc[i,'pdf'] = len(list(df_yearly.loc[:,'ref_year']))
df_yearly = df[(df.ref_year <= year)]
df_annual.loc[i,'cdf'] = len(list(df_yearly.loc[:,'ref_year']))
print(df_annual)
total_articles_annual = str('df_' + name_article + '_total_annual_counts')
df_annual = clean_dataframe(df_annual)
df_annual.to_csv(os.path.join(retrieve_path(total_articles_annual), total_articles_annual + '.csv'))
print(df.columns)
def plot_articles_per_year(name_article):
"""
plot annual counts
"""
total_articles_annual = str('df_' + name_article + '_total_annual_counts')
file = os.path.join(retrieve_path(total_articles_annual), total_articles_annual + '.csv')
df = clean_dataframe(pd.read_csv(file))
print(df.columns)
print('file = ' + str(file))
plt.close('all')
plt.figure(figsize=(10, 10))
fig, axes = plt.subplots(2, 1, figsize=(10, 10))
yy_lists = [list(df['pdf']) , list(df['cdf'])]
for yy in yy_lists:
plt.subplot(2, 1, yy_lists.index(yy)+1)
xx = list(df['years'])
colorMarker, colorEdge, colorTransparency = find_color(6)
plt.scatter(xx, yy, s=40, color=colorMarker, edgecolor=colorEdge, alpha=colorTransparency)
plt.xlabel('Year')
plt.yscale('log')
# plt.legend(loc ="upper left")
if yy_lists.index(yy) == 0:
plt.ylabel('Annual Number of Articles (mean = ' + str(round(statistics.mean(yy),2)) + ')')
plt.title(str(sum(yy)) + ' ' + name_article + ' included in plot.')
elif yy_lists.index(yy) == 1:
plt.ylabel('Cumulative Number of Articles (mean = ' + str(round(statistics.mean(yy),2)) + ')')
plt.title(str(yy[-1]) + ' ' + name_article + ' included in plot.')
# save plot
plot_total_articles_annual = str('plot_' + name_article + '_total_annual_counts')
plot_dst = os.path.join(retrieve_path(plot_total_articles_annual), plot_total_articles_annual + '.png')
plt.savefig(plot_dst, dpi = 600, edgecolor = 'w')
plt.close('all')
def unique_plot(name_src, name_dst):
"""
"""
for file in os.listdir(retrieve_path(name_src)):
file_split = file.split('.')
file_name = file_split[0]
df_src = os.path.join(retrieve_path(name_src), file)
df = pd.read_csv(df_src)
xx = list(df['terms'])
yy = list(df['counts'])
print('file_name = ' + file_name)
if str(xx[0]).isnumeric() and str(xx[1]).isnumeric():
print('xx[0:2]')
print(xx[0:2])
print('yy[0:2]')
print(yy[0:2])
plt.close('all')
plt.figure(figsize=(16, 6))
for i in range(len(xx)):
if str(xx[i]).isnumeric() and str(yy[i]).isnumeric():
x = [float(xx[i])]
y = [float(yy[i])]
colorMarker, colorEdge, colorTransparency = find_color(6)
plt.scatter(x, y, s=40, color=colorMarker, edgecolor=colorEdge, alpha=colorTransparency)
# save the plot
# print('name_dst: ' + str(name_dst))
# print('file_name: ' + str(file_name))
plot_dst = os.path.join(retrieve_path(name_dst), file_name + '.png')
# print('plt save: ' + str(plot_dst))
plt.xlabel(file_name)
plt.ylabel('counts')
name_src_split = name_src.split('_')
name_article = str(name_src_split[0] + ' ' + name_src_split[1])
plt.title(str(sum(yy)) + ' ' + name_article + ' included in plot.')
# change to log scale
if file_name == 'Support Year':
plt.yscale('log')
plt.grid()
plt.savefig(plot_dst, dpi = 600, edgecolor = 'w')
print('saved plot: ' + plot_dst)
plt.close('all')
def unique_values(name_src, name_unique):
"""
"""
# read dataframe with ref years
df = pd.read_csv(os.path.join(retrieve_path(name_src), 'agg_with_year' + '.csv'))
for name in df.columns:
if 'Unnamed:' in name or 'index' == name:
continue
terms, counts, percentages = [], [], []
term_list = list(df[name])
# remove leading zeros from numbers
for i in range(len(term_list)):
term = term_list[i]
if '0' in str(term):
term_string = str(term)
if str(term_string[0]) == '0':
try:
term = term.lstrip('0')
term_list[i] = term
except:
term_list[i] = term
for i in range(len(term_list)):
term = term_list[i]
if term not in terms:
count = term_list.count(term)
terms.append(term)
counts.append(count)
for count in counts:
percentages.append(round(100*count/sum(counts),4))
df_counts = pd.DataFrame()
df_counts['terms'] = terms
df_counts['counts'] = counts
df_counts['percentages'] = percentages
df_counts = df_counts.sort_values(by=['percentages'], ascending=False)
        print('name = ' + str(name))
        if '/' in name:
            name = name.replace('/', '_')
            print('name sanitized to = ' + str(name))
df_counts = clean_dataframe(df_counts)
df_counts.to_csv(os.path.join(retrieve_path(name_unique), name + '.csv'))
print('unique counts saved to: ' + str(os.path.join(retrieve_path(name_unique), name + '.csv')))
def add_year(name_src, name_summary):
"""
add year with standard column name to dataset
"""
df = pd.read_csv(os.path.join(retrieve_path(name_src), 'agg' + '.csv'))
years = []
# nih awards reference for year column
if 'nih_award' in str(name_src):
year_col_name = 'Fiscal Year'
years = list(df[year_col_name])
# clinical trials reference for year column
elif 'clinical_trials' in str(name_src):
year_col_name = 'Start Date'
dates = list(df[year_col_name])
years = []
for date in dates:
try:
# print('date = ')
# print(date)
date_split = date.split(' ')
# print('date_split = ')
# print(date_split)
year = date_split[-1]
# print('year = ')
# print(year)
years.append(float(year))
except:
years.append(0)
elif 'nsf_awards' in str(name_src):
years = []
dates = list(df['StartDate'])
for date in dates:
date_split = date.split('/')
year = date_split[-1]
year = float(year)
years.append(year)
# patent reference for year column
elif 'patents' in str(name_src):
for i in range(len(list(df['patent_date']))):
try:
date = list(df['patent_date'])[i]
date_split = date.split(' ')
years.append(float(date_split[-1]))
except:
try:
date = list(df['file_date'])[i]
date_split = date.split(' ')
years.append(float(date_split[-1]))
except:
                    years.append(0)  # keep numeric, consistent with the other branches
elif 'gscholar' in str(name_src):
for info in list(df['publication_info']):
try:
year_findall = re.findall('[0-9]{4}', str(info))
year = year_findall[0]
print('year = ' + str(year))
years.append(int(year))
except:
years.append(0)
print('len(df.iloc[:,0]) = ' + str(len(df.iloc[:,0])))
print('len(years) = ' + str(len(years)))
df_with_year = df
df_with_year['ref_year'] = years
print('length before dropping 0 years = ' + str(len(list(df_with_year['ref_year']))))
df_with_year = df_with_year[(df_with_year.ref_year > 0)]
print('length after dropping 0 years = ' + str(len(list(df_with_year['ref_year']))))
file_name = os.path.join(retrieve_path(name_src), 'agg_with_year' + '.csv')
df_with_year = clean_dataframe(df_with_year)
df_with_year.to_csv(file_name)
summarize(file_name, name_summary)
def aggregate_articles(name_src, name_dst, name_summary):
"""
    aggregate article records from the per-search csv files
    save as a single deduplicated csv
"""
df_agg = pd.DataFrame()
for file in os.listdir(retrieve_path(name_src)):
df_src = os.path.join(retrieve_path(name_src), file)
# print('file = ' + str(file))
# print('path = ' + str(retrieve_path(name_src)))
# print('df_patent = ' + str(df_src))
df = pd.read_csv(df_src)
df_agg = df_agg.append(df)
    print('before dropping duplicates during aggregation: len of all articles = ' + str(len(df_agg.iloc[:,0])))
unique_names = ['Serial Number', 'url', 'publication_info', 'title_link', 'abstract', 'claims', ]
for name in unique_names:
try:
df_agg = df_agg.sort_values(by=['Support Year'], ascending=False)
except:
print('support year column not found')
try:
df_agg = df_agg.drop_duplicates(subset=[name])
except:
            print('no duplicates dropped.')
    print('after dropping duplicates during aggregation: len of all articles = ' + str(len(df_agg.iloc[:,0])))
    # iterate over a copy of the columns because some are deleted in the loop
    for name in list(df_agg.columns):
        if 'year' in name:
            # sort_values with inplace=True returns None, so do not reassign
            df_agg = df_agg.sort_values(by=[name])
        if 'Unnamed: ' in name:
            del df_agg[name]
# save aggregated articles
df_agg = df_agg.reset_index()
# print('df_agg = ')
# print(df_agg)
df_agg = clean_dataframe(df_agg)
file_name = os.path.join(retrieve_path(name_dst), 'agg' + '.csv')
df_agg.to_csv(file_name)
summarize(file_name, name_summary)
def summarize(name_src, name_summary):
"""
"""
df = pd.read_csv(os.path.join(name_src))
    # save a summary of the articles
df_summary = pd.DataFrame()
df_summary['counts'] = [len(list(df.iloc[:,1]))]
for name in df.columns:
df_summary[name] = str((df.iloc[1][name]))
df_summary = df_summary.T
# print('df_summary = ')
# print(df_summary)
df_summary = clean_dataframe(df_summary)
df_summary.to_csv(os.path.join(retrieve_path(name_summary)))
def name_paths(name_article):
"""
provide article type
make the needed files
"""
name_src = str(name_article + '_search')
name_dst = str('df_' + name_article + '_search')
name_summary = str('sum_' + name_article)
name_unique = str(name_article + '_unique')
plot_unique = str(name_article + '_unique_plot')
return name_src, name_dst, name_summary, name_unique, plot_unique
```
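The pdf/cdf bookkeeping above (annual counts plus a running cumulative total) drives several of the plots in this repository. As a sanity check, the same result can be computed on toy data with `value_counts` and `cumsum`; this is an editor's sketch, not part of the repository:
```python
# hedged sketch: annual counts (pdf) and cumulative counts (cdf) on toy years
import pandas as pd

ref_years = pd.Series([2018, 2018, 2019, 2021, 2021, 2021], name="ref_year")
annual = ref_years.value_counts().sort_index().rename("pdf").to_frame()
annual["cdf"] = annual["pdf"].cumsum()
print(annual)
# pdf: 2018 -> 2, 2019 -> 1, 2021 -> 3; cdf: 2, 3, 6
```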
#### File: code/python/a0600_map_maker.py
```python
from bs4 import BeautifulSoup
import datetime
import glob
import json
import lxml
import math
import matplotlib.pyplot as plt
import numpy as np
import os
from os.path import exists
import pandas as pd
from PIL import Image
from serpapi import GoogleSearch
import re
import requests
import time
import urllib.parse
from a0001_admin import clean_dataframe
from a0001_admin import name_paths
from a0001_admin import retreive_categories
from a0001_admin import retrieve_format
from a0001_admin import retrieve_list
from a0001_admin import retrieve_path
from a0001_admin import write_paths
from find_color import find_color
from gif_maker import build_gif
def map_maker():
"""
"""
print('began map_maker')
# List task numbers to complete
tasks = [0]
write_paths()
if 0 in tasks: tasks = np.arange(1, 101, 1)
if 1 in tasks: data_for_js_map()
if 2 in tasks: yearly_map()
if 3 in tasks: build_gif()
#if 3 in tasks: yearly_map_bar()
print('completed map_maker')
def data_for_js_map():
"""
"""
for name_article in retrieve_list('type_article'):
# list compare term files
compare_terms = os.path.join(retrieve_path('term_compare'))
for category in retreive_categories():
# retrieve search terms
f = os.path.join(retrieve_path('term_compare'), category + '.csv')
search_terms = retrieve_list(f)
            # retrieve list of all articles
file_src = str(name_article + '_compare_terms_df')
f = os.path.join(retrieve_path(file_src), category + '.csv')
print('f = ' + str(f))
df = clean_dataframe(pd.read_csv(f))
df = df[(df['ref_lat'] != 0)]
df_js = pd.DataFrame()
df_js['0'] = [np.round(float(i), 6) for i in list(df['ref_lon'])]
df_js['1'] = [np.round(float(i), 6) for i in list(df['ref_lat'])]
df_js['date'] = list(df['StartDate'])
df_js = df_js[(df_js['0'] >= -120)]
df_js = df_js[(df_js['0'] <= -79)]
df_js = df_js[(df_js['1'] >= 25)]
df_js = df_js[(df_js['1'] <= 47)]
js_data = str(name_article + '_js_data')
f = os.path.join(retrieve_path(js_data), category + '.tsv')
df_js.to_csv(f, sep="\t", index=False)
def yearly_map_bar():
"""
    plot, per year, a cumulative location map above a stacked annual-percentage bar chart
"""
# list articles
for name_article in retrieve_list('type_article'):
# list compare term files
compare_terms = os.path.join(retrieve_path('term_compare'))
for category in retreive_categories():
# retrieve search terms
f = os.path.join(retrieve_path('term_compare'), category + '.csv')
search_terms = retrieve_list(f)
            # retrieve list of all articles
file_src = str(name_article + '_compare_terms_df')
f = os.path.join(retrieve_path(file_src), category + '.csv')
print('f = ' + str(f))
df = clean_dataframe(pd.read_csv(f))
df = df[(df['ref_lat'] != 0)]
print('df.columns = ')
print(df.columns)
print('name_article = ' + name_article)
            years = np.arange(int(min(list(df['ref_year']))), int(max(list(df['ref_year']))) + 1, 1)  # +1 so the final year is included
for year in years:
print('year = ' + str(year))
df_temp = df[(df['ref_year'] <= year)]
lats = list(df_temp['ref_lat'])
lons = list(df_temp['ref_lon'])
plt.close('all')
figure, axes = plt.subplots()
plot_row, plot_col, plot_num = 2, 1, 0
plt.rc('font', size=10) #controls default text size
plt.figure(figsize=(plot_col*retrieve_format('fig_wid'), plot_row*retrieve_format('fig_hei')))
#plt.figure(figsize=(plot_col*90, plot_row*45))
plot_num = plot_num +1
plt.subplot(plot_row, plot_col, plot_num)
# add background of the globe
map_path = os.path.join(retrieve_path('blank_map'))
img = plt.imread(map_path)
extent = [-170, 190, -58, 108]
axes.imshow(img, extent=extent, aspect='auto')
label_str = str(len(list(df_temp['ref_year']))) + ' ' + 'all '
num = 8
colorMarker, colorEdge, colorTransparency = find_color(num)
plt.scatter(lons, lats, color=colorMarker, edgecolors=colorEdge, alpha=float(colorTransparency),label=label_str)
for term in search_terms:
#if '|' in term: term = (term.split('|'))[0]
df_term = df_temp[(df_temp[term] > 0)]
lats = list(df_term['ref_lat'])
lons = list(df_term['ref_lon'])
label_str = str(len(list(df_term['ref_year']))) + ' ' + term
num = search_terms.index(term) +1
colorMarker, colorEdge, colorTransparency = find_color(num)
# set sizes based on the reference value
try:
sizes = []
for size in list(df_term['ref_value']):
sizes.append(size+10)
sizes = scale_sizes(sizes)
except:
sizes = [40]*len(lons)
plt.scatter(lons, lats, s=sizes, color=colorMarker, edgecolors=colorEdge, linewidth=float(retrieve_format('markeredgewidth')), alpha=float(colorTransparency),label=label_str)
axes.axis('off')
plt.title(name_article + ' ' + str(int(min(list(df['ref_year'])))) + '-' + str(year))
plt.legend(bbox_to_anchor=(0.2, -0.2), loc ="upper left")
plot_num = plot_num +1
plt.subplot(plot_row, plot_col, plot_num)
file_src = str(name_article + '_compare_terms_annual_count_df')
compare_file_term = str(category + '_percent')
path_src = os.path.join(retrieve_path(file_src), compare_file_term + '.csv')
df_bar = clean_dataframe(pd.read_csv(path_src))
#print('df_bar = ')
#print(df_bar)
#print('df_bar.columns = ')
#print(df_bar.columns)
df_bar = df_bar[(df_bar['cdf_total'] > 0)]
df_bar = clean_dataframe(df_bar)
#print('df_bar = ')
#print(df_bar)
df_bar = df_bar[(df_bar['years'] <= year)]
for term in search_terms:
color_index = search_terms.index(term)
#if '|' in term: term = (term.split('|'))[0]
term_per = str(term + '_percent')
xx = list(df_bar['years'])
yy = list(df_bar[term_per])
offsets = [0] * len(list(xx))
if color_index > 0:
offsets = []
for k in range(len(list(df_bar['years']))):
offset = 0
for j in range(color_index):
#term = term_list[j]
offset = offset + df_bar.loc[k][search_terms[j] + '_percent']
offsets.append(offset)
assert len(offsets) == len(xx)
assert len(offsets) == len(yy)
try:
label_term = str(round(100*yy[-1],2)) + '% ' + term
except:
label_term = str(0) + '% ' + term
colorMarker, colorEdge, colorTransparency = find_color(color_index)
plt.bar(xx, yy, width=1.0, bottom=offsets, align='center', color=colorMarker,label = label_term)
plt.title(name_article + ' Percent ' + str(int(sum(list(df_bar['annual_total'])))))
plt.xlabel('year')
plt.xlim([min(years), max(years)])
plt.ylabel(term_per)
# plt.yscale('log')
plt.legend(bbox_to_anchor=(0.2, -0.2), loc='upper left')
#plt.legend(bbox_to_anchor=(1, 0.8), loc='upper left')
file_dst_name = str(name_article + '_map_bar_png')
df_file = os.path.join(retrieve_path(file_dst_name), category + '_' + str(year) + '.png')
plt.savefig(df_file, bbox_inches='tight', dpi=150, edgecolor = 'w')
plt.close('all')
def build_gif():
"""
"""
print('building gif')
# list articles
for name_article in retrieve_list('type_article'):
for map_type in ['_map_png', '_map_bar_png']:
file_dst_name = str(name_article + map_type)
df_src = os.path.join(retrieve_path(file_dst_name))
# list compare term files
compare_terms = os.path.join(retrieve_path('term_compare'))
for category in retreive_categories():
png_list = []
for file in os.listdir(df_src):
if category not in str(file): continue
df_src = os.path.join(retrieve_path(file_dst_name), file)
png_list.append(df_src)
#print('png_list = ')
#print(png_list)
#assert len(png_list) > 1
frames = []
#png_file = os.path.join(path, "*.png")
gif_dst = str(name_article + '_map_gif')
save_file = os.path.join(retrieve_path(gif_dst) , category + '.gif')
print('save_file = ' + str(save_file))
#imgs = glob.glob(png_file)
for i in png_list:
per_complete = round(100*png_list.index(i)/len(png_list),2)
print(name_article + ' ' + category + ' % complete = ' + str(per_complete) )
new_frame = Image.open(i)
frames.append(new_frame)
# Save into a GIF file that loops forever
frames[0].save(save_file, format='GIF',
append_images=frames[1:],
save_all=True,
duration=300, loop=0)
### completed programs ###
def scale_sizes(sizes):
"""
provide list of numbers
scale
"""
scatter_size_min = int(retrieve_format('scatter_size_min'))
    scatter_size_max = int(retrieve_format('scatter_size_max'))
sizes_scaled = []
for size in sizes:
try:
size_min = size + 2
size_scaled = math.log(size_min)
size_scaled = float(size_scaled)
except:
size_scaled = scatter_size_min
size_scaled = float(size_scaled)
size_scaled = scatter_size_max*size_scaled/max(sizes) + scatter_size_min
sizes_scaled.append(size_scaled)
assert len(sizes) == len(sizes_scaled)
return(sizes_scaled)
def yearly_map():
"""
    plot a cumulative world map of article locations, one png per year
"""
# list articles
for name_article in retrieve_list('type_article'):
# list compare term files
compare_terms = os.path.join(retrieve_path('term_compare'))
for category in retreive_categories():
# retrieve search terms
f = os.path.join(retrieve_path('term_compare'), category + '.csv')
search_terms = retrieve_list(f)
            # retrieve list of all articles
file_src = str(name_article + '_compare_terms_df')
f = os.path.join(retrieve_path(file_src), category + '.csv')
print('f = ' + str(f))
df = clean_dataframe(pd.read_csv(f))
df = df[(df['ref_lat'] != 0)]
print('df.columns = ')
print(df.columns)
print('name_article = ' + name_article)
            years = np.arange(int(min(list(df['ref_year']))), int(max(list(df['ref_year']))) + 1, 1)  # +1 so the final year is included
for year in years:
print('year = ' + str(year))
df_temp = df[(df['ref_year'] <= year)]
lats = list(df_temp['ref_lat'])
lons = list(df_temp['ref_lon'])
plt.close('all')
figure, axes = plt.subplots()
# add background of the globe
map_path = os.path.join(retrieve_path('blank_map'))
img = plt.imread(map_path)
extent = [-170, 190, -58, 108]
axes.imshow(img, extent=extent)
label_str = str(len(list(df_temp['ref_year']))) + ' ' + 'all '
num = 8
colorMarker, colorEdge, colorTransparency = find_color(num)
plt.scatter(lons, lats, color=colorMarker, edgecolors=colorEdge, alpha=float(colorTransparency),label=label_str)
for term in search_terms:
#if '|' in term: term = (term.split('|'))[0]
df_term = df_temp[(df_temp[term] > 0)]
lats = list(df_term['ref_lat'])
lons = list(df_term['ref_lon'])
label_str = str(len(list(df_term['ref_year']))) + ' ' + term
num = search_terms.index(term) +1
colorMarker, colorEdge, colorTransparency = find_color(num)
# set sizes based on the reference value
try:
sizes = []
for size in list(df_term['ref_value']):
sizes.append(size+10)
sizes = scale_sizes(sizes)
except:
sizes = [40]*len(lons)
plt.scatter(lons, lats, s=sizes, color=colorMarker, edgecolors=colorEdge, linewidth=float(retrieve_format('markeredgewidth')), alpha=float(colorTransparency),label=label_str)
axes.axis('off')
plt.title(category + ' ' + name_article + ' ' + str(int(min(list(df['ref_year'])))) + '-' + str(year))
plt.legend(bbox_to_anchor=(0.2, -0.2), loc ="upper left")
file_dst_name = str(name_article + '_map_png')
#print('file_dst_name = ')
#print(file_dst_name)
df_file = os.path.join(retrieve_path(file_dst_name), category + '_' + str(year) + '.png')
#print('df_file = ')
#print(df_file)
plt.savefig(df_file, bbox_inches='tight', dpi=150, edgecolor = 'w')
plt.close('all')
if __name__ == "__main__":
    map_maker()  # this file defines map_maker(), not main()
```
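The gif-assembly step in `build_gif` reduces to Pillow's `Image.save(..., save_all=True)`. Here is a minimal standalone sketch (the `maps/*.png` glob and output name are hypothetical):
```python
# hedged sketch: stitch sorted pngs into a looping gif with Pillow
import glob
from PIL import Image

frames = [Image.open(f) for f in sorted(glob.glob("maps/*.png"))]
if frames:
    frames[0].save("maps.gif", format="GIF", append_images=frames[1:],
                   save_all=True, duration=300, loop=0)
```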
#### File: code/python/acquire_gscholar.py
```python
from bs4 import BeautifulSoup
import datetime
import json
import lxml
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
from serpapi import GoogleSearch
import re
import requests
import time
from a0001_retrieve_meta import retrieve_path
from a0001_retrieve_meta import retrieve_datetime
"""
from c0010_analyze_general import analyze_general
from c0010_analyze_general import name_paths
from c0020_compare_terms import compare_terms
from scrape_gscholar import scrape_gscholar
from find_color import find_color
"""
def acquire_gscholar(term):
"""
"""
print('beginning acquire_gscholar')
num_list = np.arange(0, 8000000, 1, dtype=int)
scrape_lxml(term, num_list)
scrape_lxml_per_article(term)
print('completed acquire_gscholar')
def scrape_lxml(term, num_list):
"""
scrape html from website
save as lxml
"""
headers = {
'User-agent':
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}
proxies = {
'http': os.getenv('HTTP_PROXY') # or just type proxy here without os.getenv()
}
for num in num_list:
print('num = ' + str(num))
url = 'https://scholar.google.com/scholar?'
url = url + 'start=' + str(int(num*10))
url = url + '&q=' + term
url = url + '&hl=en&as_sdt=0,5'
print('url = ')
print(url)
print('Wait: ' + str(retrieve_datetime()))
time.sleep(60)
html = requests.get(url, headers=headers, proxies=proxies).text
print('Wait: ' + str(retrieve_datetime()))
soup = BeautifulSoup(html, 'lxml')
print('soup = ')
print(soup)
path = retrieve_path('lxml_gscholar')
file = os.path.join(path, term + ' ' + str(num) + ' ' + str(retrieve_datetime()) + '.xml')
print('lxml file = ' + str(file))
f = open(file , "w")
f.write(str(soup))
f.close()
def scrape_lxml_per_article(term):
"""
"""
path = retrieve_path('df_gscholar_query')
    df_file = os.path.join(path, term + '.csv')
df = pd.read_csv(df_file)
print(df)
title_link = list(df['title_link'])
for url in title_link:
row_number = df[df['title_link'] == url].index
print('row_number = ')
print(row_number)
if len(list(row_number)) < 1:
continue
row_number = list(row_number)[0]
print('row_number = ' + str(row_number))
print('url = ')
print(url)
print('Wait: ' + str(retrieve_datetime()))
time.sleep(60)
html = requests.get(url).text
soup = BeautifulSoup(html, "html.parser")
path = retrieve_path('lxml_gscholar_article')
        file = os.path.join(path, term + ' ' + str(row_number) + ' ' + str(retrieve_datetime()) + '.xml')
print('article lxml file = ' + str(file))
f = open(file , "w")
f.write(str(soup))
f.close()
```
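`scrape_lxml` pages through Google Scholar ten results at a time via the `start` parameter. The URL construction alone looks like this (editor's sketch, no requests issued):
```python
# hedged sketch: paginated scholar urls, mirroring scrape_lxml
from urllib.parse import urlencode

def scholar_url(term: str, page: int) -> str:
    query = {"start": page * 10, "q": term, "hl": "en", "as_sdt": "0,5"}
    return "https://scholar.google.com/scholar?" + urlencode(query)

print(scholar_url("mesenchymal", 0))  # first page
print(scholar_url("mesenchymal", 1))  # start=10, second page
```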
|
{
"source": "jesnyder/publicationMap",
"score": 3
}
|
#### File: code/python/c0001_retrieve_ref.py
```python
import os
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def retrieve_ref(variableName):
ref_path = os.path.join( 'metadata')
ref_file = os.path.join(ref_path, 'ref.csv')
df = pd.read_csv(ref_file)
variableNames = list(df['name'])
variableValues = list(df['value'])
value = 0
for i in range(len(variableNames)):
if variableName == variableNames[i]:
value = variableValues[i]
break
# print('value = ' + str(value))
return value
```
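`retrieve_ref` is a linear scan over a two-column name/value csv. Assuming the same file layout, the lookup can be written more directly with a dict (editor's sketch):
```python
# hedged sketch: dict-based equivalent of retrieve_ref, same ref.csv layout
import os
import pandas as pd

def retrieve_ref_fast(variable_name, ref_file=os.path.join('metadata', 'ref.csv')):
    df = pd.read_csv(ref_file)
    return dict(zip(df['name'], df['value'])).get(variable_name, 0)
```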
#### File: code/python/c0102_search_clinic.py
```python
from bs4 import BeautifulSoup
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pypatent
import requests
def search_clinic():
"""
"""
print("running search_clinic")
url = 'https://clinicaltrials.gov/ct2/search/advanced'
search_terms = ['mesenchymal', 'exosome']
df = pd.DataFrame()
for term in search_terms:
print('search ' + term + ' in ' + str(url) )
clinic_path = os.path.join('searchResults', 'clinical')
clinic_file = os.path.join(clinic_path, term + '.csv')
df = df.append(pd.read_csv(clinic_file), ignore_index = True)
df = df.drop_duplicates(subset ="NCT Number")
df = df.sort_values(by=['Start Date'], ascending=[False])
df = df.reset_index()
del df['Rank']
del df['index']
cols = df.columns
print(cols)
print(df)
clinical_file = os.path.join(clinic_path, 'clinicalRetrieved' + '.csv')
df.to_csv(clinical_file)
print('clinicalMSC saved: ' + clinical_file)
print("completed search_clinic")
```
#### File: code/python/c0600_build_mp4.py
```python
from PIL import Image
import glob
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def build_mp4():
"""
"""
print("running main")
```
|
{
"source": "jesnyder/scraped_guts",
"score": 3
}
|
#### File: python/archive/a0100_acquire_datasets.py
```python
import numpy as np
import os
import pandas as pd
import wikipedia
from a0001_retrieve_meta import retrieve_path
from acquire_nsf_awards import acquire_nsf_awards
from acquire_nih_awards import acquire_nih_awards
from acquire_clinical_trials import acquire_clinical_trials
from acquire_patents import acquire_patents
from acquire_gscholar import acquire_gscholar
def acquire_datasets():
"""
Objective:
Scrape information from websites
Tasks:
1. Acquire NIH Awards
2. Acquire NSF Awards
3. Acquire Clinical Trials
4. Acquire US Patent Office
5. Acquire Peer Reviewed Literature
"""
tasks = [6]
print('beginning main')
if 0 in tasks: tasks = np.arange(1, 101, 1)
if 1 in tasks: acquire_nsf_awards()
if 2 in tasks: acquire_nih_awards()
if 3 in tasks: acquire_clinical_trials()
# list search terms
df = pd.read_csv(os.path.join(retrieve_path('search_terms')))
for term in list(df['term']):
if 4 in tasks: acquire_patents(term)
if 5 in tasks: acquire_gscholar(term)
if 6 in tasks: acquire_wikipedia(term)
print('completed main')
def acquire_wikipedia(term):
"""
"""
result = wikipedia.summary(term)
print(result)
result = wikipedia.summary('allogeneic')
print(result)
result = wikipedia.summary('autologous')
print(result)
```
#### File: code/python/query_patents.py
```python
from uspto.peds.client import UsptoPatentExaminationDataSystemClient
from uspto.peds.tasks import UsptoPatentExaminationDataSystemDownloader
import datetime
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import pypatent
import statistics
from selenium import webdriver
from a0001_admin import retrieve_format
from a0001_admin import retrieve_path
from a0001_admin import retrieve_datetime
from a0001_admin import retrieve_list
from a0001_admin import name_paths
from scrape_gscholar import check_scraped
def query_patents(name_dataset, term, result_limits):
"""
"""
print('beginning query_patents')
# Try pypatent
# https://pypi.org/project/pypatent/
conn = pypatent.WebConnection(use_selenium=False, user_agent='Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36')
#driver = webdriver.Firefox() # Requires geckodriver in your PATH
#conn = pypatent.WebConnection(use_selenium=True, selenium_driver=driver)
for term in retrieve_list('search_terms'):
limits = [50, 100, 200, 500, 1000, 2000, 4000, 6000, 8000, 10000]
for result_limit in limits:
print('term = ' + term)
print('result_limit = ' + str(result_limit))
result_limit = int(result_limit)
print('result_limit = ' + str(result_limit) + ' ' + str(retrieve_datetime()))
print(term + ' searching with pypatent: began ' + str(retrieve_datetime()))
#query_term = str('abst=' + term + ' , ' + 'aclm=' + term + ' , ' + 'spec=' + term + ' , ' + ' isd=' + str(2020) )
#query_term = str('isd=' + str(year) )
#query_term = str('aclm/' + term + ' and ' + ' isd/' + str(year) )
#query_term = str(term + ' and ' + ' isd/' + str(year) )
# did not work query_term = str(term + ', ' + ' isd=' + str(year) )
query_term = str(term)
# did not work query_term = str('aclm=' + term)
print('query_term = ' + str(query_term))
if check_scraped('patents', term, 0, result_limit) == True:
print('json found.')
continue
df = pd.DataFrame()
df = pypatent.Search(term, results_limit=result_limit, get_patent_details=True , web_connection=conn).as_dataframe()
# df = pypatent.Search((query_term), web_connection=conn).as_dataframe()
# *** [Makefile:6: pythonanalysis] Error 1 df = pypatent.Search(query_term).as_dataframe()
# df = pypatent.Search(query_term, web_connection=conn).as_dataframe()
print(term + ' searching with pypatent: ended ' + str(retrieve_datetime()))
print('result_limit = ' + str(result_limit) + ' ' + str(retrieve_datetime()))
print(' df = ')
print(df)
print('len(df[url]) = ' )
print(len(list(df['url'])))
print('column names = ')
print(df.columns.values.tolist())
name_src, name_dst, name_summary, name_unique, plot_unique = name_paths(name_dataset)
df_patent = os.path.join(retrieve_path(name_src), term + ' ' + str(result_limit) + ' ' + str(retrieve_datetime()) + '.csv')
print('file saved to df_patent = ' + str(df_patent))
df.to_csv(df_patent)
print(df.iloc[:,0])
print(list(df.iloc[:,0]))
print(len(list(df.iloc[:,0])))
if len(list(df.iloc[:,0])) < result_limit:
break
df = pd.DataFrame()
print('completed query_patents')
```
|
{
"source": "Jeson1g/Dawson",
"score": 3
}
|
#### File: Dawson/note/decorator.py
```python
def add_params(params):
def my_decorator(func):
def wrapper(*args,**kwargs):
print("装饰器{}".format(params))
return func(*args,**kwargs)
return wrapper
return my_decorator
if __name__ == '__main__':
@add_params(1)
def test1(num,num2):
return num + num2
nums = test1(1,2)
print(nums)
```
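For reference, `@add_params(1)` desugars into two explicit calls: `add_params(1)` returns `my_decorator`, which then wraps the function. The sketch below spells that out (it assumes `add_params` from the file above is in scope):
```python
# the @add_params(1) form above, written as explicit calls
def test2(num, num2):
    return num + num2

decorator = add_params(1)   # outer call returns the decorator
test2 = decorator(test2)    # decorator returns wrapper, closing over params=1
print(test2(1, 2))          # prints "decorator 1" then 3
```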
#### File: Dawson/note/Sington.py
```python
class Singleclass(object):
"""单例模式"""
def __init__(self,num):
self.num = num
def __new__(cls, *args, **kwargs):
if not hasattr(cls,'_isinstance'):
cls._isinstance = super().__new__(cls)
return cls._isinstance
def __str__(self):
return "my_num{}".format(self.num)
if __name__ == '__main__':
s1 = Singleclass(3)
s2 = Singleclass(2)
print(s1)
print(s2)
```
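Note that caching the instance in `__new__` does not prevent `__init__` from running again, so `num` is overwritten and both prints above show `my_num2`. If first-call state should win, a common guard is an `_initialized` flag (editor's sketch):
```python
# hedged sketch: singleton whose state is initialised only once
class SingleOnce:
    def __new__(cls, *args, **kwargs):
        if not hasattr(cls, '_instance'):
            cls._instance = super().__new__(cls)
        return cls._instance

    def __init__(self, num):
        if not hasattr(self, '_initialized'):
            self.num = num
            self._initialized = True

a, b = SingleOnce(3), SingleOnce(2)
print(a.num, b.num, a is b)  # 3 3 True
```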
#### File: Dawson/sort_algorithm/03_binary_tree.py
```python
class Node(object):
"""结点类"""
def __init__(self, item):
self.item = item
self.lchild = None
self.rchild = None
class BinaryTree(object):
"""二叉树"""
def __init__(self, node=None):
self.root = node
def add(self, item):
"""
        add a node, filling positions in breadth-first order
:param item:
:return:
"""
if self.root is None:
self.root = Node(item)
else:
queue = []
queue.append(self.root)
while len(queue) > 0:
node = queue.pop(0)
if not node.lchild:
node.lchild = Node(item)
return
else:
queue.append(node.lchild)
if not node.rchild:
node.rchild = Node(item)
return
else:
queue.append(node.rchild)
def breadh_travel(self):
"""广度优先遍历"""
if self.root is None:
return
queue = []
queue.append(self.root)
while len(queue) > 0:
node = queue.pop(0)
print(node.item, end=" ")
if node.lchild:
queue.append(node.lchild)
if node.rchild:
queue.append(node.rchild)
def preorder_travel(self, root):
"""先序 根 左 右"""
if root:
print(root.item, end=" ")
self.preorder_travel(root.lchild)
self.preorder_travel(root.rchild)
def inorder_travel(self, root):
"""中序 左 根 右"""
if root:
self.inorder_travel(root.lchild)
print(root.item, end=" ")
self.inorder_travel(root.rchild)
def postorder_travel(self, root):
"""后序 左 右 根"""
if root:
self.postorder_travel(root.lchild)
self.postorder_travel(root.rchild)
print(root.item, end=" ")
if __name__ == '__main__':
tree = BinaryTree()
tree.add(0)
tree.add(1)
tree.add(2)
tree.add(3)
tree.add(4)
tree.add(5)
tree.add(6)
tree.add(7)
tree.add(8)
tree.add(9)
tree.breadh_travel()
print("")
tree.preorder_travel(tree.root)
print("")
tree.inorder_travel(tree.root)
print("")
tree.postorder_travel(tree.root)
print("")
```
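For the level-order insertion of 0 through 9 above, the `__main__` block prints: breadth-first `0 1 2 3 4 5 6 7 8 9`, preorder `0 1 3 7 8 4 9 2 5 6`, inorder `7 3 8 1 9 4 0 5 2 6`, and postorder `7 8 3 9 4 1 5 6 2 0` (worked out by hand from the traversal definitions).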
|
{
"source": "JesonZhang822/CarND-Behavioral-Cloning-P3",
"score": 3
}
|
#### File: JesonZhang822/CarND-Behavioral-Cloning-P3/model.py
```python
import pandas as pd
import cv2
import math
import numpy as np
import random
import tensorflow as tf
from scipy.stats import norm
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten,Lambda
from keras.layers import Conv2D, MaxPooling2D,Cropping2D
from keras.optimizers import SGD
from keras import regularizers
from keras import backend as K
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
import sklearn
# get csv data
data_path = './data/'
df = pd.read_csv('./data/driving_log.csv')
# sample data
sample_data = df[['center','left','right','steering']]
n_train = len(sample_data)
# cameras and angle offset
camera_pos = ['left','center','right']
angle_offset = {'center':0,'left':0.2,'right':0.2}
# image shape
x_image = (160,320,3)
# training data
X_train_data = []
y_train_data = []
def EqualizeHist_brightness(image):
    # applies histogram equalization on the V channel of HSV
image_HSV = cv2.cvtColor(image,cv2.COLOR_RGB2HSV)
image_HSV[:,:,2] = cv2.equalizeHist(image_HSV[:,:,2])
image = cv2.cvtColor(image_HSV,cv2.COLOR_HSV2RGB)
return image
# get the angle of camera images
def get_angle(camera_position,center_angle,image_index):
if camera_position == 'left':
#left camera
left_angle_tan = angle_offset['left'] + math.tan(center_angle)
angle = math.atan(left_angle_tan)
elif camera_position == 'right':
#right camera
right_angle_tan = math.tan(center_angle) - angle_offset['right']
angle = math.atan(right_angle_tan)
else:
angle = center_angle
if angle >= 1.0 :
angle = 1.0
elif angle <= -1.0:
angle = -1.0
return angle
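# editor's note (sketch, not from the source): with the 0.2 tan offsets above,
# a centred wheel (center_angle = 0) maps to atan(0.2) ~= +0.197 rad for the
# left camera and atan(-0.2) ~= -0.197 rad for the right camera, clipped to [-1, 1]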
# get training data
def get_data():
for i in range(n_train):
for j in range(len(camera_pos)):
image_name = data_path + sample_data[camera_pos[j]][i].strip()
center_angle = sample_data['steering'][i]
angel = get_angle(camera_pos[j],center_angle,i)
X_train_data.append(image_name)
y_train_data.append(angel)
return len(y_train_data)
# get sample weights
def get_weight(y_train,num_bins=10):
weights_bin = np.zeros(num_bins)
weights = np.zeros(len(y_train))
# Historgram and Gaussian distribution
nums,bins = np.histogram(y_train,num_bins)
prob = norm.pdf(bins,0,0.8)
# weight of each bin
for i in range(num_bins):
if nums[i]:
weights_bin[i] = prob[i+1]
else :
weights_bin[i] = 0
nums[i] = 1
#weight of each training data
weights_bin = weights_bin / np.sum(weights_bin)
weights_bin = weights_bin / nums
bin_index = np.digitize(y_train,bins)
for i in range(len(y_train)):
if bin_index[i] > num_bins :
bin_index[i] -= 1
weights[i] = weights_bin[bin_index[i]-1]
return weights,prob
# image generator
def generator(X_train,y_train,batch_size = 32,augment = False):
num_sample = len (y_train)
X_train_index = range(num_sample)
#get the weight of each sample
weights = np.zeros(len(y_train),dtype = np.float32)
weights,_ = get_weight(y_train,num_bins=50)
while True:
X_train_index,X_weights = sklearn.utils.shuffle(X_train_index,weights)
#generate data for each batch
for offset in range(0,num_sample,batch_size):
# select a batch samples base on the weight of each sample
X_batch_index = np.random.choice(X_train_index,batch_size,replace=True,p=X_weights)
images = np.zeros((len(X_batch_index),160,320,3),dtype = np.float32)
angles = np.zeros((len(X_batch_index),),dtype = np.float32)
for i in range(len(X_batch_index)):
image_index = X_batch_index[i]
# original data
image_name = X_train[image_index]
image = cv2.imread(image_name)
image = cv2.cvtColor(image,cv2.COLOR_BGR2RGB)
angle = y_train[image_index]
# augment data
                if augment:
                    # image and angle are already loaded above; only apply random augmentations
                    # applies histogram equalization on the V channel of HSV
if (np.random.choice([0,1]) == 0):
image = EqualizeHist_brightness(image)
# randomly flip
if (np.random.choice([0,1]) == 0):
image = cv2.flip(image,1)
angle = - angle
images[i] = image
angles[i] = angle
yield (images,angles)
# build the model (Nvidia architecture)
def build_model():
model = Sequential()
#Cropping
model.add(Cropping2D(cropping = ((55,25),(0,0)),input_shape = x_image))
#resize images
model.add(Lambda(lambda x: tf.image.resize_images(x,(66,200),0)))
#normalize the image data
model.add(Lambda(lambda x: x/255.0 - 0.5))
model.add(Conv2D(24, (5, 5),strides=(2, 2), kernel_initializer='TruncatedNormal',activation='relu',kernel_regularizer=regularizers.l2(0.01)))
model.add(Conv2D(36, (5, 5),strides=(2, 2), kernel_initializer='TruncatedNormal',activation='relu',kernel_regularizer=regularizers.l2(0.01)))
model.add(Conv2D(48, (5, 5),strides=(2, 2), kernel_initializer='TruncatedNormal',activation='relu',kernel_regularizer=regularizers.l2(0.01)))
model.add(Conv2D(64, (3, 3), kernel_initializer='TruncatedNormal',activation='relu',kernel_regularizer=regularizers.l2(0.01)))
model.add(Conv2D(64, (3, 3), kernel_initializer='TruncatedNormal',activation='relu',kernel_regularizer=regularizers.l2(0.01)))
model.add(Flatten())
model.add(Dense(100,activation='relu'))
model.add(Dense(50,activation='relu'))
model.add(Dense(10,activation='relu'))
model.add(Dense(1,activation = 'tanh'))
model.compile(loss = 'mean_squared_error',optimizer = 'adam')
return model
if __name__=="__main__":
num_data = get_data()
print ("Num of data : ",num_data)
sample_data = np.stack((X_train_data,y_train_data),axis = 1)
#split sample data to training data and validation data
sample_data = shuffle(sample_data,random_state = 8)
train_data,validation_data = train_test_split(sample_data,test_size = 0.2)
#training data
X_train = train_data[:,0]
y_train = np.float64(train_data[:,1])
#validation data
X_valid = validation_data[:,0]
y_valid = np.float64(validation_data[:,1])
train_gen = generator(X_train,y_train,batch_size = 64,augment = True)
valid_gen = generator(X_valid,y_valid,batch_size = 64,augment = False)
print ("Number of training :",len(X_train))
print ("Number of validation :",len(X_valid))
model = build_model()
history_object = model.fit_generator(generator=train_gen,steps_per_epoch = 2000,epochs = 10,validation_data = valid_gen,validation_steps = 400,verbose=1)
model.save('model.h5')
print ("Saved model !!")
```
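The generator's balancing trick is to draw batches with `np.random.choice(..., p=weights)`, where the weights come from an inverse histogram of steering angles so that rare angles are sampled more often. This editor's sketch isolates that step on toy data (not the project's data):
```python
# hedged sketch: inverse-frequency sampling of steering angles, toy data
import numpy as np

angles = np.array([0.0, 0.0, 0.0, 0.0, 0.3, -0.5])
counts, bins = np.histogram(angles, bins=3)
bin_idx = np.clip(np.digitize(angles, bins) - 1, 0, len(counts) - 1)
weights = 1.0 / counts[bin_idx]   # rare bins get proportionally larger weight
weights /= weights.sum()          # probabilities must sum to 1
batch = np.random.choice(len(angles), size=4, replace=True, p=weights)
print(batch)  # indices 4 and 5 (the rare angles) appear far more often than chance
```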
|
{
"source": "jesopo/curite",
"score": 2
}
|
#### File: curite/curite/httpd.py
```python
import asyncio, traceback
from argparse import ArgumentParser
from asyncio import StreamReader, StreamWriter
from typing import List
from urllib.parse import unquote as url_unquote
from async_timeout import timeout as timeout_
from irctokens import build
from ircrobots import Bot
from ircrobots.matching import Response, Nick, Folded, Formatless, SELF
from ircrobots.formatting import strip as format_strip
from .config import Config
async def _headers(
reader: StreamReader
) -> List[str]:
headers: List[bytes] = []
buffer = b""
while True:
data = await reader.read(2048)
if data:
buffer += data
headers.extend(buffer.split(b"\r\n"))
buffer = headers.pop(-1)
if b"" in headers:
break
else:
raise ConnectionError("client closed connection")
return headers
NICKSERV = Nick("NickServ")
async def _verify(
bot: Bot,
account: str,
token: str) -> bool:
success = f"{account} has now been verified."
server = list(bot.servers.values())[0]
async with server.read_lock:
await server.send(build("PRIVMSG", ["NickServ", f"VERIFY REGISTER {account} {token}"]))
verify_line = await server.wait_for({
Response("NOTICE", [SELF, Formatless(Folded(f"{account} is not awaiting verification."))], source=NICKSERV),
Response("NOTICE", [SELF, Formatless(Folded(f"verification failed. invalid key for {account}."))], source=NICKSERV),
Response("NOTICE", [SELF, Formatless(Folded(f"{account} is not registered."))], source=NICKSERV),
Response("NOTICE", [SELF, Formatless(Folded(success))], source=NICKSERV)
})
verify_msg = format_strip(verify_line.params[1])
return server.casefold_equals(success, verify_msg)
async def run(
bot: Bot,
config: Config):
async def _client(
reader: StreamReader,
writer: StreamWriter
):
try:
async with timeout_(10):
headers = await _headers(reader)
except asyncio.TimeoutError:
print("! header timeout")
return
except ConnectionError as e:
print(f"! header error {str(e)}")
return
method, path, _ = headers[0].decode("ascii").split(" ", 2)
if not method == "POST":
return
path_match = config.path_pattern.search(path)
if not path_match:
return
account = url_unquote(path_match.group("account"))
token = path_match.group("token")
try:
async with timeout_(5):
verified = await _verify(bot, account, token)
except asyncio.TimeoutError:
print("! verify timeout")
return
if verified:
url = config.url_success
else:
url = config.url_failure
data = "\r\n".join([
"HTTP/1.1 302 Moved",
f"Location: {url}"
]).encode("utf8")
# HTTP headers end with an empty line
data += b"\r\n\r\n"
try:
async with timeout_(5):
writer.write(data)
await writer.drain()
writer.close()
await writer.wait_closed()
except Exception as e:
traceback.print_exc()
return
server = await asyncio.start_server(_client, "", config.httpd_port)
async with server:
await server.serve_forever()
```
#### File: curite/curite/__main__.py
```python
import asyncio
from argparse import ArgumentParser
from ircrobots import ConnectionParams
from . import Bot
from .config import Config, load as config_load
from .httpd import run as httpd_run
async def main(config: Config):
bot = Bot()
host, port, tls = config.server
params = ConnectionParams(
config.nickname,
host,
port,
tls,
realname=config.nickname,
password=config.password
)
await bot.add_server(host, params)
await asyncio.wait([
httpd_run(bot, config),
bot.run()
])
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("config")
args = parser.parse_args()
config = config_load(args.config)
asyncio.run(main(config))
```
|
{
"source": "jesopo/dronebl-tools",
"score": 3
}
|
#### File: dronebl-tools/vpngate/check.py
```python
import argparse, os, socket, ssl, sys
from typing import Optional, Set
from OpenSSL import crypto
from dronebl import DroneBL
CERT_CN = "*.opengw.net"
COMMENT = "VPNGate {proto} server (connect verified)"
UDP_SID = os.urandom(8)
UDP_DATA = b"8"
UDP_DATA += UDP_SID
UDP_DATA += b"\x00\x00\x00\x00\x00"
def _cn(ip: str, port: int) -> Optional[bool]:
sock = ssl.wrap_socket(socket.socket())
sock.settimeout(5)
try:
sock.connect((ip, port))
except (socket.timeout, OSError):
return None
cert = sock.getpeercert(True)
sock.close()
x509 = crypto.load_certificate(crypto.FILETYPE_ASN1, cert)
cn = x509.get_subject().CN
return cn == CERT_CN
def _udp(ip: str, port: int) -> bool:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.settimeout(5)
try:
sock.sendto(UDP_DATA, (ip, port))
data, addr = sock.recvfrom(1024)
except socket.timeout:
return False
else:
return data[14:22] == UDP_SID
def _main(
key: str,
master: str):
known: Set[str] = set()
if os.path.isfile(master):
with open(master, "r") as f_read:
lines = f_read.read().split("\n")
lines = list(filter(bool, lines))
known.update(lines)
f_app = open(master, "a")
with open(key) as key_file:
d = DroneBL(key_file.read().strip())
for host in iter(sys.stdin.readline, ""):
key = host.strip()
proto, ip, po = key.rsplit(" ", 2)
port = int(po)
host = f"{ip}:{port}"
if not key in known:
if ((proto == "tcp" and _cn(ip, port)) or
(proto == "udp" and _udp(ip, port))):
look = d.lookup(ip, 19, limit=1)
if not look:
comment = COMMENT.format(proto=proto.upper())
id, msg = d.add(ip, 19, comment, port)
print(f"+ {proto} {host} - {id}")
else:
print(f"- {proto} {host}")
else:
print(f"! {proto} {host}")
known.add(key)
f_app.write(f"{key}\n")
else:
print(f"= {proto} {host}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("key")
parser.add_argument("master")
args = parser.parse_args()
_main(args.key, args.master)
```
|
{
"source": "jesopo/ircchallenge",
"score": 3
}
|
#### File: ircchallenge/ircchallenge/__init__.py
```python
from base64 import b64decode, b64encode
from hashlib import sha1
from typing import Optional
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import padding, rsa
def _read_key(keyfile: str):
with open(keyfile, "r") as f:
return f.read()
def _load_key(
key: str,
password: Optional[str]
) -> rsa.RSAPrivateKey:
password_b: Optional[bytes] = None
if password is not None:
password_b = password.encode("utf8")
return serialization.load_pem_private_key(
key.encode("utf8"),
        password=password_b,
backend=default_backend()
)
def _compute(
ciphertext: bytes,
key: rsa.RSAPrivateKey,
) -> str:
plain = key.decrypt(
ciphertext,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
algorithm=hashes.SHA1(),
label=None
)
)
return b64encode(sha1(plain).digest()).decode("ascii")
class Challenge(object):
def __init__(self,
key: Optional[str] = None,
keyfile: Optional[str] = None,
password: Optional[str] = None):
if key is None:
if keyfile is not None:
key = _read_key(keyfile)
else:
raise ValueError("must provide either 'key' or 'keyfile'")
self._key = _load_key(key, password)
self._buf = ""
def push(self, data: str):
self._buf += data
def finalise(self):
buf = b64decode(self._buf)
self._buf = ""
return _compute(buf, self._key)
```
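A round-trip check of the `Challenge` class: generate a throwaway RSA key, encrypt a secret with OAEP-SHA1 the way an ircd's CHALLENGE would, and confirm that `finalise` returns `b64(sha1(secret))`. This is an editor's usage sketch, not part of the package:
```python
# hedged usage sketch for Challenge: encrypt -> push -> finalise round trip
from base64 import b64encode
from hashlib import sha1
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import padding, rsa
from ircchallenge import Challenge

key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
pem = key.private_bytes(
    serialization.Encoding.PEM,
    serialization.PrivateFormat.PKCS8,
    serialization.NoEncryption(),
).decode("utf8")

secret = b"challenge plaintext"
cipher = key.public_key().encrypt(
    secret,
    padding.OAEP(mgf=padding.MGF1(hashes.SHA1()), algorithm=hashes.SHA1(), label=None),
)

c = Challenge(key=pem)
c.push(b64encode(cipher).decode("ascii"))
assert c.finalise() == b64encode(sha1(secret).digest()).decode("ascii")
```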
|
{
"source": "jesopo/proxyscrape",
"score": 3
}
|
#### File: proxyscrape/proxyscrape/checkers.py
```python
import asyncio, ssl, sys
from argparse import ArgumentParser
from asyncio import StreamReader, StreamWriter
from typing import Dict, List, Optional, Tuple
from anyio import create_task_group
from async_timeout import timeout as timeout_
from . import scrapers, proxies
async def _connect(
ip: str,
port: int,
istls: bool
) -> Tuple[StreamReader, StreamWriter]:
tls: Optional[ssl.SSLContext] = None
if istls:
tls = ssl.SSLContext(ssl.PROTOCOL_TLS)
async with timeout_(30):
reader, writer = await asyncio.open_connection(ip, port, ssl=tls)
return (reader, writer)
async def check_one(
type_: str,
ip: str,
port: int,
target_ip: str,
target_port: int,
token: str
) -> Optional[str]:
tls = False
types: List[str] = []
if type_ in ["http", "https"]:
types = ["http-connect", "http-get"]
else:
types = [type_]
for type_ in types:
try:
reader, writer = await _connect(ip, port, tls)
except Exception:
break
try:
async with timeout_(30):
resp = await proxies.from_type(
type_, reader, writer, target_ip, target_port
)
except Exception as e:
pass
else:
if resp is not None:
lines = resp.decode("latin-1").split("\n")
if (lines[0] == token and
not lines[1] == target_ip):
return lines[1]
else:
pass
finally:
writer.close()
return None
async def check_many(
items: List[Tuple[str, str, int]],
target_ip: str,
target_port: int,
token: str,
concurrent: int,
queue: "Queue[Tuple[str, str, int, str]]"
) -> Dict[Tuple[str, int], str]:
sema = asyncio.Semaphore(concurrent)
async def _check(type: str, ip: str, port: int):
out = await check_one(type, ip, port, target_ip, target_port, token)
if out is not None:
await queue.put((type, ip, port, out))
sema.release()
async with create_task_group() as tg:
for type, ip, port in items:
await sema.acquire()
await tg.spawn(_check, type, ip, port)
```
|
{
"source": "jesopo/sandcatbot",
"score": 3
}
|
#### File: sandcatbot/sandcatbot/__main__.py
```python
import glob, os.path
from argparse import ArgumentParser
from configparser import ConfigParser
from dataclasses import dataclass
from datetime import datetime
from random import Random
from twitter import Api
@dataclass
class Config(object):
consumer_key: str
consumer_secret: str
access_key: str
access_secret: str
files: str
random_seed: str
def _log(message: str):
print(datetime.utcnow().isoformat(), message)
def main(config: Config, state_fname: str):
files = glob.glob(config.files)
Random(config.random_seed).shuffle(files)
if os.path.isfile(state_fname):
with open(state_fname, "r") as state_file:
call_count = int(state_file.readline().strip())
else:
call_count = 0
_log(f"call count {call_count}")
index = call_count % len(files)
fname = files[index]
_log(f"tweeting {fname}")
tweet = Api(
consumer_key =config.consumer_key,
consumer_secret =config.consumer_secret,
access_token_key =config.access_key,
access_token_secret =config.access_secret
)
with open(fname, "rb") as tweet_media:
tweet.PostUpdate("", media=tweet_media)
with open(state_fname, "w") as state_file:
state_file.write(f"{call_count+1}\n")
if __name__ == "__main__":
aparse = ArgumentParser()
aparse.add_argument("config", help="config file")
aparse.add_argument("state", help="file to hold bot state")
args = aparse.parse_args()
with open(args.config, "r") as cfile:
(cparse := ConfigParser()).read_file(cfile)
config = Config(
cparse["twitter"]["consumer-key"],
cparse["twitter"]["consumer-secret"],
cparse["twitter"]["access-key"],
cparse["twitter"]["access-secret"],
os.path.expanduser(cparse["other"]["files"]),
cparse["other"]["random-seed"]
)
main(config, args.state)
```
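The ordering here relies on `Random(seed).shuffle` being deterministic: every invocation reproduces the same permutation, so the persisted `call_count` alone is enough to resume the sequence. A short demonstration (editor's sketch):
```python
# hedged sketch: a seeded shuffle is stable across runs/processes
from random import Random

files = ["a.jpg", "b.jpg", "c.jpg", "d.jpg"]
run1, run2 = list(files), list(files)
Random("sandcat").shuffle(run1)
Random("sandcat").shuffle(run2)
assert run1 == run2  # so files[call_count % len(files)] resumes where it left off
```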
|
{
"source": "jesopo/scpl",
"score": 4
}
|
#### File: scpl/common/util.py
```python
from typing import Iterator, Optional, Sequence
def find_unescaped(
s: str,
c: str
) -> Iterator[int]:
i = 0
while i < len(s):
c2 = s[i]
if c2 == "\\":
i += 1
elif c2 == c:
yield i
i += 1
def find_unused_delimiter(
s: str,
chars: Sequence[str]
) -> Optional[str]:
for char in chars:
try:
next(find_unescaped(s, char))
except StopIteration:
return char
else:
return None
def with_delimiter(
s: str,
chars: Sequence[str]
) -> str:
if unused_delim := find_unused_delimiter(s, chars):
delim = unused_delim
else:
delim = chars[0]
found = find_unescaped(s, delim)
rdelim = f"\\{delim}"
for index in reversed(list(found)):
s = s[:index] + rdelim + s[index+1:]
return f"{delim}{s}{delim}"
```
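Two hand-worked calls illustrate `with_delimiter`'s fallback behaviour (editor's sketch; assumes the `scpl` package is importable):
```python
# hedged usage sketch for with_delimiter
from scpl.common.util import with_delimiter

print(with_delimiter("foo/bar", "/|"))  # "/" already used -> "|foo/bar|"
print(with_delimiter("foo/bar", "/"))   # only "/" offered -> escaped: /foo\/bar/
```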
#### File: scpl/lexer/lexer.py
```python
from collections import deque
from typing import Deque, List, Optional, Tuple
from ..common import *
from .tokens import *
class LexerError(Exception):
def __init__(self,
index: int,
error: str):
self.index = index
super().__init__(error)
def __str__(self) -> str:
return f"[c{self.index}] {super().__str__()}"
class LexerUnfinishedError(LexerError):
def __init__(self, token: Token):
self.token = token
super().__init__(token.index, f"unfinished {type(token).__name__}")
def tokenise(expression: str) -> Deque[Token]:
char_stream = deque(expression)
# empty string used to force all tokens to invalidate/finish at the end of
# the expression
char_stream.append("")
# +1 to account for the above empty string
expression_length = len(expression)+1
tokens_finished: Deque[Token] = deque()
tokens_unfinished: List[Token] = []
token_text = ""
token_last: Optional[Token] = None
while char_stream:
char_index = expression_length - len(char_stream)
if not tokens_unfinished:
token_text = ""
tokens_unfinished = [
TokenRegex(char_index, token_last),
TokenString(char_index, token_last),
TokenIPv4(char_index, token_last),
TokenIPv6(char_index, token_last),
TokenParenthesis(char_index, token_last),
TokenBracket(char_index, token_last),
TokenBrace(char_index, token_last),
TokenWord(char_index, token_last),
TokenOperator(char_index, token_last),
TokenSpace(char_index, token_last),
TokenDuration(char_index, token_last),
TokenHex(char_index, token_last),
TokenNumber(char_index, token_last),
]
token_text += char_stream[0]
for token in list(tokens_unfinished):
error = token.push(char_stream[0])
if error is not None or not char_stream[0]:
tokens_unfinished.remove(token)
if len(tokens_unfinished) == 0:
# no more valid tokens left
if token.complete:
# the current character failed to be added to the
# current token, but it didn't invalidate the token
tokens_finished.append(token)
if not isinstance(token, TokenTransparent):
token_last = token
elif token_text:
raise LexerUnfinishedError(token)
elif char_stream[0]:
# unrecognised single character
raise LexerError(char_index, "unknown token")
else:
# we've reached "" (end of expression)
char_stream.popleft()
else:
if tokens_unfinished:
char_stream.popleft()
return tokens_finished
```
#### File: scpl/parser/__main__.py
```python
import json, sys
from collections import deque
from time import monotonic
from typing import Dict, List
from .parser import parse, ParserError
from .operands import ParseAtom
from ..lexer import tokenise, LexerError
from ..lexer.__main__ import main_lexer
def main_parser(line: str, vars: Dict[str, ParseAtom]) -> ParseAtom:
tokens = main_lexer(line)
start = monotonic()
try:
ast, deps = parse(tokens, vars)
except ParserError as e:
print()
print(line)
print(" "*e.token.index + "^")
print(f"parse error: {str(e)}")
sys.exit(2)
else:
end = monotonic()
print(f"parser : {ast!r}")
print(f"deps : {sorted(deps)}")
print(f"duration: {(end-start)*1_000_000:.2f}μs")
#ast = ast.precompile()
#print(f"precomp : {ast!r}")
return ast[0]
if __name__ == "__main__":
vars: Dict[str, ParseAtom] = {}
if len(sys.argv) > 2:
for key, value in json.loads(sys.argv[2]).items():
tokens = deque(tokenise(value))
atoms, deps = parse(tokens, {})
vars[key] = atoms[0]
main_parser(sys.argv[1], vars)
```
#### File: parser/operators/bools.py
```python
from typing import Dict, Optional, Tuple
from .common import ParseBinaryOperator, ParseUnaryOperator
from .cast import find_cast_bool
from ..operands import ParseAtom, ParseBool
class ParseBinaryBoth(ParseBinaryOperator, ParseBool):
def __init__(self, left: ParseBool, right: ParseBool):
super().__init__(left, right)
self._left = left
self._right = right
def __repr__(self):
return f"Both({self._left!r}, {self._right!r})"
def eval(self, vars: Dict[str, ParseAtom]) -> bool:
return self._left.eval(vars) and self._right.eval(vars)
class ParseBinaryEither(ParseBinaryOperator, ParseBool):
def __init__(self, left: ParseBool, right: ParseBool):
super().__init__(left, right)
self._left = left
self._right = right
def __repr__(self):
return f"Either({self._left!r}, {self._right!r})"
def eval(self, vars: Dict[str, ParseAtom]) -> bool:
return self._left.eval(vars) or self._right.eval(vars)
def _double_cast(
aleft: ParseAtom, aright: ParseAtom
) -> Optional[Tuple[ParseBool, ParseBool]]:
if ((left := find_cast_bool(aleft)) is not None
and (right := find_cast_bool(aright)) is not None):
return (left, right)
else:
return None
def _cast(aatom: ParseAtom) -> Optional[ParseBool]:
if (atom := find_cast_bool(aatom)) is not None:
return atom
else:
return None
def find_binary_both(left: ParseAtom, right: ParseAtom) -> Optional[ParseBool]:
if (dcast := _double_cast(left, right)) is not None:
return ParseBinaryBoth(*dcast)
else:
return None
def find_binary_either(left: ParseAtom, right: ParseAtom) -> Optional[ParseBool]:
if (dcast := _double_cast(left, right)) is not None:
return ParseBinaryEither(*dcast)
else:
return None
class ParseUnaryNot(ParseUnaryOperator, ParseBool):
def __init__(self, atom: ParseBool):
super().__init__(atom)
self._atom = atom
def __repr__(self) -> str:
return f"Not({self._atom!r})"
def eval(self, vars: Dict[str, ParseAtom]) -> bool:
return not self._atom.eval(vars)
def find_unary_not(atom: ParseAtom) -> Optional[ParseBool]:
if (cast := _cast(atom)) is not None:
return ParseUnaryNot(cast)
else:
return None
```
#### File: parser/operators/common.py
```python
from typing import Any, Dict
from ..operands import ParseAtom
class ParseOperator(ParseAtom):
def eval(self, vars: Dict[str, ParseAtom]) -> Any:
raise NotImplementedError()
class ParseBinaryOperator(ParseOperator):
def __init__(self, left: ParseAtom, right: ParseAtom):
self._base_left = left
self._base_right = right
def is_constant(self) -> bool:
return self._base_left.is_constant() and self._base_right.is_constant()
class ParseUnaryOperator(ParseOperator):
def __init__(self, atom: ParseAtom):
self._base_atom = atom
def is_constant(self) -> bool:
return self._base_atom.is_constant()
```
#### File: parser/operators/__init__.py
```python
from typing import Optional
from .add import find_binary_add
from ..operands import ParseAtom
from ...lexer import Token
from ...common.operators import OperatorName
# binary
from .add import find_binary_add
from .subtract import find_binary_subtract
from .multiply import find_binary_multiply
from .divide import find_binary_divide
from .modulo import find_binary_modulo
from .exponent import find_binary_exponent
from .lesser import find_binary_lesser
from .greater import find_binary_greater
from .bools import find_binary_both, find_binary_either, find_unary_not
from .match import find_binary_match
from .contains import find_binary_contains
from .equal import find_binary_equal
from .bitwise import (find_binary_and, find_binary_or, find_binary_xor,
find_binary_left, find_binary_right)
# unary
from .negative import find_unary_negative
from .positive import find_unary_positive
from .complement import find_unary_complement
# ✨ special
from .variable import find_variable
from .set import find_set
def find_binary_operator(
op_name: OperatorName, left: ParseAtom, right: ParseAtom
) -> Optional[ParseAtom]:
if op_name == OperatorName.ADD:
return find_binary_add(left, right)
elif op_name == OperatorName.SUBTRACT:
return find_binary_subtract(left, right)
elif op_name == OperatorName.MULTIPLY:
return find_binary_multiply(left, right)
elif op_name == OperatorName.DIVIDE:
return find_binary_divide(left, right)
elif op_name == OperatorName.MODULO:
return find_binary_modulo(left, right)
elif op_name == OperatorName.EXPONENT:
return find_binary_exponent(left, right)
elif op_name == OperatorName.BOTH:
return find_binary_both(left, right)
elif op_name == OperatorName.EITHER:
return find_binary_either(left, right)
elif op_name == OperatorName.MATCH:
return find_binary_match(left, right)
elif op_name == OperatorName.CONTAINS:
return find_binary_contains(left, right)
elif op_name == OperatorName.GREATER:
return find_binary_greater(left, right)
elif op_name == OperatorName.LESSER:
return find_binary_lesser(left, right)
elif op_name == OperatorName.EQUAL:
return find_binary_equal(left, right)
elif op_name == OperatorName.UNEQUAL:
# just treat != as !(==)
if (inner := find_binary_equal(left, right)) is not None:
return find_unary_not(inner)
else:
return None
elif op_name == OperatorName.AND:
return find_binary_and(left, right)
elif op_name == OperatorName.OR:
return find_binary_or(left, right)
elif op_name == OperatorName.XOR:
return find_binary_xor(left, right)
elif op_name == OperatorName.LEFT:
return find_binary_left(left, right)
elif op_name == OperatorName.RIGHT:
return find_binary_right(left, right)
else:
return None
def find_unary_operator(
op_name: OperatorName, atom: ParseAtom
) -> Optional[ParseAtom]:
if op_name == OperatorName.NEGATIVE:
return find_unary_negative(atom)
elif op_name == OperatorName.POSITIVE:
return find_unary_positive(atom)
elif op_name == OperatorName.NOT:
return find_unary_not(atom)
elif op_name == OperatorName.COMPLEMENT:
return find_unary_complement(atom)
else:
return None
```
#### File: parser/operators/lesser.py
```python
from typing import Dict, Optional
from .cast import ParseCastIntegerFloat
from .common import ParseBinaryOperator
from ..operands import ParseAtom, ParseBool, ParseFloat, ParseInteger
class ParseBinaryLesserIntegerInteger(ParseBinaryOperator, ParseBool):
def __init__(self, left: ParseInteger, right: ParseInteger):
super().__init__(left, right)
self._left = left
self._right = right
def __repr__(self):
return f"Lesser({self._left!r}, {self._right!r})"
def eval(self, vars: Dict[str, ParseAtom]) -> bool:
return self._left.eval(vars) < self._right.eval(vars)
class ParseBinaryLesserFloatFloat(ParseBinaryOperator, ParseBool):
def __init__(self, left: ParseFloat, right: ParseFloat):
super().__init__(left, right)
self._left = left
self._right = right
def __repr__(self):
return f"Lesser({self._left!r}, {self._right!r})"
def eval(self, vars: Dict[str, ParseAtom]) -> bool:
return self._left.eval(vars) < self._right.eval(vars)
class ParseBinaryLesserFloatInteger(ParseBinaryLesserFloatFloat):
def __init__(self, left: ParseFloat, right: ParseInteger):
super().__init__(left, ParseCastIntegerFloat(right))
class ParseBinaryLesserIntegerFloat(ParseBinaryLesserFloatFloat):
def __init__(self, left: ParseInteger, right: ParseFloat):
super().__init__(ParseCastIntegerFloat(left), right)
def find_binary_lesser(left: ParseAtom, right: ParseAtom) -> Optional[ParseAtom]:
if isinstance(left, ParseInteger):
if isinstance(right, ParseInteger):
return ParseBinaryLesserIntegerInteger(left, right)
elif isinstance(right, ParseFloat):
return ParseBinaryLesserIntegerFloat(left, right)
else:
return None
elif isinstance(left, ParseFloat):
if isinstance(right, ParseFloat):
return ParseBinaryLesserFloatFloat(left, right)
elif isinstance(right, ParseInteger):
return ParseBinaryLesserFloatInteger(left, right)
else:
return None
else:
return None
```
#### File: parser/operators/positive.py
```python
from typing import Dict
from .common import ParseUnaryOperator
from ..operands import ParseAtom, ParseFloat, ParseInteger
class ParseUnaryPositiveInteger(ParseUnaryOperator, ParseInteger):
def __init__(self, atom: ParseInteger):
super().__init__(atom)
self._atom = atom
def __repr__(self) -> str:
return f"Positive({self._atom!r})"
def eval(self, vars: Dict[str, ParseAtom]) -> int:
return +self._atom.eval(vars)
class ParseUnaryPositiveFloat(ParseUnaryOperator, ParseFloat):
def __init__(self, atom: ParseFloat):
super().__init__(atom)
self._atom = atom
def __repr__(self) -> str:
return f"Positive({self._atom!r})"
def eval(self, vars: Dict[str, ParseAtom]) -> float:
return +self._atom.eval(vars)
def find_unary_positive(atom: ParseAtom):
if isinstance(atom, ParseInteger):
return ParseUnaryPositiveInteger(atom)
elif isinstance(atom, ParseFloat):
return ParseUnaryPositiveFloat(atom)
else:
return None
```
#### File: scpl/regex/lexer.py
```python
import string
from collections import deque
from typing import Deque, Dict, List, Sequence, Tuple
from .ranges import RANGE_CHARS
RANGES: Tuple[str, str, str] = (
string.ascii_uppercase,
string.ascii_lowercase,
string.digits
)
def _find_unescaped(chars: Sequence[str], findchar: int) -> int:
i = 0
while chars[i]:
char = chars[i]
if chars[i] == "\\":
i += 1
elif ord(char) == findchar:
return i
i += 1
return -1
class RegexLexerError(Exception):
def __init__(self, index: int, error: str):
super().__init__(error)
self.index = index
class RegexToken:
def __init__(self, text: str):
self.text = text
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self.text!r})"
class RegexTokenScope(RegexToken):
pass
class RegexTokenClass(RegexToken):
pass
class RegexTokenRepeat(RegexToken):
pass
class RegexTokenOperator(RegexToken):
pass
class RegexTokenOpaque(RegexToken):
pass
class RegexTokenLiteral(RegexToken):
pass
class RegexTokenRange(RegexToken):
pass
def tokenise_class(chars: Deque[str], offset: int) -> List[RegexToken]:
startlen = len(chars)
out: List[RegexToken] = []
if chars[0] == "]":
out.append(RegexTokenLiteral(chars.popleft()))
chars.append("") # end of expression
while chars[0]:
index = offset + (startlen - len(chars))
char = chars.popleft()
if char == "]":
# this means the parent scope can know we closed our class correctly
chars.appendleft(char)
break
        elif ((r_start := ord(char)) in RANGE_CHARS
                and chars[0] == "-" and chars[1]):
range_c = char + chars.popleft()
if (r_start in RANGE_CHARS
and (r_end := ord(chars[0])) in RANGE_CHARS
and RANGE_CHARS[r_start] == RANGE_CHARS[r_end]):
out.append(RegexTokenRange(range_c + chars.popleft()))
else:
                    raise RegexLexerError(index, "invalid range")
elif char == "\\":
if not chars[0]:
raise RegexLexerError(index, "empty escape")
else:
out.append(RegexTokenOpaque(char + chars.popleft()))
else:
out.append(RegexTokenLiteral(char))
return out
def tokenise_expression(chars: Deque[str], offset: int) -> List[RegexToken]:
startlen = len(chars)
out: List[RegexToken] = []
while chars[0]:
index = offset + (startlen - len(chars))
char = chars.popleft()
if char == ")":
# this means a maybe-existent parent scope can know we closed our scope successfully
chars.appendleft(char)
break
elif char == "(":
group_start = char
if chars[0] == "?":
while chars[0] and not chars[0] == ")":
group_next = chars.popleft()
group_start += group_next
if group_next == ":":
break
group_tokens = tokenise_expression(chars, index+1)
if not chars[0] == ")":
raise RegexLexerError(index, "unterminated group")
else:
out.append(RegexTokenScope(group_start))
out.extend(group_tokens)
out.append(RegexTokenScope(chars.popleft()))
elif char == "[":
class_start = char
if chars[0] == "^":
class_start += chars.popleft()
class_tokens = tokenise_class(chars, index+1)
if not chars[0] == "]":
raise RegexLexerError(index, "unterminated class")
else:
out.append(RegexTokenClass(class_start))
out.extend(class_tokens)
out.append(RegexTokenClass(chars.popleft()))
elif char == "{":
repeat = ""
repeat_end = _find_unescaped(chars, ord("}"))
if repeat_end == -1:
raise RegexLexerError(index, "unterminated range")
else:
for i in range(repeat_end):
repeat += chars.popleft()
out.append(RegexTokenRepeat(char))
out.append(RegexTokenOpaque(repeat))
out.append(RegexTokenRepeat(chars.popleft()))
elif char == "\\":
if not chars[0]:
raise RegexLexerError(index, "empty escape")
else:
out.append(RegexTokenOpaque(char + chars.popleft()))
elif char in set("^.+*?$|"):
out.append(RegexTokenOperator(char))
else:
out.append(RegexTokenLiteral(char))
return out
def tokenise(regex: str) -> List[RegexToken]:
chars = deque(regex)
chars.append("") # end of expression
out = tokenise_expression(chars, 0)
if chars[0] == "":
return out
else:
raise RegexLexerError(len(regex)-len(chars)+1, "unexpected token")
if __name__ == "__main__":
import sys
regex = sys.argv[1]
try:
out = tokenise(regex)
except RegexLexerError as e:
print(regex)
print(" "*e.index + "^")
print()
print(str(e))
sys.exit(1)
else:
print(out)
```
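A hand-worked trace of the lexer on a tiny pattern, derived directly from the branches above (added for illustration, run in the context of the lexer module):
```python
# tokenise("a+") proceeds as:
#   'a' -> RegexTokenLiteral('a')    (default branch)
#   '+' -> RegexTokenOperator('+')   ('+' is in the operator set "^.+*?$|")
tokens = tokenise("a+")
assert repr(tokens) == "[RegexTokenLiteral('a'), RegexTokenOperator('+')]"
```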
#### File: scpl/test/parser.py
```python
import unittest
from ipaddress import ip_address, ip_network
from scpl.lexer import tokenise
from scpl.parser import operators, parse, ParserError, ParserTypeError
from scpl.parser import (ParseInteger, ParseCIDRv4, ParseCIDRv6, ParseIPv4, ParseIPv6,
ParseFloat, ParseRegex, ParseString)
class ParserTestString(unittest.TestCase):
def test(self):
atoms, deps = parse(tokenise('"asd"'), {})
self.assertIsInstance(atoms[0], ParseString)
self.assertEqual(atoms[0].value, "asd")
self.assertEqual(atoms[0].delimiter, '"')
class ParserTestRegex(unittest.TestCase):
def test_simple(self):
atoms, deps = parse(tokenise("/a/"), {})
self.assertIsInstance(atoms[0], ParseRegex)
self.assertEqual(atoms[0].pattern, "a")
self.assertEqual(atoms[0].delimiter, "/")
self.assertEqual(atoms[0].flags, set())
def test_flags(self):
atoms, deps = parse(tokenise("/a/abc"), {})
self.assertIsInstance(atoms[0], ParseRegex)
self.assertEqual(atoms[0].pattern, "a")
self.assertEqual(atoms[0].delimiter, "/")
self.assertEqual(atoms[0].flags, set("abc"))
class ParserTestInteger(unittest.TestCase):
def test(self):
atoms, deps = parse(tokenise("123"), {})
self.assertIsInstance(atoms[0], ParseInteger)
self.assertEqual(atoms[0].value, 123)
class ParserTestHex(unittest.TestCase):
def test(self):
atoms, deps = parse(tokenise("0xff"), {})
self.assertIsInstance(atoms[0], ParseInteger)
self.assertEqual(atoms[0].value, 255)
class ParserTestDuration(unittest.TestCase):
def test(self):
atoms, deps = parse(tokenise("1w2d3h4m5s"), {})
self.assertIsInstance(atoms[0], ParseInteger)
self.assertEqual(atoms[0].value, 788645)
class ParserTestFloat(unittest.TestCase):
def test(self):
atoms, deps = parse(tokenise("123.0"), {})
self.assertIsInstance(atoms[0], ParseFloat)
self.assertEqual(atoms[0].value, 123.0)
class ParserTestIPv4(unittest.TestCase):
def test(self):
addr = "10.84.1.1"
atoms, deps = parse(tokenise(addr), {})
self.assertIsInstance(atoms[0], ParseIPv4)
self.assertEqual(atoms[0].integer, int(ip_address(addr)))
class ParserTestCIDRv4(unittest.TestCase):
def test(self):
addr = "10.84.1.1/16"
atoms, deps = parse(tokenise(addr), {})
self.assertIsInstance(atoms[0], ParseCIDRv4)
self.assertEqual(atoms[0].integer, int(ip_network(addr, strict=False).network_address))
self.assertEqual(atoms[0].prefix, 16)
def test_invalid(self):
self.assertRaises(ValueError, lambda: parse(tokenise("10.84.1.1/33"), {}))
class ParserTestIPv6(unittest.TestCase):
def test(self):
addr = "fd84:9d71:8b8:1::1"
atoms, deps = parse(tokenise(addr), {})
self.assertIsInstance(atoms[0], ParseIPv6)
self.assertEqual(atoms[0].integer, int(ip_address(addr)))
class ParserTestCIDRv6(unittest.TestCase):
def test(self):
addr = "fd84:9d71:8b8:1::1/48"
atoms, deps = parse(tokenise(addr), {})
self.assertIsInstance(atoms[0], ParseCIDRv6)
self.assertEqual(atoms[0].integer, int(ip_network(addr, strict=False).network_address))
self.assertEqual(atoms[0].prefix, 48)
def test_invalid(self):
self.assertRaises(ValueError, lambda: parse(tokenise("fd84:9d71:8b8:1::1/129"), {}))
class ParserTestParenthesis(unittest.TestCase):
def test_unwrap(self):
atoms, deps = parse(tokenise("(1)"), {})
self.assertIsInstance(atoms[0], ParseInteger)
def test_nested(self):
atoms, deps = parse(tokenise("((1))"), {})
self.assertIsInstance(atoms[0], ParseInteger)
def test_unfinished(self):
with self.assertRaises(ParserError):
atoms, deps = parse(tokenise("(1"), {})
class ParserTestSet(unittest.TestCase):
def test_empty(self):
atoms, deps = parse(tokenise("{}"), {})
self.assertIsInstance(atoms[0], operators.set.ParseSet)
def test_integer(self):
atoms, deps = parse(tokenise("{1, 2}"), {})
self.assertIsInstance(atoms[0], operators.set.ParseSetInteger)
def test_float(self):
atoms, deps = parse(tokenise("{1.0, 2.0}"), {})
self.assertIsInstance(atoms[0], operators.set.ParseSetFloat)
def test_string(self):
atoms, deps = parse(tokenise('{"a", "b"}'), {})
self.assertIsInstance(atoms[0], operators.set.ParseSetString)
def test_ipv4(self):
atoms, deps = parse(tokenise("{10.84.1.1, 10.84.1.2}"), {})
self.assertIsInstance(atoms[0], operators.set.ParseSetIPv4)
def test_ipv6(self):
atoms, deps = parse(tokenise("{fdfd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b, fd8fc00:db20:35b:7399::5}"), {})
self.assertIsInstance(atoms[0], operators.set.ParseSetIPv6)
def test_invalid_mixed(self):
tokens = tokenise("{1, 1.0}")
with self.assertRaises(ParserTypeError) as cm:
parse(tokens.copy(), {})
self.assertEqual(tokens[4], cm.exception.token)
```
|
{
"source": "jesopo/thermite",
"score": 2
}
|
#### File: thermite/thermite/mformat.py
```python
from irctokens import Line
from ircstates import Server
def _status(server: Server, channel_name: str, nickname: str) -> str:
channel = server.channels[server.casefold(channel_name)]
cuser = channel.users[server.casefold(nickname)]
status = ""
# iterate isupport because that's held in order of precedence
for i, mode in enumerate(server.isupport.prefix.modes):
if mode in cuser.modes:
status += server.isupport.prefix.prefixes[i]
return status
def privmsg(server: Server, line: Line) -> str:
nick = line.hostmask.nickname
status = _status(server, line.params[0], nick)
message = line.params[1]
if message.startswith("\x01ACTION "):
message = message.split(" ", 1)[1].rstrip("\x01")
return f"* {status}{nick} {message}"
elif message.startswith("\x01"):
message = message.strip("\x01")
return f"- {status}{nick} sent CTCP request: {message}"
else:
return f"<{status}{nick}> {message}"
def notice(server: Server, line: Line) -> str:
nick = line.hostmask.nickname
status = _status(server, line.params[0], nick)
message = line.params[1]
if message.startswith("\x01"):
message = message.strip("\x01")
return f"- {status}{nick} sent CTCP response: {message}"
else:
return f"-{status}{nick}- {message}"
def quit(line: Line) -> str:
reason = (line.params[0:] or [""])[0]
return f"- {line.hostmask.nickname} quit ({reason})"
def part(line: Line) -> str:
reason = (line.params[1:] or [""])[0]
return f"- {line.hostmask.nickname} parted {line.params[0]} ({reason})"
def join(line: Line) -> str:
# TODO: handle extended-join data
return f"- {line.hostmask.nickname} joined {line.params[0]}"
def nick(line: Line) -> str:
return f"- {line.hostmask.nickname} changed nick to {line.params[0]}"
def mode(line: Line) -> str:
args = " ".join(line.params[2:])
return f"- {line.hostmask.nickname} set mode {line.params[1]} {args}"
```
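A hedged usage sketch; it assumes irctokens' `tokenise` helper returns a `Line` exposing `hostmask` and `params` the way the formatters above use them, and the module path is assumed from the file header:
```python
import irctokens
from thermite.mformat import part, quit  # module path assumed from the file header

line = irctokens.tokenise(":alice!a@example.com PART #chan :bye for now")
print(part(line))  # - alice parted #chan (bye for now)

line = irctokens.tokenise(":alice!a@example.com QUIT :gone")
print(quit(line))  # - alice quit (gone)
```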
|
{
"source": "jesperancinha/isbn-stacks",
"score": 2
}
|
#### File: small/webflux/locustfile.py
```python
from locust import HttpUser, task
class ISBNStacksTest(HttpUser):
@task
def retrieve_isbn_via_traditional_reactive(self):
self.client.get("http://localhost:8081/api/traditional/small/isbns")
```
|
{
"source": "JEsperancinhaOrg/omni-coveragereporter-python",
"score": 2
}
|
#### File: src/omni_coveragereporter/codacy_client.py
```python
import json
import os
import git
import requests
url = 'https://api.codacy.com'
repo = git.Repo(os.getcwd())
master = repo.head.reference
commit = master.commit.hexsha
def send_report(reports_pack):
headers = {
'Content-Type': 'application/json',
'project-token': os.getenv('CODACY_PROJECT_TOKEN'),
}
reports_pack_keys = reports_pack.keys()
first_key = list(reports_pack_keys)[0]
if len(reports_pack_keys) == 1 and len(reports_pack[first_key]) == 1:
effective_url = f'{url}/2.0/coverage/{commit}/{first_key}?partial=false'
print(f"- Sending Codacy report to {effective_url}")
r = requests.post(url=effective_url, headers=headers, data=json.dumps(reports_pack[first_key][0]))
return r.content.decode("utf-8")
else:
for lang in reports_pack_keys:
effective_url = f'{url}/2.0/coverage/{commit}/{lang}?partial=true'
print(f"- Sending Codacy report to {effective_url}")
for report in reports_pack[lang]:
r = requests.post(url=effective_url, headers=headers, data=json.dumps(report))
print("- Codacy Report sent!")
print(f"- {r.content.decode('utf-8')}")
effective_final_url = f'{url}/2.0/commit/{commit}/coverageFinal'
print(f"- Sending Final Codacy report to {effective_final_url}")
final_response = requests.post(url=effective_final_url, headers=headers, json="")
        return final_response.content.decode("utf-8")
```
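A hedged usage sketch; only the outer `{language: [reports...]}` shape is implied by `send_report` itself, and the inner payload, token, and import path are assumptions:
```python
import os
from codacy_client import send_report  # import path assumed from the file header

# the module reads the HEAD commit at import time, so run inside a git checkout
os.environ.setdefault("CODACY_PROJECT_TOKEN", "<project-token>")  # placeholder

# a single language with a single report takes the partial=false fast path above
reports_pack = {"Python": [{"fileReports": []}]}  # inner payload shape is an assumption
print(send_report(reports_pack))
```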
#### File: src/omni_coveragereporter/codecov_converter.py
```python
import os
import common
import coveragego_parser
import coveragepy_parser
import report_detector
def convert_coverage_py(data, report=None):
if report:
codecov_files = report['coverage']
codecov_report = report
else:
codecov_files = {}
codecov_report = {"coverage": codecov_files}
for file_name in data['files']:
if common.valid(file_name):
file_object = data['files'][file_name]
total_lines = coveragepy_parser.total_lines(file_object)
codecov_file = codecov_files[file_name] if file_name in codecov_files else None
if codecov_file is None:
codecov_file = {}
codecov_files[file_name] = codecov_file
            for i in range(1, total_lines + 1):
if str(i) in codecov_file:
curr = 0 if codecov_file[str(i)] is None else codecov_file[str(i)]
offset = 1 if i in file_object['executed_lines'] else 0
codecov_file[str(i)] = curr + offset
else:
codecov_file[str(i)] = 1 if i in file_object['executed_lines'] else None
if codecov_file[str(i)] is None:
codecov_file[str(i)] = 0 if i in file_object['missing_lines'] else None
return codecov_report
def convert_coverage_go(data_text, report=None):
if report:
codecov_files = report['coverage']
codecov_report = report
else:
codecov_files = {}
codecov_report = {"coverage": codecov_files}
all_lines = data_text.split("\n")
for i in range(1, len(all_lines)):
coverage_line = all_lines[i]
if len(coverage_line) > coveragego_parser.MINIMAL_STATS_LENGTH:
file_stats = coverage_line.split(":")
absolute_file_name = file_stats[0]
report_file_name = absolute_file_name.replace(os.getcwd(), '')
codecov_file = codecov_files[report_file_name] if report_file_name in codecov_files else None
if codecov_file is None:
codecov_file = {}
codecov_files[report_file_name] = codecov_file
total_lines = report_detector.total_lines(absolute_file_name)
for line in range(1, total_lines + 1):
codecov_file[str(line)] = None
branch_line = file_stats[1].split(",")
line_coverage = branch_line[1]
line = line_coverage.split(".")[0]
hits = line_coverage.split(" ")[2]
back = int(line_coverage.split(" ")[1])
codecov_file[line] = int(hits)
for i_back in range(1, back):
                codecov_file[str(int(line) - i_back)] = int(hits)
return codecov_report
def convert_clover(data_xml, report=None):
if report:
codecov_files = report['coverage']
codecov_report = report
else:
codecov_files = {}
codecov_report = {"coverage": codecov_files}
for project in data_xml.iter('project'):
for file in project.iter('file'):
absolute_filename = file.attrib['name']
file_name = absolute_filename.replace(os.getcwd(), "")
if file_name.startswith("/"):
file_name = file_name[1:]
codecov_file = codecov_files[file_name] if file_name in codecov_files else None
if codecov_file is None:
codecov_file = {}
codecov_files[file_name] = codecov_file
total_lines = report_detector.total_lines(absolute_filename)
for line in range(1, total_lines + 1):
codecov_file[str(line)] = None
for line in file.iter('line'):
codecov_file[str(line.attrib['num'])] = int(line.attrib['count'])
return codecov_report
```
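A small sketch of the coverage.py branch above, using the minimal JSON shape it reads; it assumes `common.valid` accepts the path, and the import path is inferred from the file header:
```python
from codecov_converter import convert_coverage_py  # import path assumed

data = {
    "files": {
        "pkg/mod.py": {"executed_lines": [1, 2], "missing_lines": [3]},
    }
}
report = convert_coverage_py(data)
# executed lines become one hit, missing lines zero:
# {"coverage": {"pkg/mod.py": {"1": 1, "2": 1, "3": 0}}}
```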
#### File: src/omni_coveragereporter/coveragepy_parser.py
```python
def total_lines(file_object):
executed_lines = max(file_object['executed_lines']) if file_object['executed_lines'] else 0
missing_lines = max(file_object['missing_lines']) if file_object['missing_lines'] else 0
return max(executed_lines, missing_lines)
```
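A worked example (added for illustration; the import path is assumed from the file header):
```python
from coveragepy_parser import total_lines  # import path assumed

file_object = {"executed_lines": [1, 2, 5], "missing_lines": [3, 4]}
assert total_lines(file_object) == 5  # max of the highest executed and missing line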
#### File: src/omni_coveragereporter/report_detector.py
```python
def total_lines(file_name):
f = open(file_name)
data = f.readlines()
f.close()
return len(data)
def is_coverage_py(report_text):
if report_text.startswith("{") and report_text.endswith(
"}") and "\"meta\":" in report_text and "\"files\":" in report_text:
return True
return False
def is_coverage_go(report_text):
if report_text.startswith("mode:") and "go:" in report_text:
return True
return False
def is_clover(report_text):
if "coverage generated" in report_text and "project timestamp" in report_text:
return True
return False
```
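A few worked probes of the detectors (added for illustration; the import path is assumed from the file header):
```python
from report_detector import is_coverage_py, is_coverage_go, is_clover  # path assumed

assert is_coverage_py('{"meta": {}, "files": {}}') is True
assert is_coverage_go("mode: set\nexample.go:1.1,2.2 1 1") is True
assert is_clover("<!-- coverage generated ... project timestamp ... -->") is True
```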
#### File: omni-coveragereporter-python/tests/test_coveralls_converter.py
```python
import json
import os
import sys
import xml.etree.ElementTree as ET
sys.path.insert(0, os.path.abspath('../src/omni_coveragereporter'))
sys.path.insert(0, os.path.abspath('src/omni_coveragereporter'))
sys.path.insert(0, os.path.abspath('omni-coveragereporter-python/src/omni_coveragereporter'))
from omni_coveragereporter_python import get_text_from_file
import coveralls_converter
def test_convert_coverage_go():
text_from_file = get_text_from_file("coverage.out").replace("${test}", f'{os.getcwd()}/points')
coverage_go_report = coveralls_converter.convert_coverage_go(text_from_file)
assert len(json.dumps(coverage_go_report)) > 10
def test_convert_clover():
text_from_file = get_text_from_file("clover.xml").replace("${test}", f'{os.getcwd()}/src')
coverage_clover_report = coveralls_converter.convert_clover(ET.fromstring(text_from_file))
assert len(json.dumps(coverage_clover_report)) > 10
```
|
{
"source": "jesperancinha/vma-archiver",
"score": 3
}
|
#### File: vma-archiver/vma-demo/send_votes_first_candidates_song_only.py
```python
import uuid
import requests
from requests.structures import CaseInsensitiveDict
def generate_uuid():
    return str(uuid.uuid1())
response = requests.get("http://localhost:8080/api/vma/registry/current")
urlSong = "http://localhost:8080/api/vma/voting/song"
headers = CaseInsensitiveDict()
headers["Cookie"] = f"votingId={generateUuuid()}"
for category in response.json():
print(category["category"])
print(category)
if category["type"] == "SONG" or category["type"] == "INSTRUMENTAL":
elected = category["songs"][0]
resp = requests.post(
urlSong,
headers=headers,
json={
"userID": generateUuuid(),
"idC": category["id"],
"idS": elected["id"]
})
print(resp.status_code)
```
|
{
"source": "jesperatstockholmuniversity/tor_malware_classification",
"score": 3
}
|
#### File: tor_malware_classification/retomos/retomos_feature_extractor.py
```python
import sqlite3
import sys
import os
import timeit
import json
import argparse
def open_database(database, input_file, tor):
# Open database connection
db_connection = sqlite3.connect(database)
db_cursor = db_connection.cursor()
sha256 = ""
# If input is a file
if os.path.isfile(input_file) is True:
print("Feeding single file to DB.", input_file)
input_opener = open(input_file, "r")
sha256 = feed_database(db_cursor, input_opener, tor)
# If input is a directory
if os.path.isdir(input_file) is True:
# Open all files and feed their content to DB
for filename in os.listdir(input_file):
filename_path = input_file + "/" + filename
directory_file_opener = open(filename_path, "r")
            # Feed database with report contents | In future send tor_related label as argument
sha256 = feed_database(db_cursor, directory_file_opener, tor)
    # Close DB
    try:
        db_connection.commit()
        db_connection.close()
        return sha256
    except sqlite3.Error as e:
        print(" Sqlite error: ", e.args[0])
        sys.exit()
def feed_database(db_cursor, file_opener, tor):
# Suck in content and parse the JSON
tor_related = tor # 0=false, 1=true, 2=unknown
index = 0
document = file_opener.read()
json_obj = json.loads(document)
sha256 = str(json_obj['target']['file']['sha256'])
# Extract signatures and AV organisations
signatures = json_obj['signatures']
label = ""
av_organisation = ""
for listitem in signatures:
if listitem["name"] == "antivirus_virustotal":
for av_organisations in listitem["marks"]:
index += 1
av_organisation = av_organisations["category"]
label = av_organisations["ioc"]
print("Label: ", label)
#label_insert = "UPDATE malware_name SET av_label=\'" + label + "\' WHERE sha256=\'" + sha256 + "\';"
#av_insert = "UPDATE OR IGNORE malware_name SET av_organisation=\'" + av_organisation + "\' WHERE sha256=\'" + sha256 + "\';"
db_cursor.execute("INSERT OR IGNORE INTO av_organisation(name) VALUES(?)", [av_organisation,])
#sha256_sql = "UPDATE OR IGNORE malware_name SET sha256=\'" + sha256 + "\', av_label=\'" + label + "\', av_organisation=\'" + av_organisation + "\';";
db_cursor.execute("INSERT OR IGNORE INTO malware_name VALUES(?,?,?)", (sha256, label, av_organisation))
print("Inserting sha256:", sha256)
db_cursor.execute("INSERT OR IGNORE INTO label(label, sha256, tor_related, av_organisation) VALUES(?,?,?,?)", (label, sha256, tor_related, av_organisation))
# Extract DLLs, registry keys, and API calls
try:
behaviour = json_obj['behavior']['apistats']
generic = json_obj['behavior']['generic']
strings = json_obj['strings']
except:
pass
# Insert API calls into DB
for behaviouritem in behaviour:
for apicalls in behaviour[behaviouritem]:
db_cursor.execute("INSERT INTO api_calls(name, label, tor_related, sha256, av_organisation) VALUES(?,?,?,?,?)", (apicalls, label, tor_related, sha256, av_organisation))
for entry in generic:
file_created = entry['summary']
for iii in file_created:
# Get DLLs
if iii == "dll_loaded":
# Add DLLs to DB
for ii in file_created[iii]:
db_cursor.execute("INSERT OR IGNORE INTO dlls(name, sha256) VALUES(?,?)", (ii, sha256))
# Get registry keys written:
if iii == "regkey_written":
for ii in file_created[iii]:
db_cursor.execute("INSERT OR IGNORE INTO reg_keys(path, access_type, sha256) VALUES(?,?,?)", (ii, "written", sha256))
if iii == "regkey_opened":
for ii in file_created[iii]:
db_cursor.execute("INSERT OR IGNORE INTO reg_keys(path, access_type, sha256) VALUES(?,?,?)", (ii, "opened", sha256))
if iii == "regkey_read":
for ii in file_created[iii]:
db_cursor.execute("INSERT OR IGNORE INTO reg_keys(path, access_type, sha256) VALUES(?,?,?)", (ii, "read", sha256))
strings_dump = ""
for strings_item in strings:
strings_dump = strings_dump + " " + strings_item
db_cursor.execute("INSERT OR IGNORE INTO strings(sha256, strings) VALUES(?,?)", (sha256, strings_dump))
# Get network details
network = json_obj['network']['hosts']
domains = json_obj['network']['domains']
for dns in domains:
db_cursor.execute("INSERT OR IGNORE INTO network(ip, dns, sha256) VALUES(?,?,?)", (dns['ip'], dns['domain'],sha256))
print("Added: ", index, " entries into database.")
return sha256
```
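A hypothetical invocation (database and report paths are placeholders; per `feed_database`, the `tor` argument labels a sample 0=not Tor-related, 1=Tor-related, 2=unknown):
```python
from retomos_feature_extractor import open_database  # import path assumed

# Sketch only: feed a directory of Cuckoo-style JSON reports into the database.
sha256 = open_database("retomos.db", "reports/", 2)
print("Last processed sample:", sha256)
```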
#### File: tor_malware_classification/retomos/retomos_malware_classifier.py
```python
import sys
import sqlite3
import datetime
import timeit
import math
import re
import pandas as pd
import numpy as np
from time import time, sleep
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score, cross_validate, train_test_split
#from sklearn.naive_bayes import *
from sklearn.metrics import roc_auc_score, balanced_accuracy_score, precision_recall_curve, classification_report, precision_recall_fscore_support, roc_curve, average_precision_score, auc, confusion_matrix
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.feature_selection import SelectKBest, chi2, VarianceThreshold
from sklearn.tree import DecisionTreeClassifier, export_text, export_graphviz
from sklearn.linear_model import LogisticRegression
from sklearn import svm
from sklearn import tree
from mglearn import make_blobs
import matplotlib.pyplot as plt
import graphviz
from docopt import docopt
'''
OPEN UP DATABASE AND FETCH DATA
'''
def connect_to_database(action, training_db, urls, unknown_samples, sha256):
# Open up training data set
training_db_connection = ""
training_db_cursor = ""
clfnb = MultinomialNB()
clfrf = RandomForestClassifier(random_state=0)
if action == False:
try:
# Connect to training set database
training_db_connection = sqlite3.connect(str(training_db))
training_db_cursor = training_db_connection.cursor()
# Queries for retrieving data to analyse
sql_reg_keys_query = "SELECT sha256, path FROM reg_keys;"
sql_strings_query = "SELECT strings FROM strings;"
training_db_cursor.execute(sql_reg_keys_query)
reg_key_pairs = training_db_cursor.fetchall()
reg_keys_dict = {}
unknown_samples_dict = {}
cur_sha = ""
cur_class_label = 3
            class_label = 0
reg_keys_list = []
dns_list = []
api_list = []
dll_list = []
tor_related = int(0)
api_string = ""
reg_keys_string = ""
dns_string =""
counter = 0
counter_length = len(reg_key_pairs)
reg_keys_combined = {}
unknown_samples_combined = {}
print("Fetching data from database. Processing.")
for pair in reg_key_pairs:
counter += 1
# Print progress
if counter % 100 == 0:
sys.stdout.write(".")
sys.stdout.flush()
                for frac in (0.1, 0.2, 0.5, 0.7, 0.8, 0.9, 0.95):
                    if counter == math.ceil(frac * counter_length):
                        print("{:.0%}".format(frac))
if cur_sha != pair[0]:
cur_sha = pair[0]
reg_keys_list = []
api_list = []
dll_list = []
api_string = ""
dll_string = ""
dns_string = ""
reg_keys_string = ""
class_label =[]
else:
reg_keys_list.append(pair[1])
dns_query = "SELECT dns FROM network WHERE sha256=\'" + cur_sha + "\';"
training_db_cursor.execute(dns_query)
dns_list = training_db_cursor.fetchall()
api_query = "SELECT name,tor_related FROM api_calls WHERE sha256=\'" + cur_sha + "\';"
training_db_cursor.execute(api_query)
api_list = training_db_cursor.fetchall()
dll_query = "SELECT name FROM dlls WHERE sha256=\'" + cur_sha + "\';"
training_db_cursor.execute(dll_query)
dll_list = training_db_cursor.fetchall()
class_query = "SELECT tor_related FROM label WHERE sha256=\'" + cur_sha + "\';"
training_db_cursor.execute(class_query)
class_label = training_db_cursor.fetchall()
# Append data from database
api_string = "".join(str(api_list))
reg_keys_string = "".join(str(reg_keys_list))
dns_string = "".join(str(dns_list))
dll_string = "".join(str(dll_list))
# If 1 or 0, samples are correctly classified. 2 are prediction candidates.
if class_label:
if 0 in class_label[0]:
tor_related = int(0)
reg_keys_dict.update({cur_sha : [reg_keys_string, dns_string, dll_string, api_string, tor_related]})
reg_keys_combined.update({cur_sha : [reg_keys_string + " " + dns_string + " " + dll_string + " " + api_string, tor_related]})
if 1 in class_label[0]:
tor_related = int(1)
reg_keys_dict.update({cur_sha : [reg_keys_string, dns_string, dll_string, api_string, tor_related]})
reg_keys_combined.update({cur_sha : [reg_keys_string + " " + dns_string + " " + dll_string + " " + api_string, tor_related]})
if 2 in class_label[0]:
tor_related = int(2)
unknown_samples_dict.update({cur_sha : [reg_keys_string, dns_string, dll_string, api_string, tor_related]})
unknown_samples_combined.update({cur_sha : [reg_keys_string + " " + dns_string + dll_string + " " + api_string, tor_related]})
# Construct data frames from the feature dictionaries
training_df2 = pd.DataFrame(reg_keys_dict).T
training_df3 = pd.DataFrame(reg_keys_combined).T
# Construct a data frame for the unknown sample to be classified as well
unknown_df2 = pd.DataFrame(unknown_samples_dict).T
unknown_df3 = pd.DataFrame(unknown_samples_combined).T
            predictions_SHA256_list = build_classifiers(training_df2, training_df3, unknown_df2, unknown_df3)
# If URLs flag enabled, go fetch URLs
if urls == True:
unique_onion_urls = []
print("|-- Tor Malware\n", predictions_SHA256_list)
for prediction_SHA256 in predictions_SHA256_list:
strings_query = "SELECT strings FROM strings WHERE sha256=\'" + prediction_SHA256 + "\';"
dns_query = "SELECT dns FROM network WHERE sha256=\'" + prediction_SHA256 + "\';"
training_db_cursor.execute(strings_query)
predicted_strings = training_db_cursor.fetchall()
# Find .onion URL
for onion_url in predicted_strings:
for string in onion_url:
#tmp_list = re.findall("http[s]?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+", string)
#tmp_list = re.findall("(\w+)://([\w\-\.]+)/(\w+).(\w+)", string)
tmp_list = re.findall(r"(?<=\.)([^.]+)(?:\.(?:onion|[^.]+(?:$|\n)))", string)
for i in tmp_list:
if i not in unique_onion_urls:
unique_onion_urls.append(i)
print("|--- Onion URLs \n", unique_onion_urls)
# Close DB connection
training_db_connection.commit()
training_db_connection.close()
except sqlite3.Error as err:
print("Sqlite error:", err)
finally:
training_db_connection.close()
"""
BUILD CLASSIFICATION MODELS
"""
def build_classifiers(df2, df3, unknown_df2, unknown_df3):
# Create bag of words for label:
vect = CountVectorizer(lowercase=False)
vect.fit_transform(df3[0])
X = vect.transform(df3[0])
# If there are unknown samples, make predictions on them.
X_unknown = vect.transform(unknown_df3[0])
# unknown_samples_SHA256 = df3[0].index
#X = pd.DataFrame(X_cand, columns=vect.get_feature_names())
# Target/class labels
y = df2[4]
y = y.astype('int')
# Feature selection
selector = VarianceThreshold(threshold=12)
selector.fit_transform(X)
# 80 / 20 split training and testing data. Shuffle just in case.
X_train, X_test, y_train, y_test = train_test_split(X, y, shuffle=True, test_size=0.2)
y_train = y_train.astype('int')
y_test = y_test.astype('int')
# Naive Bayes
mnb = MultinomialNB()
nb_clf = mnb.fit(X_train.toarray(), y_train.to_numpy())
mnb_prediction = nb_clf.predict(X_test.toarray())
mnb_proba = nb_clf.predict_proba(X_test)[:, 1]
mnb_cross_validation_scores = cross_validate(nb_clf, X_test.toarray(), y_test.to_numpy(), cv=5, scoring=["accuracy", "f1", "recall", "precision", "roc_auc"], n_jobs=-1, return_train_score=True)
mnb_cross_validation_score = cross_val_score(nb_clf, X_test.toarray(), y_test.to_numpy(), cv=5, scoring="accuracy")
mnb_roc_auc_avg = roc_auc_score(y_test, mnb_prediction)
mnb_balanced_accuracy = balanced_accuracy_score(y_test, mnb_prediction)
mnb_precision, mnb_recall, mnb_threshold = precision_recall_curve(y_test, nb_clf.predict(X_test.toarray()))
mnb_fpr = dict()
mnb_tpr = dict()
mnb_roc_auc = dict()
mnb_fpr[0], mnb_tpr[0], _ = roc_curve(y_test, mnb_proba)
mnb_roc_auc[0] = auc(mnb_fpr[0], mnb_tpr[0])
# Compute micro-average ROC curve and ROC area
mnb_fpr["micro"], mnb_tpr["micro"], _ = roc_curve(y_test.ravel(), mnb_proba.ravel())
mnb_roc_auc["micro"] = auc(mnb_fpr["micro"], mnb_tpr["micro"])
print("\n | ---- MNB cross validation score: ", mnb_cross_validation_score.mean())
print(classification_report(y_test, mnb_prediction))
# Support Vector Machine
clf = svm.SVC(C=2, cache_size=9000, probability=True).fit(X_train, y_train)
svm_proba = clf.predict_proba(X_test)[:, 1]
svm_prediction = clf.predict(X_test)
svm_unknown_sample_predicition = clf.predict(X_unknown)
svm_y_score = clf.decision_function(X_test)
svm_roc_auc_avg = roc_auc_score(y_test, svm_prediction)
svm_cross_validation_scores = cross_validate(clf, X_test, y_test, cv=5, scoring=["accuracy", "balanced_accuracy","precision","f1","recall","roc_auc"], return_train_score=True)
svm_cross_validation_score = cross_val_score(clf, X_test, y_test, cv=5, scoring="accuracy")
svm_precision, svm_recall, svm_threshold = precision_recall_curve(y_test, clf.decision_function(X_test))
svm_close_zero = np.argmin(np.abs(svm_threshold))
svm_fpr = dict()
svm_tpr = dict()
svm_roc_auc = dict()
#svm_fpr[0], svm_tpr[0], _ = roc_curve(y_test, svm_prediction)
svm_fpr[0], svm_tpr[0], _ = roc_curve(y_test, svm_proba)
#svm_fpr[1], svm_tpr[1], _ = roc_curve(y_test[:,1], svm_y_score[:, 1])
svm_roc_auc[0] = auc(svm_fpr[0], svm_tpr[0])
# Compute micro-average ROC curve and ROC area
svm_fpr["micro"], svm_tpr["micro"], _ = roc_curve(y_test.ravel(), svm_proba.ravel())
svm_roc_auc["micro"] = auc(svm_fpr["micro"], svm_tpr["micro"])
print("\n\n|---- SVM 10-fold cross validation accuracy score:{}".format(np.mean(svm_cross_validation_score)))
# Logistic regression classifier
logreg = LogisticRegression(max_iter=4000).fit(X_train, y_train)
lr_prediction = logreg.predict(X_test)
lr_unknown_predictions = logreg.predict(X_unknown)
lr_proba = logreg.predict_proba(X_test)[:, 1]
lr_decision_function = logreg.decision_function(X_test)
lr_cross_validation_scores = cross_validate(logreg, X_test, y_test, cv=5 , scoring=["accuracy", "balanced_accuracy", "precision", "f1", "recall","roc_auc"], n_jobs=-1, return_train_score=True)
lr_cross_validation_score = cross_val_score(logreg, X_test, y_test, cv=5 , scoring="accuracy")
lr_roc_auc = roc_auc_score(y_test, lr_prediction)
lr_fpr = dict()
lr_tpr = dict()
lr_roc_auc = dict()
lr_fpr[0], lr_tpr[0], _ = roc_curve(y_test, lr_proba)
lr_roc_auc[0] = auc(lr_fpr[0], lr_tpr[0])
lr_fpr["micro"], lr_tpr["micro"], _ = roc_curve(y_test.ravel(), lr_proba.ravel())
lr_roc_auc["micro"] = auc(lr_fpr["micro"], lr_tpr["micro"])
average_precision = average_precision_score(y_test, lr_decision_function)
precision, recall, threshold = precision_recall_curve(y_test, lr_decision_function)
precision1, recall1, f1, supp = precision_recall_fscore_support(y_test, lr_prediction, average="weighted", zero_division=1)
print("\n\n|---- LR 10-fold cross validation accuracy score:{}".format(np.mean(lr_cross_validation_score)))
print(classification_report(y_test, lr_prediction, zero_division=1))
# Random forest classifier
rf_clf = RandomForestClassifier(max_depth=2, random_state=0)
rf_clf.fit(X_train, y_train)
rf_prediction = rf_clf.predict(X_test)
rf_unknown_prediction = rf_clf.predict(X_unknown)
rf_proba = rf_clf.predict_proba(X_test)[:, 1]
rf_fpr = dict()
rf_tpr = dict()
rf_roc_auc = dict()
rf_fpr[0], rf_tpr[0], _ = roc_curve(y_test, rf_prediction)
rf_roc_auc[0] = auc(rf_fpr[0], rf_tpr[0])
rf_fpr["micro"], rf_tpr["micro"], _ = roc_curve(y_test.ravel(), rf_prediction.ravel())
rf_roc_auc["micro"] = auc(rf_fpr["micro"], rf_tpr["micro"])
rf_precision, rf_recall, rf_threshold = precision_recall_curve(y_test, rf_prediction)
rf_cross_validation_score = cross_val_score(rf_clf, X_test, y_test, cv=5 , scoring="accuracy")
print("\n\n|---- RF 10-fold cross validation accuracy score: {}", rf_cross_validation_score.mean())
print(classification_report(y_test,rf_prediction))
# Decision tree classifier
dt_clf = DecisionTreeClassifier()
dt_clf.fit(X_train, y_train)
dt_prediction = dt_clf.predict(X_test)
dt_unknown_prediction = dt_clf.predict(X_unknown)
dt_proba = dt_clf.predict_proba(X_test)[:, 1]
dt_fpr = dict()
dt_tpr = dict()
dt_roc_auc = dict()
dt_fpr[0], dt_tpr[0], _ = roc_curve(y_test, dt_prediction)
dt_roc_auc[0] = auc(dt_fpr[0], dt_tpr[0])
dt_fpr["micro"], dt_tpr["micro"], _ = roc_curve(y_test.ravel(), dt_prediction.ravel())
dt_roc_auc["micro"] = auc(dt_fpr["micro"], dt_tpr["micro"])
dt_precision, dt_recall, dt_threshold = precision_recall_curve(y_test, dt_prediction)
dt_cross_validation_score = cross_val_score(dt_clf, X_test, y_test, cv=5 , scoring="accuracy")
print("\n\n|---- DT 10-fold cross validation accuracy score:{} ", dt_cross_validation_score.mean())
print("\nDT score: ", dt_clf.score(X_test, y_test), "\nDT classification report\n\n", classification_report(y_test, dt_prediction), export_text(dt_clf, show_weights=True))
print("DT y_predictions: ", dt_prediction, "y_test: ", y_test)
# Verify predictions with the true labels
verified_predictions_SHA256_list = verify_predictions(dt_prediction, y_test)
# Unseen samples predictions
"""
# Draw AuC RoC
roc_plt = plt
roc_plt.figure()
lw = 2
roc_plt.plot(svm_fpr[0], svm_tpr[0], color='red', lw=lw, label='Support vector machine ROC curve (area = %0.2f)' % svm_roc_auc[0])
roc_plt.plot(lr_fpr[0], lr_tpr[0], color='yellow', lw=lw, label='Logistic regression ROC curve (area = %0.2f)' % lr_roc_auc[0])
roc_plt.plot(mnb_fpr[0], mnb_tpr[0], color='green', lw=lw, label='Multinomial naive Bayes ROC curve (area = %0.2f)' % mnb_roc_auc[0])
roc_plt.plot(rf_fpr[0], rf_tpr[0], color='blue', lw=lw, label='Random Forest ROC curve (area = %0.2f)' % rf_roc_auc[0])
roc_plt.plot(dt_fpr[0], dt_tpr[0], color='purple', lw=lw, label='Decision tree ROC curve (area = %0.2f)' % dt_roc_auc[0])
roc_plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
roc_plt.xlim([0.0, 1.0])
roc_plt.ylim([0.0, 1.05])
roc_plt.xlabel('False Positive Rate')
roc_plt.ylabel('True Positive Rate')
roc_plt.title('Receiver operating characteristic.')
roc_plt.legend(loc="lower right")
roc_plt.grid(True)
#fig_file = str(datetime.datetime.now() + ".png"
roc_plt.savefig("roc.tiff", format="tiff")
# Plot precision and recall graph
plt.plot(precision, recall, label="Logistic regression")
plt.plot(svm_precision, svm_recall, label="Support vector machine")
plt.plot(mnb_precision, mnb_recall, label="Multinomial naive Bayes")
plt.plot(rf_precision, rf_recall, label="Random forest")
plt.plot(dt_precision, dt_recall, label="Decision tree")
plt.xlabel("Precision")
plt.ylabel("Recall")
plt.legend(loc="best")
fig2_file = str(datetime.datetime.now()) + ".tiff"
plt.savefig(fig2_file, format="tiff")
"""
return verified_predictions_SHA256_list
def verify_predictions(X_predictions_list, y_true):
    counter = 0
    verified_predictions_SHA256_list = []
    for y_index, y_value in y_true.items():
        # compare the prediction for this sample with its true label
        X_prediction = int(X_predictions_list[counter])
        if X_prediction == y_value:
            print("|--- Prediction matches the true label on file with SHA256: ", y_index)
            verified_predictions_SHA256_list.append(y_index)
        counter += 1
    return verified_predictions_SHA256_list
# Constructor
if __name__ == "__main__":
arguments = docopt(__doc__, version='retomos 0.1')
main(arguments)
```
|
{
"source": "jesperbagge/quakeworld-status",
"score": 3
}
|
#### File: quakeworld-status/qwstatus/__init__.py
```python
import socket
class QuakeWorldServer:
def __init__(self, address, port=27500, timeout=10):
self.address = address
self.port = int(port)
self.timeout = timeout
def __repr__(self):
return 'QuakeWorld server at {}'.format(self.address)
def _get_status(self):
msg = '\xff\xff\xff\xffstatus\x00'
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.settimeout(self.timeout)
s.sendto(bytes(msg, 'latin1'), (self.address, self.port))
response = s.recv(4096).decode('latin1').split('\n')
info = response[0].split('\\')[1:]
server = dict(zip(info[0::2], info[1::2]))
players = [i for i in response[1:] if '\x00' not in i]
return server, players
def info(self):
try:
server, players = self._get_status()
return server
        except (socket.timeout, OSError):
            return 'Unable to reach server.'
def players(self):
try:
server, players = self._get_status()
return players
        except (socket.timeout, OSError):
            return 'Unable to reach server.'
```
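A usage sketch (hostname is a placeholder; the package name is taken from the file path above):
```python
from qwstatus import QuakeWorldServer  # package name per the file path above

server = QuakeWorldServer("quake.example.org", port=27500, timeout=5)
print(server)            # QuakeWorld server at quake.example.org
print(server.info())     # dict of server rules, or the error string on failure
print(server.players())  # list of player lines, or the error string on failure
```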
|
{
"source": "jesperborgstrup/PyBitmessageVote",
"score": 2
}
|
#### File: src/bitmessageqt/__init__.py
```python
withMessagingMenu = False
try:
from gi.repository import MessagingMenu
from gi.repository import Notify
withMessagingMenu = True
except ImportError:
MessagingMenu = None
from addresses import *
import shared
from bitmessageui import *
from namecoin import namecoinConnection, ensureNamecoinOptions
from newaddressdialog import *
from addaddressdialog import *
from newsubscriptiondialog import *
from regenerateaddresses import *
from newchandialog import *
from specialaddressbehavior import *
from settings import *
from about import *
from help import *
from iconglossary import *
from connect import *
import sys
from time import strftime, localtime, gmtime
import time
import os
import hashlib
from pyelliptic.openssl import OpenSSL
import pickle
import platform
import textwrap
import debug
from debug import logger
import subprocess
import datetime
from helper_sql import *
import l10n
from consensus import BitcoinThread, ConsensusProtocol, ConsensusTimeData, helper_keys, VotingData
from createelectiondialog import Ui_CreateElectionDialog
from electiondetailsdialog import Ui_ElectionDetailsDialog
from timestampersettingsdialog import Ui_TimestamperSettingsDialog
try:
from PyQt4 import QtCore, QtGui
from PyQt4.QtCore import *
from PyQt4.QtGui import *
except Exception as err:
print 'PyBitmessage requires PyQt unless you want to run it as a daemon and interact with it using the API. You can download it from http://www.riverbankcomputing.com/software/pyqt/download or by searching Google for \'PyQt Download\' (without quotes).'
print 'Error message:', err
sys.exit()
try:
_encoding = QtGui.QApplication.UnicodeUTF8
except AttributeError as err:
    print 'QtGui.QApplication.UnicodeUTF8 error:', err
def _translate(context, text):
return QtGui.QApplication.translate(context, text)
def identiconize(address):
size = 48
# If you include another identicon library, please generate an
# example identicon with the following md5 hash:
# 3fd4bf901b9d4ea1394f0fb358725b28
try:
identicon_lib = shared.config.get('bitmessagesettings', 'identiconlib')
except:
# default to qidenticon_two_x
identicon_lib = 'qidenticon_two_x'
# As an 'identiconsuffix' you could put "@bitmessge.ch" or "@bm.addr" to make it compatible with other identicon generators. (Note however, that E-Mail programs might convert the BM-address to lowercase first.)
# It can be used as a pseudo-password to salt the generation of the identicons to decrease the risk
# of attacks where someone creates an address to mimic someone else's identicon.
identiconsuffix = shared.config.get('bitmessagesettings', 'identiconsuffix')
if not shared.config.getboolean('bitmessagesettings', 'useidenticons'):
idcon = QtGui.QIcon()
return idcon
if (identicon_lib[:len('qidenticon')] == 'qidenticon'):
# print identicon_lib
# originally by:
# :Author:<NAME> <<EMAIL>>
# Licesensed under FreeBSD License.
# stripped from PIL and uses QT instead (by sendiulo, same license)
import qidenticon
hash = hashlib.md5(addBMIfNotPresent(address)+identiconsuffix).hexdigest()
use_two_colors = (identicon_lib[:len('qidenticon_two')] == 'qidenticon_two')
opacity = int(not((identicon_lib == 'qidenticon_x') | (identicon_lib == 'qidenticon_two_x') | (identicon_lib == 'qidenticon_b') | (identicon_lib == 'qidenticon_two_b')))*255
penwidth = 0
image = qidenticon.render_identicon(int(hash, 16), size, use_two_colors, opacity, penwidth)
# filename = './images/identicons/'+hash+'.png'
# image.save(filename)
idcon = QtGui.QIcon()
idcon.addPixmap(image, QtGui.QIcon.Normal, QtGui.QIcon.Off)
return idcon
elif identicon_lib == 'pydenticon':
# print identicon_lib
# Here you could load pydenticon.py (just put it in the "src" folder of your Bitmessage source)
from pydenticon import Pydenticon
# It is not included in the source, because it is licensed under GPLv3
# GPLv3 is a copyleft license that would influence our licensing
# Find the source here: http://boottunes.googlecode.com/svn-history/r302/trunk/src/pydenticon.py
# note that it requires PIL to be installed: http://www.pythonware.com/products/pil/
idcon_render = Pydenticon(addBMIfNotPresent(address)+identiconsuffix, size*3)
rendering = idcon_render._render()
data = rendering.convert("RGBA").tostring("raw", "RGBA")
qim = QImage(data, size, size, QImage.Format_ARGB32)
pix = QPixmap.fromImage(qim)
idcon = QtGui.QIcon()
idcon.addPixmap(pix, QtGui.QIcon.Normal, QtGui.QIcon.Off)
return idcon
def avatarize(address):
"""
loads a supported image for the given address' hash form 'avatars' folder
falls back to default avatar if 'default.*' file exists
falls back to identiconize(address)
"""
idcon = QtGui.QIcon()
hash = hashlib.md5(addBMIfNotPresent(address)).hexdigest()
str_broadcast_subscribers = '[Broadcast subscribers]'
if address == str_broadcast_subscribers:
# don't hash [Broadcast subscribers]
hash = address
# http://pyqt.sourceforge.net/Docs/PyQt4/qimagereader.html#supportedImageFormats
# print QImageReader.supportedImageFormats ()
# QImageReader.supportedImageFormats ()
extensions = ['PNG', 'GIF', 'JPG', 'JPEG', 'SVG', 'BMP', 'MNG', 'PBM', 'PGM', 'PPM', 'TIFF', 'XBM', 'XPM', 'TGA']
# try to find a specific avatar
for ext in extensions:
lower_hash = shared.appdata + 'avatars/' + hash + '.' + ext.lower()
upper_hash = shared.appdata + 'avatars/' + hash + '.' + ext.upper()
if os.path.isfile(lower_hash):
# print 'found avatar of ', address
idcon.addFile(lower_hash)
return idcon
elif os.path.isfile(upper_hash):
# print 'found avatar of ', address
idcon.addFile(upper_hash)
return idcon
# if we haven't found any, try to find a default avatar
for ext in extensions:
lower_default = shared.appdata + 'avatars/' + 'default.' + ext.lower()
upper_default = shared.appdata + 'avatars/' + 'default.' + ext.upper()
if os.path.isfile(lower_default):
default = lower_default
idcon.addFile(lower_default)
return idcon
elif os.path.isfile(upper_default):
default = upper_default
idcon.addFile(upper_default)
return idcon
# If no avatar is found
return identiconize(address)
class MyForm(QtGui.QMainWindow):
# sound type constants
SOUND_NONE = 0
SOUND_KNOWN = 1
SOUND_UNKNOWN = 2
SOUND_CONNECTED = 3
SOUND_DISCONNECTED = 4
SOUND_CONNECTION_GREEN = 5
# the last time that a message arrival sound was played
lastSoundTime = datetime.datetime.now() - datetime.timedelta(days=1)
# the maximum frequency of message sounds in seconds
maxSoundFrequencySec = 60
str_broadcast_subscribers = '[Broadcast subscribers]'
str_chan = '[chan]'
def init_file_menu(self):
QtCore.QObject.connect(self.ui.actionExit, QtCore.SIGNAL(
"triggered()"), self.quit)
QtCore.QObject.connect(self.ui.actionManageKeys, QtCore.SIGNAL(
"triggered()"), self.click_actionManageKeys)
QtCore.QObject.connect(self.ui.actionDeleteAllTrashedMessages,
QtCore.SIGNAL(
"triggered()"),
self.click_actionDeleteAllTrashedMessages)
QtCore.QObject.connect(self.ui.actionRegenerateDeterministicAddresses,
QtCore.SIGNAL(
"triggered()"),
self.click_actionRegenerateDeterministicAddresses)
QtCore.QObject.connect(self.ui.actionJoinChan, QtCore.SIGNAL(
"triggered()"),
self.click_actionJoinChan) # also used for creating chans.
QtCore.QObject.connect(self.ui.pushButtonNewAddress, QtCore.SIGNAL(
"clicked()"), self.click_NewAddressDialog)
QtCore.QObject.connect(self.ui.comboBoxSendFrom, QtCore.SIGNAL(
"activated(int)"), self.redrawLabelFrom)
QtCore.QObject.connect(self.ui.pushButtonAddAddressBook, QtCore.SIGNAL(
"clicked()"), self.click_pushButtonAddAddressBook)
QtCore.QObject.connect(self.ui.pushButtonAddSubscription, QtCore.SIGNAL(
"clicked()"), self.click_pushButtonAddSubscription)
QtCore.QObject.connect(self.ui.pushButtonAddBlacklist, QtCore.SIGNAL(
"clicked()"), self.click_pushButtonAddBlacklist)
QtCore.QObject.connect(self.ui.pushButtonSend, QtCore.SIGNAL(
"clicked()"), self.click_pushButtonSend)
QtCore.QObject.connect(self.ui.pushButtonLoadFromAddressBook,
QtCore.SIGNAL(
"clicked()"),
self.click_pushButtonLoadFromAddressBook)
QtCore.QObject.connect(self.ui.pushButtonFetchNamecoinID, QtCore.SIGNAL(
"clicked()"), self.click_pushButtonFetchNamecoinID)
QtCore.QObject.connect(self.ui.radioButtonBlacklist, QtCore.SIGNAL(
"clicked()"), self.click_radioButtonBlacklist)
QtCore.QObject.connect(self.ui.radioButtonWhitelist, QtCore.SIGNAL(
"clicked()"), self.click_radioButtonWhitelist)
QtCore.QObject.connect(self.ui.pushButtonStatusIcon, QtCore.SIGNAL(
"clicked()"), self.click_pushButtonStatusIcon)
QtCore.QObject.connect(self.ui.actionSettings, QtCore.SIGNAL(
"triggered()"), self.click_actionSettings)
QtCore.QObject.connect(self.ui.actionAbout, QtCore.SIGNAL(
"triggered()"), self.click_actionAbout)
QtCore.QObject.connect(self.ui.actionHelp, QtCore.SIGNAL(
"triggered()"), self.click_actionHelp)
def init_inbox_popup_menu(self):
# Popup menu for the Inbox tab
self.ui.inboxContextMenuToolbar = QtGui.QToolBar()
# Actions
self.actionReply = self.ui.inboxContextMenuToolbar.addAction(_translate(
"MainWindow", "Reply"), self.on_action_InboxReply)
self.actionAddSenderToAddressBook = self.ui.inboxContextMenuToolbar.addAction(
_translate(
"MainWindow", "Add sender to your Address Book"),
self.on_action_InboxAddSenderToAddressBook)
self.actionTrashInboxMessage = self.ui.inboxContextMenuToolbar.addAction(
_translate("MainWindow", "Move to Trash"),
self.on_action_InboxTrash)
self.actionForceHtml = self.ui.inboxContextMenuToolbar.addAction(
_translate(
"MainWindow", "View HTML code as formatted text"),
self.on_action_InboxMessageForceHtml)
self.actionSaveMessageAs = self.ui.inboxContextMenuToolbar.addAction(
_translate(
"MainWindow", "Save message as..."),
self.on_action_InboxSaveMessageAs)
self.actionMarkUnread = self.ui.inboxContextMenuToolbar.addAction(
_translate(
"MainWindow", "Mark Unread"), self.on_action_InboxMarkUnread)
self.ui.tableWidgetInbox.setContextMenuPolicy(
QtCore.Qt.CustomContextMenu)
self.connect(self.ui.tableWidgetInbox, QtCore.SIGNAL(
'customContextMenuRequested(const QPoint&)'),
self.on_context_menuInbox)
self.popMenuInbox = QtGui.QMenu(self)
self.popMenuInbox.addAction(self.actionForceHtml)
self.popMenuInbox.addAction(self.actionMarkUnread)
self.popMenuInbox.addSeparator()
self.popMenuInbox.addAction(self.actionReply)
self.popMenuInbox.addAction(self.actionAddSenderToAddressBook)
self.popMenuInbox.addSeparator()
self.popMenuInbox.addAction(self.actionSaveMessageAs)
self.popMenuInbox.addAction(self.actionTrashInboxMessage)
def init_identities_popup_menu(self):
# Popup menu for the Your Identities tab
self.ui.addressContextMenuToolbar = QtGui.QToolBar()
# Actions
self.actionNew = self.ui.addressContextMenuToolbar.addAction(_translate(
"MainWindow", "New"), self.on_action_YourIdentitiesNew)
self.actionEnable = self.ui.addressContextMenuToolbar.addAction(
_translate(
"MainWindow", "Enable"), self.on_action_YourIdentitiesEnable)
self.actionDisable = self.ui.addressContextMenuToolbar.addAction(
_translate(
"MainWindow", "Disable"), self.on_action_YourIdentitiesDisable)
self.actionSetAvatar = self.ui.addressContextMenuToolbar.addAction(
_translate(
"MainWindow", "Set avatar..."),
self.on_action_YourIdentitiesSetAvatar)
self.actionClipboard = self.ui.addressContextMenuToolbar.addAction(
_translate(
"MainWindow", "Copy address to clipboard"),
self.on_action_YourIdentitiesClipboard)
self.actionSpecialAddressBehavior = self.ui.addressContextMenuToolbar.addAction(
_translate(
"MainWindow", "Special address behavior..."),
self.on_action_SpecialAddressBehaviorDialog)
self.ui.tableWidgetYourIdentities.setContextMenuPolicy(
QtCore.Qt.CustomContextMenu)
self.connect(self.ui.tableWidgetYourIdentities, QtCore.SIGNAL(
'customContextMenuRequested(const QPoint&)'),
self.on_context_menuYourIdentities)
self.popMenu = QtGui.QMenu(self)
self.popMenu.addAction(self.actionNew)
self.popMenu.addSeparator()
self.popMenu.addAction(self.actionClipboard)
self.popMenu.addSeparator()
self.popMenu.addAction(self.actionEnable)
self.popMenu.addAction(self.actionDisable)
self.popMenu.addAction(self.actionSetAvatar)
self.popMenu.addAction(self.actionSpecialAddressBehavior)
def init_addressbook_popup_menu(self):
# Popup menu for the Address Book page
self.ui.addressBookContextMenuToolbar = QtGui.QToolBar()
# Actions
self.actionAddressBookSend = self.ui.addressBookContextMenuToolbar.addAction(
_translate(
"MainWindow", "Send message to this address"),
self.on_action_AddressBookSend)
self.actionAddressBookClipboard = self.ui.addressBookContextMenuToolbar.addAction(
_translate(
"MainWindow", "Copy address to clipboard"),
self.on_action_AddressBookClipboard)
self.actionAddressBookSubscribe = self.ui.addressBookContextMenuToolbar.addAction(
_translate(
"MainWindow", "Subscribe to this address"),
self.on_action_AddressBookSubscribe)
self.actionAddressBookSetAvatar = self.ui.addressBookContextMenuToolbar.addAction(
_translate(
"MainWindow", "Set avatar..."),
self.on_action_AddressBookSetAvatar)
self.actionAddressBookNew = self.ui.addressBookContextMenuToolbar.addAction(
_translate(
"MainWindow", "Add New Address"), self.on_action_AddressBookNew)
self.actionAddressBookDelete = self.ui.addressBookContextMenuToolbar.addAction(
_translate(
"MainWindow", "Delete"), self.on_action_AddressBookDelete)
self.ui.tableWidgetAddressBook.setContextMenuPolicy(
QtCore.Qt.CustomContextMenu)
self.connect(self.ui.tableWidgetAddressBook, QtCore.SIGNAL(
'customContextMenuRequested(const QPoint&)'),
self.on_context_menuAddressBook)
self.popMenuAddressBook = QtGui.QMenu(self)
self.popMenuAddressBook.addAction(self.actionAddressBookSend)
self.popMenuAddressBook.addAction(self.actionAddressBookClipboard)
self.popMenuAddressBook.addAction(self.actionAddressBookSubscribe)
self.popMenuAddressBook.addAction(self.actionAddressBookSetAvatar)
self.popMenuAddressBook.addSeparator()
self.popMenuAddressBook.addAction(self.actionAddressBookNew)
self.popMenuAddressBook.addAction(self.actionAddressBookDelete)
def init_subscriptions_popup_menu(self):
# Popup menu for the Subscriptions page
self.ui.subscriptionsContextMenuToolbar = QtGui.QToolBar()
# Actions
self.actionsubscriptionsNew = self.ui.subscriptionsContextMenuToolbar.addAction(
_translate("MainWindow", "New"), self.on_action_SubscriptionsNew)
self.actionsubscriptionsDelete = self.ui.subscriptionsContextMenuToolbar.addAction(
_translate("MainWindow", "Delete"),
self.on_action_SubscriptionsDelete)
self.actionsubscriptionsClipboard = self.ui.subscriptionsContextMenuToolbar.addAction(
_translate("MainWindow", "Copy address to clipboard"),
self.on_action_SubscriptionsClipboard)
self.actionsubscriptionsEnable = self.ui.subscriptionsContextMenuToolbar.addAction(
_translate("MainWindow", "Enable"),
self.on_action_SubscriptionsEnable)
self.actionsubscriptionsDisable = self.ui.subscriptionsContextMenuToolbar.addAction(
_translate("MainWindow", "Disable"),
self.on_action_SubscriptionsDisable)
self.actionsubscriptionsSetAvatar = self.ui.subscriptionsContextMenuToolbar.addAction(
_translate("MainWindow", "Set avatar..."),
self.on_action_SubscriptionsSetAvatar)
self.ui.tableWidgetSubscriptions.setContextMenuPolicy(
QtCore.Qt.CustomContextMenu)
self.connect(self.ui.tableWidgetSubscriptions, QtCore.SIGNAL(
'customContextMenuRequested(const QPoint&)'),
self.on_context_menuSubscriptions)
self.popMenuSubscriptions = QtGui.QMenu(self)
self.popMenuSubscriptions.addAction(self.actionsubscriptionsNew)
self.popMenuSubscriptions.addAction(self.actionsubscriptionsDelete)
self.popMenuSubscriptions.addSeparator()
self.popMenuSubscriptions.addAction(self.actionsubscriptionsEnable)
self.popMenuSubscriptions.addAction(self.actionsubscriptionsDisable)
self.popMenuSubscriptions.addAction(self.actionsubscriptionsSetAvatar)
self.popMenuSubscriptions.addSeparator()
self.popMenuSubscriptions.addAction(self.actionsubscriptionsClipboard)
def init_sent_popup_menu(self):
# Popup menu for the Sent page
self.ui.sentContextMenuToolbar = QtGui.QToolBar()
# Actions
self.actionTrashSentMessage = self.ui.sentContextMenuToolbar.addAction(
_translate(
"MainWindow", "Move to Trash"), self.on_action_SentTrash)
self.actionSentClipboard = self.ui.sentContextMenuToolbar.addAction(
_translate(
"MainWindow", "Copy destination address to clipboard"),
self.on_action_SentClipboard)
self.actionForceSend = self.ui.sentContextMenuToolbar.addAction(
_translate(
"MainWindow", "Force send"), self.on_action_ForceSend)
self.ui.tableWidgetSent.setContextMenuPolicy(
QtCore.Qt.CustomContextMenu)
self.connect(self.ui.tableWidgetSent, QtCore.SIGNAL(
'customContextMenuRequested(const QPoint&)'),
self.on_context_menuSent)
# self.popMenuSent = QtGui.QMenu( self )
# self.popMenuSent.addAction( self.actionSentClipboard )
# self.popMenuSent.addAction( self.actionTrashSentMessage )
def init_blacklist_popup_menu(self):
# Popup menu for the Blacklist page
self.ui.blacklistContextMenuToolbar = QtGui.QToolBar()
# Actions
self.actionBlacklistNew = self.ui.blacklistContextMenuToolbar.addAction(
_translate(
"MainWindow", "Add new entry"), self.on_action_BlacklistNew)
self.actionBlacklistDelete = self.ui.blacklistContextMenuToolbar.addAction(
_translate(
"MainWindow", "Delete"), self.on_action_BlacklistDelete)
self.actionBlacklistClipboard = self.ui.blacklistContextMenuToolbar.addAction(
_translate(
"MainWindow", "Copy address to clipboard"),
self.on_action_BlacklistClipboard)
self.actionBlacklistEnable = self.ui.blacklistContextMenuToolbar.addAction(
_translate(
"MainWindow", "Enable"), self.on_action_BlacklistEnable)
self.actionBlacklistDisable = self.ui.blacklistContextMenuToolbar.addAction(
_translate(
"MainWindow", "Disable"), self.on_action_BlacklistDisable)
self.actionBlacklistSetAvatar = self.ui.blacklistContextMenuToolbar.addAction(
_translate(
"MainWindow", "Set avatar..."),
self.on_action_BlacklistSetAvatar)
self.ui.tableWidgetBlacklist.setContextMenuPolicy(
QtCore.Qt.CustomContextMenu)
self.connect(self.ui.tableWidgetBlacklist, QtCore.SIGNAL(
'customContextMenuRequested(const QPoint&)'),
self.on_context_menuBlacklist)
self.popMenuBlacklist = QtGui.QMenu(self)
# self.popMenuBlacklist.addAction( self.actionBlacklistNew )
self.popMenuBlacklist.addAction(self.actionBlacklistDelete)
self.popMenuBlacklist.addSeparator()
self.popMenuBlacklist.addAction(self.actionBlacklistClipboard)
self.popMenuBlacklist.addSeparator()
self.popMenuBlacklist.addAction(self.actionBlacklistEnable)
self.popMenuBlacklist.addAction(self.actionBlacklistDisable)
self.popMenuBlacklist.addAction(self.actionBlacklistSetAvatar)
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
# Ask the user if we may delete their old version 1 addresses if they
# have any.
configSections = shared.config.sections()
for addressInKeysFile in configSections:
if addressInKeysFile != 'bitmessagesettings':
status, addressVersionNumber, streamNumber, hash = decodeAddress(
addressInKeysFile)
if addressVersionNumber == 1:
displayMsg = _translate(
"MainWindow", "One of your addresses, %1, is an old version 1 address. Version 1 addresses are no longer supported. "
+ "May we delete it now?").arg(addressInKeysFile)
reply = QtGui.QMessageBox.question(
self, 'Message', displayMsg, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
shared.config.remove_section(addressInKeysFile)
with open(shared.appdata + 'keys.dat', 'wb') as configfile:
shared.config.write(configfile)
# Configure Bitmessage to start on startup (or remove the
# configuration) based on the setting in the keys.dat file
if 'win32' in sys.platform or 'win64' in sys.platform:
# Auto-startup for Windows
RUN_PATH = "HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\Run"
self.settings = QSettings(RUN_PATH, QSettings.NativeFormat)
# In case the user moved the program and the registry entry is no longer
# valid, delete the old entry here before (possibly) recreating it below.
self.settings.remove("PyBitmessage")
if shared.config.getboolean('bitmessagesettings', 'startonlogon'):
self.settings.setValue("PyBitmessage", sys.argv[0])
elif 'darwin' in sys.platform:
# startup for mac
pass
elif 'linux' in sys.platform:
# startup for linux
pass
self.totalNumberOfBytesReceived = 0
self.totalNumberOfBytesSent = 0
self.ui.labelSendBroadcastWarning.setVisible(False)
self.timer = QtCore.QTimer()
self.timer.start(2000) # milliseconds
QtCore.QObject.connect(self.timer, QtCore.SIGNAL("timeout()"), self.runEveryTwoSeconds)
self.init_file_menu()
self.init_inbox_popup_menu()
self.init_identities_popup_menu()
self.init_addressbook_popup_menu()
self.init_subscriptions_popup_menu()
self.init_sent_popup_menu()
self.init_blacklist_popup_menu()
# Initialize the user's list of addresses on the 'Your Identities' tab.
configSections = shared.config.sections()
for addressInKeysFile in configSections:
if addressInKeysFile != 'bitmessagesettings':
isEnabled = shared.config.getboolean(
addressInKeysFile, 'enabled')
newItem = QtGui.QTableWidgetItem(unicode(
shared.config.get(addressInKeysFile, 'label'), 'utf-8'))
if not isEnabled:
newItem.setTextColor(QtGui.QColor(128, 128, 128))
self.ui.tableWidgetYourIdentities.insertRow(0)
newItem.setIcon(avatarize(addressInKeysFile))
self.ui.tableWidgetYourIdentities.setItem(0, 0, newItem)
newItem = QtGui.QTableWidgetItem(addressInKeysFile)
newItem.setFlags(
QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
if shared.safeConfigGetBoolean(addressInKeysFile, 'chan'):
newItem.setTextColor(QtGui.QColor(216, 119, 0)) # orange
if not isEnabled:
newItem.setTextColor(QtGui.QColor(128, 128, 128))
if shared.safeConfigGetBoolean(addressInKeysFile, 'mailinglist'):
newItem.setTextColor(QtGui.QColor(137, 4, 177)) # magenta
self.ui.tableWidgetYourIdentities.setItem(0, 1, newItem)
newItem = QtGui.QTableWidgetItem(str(
decodeAddress(addressInKeysFile)[2]))
newItem.setFlags(
QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
if not isEnabled:
newItem.setTextColor(QtGui.QColor(128, 128, 128))
self.ui.tableWidgetYourIdentities.setItem(0, 2, newItem)
if isEnabled:
status, addressVersionNumber, streamNumber, hash = decodeAddress(
addressInKeysFile)
# Load inbox from messages database file
self.loadInbox()
# Load Sent items from database
self.loadSent()
# Initialize the address book
self.rerenderAddressBook()
# Initialize the Subscriptions
self.rerenderSubscriptions()
self.init_voting()
# Initialize the inbox search
QtCore.QObject.connect(self.ui.inboxSearchLineEdit, QtCore.SIGNAL(
"returnPressed()"), self.inboxSearchLineEditPressed)
# Initialize the sent search
QtCore.QObject.connect(self.ui.sentSearchLineEdit, QtCore.SIGNAL(
"returnPressed()"), self.sentSearchLineEditPressed)
# Initialize the Blacklist or Whitelist
if shared.config.get('bitmessagesettings', 'blackwhitelist') == 'black':
self.loadBlackWhiteList()
else:
self.ui.tabWidget.setTabText(6, 'Whitelist')
self.ui.radioButtonWhitelist.click()
self.loadBlackWhiteList()
QtCore.QObject.connect(self.ui.tableWidgetYourIdentities, QtCore.SIGNAL(
"itemChanged(QTableWidgetItem *)"), self.tableWidgetYourIdentitiesItemChanged)
QtCore.QObject.connect(self.ui.tableWidgetAddressBook, QtCore.SIGNAL(
"itemChanged(QTableWidgetItem *)"), self.tableWidgetAddressBookItemChanged)
QtCore.QObject.connect(self.ui.tableWidgetSubscriptions, QtCore.SIGNAL(
"itemChanged(QTableWidgetItem *)"), self.tableWidgetSubscriptionsItemChanged)
QtCore.QObject.connect(self.ui.tableWidgetInbox, QtCore.SIGNAL(
"itemSelectionChanged ()"), self.tableWidgetInboxItemClicked)
QtCore.QObject.connect(self.ui.tableWidgetSent, QtCore.SIGNAL(
"itemSelectionChanged ()"), self.tableWidgetSentItemClicked)
# Put the colored icon on the status bar
# self.ui.pushButtonStatusIcon.setIcon(QIcon(":/newPrefix/images/yellowicon.png"))
self.statusbar = self.statusBar()
self.statusbar.insertPermanentWidget(0, self.ui.pushButtonStatusIcon)
self.ui.labelStartupTime.setText(_translate("MainWindow", "Since startup on %1").arg(
l10n.formatTimestamp()))
self.numberOfMessagesProcessed = 0
self.numberOfBroadcastsProcessed = 0
self.numberOfPubkeysProcessed = 0
# Set the icon sizes for the identicons
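# 3*7 = 21 px; presumably 3 px per cell of a 7x7 identicon grid (an
# assumption, not documented here).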
identicon_size = 3*7
self.ui.tableWidgetInbox.setIconSize(QtCore.QSize(identicon_size, identicon_size))
self.ui.tableWidgetSent.setIconSize(QtCore.QSize(identicon_size, identicon_size))
self.ui.tableWidgetYourIdentities.setIconSize(QtCore.QSize(identicon_size, identicon_size))
self.ui.tableWidgetSubscriptions.setIconSize(QtCore.QSize(identicon_size, identicon_size))
self.ui.tableWidgetAddressBook.setIconSize(QtCore.QSize(identicon_size, identicon_size))
self.ui.tableWidgetBlacklist.setIconSize(QtCore.QSize(identicon_size, identicon_size))
self.UISignalThread = UISignaler()
QtCore.QObject.connect(self.UISignalThread, QtCore.SIGNAL(
"writeNewAddressToTable(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"), self.writeNewAddressToTable)
QtCore.QObject.connect(self.UISignalThread, QtCore.SIGNAL(
"updateStatusBar(PyQt_PyObject)"), self.updateStatusBar)
QtCore.QObject.connect(self.UISignalThread, QtCore.SIGNAL(
"updateSentItemStatusByHash(PyQt_PyObject,PyQt_PyObject)"), self.updateSentItemStatusByHash)
QtCore.QObject.connect(self.UISignalThread, QtCore.SIGNAL(
"updateSentItemStatusByAckdata(PyQt_PyObject,PyQt_PyObject)"), self.updateSentItemStatusByAckdata)
QtCore.QObject.connect(self.UISignalThread, QtCore.SIGNAL(
"displayNewInboxMessage(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"), self.displayNewInboxMessage)
QtCore.QObject.connect(self.UISignalThread, QtCore.SIGNAL(
"displayNewSentMessage(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"), self.displayNewSentMessage)
QtCore.QObject.connect(self.UISignalThread, QtCore.SIGNAL(
"updateNetworkStatusTab()"), self.updateNetworkStatusTab)
QtCore.QObject.connect(self.UISignalThread, QtCore.SIGNAL(
"updateNumberOfMessagesProcessed()"), self.updateNumberOfMessagesProcessed)
QtCore.QObject.connect(self.UISignalThread, QtCore.SIGNAL(
"updateNumberOfPubkeysProcessed()"), self.updateNumberOfPubkeysProcessed)
QtCore.QObject.connect(self.UISignalThread, QtCore.SIGNAL(
"updateNumberOfBroadcastsProcessed()"), self.updateNumberOfBroadcastsProcessed)
QtCore.QObject.connect(self.UISignalThread, QtCore.SIGNAL(
"setStatusIcon(PyQt_PyObject)"), self.setStatusIcon)
QtCore.QObject.connect(self.UISignalThread, QtCore.SIGNAL(
"changedInboxUnread(PyQt_PyObject)"), self.changedInboxUnread)
QtCore.QObject.connect(self.UISignalThread, QtCore.SIGNAL(
"rerenderInboxFromLabels()"), self.rerenderInboxFromLabels)
QtCore.QObject.connect(self.UISignalThread, QtCore.SIGNAL(
"rerenderSentToLabels()"), self.rerenderSentToLabels)
QtCore.QObject.connect(self.UISignalThread, QtCore.SIGNAL(
"rerenderAddressBook()"), self.rerenderAddressBook)
QtCore.QObject.connect(self.UISignalThread, QtCore.SIGNAL(
"rerenderSubscriptions()"), self.rerenderSubscriptions)
QtCore.QObject.connect(self.UISignalThread, QtCore.SIGNAL(
"removeInboxRowByMsgid(PyQt_PyObject)"), self.removeInboxRowByMsgid)
QtCore.QObject.connect(self.UISignalThread, QtCore.SIGNAL(
"refresh_election_ui(PyQt_PyObject)"), self.refresh_election_ui)
QtCore.QObject.connect(self.UISignalThread, QtCore.SIGNAL(
"election_initialized(PyQt_PyObject)"), self.election_initialized)
QtCore.QObject.connect(self.UISignalThread, QtCore.SIGNAL(
"displayAlert(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"), self.displayAlert)
self.UISignalThread.start()
# Below this point, it would be good if all of the necessary global data
# structures were initialized.
self.rerenderComboBoxSendFrom()
# Check to see whether we can connect to namecoin. Hide the 'Fetch Namecoin ID' button if we can't.
try:
options = {}
options["type"] = shared.config.get('bitmessagesettings', 'namecoinrpctype')
options["host"] = shared.config.get('bitmessagesettings', 'namecoinrpchost')
options["port"] = shared.config.get('bitmessagesettings', 'namecoinrpcport')
options["user"] = shared.config.get('bitmessagesettings', 'namecoinrpcuser')
options["password"] = shared.config.get('bitmessagesettings', 'namecoinrpcpassword')
nc = namecoinConnection(options)
if nc.test()[0] == 'failed':
self.ui.pushButtonFetchNamecoinID.hide()
except:
print 'There was a problem testing for a Namecoin daemon. Hiding the Fetch Namecoin ID button'
self.ui.pushButtonFetchNamecoinID.hide()
# Show or hide the application window after clicking an item within the
# tray icon or, on Windows, the tray icon itself.
def appIndicatorShowOrHideWindow(self):
if not self.actionShow.isChecked():
self.hide()
else:
if sys.platform[0:3] == 'win':
self.setWindowFlags(Qt.Window)
# else:
# self.showMaximized()
self.show()
self.setWindowState(
self.windowState() & ~QtCore.Qt.WindowMinimized | QtCore.Qt.WindowActive)
self.activateWindow()
# pointer to the application
# app = None
# The most recent message
newMessageItem = None
# The most recent broadcast
newBroadcastItem = None
# show the application window
def appIndicatorShow(self):
if self.actionShow is None:
return
if not self.actionShow.isChecked():
self.actionShow.setChecked(True)
self.appIndicatorShowOrHideWindow()
# unchecks the show item on the application indicator
def appIndicatorHide(self):
if self.actionShow is None:
return
if self.actionShow.isChecked():
self.actionShow.setChecked(False)
self.appIndicatorShowOrHideWindow()
"""# application indicator show or hide
def appIndicatorShowBitmessage(self):
#if self.actionShow == None:
# return
print self.actionShow.isChecked()
if not self.actionShow.isChecked():
self.hide()
#self.setWindowState(self.windowState() & QtCore.Qt.WindowMinimized)
else:
self.appIndicatorShowOrHideWindow()"""
# Show the program window and select inbox tab
def appIndicatorInbox(self, mm_app, source_id):
self.appIndicatorShow()
# select inbox
self.ui.tabWidget.setCurrentIndex(0)
selectedItem = None
if source_id == 'Subscriptions':
# select unread broadcast
if self.newBroadcastItem is not None:
selectedItem = self.newBroadcastItem
self.newBroadcastItem = None
else:
# select unread message
if self.newMessageItem is not None:
selectedItem = self.newMessageItem
self.newMessageItem = None
# make it the current item
if selectedItem is not None:
try:
self.ui.tableWidgetInbox.setCurrentItem(selectedItem)
except Exception:
self.ui.tableWidgetInbox.setCurrentCell(0, 0)
self.tableWidgetInboxItemClicked()
else:
# just select the first item
self.ui.tableWidgetInbox.setCurrentCell(0, 0)
self.tableWidgetInboxItemClicked()
# Show the program window and select send tab
def appIndicatorSend(self):
self.appIndicatorShow()
self.ui.tabWidget.setCurrentIndex(1)
# Show the program window and select subscriptions tab
def appIndicatorSubscribe(self):
self.appIndicatorShow()
self.ui.tabWidget.setCurrentIndex(4)
# Show the program window and select the address book tab
def appIndicatorAddressBook(self):
self.appIndicatorShow()
self.ui.tabWidget.setCurrentIndex(5)
# Load Sent items from database
def loadSent(self, where="", what=""):
what = "%" + what + "%"
if where == "To":
where = "toaddress"
elif where == "From":
where = "fromaddress"
elif where == "Subject":
where = "subject"
elif where == "Message":
where = "message"
else:
where = "toaddress || fromaddress || subject || message"
sqlStatement = '''
SELECT toaddress, fromaddress, subject, status, ackdata, lastactiontime
FROM sent WHERE folder="sent" AND %s LIKE ? AND encodingtype != %d
ORDER BY lastactiontime
''' % (where, ConsensusProtocol.ENCODING_TYPE)
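# Note: `where` is interpolated into the SQL, but it can only take one of
# the fixed column names chosen above, never user input; the user-supplied
# search text travels separately as the bound `what` parameter.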
while self.ui.tableWidgetSent.rowCount() > 0:
self.ui.tableWidgetSent.removeRow(0)
queryreturn = sqlQuery(sqlStatement, what)
for row in queryreturn:
toAddress, fromAddress, subject, status, ackdata, lastactiontime = row
subject = shared.fixPotentiallyInvalidUTF8Data(subject)
if shared.config.has_section(fromAddress):
fromLabel = shared.config.get(fromAddress, 'label')
else:
fromLabel = fromAddress
toLabel = ''
queryreturn = sqlQuery(
'''select label from addressbook where address=?''', toAddress)
if queryreturn != []:
for row in queryreturn:
toLabel, = row
if toLabel == '':
# It might be a broadcast message. We should check for that
# label.
queryreturn = sqlQuery(
'''select label from subscriptions where address=?''', toAddress)
if queryreturn != []:
for row in queryreturn:
toLabel, = row
if toLabel == '':
if shared.config.has_section(toAddress):
toLabel = shared.config.get(toAddress, 'label')
if toLabel == '':
toLabel = toAddress
self.ui.tableWidgetSent.insertRow(0)
toAddressItem = QtGui.QTableWidgetItem(unicode(toLabel, 'utf-8'))
toAddressItem.setToolTip(unicode(toLabel, 'utf-8'))
toAddressItem.setIcon(avatarize(toAddress))
toAddressItem.setData(Qt.UserRole, str(toAddress))
toAddressItem.setFlags(
QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
self.ui.tableWidgetSent.setItem(0, 0, toAddressItem)
if fromLabel == '':
fromLabel = fromAddress
fromAddressItem = QtGui.QTableWidgetItem(unicode(fromLabel, 'utf-8'))
fromAddressItem.setToolTip(unicode(fromLabel, 'utf-8'))
fromAddressItem.setIcon(avatarize(fromAddress))
fromAddressItem.setData(Qt.UserRole, str(fromAddress))
fromAddressItem.setFlags(
QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
self.ui.tableWidgetSent.setItem(0, 1, fromAddressItem)
subjectItem = QtGui.QTableWidgetItem(unicode(subject, 'utf-8'))
subjectItem.setToolTip(unicode(subject, 'utf-8'))
subjectItem.setFlags(
QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
self.ui.tableWidgetSent.setItem(0, 2, subjectItem)
if status == 'awaitingpubkey':
statusText = _translate(
"MainWindow", "Waiting for their encryption key. Will request it again soon.")
elif status == 'doingpowforpubkey':
statusText = _translate(
"MainWindow", "Encryption key request queued.")
elif status == 'msgqueued':
statusText = _translate(
"MainWindow", "Queued.")
elif status == 'msgsent':
statusText = _translate("MainWindow", "Message sent. Waiting for acknowledgement. Sent at %1").arg(
l10n.formatTimestamp(lastactiontime))
elif status == 'msgsentnoackexpected':
statusText = _translate("MainWindow", "Message sent. Sent at %1").arg(
l10n.formatTimestamp(lastactiontime))
elif status == 'doingmsgpow':
statusText = _translate(
"MainWindow", "Need to do work to send message. Work is queued.")
elif status == 'ackreceived':
statusText = _translate("MainWindow", "Acknowledgement of the message received %1").arg(
l10n.formatTimestamp(lastactiontime))
elif status == 'broadcastqueued':
statusText = _translate(
"MainWindow", "Broadcast queued.")
elif status == 'broadcastsent':
statusText = _translate("MainWindow", "Broadcast on %1").arg(
l10n.formatTimestamp(lastactiontime))
elif status == 'toodifficult':
statusText = _translate("MainWindow", "Problem: The work demanded by the recipient is more difficult than you are willing to do. %1").arg(
l10n.formatTimestamp(lastactiontime))
elif status == 'badkey':
statusText = _translate("MainWindow", "Problem: The recipient\'s encryption key is no good. Could not encrypt message. %1").arg(
l10n.formatTimestamp(lastactiontime))
elif status == 'forcepow':
statusText = _translate(
"MainWindow", "Forced difficulty override. Send should start soon.")
else:
statusText = _translate("MainWindow", "Unknown status: %1 %2").arg(status).arg(
l10n.formatTimestamp(lastactiontime))
newItem = myTableWidgetItem(statusText)
newItem.setToolTip(statusText)
newItem.setData(Qt.UserRole, QByteArray(ackdata))
newItem.setData(33, int(lastactiontime))
newItem.setFlags(
QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
self.ui.tableWidgetSent.setItem(0, 3, newItem)
self.ui.tableWidgetSent.sortItems(3, Qt.DescendingOrder)
self.ui.tableWidgetSent.keyPressEvent = self.tableWidgetSentKeyPressEvent
# Load inbox from messages database file
def loadInbox(self, where="", what=""):
what = "%" + what + "%"
if where == "To":
where = "toaddress"
elif where == "From":
where = "fromaddress"
elif where == "Subject":
where = "subject"
elif where == "Message":
where = "message"
else:
where = "toaddress || fromaddress || subject || message"
sqlStatement = '''
SELECT msgid, toaddress, fromaddress, subject, received, read
FROM inbox WHERE folder="inbox" AND %s LIKE ?
ORDER BY received
''' % (where,)
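# Same dynamic-WHERE pattern as loadSent: the column expression comes from
# fixed literals, and only the search text is bound as a parameter.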
while self.ui.tableWidgetInbox.rowCount() > 0:
self.ui.tableWidgetInbox.removeRow(0)
font = QFont()
font.setBold(True)
queryreturn = sqlQuery(sqlStatement, what)
for row in queryreturn:
msgid, toAddress, fromAddress, subject, received, read = row
subject = shared.fixPotentiallyInvalidUTF8Data(subject)
try:
if toAddress == self.str_broadcast_subscribers:
toLabel = self.str_broadcast_subscribers
else:
toLabel = shared.config.get(toAddress, 'label')
except:
toLabel = ''
if toLabel == '':
toLabel = toAddress
fromLabel = ''
if shared.config.has_section(fromAddress):
fromLabel = shared.config.get(fromAddress, 'label')
if fromLabel == '': # If the fromAddress isn't one of our addresses and isn't a chan
queryreturn = sqlQuery(
'''select label from addressbook where address=?''', fromAddress)
if queryreturn != []:
for row in queryreturn:
fromLabel, = row
if fromLabel == '': # If this address wasn't in our address book...
queryreturn = sqlQuery(
'''select label from subscriptions where address=?''', fromAddress)
if queryreturn != []:
for row in queryreturn:
fromLabel, = row
if fromLabel == '':
fromLabel = fromAddress
# message row
self.ui.tableWidgetInbox.insertRow(0)
# to
to_item = QtGui.QTableWidgetItem(unicode(toLabel, 'utf-8'))
to_item.setToolTip(unicode(toLabel, 'utf-8'))
to_item.setFlags(
QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
if not read:
to_item.setFont(font)
to_item.setData(Qt.UserRole, str(toAddress))
if shared.safeConfigGetBoolean(toAddress, 'mailinglist'):
to_item.setTextColor(QtGui.QColor(137, 4, 177)) # magenta
if shared.safeConfigGetBoolean(str(toAddress), 'chan'):
to_item.setTextColor(QtGui.QColor(216, 119, 0)) # orange
to_item.setIcon(avatarize(toAddress))
self.ui.tableWidgetInbox.setItem(0, 0, to_item)
# from
from_item = QtGui.QTableWidgetItem(unicode(fromLabel, 'utf-8'))
from_item.setToolTip(unicode(fromLabel, 'utf-8'))
from_item.setFlags(
QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
if not read:
from_item.setFont(font)
from_item.setData(Qt.UserRole, str(fromAddress))
if shared.safeConfigGetBoolean(str(fromAddress), 'chan'):
from_item.setTextColor(QtGui.QColor(216, 119, 0)) # orange
from_item.setIcon(avatarize(fromAddress))
self.ui.tableWidgetInbox.setItem(0, 1, from_item)
# subject
subject_item = QtGui.QTableWidgetItem(unicode(subject, 'utf-8'))
subject_item.setToolTip(unicode(subject, 'utf-8'))
subject_item.setFlags(
QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
if not read:
subject_item.setFont(font)
self.ui.tableWidgetInbox.setItem(0, 2, subject_item)
# time received
time_item = myTableWidgetItem(l10n.formatTimestamp(received))
time_item.setToolTip(l10n.formatTimestamp(received))
time_item.setData(Qt.UserRole, QByteArray(msgid))
time_item.setData(33, int(received))
time_item.setFlags(
QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
if not read:
time_item.setFont(font)
self.ui.tableWidgetInbox.setItem(0, 3, time_item)
self.ui.tableWidgetInbox.sortItems(3, Qt.DescendingOrder)
self.ui.tableWidgetInbox.keyPressEvent = self.tableWidgetInboxKeyPressEvent
# create application indicator
def appIndicatorInit(self, app):
self.initTrayIcon("can-icon-24px-red.png", app)
if sys.platform[0:3] == 'win':
traySignal = "activated(QSystemTrayIcon::ActivationReason)"
QtCore.QObject.connect(self.tray, QtCore.SIGNAL(
traySignal), self.__icon_activated)
m = QMenu()
self.actionStatus = QtGui.QAction(_translate(
"MainWindow", "Not Connected"), m, checkable=False)
m.addAction(self.actionStatus)
# separator
actionSeparator = QtGui.QAction('', m, checkable=False)
actionSeparator.setSeparator(True)
m.addAction(actionSeparator)
# show bitmessage
self.actionShow = QtGui.QAction(_translate(
"MainWindow", "Show Bitmessage"), m, checkable=True)
self.actionShow.setChecked(not shared.config.getboolean(
'bitmessagesettings', 'startintray'))
self.actionShow.triggered.connect(self.appIndicatorShowOrHideWindow)
if not sys.platform[0:3] == 'win':
m.addAction(self.actionShow)
# Send
actionSend = QtGui.QAction(_translate(
"MainWindow", "Send"), m, checkable=False)
actionSend.triggered.connect(self.appIndicatorSend)
m.addAction(actionSend)
# Subscribe
actionSubscribe = QtGui.QAction(_translate(
"MainWindow", "Subscribe"), m, checkable=False)
actionSubscribe.triggered.connect(self.appIndicatorSubscribe)
m.addAction(actionSubscribe)
# Address book
actionAddressBook = QtGui.QAction(_translate(
"MainWindow", "Address Book"), m, checkable=False)
actionAddressBook.triggered.connect(self.appIndicatorAddressBook)
m.addAction(actionAddressBook)
# separator
actionSeparator = QtGui.QAction('', m, checkable=False)
actionSeparator.setSeparator(True)
m.addAction(actionSeparator)
# Quit
m.addAction(_translate(
"MainWindow", "Quit"), self.quit)
self.tray.setContextMenu(m)
self.tray.show()
# Ubuntu Messaging menu object
mmapp = None
# is the operating system Ubuntu?
def isUbuntu(self):
for entry in platform.uname():
if "Ubuntu" in entry:
return True
return False
# When an unread inbox row is selected, clear the messaging menu
def ubuntuMessagingMenuClear(self, inventoryHash):
global withMessagingMenu
# if this isn't ubuntu then don't do anything
if not self.isUbuntu():
return
# has the messaging menu been installed?
if not withMessagingMenu:
return
# if there are no items on the messaging menu then
# the subsequent query can be avoided
if not (self.mmapp.has_source("Subscriptions") or self.mmapp.has_source("Messages")):
return
queryreturn = sqlQuery(
'''SELECT toaddress, read FROM inbox WHERE msgid=?''', inventoryHash)
for row in queryreturn:
toAddress, read = row
if not read:
if toAddress == self.str_broadcast_subscribers:
if self.mmapp.has_source("Subscriptions"):
self.mmapp.remove_source("Subscriptions")
else:
if self.mmapp.has_source("Messages"):
self.mmapp.remove_source("Messages")
# returns the number of unread messages and subscriptions
def getUnread(self):
unreadMessages = 0
unreadSubscriptions = 0
queryreturn = sqlQuery(
'''SELECT msgid, toaddress, read FROM inbox where folder='inbox' ''')
for row in queryreturn:
msgid, toAddress, read = row
try:
if toAddress == self.str_broadcast_subscribers:
toLabel = self.str_broadcast_subscribers
else:
toLabel = shared.config.get(toAddress, 'label')
except:
toLabel = ''
if toLabel == '':
toLabel = toAddress
if not read:
if toLabel == self.str_broadcast_subscribers:
# increment the unread subscriptions
unreadSubscriptions = unreadSubscriptions + 1
else:
# increment the unread messages
unreadMessages = unreadMessages + 1
return unreadMessages, unreadSubscriptions
# show the number of unread messages and subscriptions on the messaging
# menu
def ubuntuMessagingMenuUnread(self, drawAttention):
unreadMessages, unreadSubscriptions = self.getUnread()
# unread messages
if unreadMessages > 0:
self.mmapp.append_source(
"Messages", None, "Messages (" + str(unreadMessages) + ")")
if drawAttention:
self.mmapp.draw_attention("Messages")
# unread subscriptions
if unreadSubscriptions > 0:
self.mmapp.append_source("Subscriptions", None, "Subscriptions (" + str(
unreadSubscriptions) + ")")
if drawAttention:
self.mmapp.draw_attention("Subscriptions")
# initialise the Ubuntu messaging menu
def ubuntuMessagingMenuInit(self):
global withMessagingMenu
# if this isn't ubuntu then don't do anything
if not self.isUbuntu():
return
# has the messaging menu been installed?
if not withMessagingMenu:
print 'WARNING: MessagingMenu is not available. Is libmessaging-menu-dev installed?'
return
# create the menu server
if withMessagingMenu:
try:
self.mmapp = MessagingMenu.App(
desktop_id='pybitmessage.desktop')
self.mmapp.register()
self.mmapp.connect('activate-source', self.appIndicatorInbox)
self.ubuntuMessagingMenuUnread(True)
except Exception:
withMessagingMenu = False
print 'WARNING: messaging menu disabled'
# update the Ubuntu messaging menu
def ubuntuMessagingMenuUpdate(self, drawAttention, newItem, toLabel):
global withMessagingMenu
# if this isn't ubuntu then don't do anything
if not self.isUbuntu():
return
# has the messaging menu been installed?
if not withMessagingMenu:
print 'WARNING: messaging menu disabled or libmessaging-menu-dev not installed'
return
# remember this item so that the messaging menu can find it
if toLabel == self.str_broadcast_subscribers:
self.newBroadcastItem = newItem
else:
self.newMessageItem = newItem
# Remove previous messages and subscriptions entries, then recreate them
# There might be a better way to do it than this
if self.mmapp.has_source("Messages"):
self.mmapp.remove_source("Messages")
if self.mmapp.has_source("Subscriptions"):
self.mmapp.remove_source("Subscriptions")
# update the menu entries
self.ubuntuMessagingMenuUnread(drawAttention)
# returns true if the given sound category is a connection sound
# rather than a received message sound
def isConnectionSound(self, category):
if (category is self.SOUND_CONNECTED or
category is self.SOUND_DISCONNECTED or
category is self.SOUND_CONNECTION_GREEN):
return True
return False
# play a sound
def playSound(self, category, label):
# filename of the sound to be played
soundFilename = None
# whether to play a sound or not
play = True
# if the address had a known label in the address book
if label is not None:
# Does a sound file exist for this particular contact?
if (os.path.isfile(shared.appdata + 'sounds/' + label + '.wav') or
os.path.isfile(shared.appdata + 'sounds/' + label + '.mp3')):
soundFilename = shared.appdata + 'sounds/' + label
# Avoid making sounds more frequently than the threshold.
# This suppresses playing sounds repeatedly when there
# are many new messages
if (soundFilename is None and
not self.isConnectionSound(category)):
# elapsed time since the last sound was played
dt = datetime.datetime.now() - self.lastSoundTime
# suppress sounds which are more frequent than the threshold
if dt.total_seconds() < self.maxSoundFrequencySec:
play = False
if soundFilename is None:
# the sound is for an address which exists in the address book
if category is self.SOUND_KNOWN:
soundFilename = shared.appdata + 'sounds/known'
# the sound is for an unknown address
elif category is self.SOUND_UNKNOWN:
soundFilename = shared.appdata + 'sounds/unknown'
# initial connection sound
elif category is self.SOUND_CONNECTED:
soundFilename = shared.appdata + 'sounds/connected'
# disconnected sound
elif category is self.SOUND_DISCONNECTED:
soundFilename = shared.appdata + 'sounds/disconnected'
# sound when the connection status becomes green
elif category is self.SOUND_CONNECTION_GREEN:
soundFilename = shared.appdata + 'sounds/green'
if soundFilename is not None and play is True:
if not self.isConnectionSound(category):
# record the last time that a received message sound was played
self.lastSoundTime = datetime.datetime.now()
# if not wav then try mp3 format
if not os.path.isfile(soundFilename + '.wav'):
soundFilename = soundFilename + '.mp3'
else:
soundFilename = soundFilename + '.wav'
if os.path.isfile(soundFilename):
if 'linux' in sys.platform:
# Note: QSound was a nice idea but it didn't work
if '.mp3' in soundFilename:
gst_available=False
try:
subprocess.call(["gst123", soundFilename],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
gst_available=True
except:
print "WARNING: gst123 must be installed in order to play mp3 sounds"
if not gst_available:
try:
subprocess.call(["mpg123", soundFilename],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
gst_available=True
except:
print "WARNING: mpg123 must be installed in order to play mp3 sounds"
else:
try:
subprocess.call(["aplay", soundFilename],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
except:
print "WARNING: aplay must be installed in order to play WAV sounds"
elif sys.platform[0:3] == 'win':
# use winsound on Windows
import winsound
winsound.PlaySound(soundFilename, winsound.SND_FILENAME)
# initialise the message notifier
def notifierInit(self):
global withMessagingMenu
if withMessagingMenu:
Notify.init('pybitmessage')
# shows a notification
def notifierShow(self, title, subtitle, fromCategory, label):
global withMessagingMenu
self.playSound(fromCategory, label)
if withMessagingMenu:
n = Notify.Notification.new(
title, subtitle, 'notification-message-email')
try:
n.show()
except:
# n.show() has been known to throw this exception:
# gi._glib.GError: GDBus.Error:org.freedesktop.Notifications.
# MaxNotificationsExceeded: Exceeded maximum number of
# notifications
pass
return
else:
self.tray.showMessage(title, subtitle, 1, 2000)
def tableWidgetInboxKeyPressEvent(self, event):
if event.key() == QtCore.Qt.Key_Delete:
self.on_action_InboxTrash()
return QtGui.QTableWidget.keyPressEvent(self.ui.tableWidgetInbox, event)
def tableWidgetSentKeyPressEvent(self, event):
if event.key() == QtCore.Qt.Key_Delete:
self.on_action_SentTrash()
return QtGui.QTableWidget.keyPressEvent(self.ui.tableWidgetSent, event)
def click_actionManageKeys(self):
if 'darwin' in sys.platform or 'linux' in sys.platform:
if shared.appdata == '':
# reply = QtGui.QMessageBox.information(self, 'keys.dat?','You
# may manage your keys by editing the keys.dat file stored in
# the same directory as this program. It is important that you
# back up this file.', QMessageBox.Ok)
reply = QtGui.QMessageBox.information(self, 'keys.dat?', _translate(
"MainWindow", "You may manage your keys by editing the keys.dat file stored in the same directory as this program. It is important that you back up this file."), QMessageBox.Ok)
else:
QtGui.QMessageBox.information(self, 'keys.dat?', _translate(
"MainWindow", "You may manage your keys by editing the keys.dat file stored in\n %1 \nIt is important that you back up this file.").arg(shared.appdata), QMessageBox.Ok)
elif sys.platform == 'win32' or sys.platform == 'win64':
if shared.appdata == '':
reply = QtGui.QMessageBox.question(self, _translate("MainWindow", "Open keys.dat?"), _translate(
"MainWindow", "You may manage your keys by editing the keys.dat file stored in the same directory as this program. It is important that you back up this file. Would you like to open the file now? (Be sure to close Bitmessage before making any changes.)"), QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
else:
reply = QtGui.QMessageBox.question(self, _translate("MainWindow", "Open keys.dat?"), _translate(
"MainWindow", "You may manage your keys by editing the keys.dat file stored in\n %1 \nIt is important that you back up this file. Would you like to open the file now? (Be sure to close Bitmessage before making any changes.)").arg(shared.appdata), QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
self.openKeysFile()
def click_actionDeleteAllTrashedMessages(self):
if QtGui.QMessageBox.question(self, _translate("MainWindow", "Delete trash?"), _translate("MainWindow", "Are you sure you want to delete all trashed messages?"), QtGui.QMessageBox.Yes, QtGui.QMessageBox.No) == QtGui.QMessageBox.No:
return
sqlStoredProcedure('deleteandvacuume')
def click_actionRegenerateDeterministicAddresses(self):
self.regenerateAddressesDialogInstance = regenerateAddressesDialog(
self)
if self.regenerateAddressesDialogInstance.exec_():
if self.regenerateAddressesDialogInstance.ui.lineEditPassphrase.text() == "":
QMessageBox.about(self, _translate("MainWindow", "bad passphrase"), _translate(
"MainWindow", "You must type your passphrase. If you don\'t have one then this is not the form for you."))
return
streamNumberForAddress = int(
self.regenerateAddressesDialogInstance.ui.lineEditStreamNumber.text())
try:
addressVersionNumber = int(
self.regenerateAddressesDialogInstance.ui.lineEditAddressVersionNumber.text())
except:
QMessageBox.about(self, _translate("MainWindow", "Bad address version number"), _translate(
"MainWindow", "Your address version number must be a number: either 3 or 4."))
return
if addressVersionNumber < 3 or addressVersionNumber > 4:
QMessageBox.about(self, _translate("MainWindow", "Bad address version number"), _translate(
"MainWindow", "Your address version number must be either 3 or 4."))
return
shared.addressGeneratorQueue.put(('createDeterministicAddresses', addressVersionNumber, streamNumberForAddress, "regenerated deterministic address", self.regenerateAddressesDialogInstance.ui.spinBoxNumberOfAddressesToMake.value(
), self.regenerateAddressesDialogInstance.ui.lineEditPassphrase.text().toUtf8(), self.regenerateAddressesDialogInstance.ui.checkBoxEighteenByteRipe.isChecked()))
self.ui.tabWidget.setCurrentIndex(3)
def click_actionJoinChan(self):
self.newChanDialogInstance = newChanDialog(self)
if self.newChanDialogInstance.exec_():
if self.newChanDialogInstance.ui.radioButtonCreateChan.isChecked():
if self.newChanDialogInstance.ui.lineEditChanNameCreate.text() == "":
QMessageBox.about(self, _translate("MainWindow", "Chan name needed"), _translate(
"MainWindow", "You didn't enter a chan name."))
return
shared.apiAddressGeneratorReturnQueue.queue.clear()
shared.addressGeneratorQueue.put(('createChan', 4, 1, self.str_chan + ' ' + str(self.newChanDialogInstance.ui.lineEditChanNameCreate.text().toUtf8()), self.newChanDialogInstance.ui.lineEditChanNameCreate.text().toUtf8()))
addressGeneratorReturnValue = shared.apiAddressGeneratorReturnQueue.get()
print 'addressGeneratorReturnValue', addressGeneratorReturnValue
if len(addressGeneratorReturnValue) == 0:
QMessageBox.about(self, _translate("MainWindow", "Address already present"), _translate(
"MainWindow", "Could not add chan because it appears to already be one of your identities."))
return
createdAddress = addressGeneratorReturnValue[0]
QMessageBox.about(self, _translate("MainWindow", "Success"), _translate(
"MainWindow", "Successfully created chan. To let others join your chan, give them the chan name and this Bitmessage address: %1. This address also appears in 'Your Identities'.").arg(createdAddress))
self.ui.tabWidget.setCurrentIndex(3)
elif self.newChanDialogInstance.ui.radioButtonJoinChan.isChecked():
if self.newChanDialogInstance.ui.lineEditChanNameJoin.text() == "":
QMessageBox.about(self, _translate("MainWindow", "Chan name needed"), _translate(
"MainWindow", "You didn't enter a chan name."))
return
if decodeAddress(self.newChanDialogInstance.ui.lineEditChanBitmessageAddress.text())[0] == 'versiontoohigh':
QMessageBox.about(self, _translate("MainWindow", "Address too new"), _translate(
"MainWindow", "Although that Bitmessage address might be valid, its version number is too new for us to handle. Perhaps you need to upgrade Bitmessage."))
return
if decodeAddress(self.newChanDialogInstance.ui.lineEditChanBitmessageAddress.text())[0] != 'success':
QMessageBox.about(self, _translate("MainWindow", "Address invalid"), _translate(
"MainWindow", "That Bitmessage address is not valid."))
return
shared.apiAddressGeneratorReturnQueue.queue.clear()
shared.addressGeneratorQueue.put(('joinChan', addBMIfNotPresent(self.newChanDialogInstance.ui.lineEditChanBitmessageAddress.text()), self.str_chan + ' ' + str(self.newChanDialogInstance.ui.lineEditChanNameJoin.text().toUtf8()), self.newChanDialogInstance.ui.lineEditChanNameJoin.text().toUtf8()))
addressGeneratorReturnValue = shared.apiAddressGeneratorReturnQueue.get()
print 'addressGeneratorReturnValue', addressGeneratorReturnValue
if addressGeneratorReturnValue == 'chan name does not match address':
QMessageBox.about(self, _translate("MainWindow", "Address does not match chan name"), _translate(
"MainWindow", "Although the Bitmessage address you entered was valid, it doesn\'t match the chan name."))
return
if len(addressGeneratorReturnValue) == 0:
QMessageBox.about(self, _translate("MainWindow", "Address already present"), _translate(
"MainWindow", "Could not add chan because it appears to already be one of your identities."))
return
createdAddress = addressGeneratorReturnValue[0]
QMessageBox.about(self, _translate("MainWindow", "Success"), _translate(
"MainWindow", "Successfully joined chan. "))
self.ui.tabWidget.setCurrentIndex(3)
def showConnectDialog(self):
self.connectDialogInstance = connectDialog(self)
if self.connectDialogInstance.exec_():
if self.connectDialogInstance.ui.radioButtonConnectNow.isChecked():
shared.config.remove_option('bitmessagesettings', 'dontconnect')
with open(shared.appdata + 'keys.dat', 'wb') as configfile:
shared.config.write(configfile)
else:
self.click_actionSettings()
def openKeysFile(self):
if 'linux' in sys.platform:
subprocess.call(["xdg-open", shared.appdata + 'keys.dat'])
else:
os.startfile(shared.appdata + 'keys.dat')
def changeEvent(self, event):
if event.type() == QtCore.QEvent.WindowStateChange:
if self.windowState() & QtCore.Qt.WindowMinimized:
self.actionShow.setChecked(False)
if shared.config.getboolean('bitmessagesettings', 'minimizetotray') and not 'darwin' in sys.platform:
if event.type() == QtCore.QEvent.WindowStateChange:
if self.windowState() & QtCore.Qt.WindowMinimized:
self.appIndicatorHide()
if 'win32' in sys.platform or 'win64' in sys.platform:
self.setWindowFlags(Qt.ToolTip)
elif event.oldState() & QtCore.Qt.WindowMinimized:
# The window state has just been changed to
# Normal/Maximised/FullScreen
pass
# QtGui.QWidget.changeEvent(self, event)
def __icon_activated(self, reason):
if reason == QtGui.QSystemTrayIcon.Trigger:
self.actionShow.setChecked(not self.actionShow.isChecked())
self.appIndicatorShowOrHideWindow()
def updateNumberOfMessagesProcessed(self):
self.ui.labelMessageCount.setText(_translate(
"MainWindow", "Processed %1 person-to-person messages.").arg(str(shared.numberOfMessagesProcessed)))
def updateNumberOfBroadcastsProcessed(self):
self.ui.labelBroadcastCount.setText(_translate(
"MainWindow", "Processed %1 broadcast messages.").arg(str(shared.numberOfBroadcastsProcessed)))
def updateNumberOfPubkeysProcessed(self):
self.ui.labelPubkeyCount.setText(_translate(
"MainWindow", "Processed %1 public keys.").arg(str(shared.numberOfPubkeysProcessed)))
def formatBytes(self, num):
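# Scales by powers of 1000 (decimal units), e.g. formatBytes(1234) -> "  1 KB".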
for x in ['bytes','KB','MB','GB']:
if num < 1000.0:
return "%3.0f %s" % (num, x)
num /= 1000.0
return "%3.0f %s" % (num, 'TB')
def formatByteRate(self, num):
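# Always reports in KB regardless of magnitude; callers pass a
# bytes-per-second figure.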
num /= 1000
return "%4.0f KB" % num
def updateNumberOfBytes(self):
"""
This function is run every two seconds, so we divide the rate of bytes
sent and received by 2.
"""
self.ui.labelBytesRecvCount.setText(_translate(
"MainWindow", "Down: %1/s Total: %2").arg(self.formatByteRate(shared.numberOfBytesReceived/2), self.formatBytes(self.totalNumberOfBytesReceived)))
self.ui.labelBytesSentCount.setText(_translate(
"MainWindow", "Up: %1/s Total: %2").arg(self.formatByteRate(shared.numberOfBytesSent/2), self.formatBytes(self.totalNumberOfBytesSent)))
self.totalNumberOfBytesReceived += shared.numberOfBytesReceived
self.totalNumberOfBytesSent += shared.numberOfBytesSent
shared.numberOfBytesReceived = 0
shared.numberOfBytesSent = 0
def updateNetworkStatusTab(self):
# print 'updating network status tab'
# One would think we could use len(sendDataQueues) for this, but the number
# doesn't always match: just because we have a sendDataThread running doesn't
# mean that the connection has been fully established (with the exchange of
# version messages).
totalNumberOfConnectionsFromAllStreams = 0
streamNumberTotals = {}
for host, streamNumber in shared.connectedHostsList.items():
if not streamNumber in streamNumberTotals:
streamNumberTotals[streamNumber] = 1
else:
streamNumberTotals[streamNumber] += 1
while self.ui.tableWidgetConnectionCount.rowCount() > 0:
self.ui.tableWidgetConnectionCount.removeRow(0)
for streamNumber, connectionCount in streamNumberTotals.items():
self.ui.tableWidgetConnectionCount.insertRow(0)
if streamNumber == 0:
newItem = QtGui.QTableWidgetItem("?")
else:
newItem = QtGui.QTableWidgetItem(str(streamNumber))
newItem.setFlags(
QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
self.ui.tableWidgetConnectionCount.setItem(0, 0, newItem)
newItem = QtGui.QTableWidgetItem(str(connectionCount))
newItem.setFlags(
QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
self.ui.tableWidgetConnectionCount.setItem(0, 1, newItem)
"""for currentRow in range(self.ui.tableWidgetConnectionCount.rowCount()):
rowStreamNumber = int(self.ui.tableWidgetConnectionCount.item(currentRow,0).text())
if streamNumber == rowStreamNumber:
foundTheRowThatNeedsUpdating = True
self.ui.tableWidgetConnectionCount.item(currentRow,1).setText(str(connectionCount))
#totalNumberOfConnectionsFromAllStreams += connectionCount
if foundTheRowThatNeedsUpdating == False:
#Add a line to the table for this stream number and update its count with the current connection count.
self.ui.tableWidgetConnectionCount.insertRow(0)
newItem = QtGui.QTableWidgetItem(str(streamNumber))
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
self.ui.tableWidgetConnectionCount.setItem(0,0,newItem)
newItem = QtGui.QTableWidgetItem(str(connectionCount))
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
self.ui.tableWidgetConnectionCount.setItem(0,1,newItem)
totalNumberOfConnectionsFromAllStreams += connectionCount"""
self.ui.labelTotalConnections.setText(_translate(
"MainWindow", "Total Connections: %1").arg(str(len(shared.connectedHostsList))))
# FYI: the 'singlelistener' thread sets the icon color to green when it
# receives an incoming connection, meaning that the user's firewall is
# configured correctly.
if len(shared.connectedHostsList) > 0 and shared.statusIconColor == 'red':
self.setStatusIcon('yellow')
elif len(shared.connectedHostsList) == 0:
self.setStatusIcon('red')
# timer driven
def runEveryTwoSeconds(self):
self.ui.labelLookupsPerSecond.setText(_translate(
"MainWindow", "Inventory lookups per second: %1").arg(str(shared.numberOfInventoryLookupsPerformed/2)))
shared.numberOfInventoryLookupsPerformed = 0
self.updateNumberOfBytes()
# Indicates whether or not there is a connection to the Bitmessage network
connected = False
def setStatusIcon(self, color):
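# Color legend: red = no connections, yellow = connected (outbound only),
# green = an incoming connection was received, which suggests the user's
# port is reachable (see the note in updateNetworkStatusTab).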
global withMessagingMenu
# print 'setting status icon color'
if color == 'red':
self.ui.pushButtonStatusIcon.setIcon(
QIcon(":/newPrefix/images/redicon.png"))
shared.statusIconColor = 'red'
# if the connection is lost then show a notification
if self.connected:
self.notifierShow('Bitmessage', unicode(_translate(
"MainWindow", "Connection lost").toUtf8(),'utf-8'),
self.SOUND_DISCONNECTED, None)
self.connected = False
if self.actionStatus is not None:
self.actionStatus.setText(_translate(
"MainWindow", "Not Connected"))
self.setTrayIconFile("can-icon-24px-red.png")
if color == 'yellow':
if self.statusBar().currentMessage() == 'Warning: You are currently not connected. Bitmessage will do the work necessary to send the message but it won\'t send until you connect.':
self.statusBar().showMessage('')
self.ui.pushButtonStatusIcon.setIcon(QIcon(
":/newPrefix/images/yellowicon.png"))
shared.statusIconColor = 'yellow'
# if a new connection has been established then show a notification
if not self.connected:
self.notifierShow('Bitmessage', unicode(_translate(
"MainWindow", "Connected").toUtf8(),'utf-8'),
self.SOUND_CONNECTED, None)
self.connected = True
if self.actionStatus is not None:
self.actionStatus.setText(_translate(
"MainWindow", "Connected"))
self.setTrayIconFile("can-icon-24px-yellow.png")
if color == 'green':
if self.statusBar().currentMessage() == 'Warning: You are currently not connected. Bitmessage will do the work necessary to send the message but it won\'t send until you connect.':
self.statusBar().showMessage('')
self.ui.pushButtonStatusIcon.setIcon(
QIcon(":/newPrefix/images/greenicon.png"))
shared.statusIconColor = 'green'
if not self.connected:
self.notifierShow('Bitmessage', unicode(_translate(
"MainWindow", "Connected").toUtf8(),'utf-8'),
self.SOUND_CONNECTION_GREEN, None)
self.connected = True
if self.actionStatus is not None:
self.actionStatus.setText(_translate(
"MainWindow", "Connected"))
self.setTrayIconFile("can-icon-24px-green.png")
def initTrayIcon(self, iconFileName, app):
self.currentTrayIconFileName = iconFileName
self.tray = QSystemTrayIcon(
self.calcTrayIcon(iconFileName, self.findInboxUnreadCount()), app)
def setTrayIconFile(self, iconFileName):
self.currentTrayIconFileName = iconFileName
self.drawTrayIcon(iconFileName, self.findInboxUnreadCount())
def calcTrayIcon(self, iconFileName, inboxUnreadCount):
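# Paints the unread count in red over the top-right corner of the tray
# pixmap; if the count renders wider than 20 px it is replaced with a "+".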
pixmap = QtGui.QPixmap(":/newPrefix/images/"+iconFileName)
if inboxUnreadCount > 0:
# choose font and calculate font parameters
fontName = "Lucida"
fontSize = 10
font = QtGui.QFont(fontName, fontSize, QtGui.QFont.Bold)
fontMetrics = QtGui.QFontMetrics(font)
# text
txt = str(inboxUnreadCount)
rect = fontMetrics.boundingRect(txt)
# margins that we add in the top-right corner
marginX = 2
marginY = 0 # -2 also appears to work, within the error of the font metrics
# if it renders too wide we need to change it to a plus symbol
if rect.width() > 20:
txt = "+"
fontSize = 15
font = QtGui.QFont(fontName, fontSize, QtGui.QFont.Bold)
fontMetrics = QtGui.QFontMetrics(font)
rect = fontMetrics.boundingRect(txt)
# draw text
painter = QPainter()
painter.begin(pixmap)
painter.setPen(QtGui.QPen(QtGui.QColor(255, 0, 0), Qt.SolidPattern))
painter.setFont(font)
painter.drawText(24-rect.right()-marginX, -rect.top()+marginY, txt)
painter.end()
return QtGui.QIcon(pixmap)
def drawTrayIcon(self, iconFileName, inboxUnreadCount):
self.tray.setIcon(self.calcTrayIcon(iconFileName, inboxUnreadCount))
def changedInboxUnread(self):
self.drawTrayIcon(self.currentTrayIconFileName, self.findInboxUnreadCount())
def findInboxUnreadCount(self):
queryreturn = sqlQuery('''SELECT count(*) from inbox WHERE folder='inbox' and read=0''')
cnt = 0
for row in queryreturn:
cnt, = row
return int(cnt)
def updateSentItemStatusByHash(self, toRipe, textToDisplay):
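# Match sent rows by the RIPE hash decoded from the row's 'To' address and
# update the status column's text and tooltip. A near-identical variant
# keyed on ackdata follows below.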
for i in range(self.ui.tableWidgetSent.rowCount()):
toAddress = str(self.ui.tableWidgetSent.item(
i, 0).data(Qt.UserRole).toPyObject())
status, addressVersionNumber, streamNumber, ripe = decodeAddress(
toAddress)
if ripe == toRipe:
self.ui.tableWidgetSent.item(i, 3).setToolTip(textToDisplay)
try:
newlinePosition = textToDisplay.indexOf('\n')
# If someone forgets to wrap a string in _translate before passing it
# here, textToDisplay is a plain str without indexOf(), which raises.
except:
newlinePosition = 0
if newlinePosition > 1:
self.ui.tableWidgetSent.item(i, 3).setText(
textToDisplay[:newlinePosition])
else:
self.ui.tableWidgetSent.item(i, 3).setText(textToDisplay)
def updateSentItemStatusByAckdata(self, ackdata, textToDisplay):
for i in range(self.ui.tableWidgetSent.rowCount()):
toAddress = str(self.ui.tableWidgetSent.item(
i, 0).data(Qt.UserRole).toPyObject())
tableAckdata = self.ui.tableWidgetSent.item(
i, 3).data(Qt.UserRole).toPyObject()
status, addressVersionNumber, streamNumber, ripe = decodeAddress(
toAddress)
if ackdata == tableAckdata:
self.ui.tableWidgetSent.item(i, 3).setToolTip(textToDisplay)
try:
newlinePosition = textToDisplay.indexOf('\n')
except: # see the note in updateSentItemStatusByHash above
newlinePosition = 0
if newlinePosition > 1:
self.ui.tableWidgetSent.item(i, 3).setText(
textToDisplay[:newlinePosition])
else:
self.ui.tableWidgetSent.item(i, 3).setText(textToDisplay)
def removeInboxRowByMsgid(self, msgid): # msgid and inventoryHash are the same thing
for i in range(self.ui.tableWidgetInbox.rowCount()):
if msgid == str(self.ui.tableWidgetInbox.item(i, 3).data(Qt.UserRole).toPyObject()):
self.statusBar().showMessage(_translate(
"MainWindow", "Message trashed"))
self.ui.tableWidgetInbox.removeRow(i)
break
self.changedInboxUnread()
def displayAlert(self, title, text, exitAfterUserClicksOk):
self.statusBar().showMessage(text)
QtGui.QMessageBox.critical(self, title, text, QMessageBox.Ok)
if exitAfterUserClicksOk:
os._exit(0)
def rerenderInboxFromLabels(self):
for i in range(self.ui.tableWidgetInbox.rowCount()):
addressToLookup = str(self.ui.tableWidgetInbox.item(
i, 1).data(Qt.UserRole).toPyObject())
fromLabel = ''
queryreturn = sqlQuery(
'''select label from addressbook where address=?''', addressToLookup)
if queryreturn != []:
for row in queryreturn:
fromLabel, = row
if fromLabel == '':
# It might be a broadcast message. We should check for that
# label.
queryreturn = sqlQuery(
'''select label from subscriptions where address=?''', addressToLookup)
if queryreturn != []:
for row in queryreturn:
fromLabel, = row
if fromLabel == '':
# Message might be from an address we own like a chan address. Let's look for that label.
if shared.config.has_section(addressToLookup):
fromLabel = shared.config.get(addressToLookup, 'label')
if fromLabel == '':
fromLabel = addressToLookup
self.ui.tableWidgetInbox.item(
i, 1).setText(unicode(fromLabel, 'utf-8'))
self.ui.tableWidgetInbox.item(
i, 1).setIcon(avatarize(addressToLookup))
# Set the color according to whether it is the address of a mailing
# list or not.
if shared.safeConfigGetBoolean(addressToLookup, 'chan'):
self.ui.tableWidgetInbox.item(i, 1).setTextColor(QtGui.QColor(216, 119, 0)) # orange
else:
self.ui.tableWidgetInbox.item(
i, 1).setTextColor(QApplication.palette().text().color())
def rerenderInboxToLabels(self):
for i in range(self.ui.tableWidgetInbox.rowCount()):
toAddress = str(self.ui.tableWidgetInbox.item(
i, 0).data(Qt.UserRole).toPyObject())
# Message might be to an address we own like a chan address. Let's look for that label.
if shared.config.has_section(toAddress):
toLabel = shared.config.get(toAddress, 'label')
else:
toLabel = toAddress
self.ui.tableWidgetInbox.item(
i, 0).setText(unicode(toLabel, 'utf-8'))
self.ui.tableWidgetInbox.item(
i, 0).setIcon(avatarize(toAddress))
# Set the color according to whether it is the address of a mailing
# list, a chan or neither.
if shared.safeConfigGetBoolean(toAddress, 'chan'):
self.ui.tableWidgetInbox.item(i, 0).setTextColor(QtGui.QColor(216, 119, 0)) # orange
elif shared.safeConfigGetBoolean(toAddress, 'mailinglist'):
self.ui.tableWidgetInbox.item(i, 0).setTextColor(QtGui.QColor(137, 4, 177)) # magenta
else:
self.ui.tableWidgetInbox.item(
i, 0).setTextColor(QApplication.palette().text().color())
def rerenderSentFromLabels(self):
for i in range(self.ui.tableWidgetSent.rowCount()):
fromAddress = str(self.ui.tableWidgetSent.item(
i, 1).data(Qt.UserRole).toPyObject())
# Message might be from an address we own like a chan address. Let's look for that label.
if shared.config.has_section(fromAddress):
fromLabel = shared.config.get(fromAddress, 'label')
else:
fromLabel = fromAddress
self.ui.tableWidgetSent.item(
i, 1).setText(unicode(fromLabel, 'utf-8'))
self.ui.tableWidgetSent.item(
i, 1).setIcon(avatarize(fromAddress))
def rerenderSentToLabels(self):
for i in range(self.ui.tableWidgetSent.rowCount()):
addressToLookup = str(self.ui.tableWidgetSent.item(
i, 0).data(Qt.UserRole).toPyObject())
toLabel = ''
queryreturn = sqlQuery(
'''select label from addressbook where address=?''', addressToLookup)
if queryreturn != []:
for row in queryreturn:
toLabel, = row
if toLabel == '':
# Message might be to an address we own like a chan address. Let's look for that label.
if shared.config.has_section(addressToLookup):
toLabel = shared.config.get(addressToLookup, 'label')
if toLabel == '':
toLabel = addressToLookup
self.ui.tableWidgetSent.item(
i, 0).setText(unicode(toLabel, 'utf-8'))
def rerenderAddressBook(self):
self.ui.tableWidgetAddressBook.setRowCount(0)
queryreturn = sqlQuery('SELECT * FROM addressbook')
for row in queryreturn:
label, address = row
self.ui.tableWidgetAddressBook.insertRow(0)
newItem = QtGui.QTableWidgetItem(unicode(label, 'utf-8'))
newItem.setIcon(avatarize(address))
self.ui.tableWidgetAddressBook.setItem(0, 0, newItem)
newItem = QtGui.QTableWidgetItem(address)
newItem.setFlags(
QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
self.ui.tableWidgetAddressBook.setItem(0, 1, newItem)
def rerenderSubscriptions(self):
self.ui.tableWidgetSubscriptions.setRowCount(0)
queryreturn = sqlQuery('SELECT label, address, enabled FROM subscriptions')
for row in queryreturn:
label, address, enabled = row
self.ui.tableWidgetSubscriptions.insertRow(0)
newItem = QtGui.QTableWidgetItem(unicode(label, 'utf-8'))
if not enabled:
newItem.setTextColor(QtGui.QColor(128, 128, 128))
newItem.setIcon(avatarize(address))
self.ui.tableWidgetSubscriptions.setItem(0, 0, newItem)
newItem = QtGui.QTableWidgetItem(address)
newItem.setFlags(
QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
if not enabled:
newItem.setTextColor(QtGui.QColor(128, 128, 128))
self.ui.tableWidgetSubscriptions.setItem(0, 1, newItem)
def click_pushButtonSend(self):
self.statusBar().showMessage('')
toAddresses = str(self.ui.lineEditTo.text())
fromAddress = str(self.ui.labelFrom.text())
subject = str(self.ui.lineEditSubject.text().toUtf8())
message = str(
self.ui.textEditMessage.document().toPlainText().toUtf8())
"""
The whole network message must fit in 2^18 bytes. Let's assume 500
bytes of overhead. If someone wants to get that too an exact
number you are welcome to but I think that it would be a better
use of time to support message continuation so that users can
send messages of any length.
"""
if len(message) > (2 ** 18 - 500):
QMessageBox.about(self, _translate("MainWindow", "Message too long"), _translate(
"MainWindow", "The message that you are trying to send is too long by %1 bytes. (The maximum is 261644 bytes). Please cut it down before sending.").arg(len(message) - (2 ** 18 - 500)))
return
if self.ui.radioButtonSpecific.isChecked(): # To send a message to specific people (rather than broadcast)
toAddressesList = [s.strip()
for s in toAddresses.replace(',', ';').split(';')]
# Remove duplicate addresses. If the user lists the same address both with
# and without the BM- prefix, this will not catch it and the message will
# be sent to that person twice.
toAddressesList = list(set(toAddressesList))
for toAddress in toAddressesList:
if toAddress != '':
status, addressVersionNumber, streamNumber, ripe = decodeAddress(
toAddress)
if status != 'success':
with shared.printLock:
print 'Error: Could not decode', toAddress, ':', status
if status == 'missingbm':
self.statusBar().showMessage(_translate(
"MainWindow", "Error: Bitmessage addresses start with BM- Please check %1").arg(toAddress))
elif status == 'checksumfailed':
self.statusBar().showMessage(_translate(
"MainWindow", "Error: The address %1 is not typed or copied correctly. Please check it.").arg(toAddress))
elif status == 'invalidcharacters':
self.statusBar().showMessage(_translate(
"MainWindow", "Error: The address %1 contains invalid characters. Please check it.").arg(toAddress))
elif status == 'versiontoohigh':
self.statusBar().showMessage(_translate(
"MainWindow", "Error: The address version in %1 is too high. Either you need to upgrade your Bitmessage software or your acquaintance is being clever.").arg(toAddress))
elif status == 'ripetooshort':
self.statusBar().showMessage(_translate(
"MainWindow", "Error: Some data encoded in the address %1 is too short. There might be something wrong with the software of your acquaintance.").arg(toAddress))
elif status == 'ripetoolong':
self.statusBar().showMessage(_translate(
"MainWindow", "Error: Some data encoded in the address %1 is too long. There might be something wrong with the software of your acquaintance.").arg(toAddress))
elif status == 'varintmalformed':
self.statusBar().showMessage(_translate(
"MainWindow", "Error: Some data encoded in the address %1 is malformed. There might be something wrong with the software of your acquaintance.").arg(toAddress))
else:
self.statusBar().showMessage(_translate(
"MainWindow", "Error: Something is wrong with the address %1.").arg(toAddress))
elif fromAddress == '':
self.statusBar().showMessage(_translate(
"MainWindow", "Error: You must specify a From address. If you don\'t have one, go to the \'Your Identities\' tab."))
else:
toAddress = addBMIfNotPresent(toAddress)
if addressVersionNumber > 4 or addressVersionNumber <= 1:
QMessageBox.about(self, _translate("MainWindow", "Address version number"), _translate(
"MainWindow", "Concerning the address %1, Bitmessage cannot understand address version numbers of %2. Perhaps upgrade Bitmessage to the latest version.").arg(toAddress).arg(str(addressVersionNumber)))
continue
if streamNumber > 1 or streamNumber == 0:
QMessageBox.about(self, _translate("MainWindow", "Stream number"), _translate(
"MainWindow", "Concerning the address %1, Bitmessage cannot handle stream numbers of %2. Perhaps upgrade Bitmessage to the latest version.").arg(toAddress).arg(str(streamNumber)))
continue
self.statusBar().showMessage('')
if shared.statusIconColor == 'red':
self.statusBar().showMessage(_translate(
"MainWindow", "Warning: You are currently not connected. Bitmessage will do the work necessary to send the message but it won\'t send until you connect."))
                        # 32 random bytes of ackdata, used later to match the
                        # recipient's acknowledgement to this message.
                        ackdata = OpenSSL.rand(32)
sqlExecute(
'''INSERT INTO sent VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?)''',
'',
toAddress,
ripe,
fromAddress,
subject,
message,
ackdata,
int(time.time()),
'msgqueued',
1,
1,
'sent',
2)
toLabel = ''
queryreturn = sqlQuery('''select label from addressbook where address=?''',
toAddress)
if queryreturn != []:
for row in queryreturn:
toLabel, = row
self.displayNewSentMessage(
toAddress, toLabel, fromAddress, subject, message, ackdata)
shared.workerQueue.put(('sendmessage', toAddress))
self.ui.comboBoxSendFrom.setCurrentIndex(0)
self.ui.labelFrom.setText('')
self.ui.lineEditTo.setText('')
self.ui.lineEditSubject.setText('')
self.ui.textEditMessage.setText('')
self.ui.tabWidget.setCurrentIndex(2)
self.ui.tableWidgetSent.setCurrentCell(0, 0)
else:
self.statusBar().showMessage(_translate(
"MainWindow", "Your \'To\' field is empty."))
else: # User selected 'Broadcast'
if fromAddress == '':
self.statusBar().showMessage(_translate(
"MainWindow", "Error: You must specify a From address. If you don\'t have one, go to the \'Your Identities\' tab."))
else:
self.statusBar().showMessage('')
# We don't actually need the ackdata for acknowledgement since
# this is a broadcast message, but we can use it to update the
# user interface when the POW is done generating.
ackdata = OpenSSL.rand(32)
toAddress = self.str_broadcast_subscribers
ripe = ''
t = ('', toAddress, ripe, fromAddress, subject, message, ackdata, int(
time.time()), 'broadcastqueued', 1, 1, 'sent', 2)
sqlExecute(
'''INSERT INTO sent VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?)''', *t)
toLabel = self.str_broadcast_subscribers
self.displayNewSentMessage(
toAddress, toLabel, fromAddress, subject, message, ackdata)
shared.workerQueue.put(('sendbroadcast', ''))
self.ui.comboBoxSendFrom.setCurrentIndex(0)
self.ui.labelFrom.setText('')
self.ui.lineEditTo.setText('')
self.ui.lineEditSubject.setText('')
self.ui.textEditMessage.setText('')
self.ui.tabWidget.setCurrentIndex(2)
self.ui.tableWidgetSent.setCurrentCell(0, 0)
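    # Jump to the Address Book tab and flash a status-bar hint telling
    # the user to right-click entries there to pick recipients.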
def click_pushButtonLoadFromAddressBook(self):
self.ui.tabWidget.setCurrentIndex(5)
for i in range(4):
time.sleep(0.1)
self.statusBar().showMessage('')
time.sleep(0.1)
self.statusBar().showMessage(_translate(
"MainWindow", "Right click one or more entries in your address book and select \'Send message to this address\'."))
def click_pushButtonFetchNamecoinID(self):
nc = namecoinConnection()
err, addr = nc.query(str(self.ui.lineEditTo.text()))
if err is not None:
self.statusBar().showMessage(_translate(
"MainWindow", "Error: " + err))
else:
self.ui.lineEditTo.setText(addr)
self.statusBar().showMessage(_translate(
"MainWindow", "Fetched address from namecoin identity."))
def redrawLabelFrom(self, index):
self.ui.labelFrom.setText(
self.ui.comboBoxSendFrom.itemData(index).toPyObject())
self.setBroadcastEnablementDependingOnWhetherThisIsAChanAddress(self.ui.comboBoxSendFrom.itemData(index).toPyObject())
def setBroadcastEnablementDependingOnWhetherThisIsAChanAddress(self, address):
# If this is a chan then don't let people broadcast because no one
# should subscribe to chan addresses.
if shared.safeConfigGetBoolean(str(address), 'chan'):
self.ui.radioButtonSpecific.click()
self.ui.radioButtonBroadcast.setEnabled(False)
else:
self.ui.radioButtonBroadcast.setEnabled(True)
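    # Rebuild the Send-From combo box from the enabled addresses in
    # keys.dat.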
def rerenderComboBoxSendFrom(self):
self.ui.comboBoxSendFrom.clear()
self.ui.labelFrom.setText('')
configSections = shared.config.sections()
for addressInKeysFile in configSections:
if addressInKeysFile != 'bitmessagesettings':
isEnabled = shared.config.getboolean(
addressInKeysFile, 'enabled') # I realize that this is poor programming practice but I don't care. It's easier for others to read.
if isEnabled:
self.ui.comboBoxSendFrom.insertItem(0, avatarize(addressInKeysFile), unicode(shared.config.get(
addressInKeysFile, 'label'), 'utf-8'), addressInKeysFile)
self.ui.comboBoxSendFrom.insertItem(0, '', '')
        if self.ui.comboBoxSendFrom.count() == 2:
self.ui.comboBoxSendFrom.setCurrentIndex(1)
self.redrawLabelFrom(self.ui.comboBoxSendFrom.currentIndex())
else:
self.ui.comboBoxSendFrom.setCurrentIndex(0)
    # Puts a newly sent message on the 'Sent' tab. Called both when the
    # user sends a message and by the processmsg function when a message
    # arrives at an address acting as a pseudo-mailing-list and is
    # rebroadcast.
def displayNewSentMessage(self, toAddress, toLabel, fromAddress, subject, message, ackdata):
subject = shared.fixPotentiallyInvalidUTF8Data(subject)
message = shared.fixPotentiallyInvalidUTF8Data(message)
try:
fromLabel = shared.config.get(fromAddress, 'label')
except:
fromLabel = ''
if fromLabel == '':
fromLabel = fromAddress
self.ui.tableWidgetSent.setSortingEnabled(False)
self.ui.tableWidgetSent.insertRow(0)
if toLabel == '':
newItem = QtGui.QTableWidgetItem(unicode(toAddress, 'utf-8'))
newItem.setToolTip(unicode(toAddress, 'utf-8'))
else:
newItem = QtGui.QTableWidgetItem(unicode(toLabel, 'utf-8'))
newItem.setToolTip(unicode(toLabel, 'utf-8'))
newItem.setData(Qt.UserRole, str(toAddress))
newItem.setIcon(avatarize(toAddress))
self.ui.tableWidgetSent.setItem(0, 0, newItem)
if fromLabel == '':
newItem = QtGui.QTableWidgetItem(unicode(fromAddress, 'utf-8'))
newItem.setToolTip(unicode(fromAddress, 'utf-8'))
else:
newItem = QtGui.QTableWidgetItem(unicode(fromLabel, 'utf-8'))
newItem.setToolTip(unicode(fromLabel, 'utf-8'))
newItem.setData(Qt.UserRole, str(fromAddress))
newItem.setIcon(avatarize(fromAddress))
self.ui.tableWidgetSent.setItem(0, 1, newItem)
        newItem = QtGui.QTableWidgetItem(unicode(subject, 'utf-8'))
        newItem.setToolTip(unicode(subject, 'utf-8'))
        #newItem.setData(Qt.UserRole, unicode(message, 'utf-8')) # No longer hold the message in the table; we'll use a SQL query to display it as needed.
self.ui.tableWidgetSent.setItem(0, 2, newItem)
# newItem = QtGui.QTableWidgetItem('Doing work necessary to send
# broadcast...'+
# l10n.formatTimestamp())
newItem = myTableWidgetItem(_translate("MainWindow", "Work is queued. %1").arg(l10n.formatTimestamp()))
newItem.setToolTip(_translate("MainWindow", "Work is queued. %1").arg(l10n.formatTimestamp()))
newItem.setData(Qt.UserRole, QByteArray(ackdata))
newItem.setData(33, int(time.time()))
self.ui.tableWidgetSent.setItem(0, 3, newItem)
        self.ui.textEditSentMessage.setPlainText(unicode(message, 'utf-8'))
self.ui.tableWidgetSent.setSortingEnabled(True)
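    # Add a newly received message to the top of the Inbox table (bold,
    # colored for mailing-list or chan addresses) and, if enabled, show
    # a tray notification.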
def displayNewInboxMessage(self, inventoryHash, toAddress, fromAddress, subject, message):
subject = shared.fixPotentiallyInvalidUTF8Data(subject)
fromLabel = ''
queryreturn = sqlQuery(
'''select label from addressbook where address=?''', fromAddress)
if queryreturn != []:
for row in queryreturn:
fromLabel, = row
else:
# There might be a label in the subscriptions table
queryreturn = sqlQuery(
'''select label from subscriptions where address=?''', fromAddress)
if queryreturn != []:
for row in queryreturn:
fromLabel, = row
try:
if toAddress == self.str_broadcast_subscribers:
toLabel = self.str_broadcast_subscribers
else:
toLabel = shared.config.get(toAddress, 'label')
except:
toLabel = ''
if toLabel == '':
toLabel = toAddress
font = QFont()
font.setBold(True)
self.ui.tableWidgetInbox.setSortingEnabled(False)
newItem = QtGui.QTableWidgetItem(unicode(toLabel, 'utf-8'))
newItem.setToolTip(unicode(toLabel, 'utf-8'))
newItem.setFont(font)
newItem.setData(Qt.UserRole, str(toAddress))
if shared.safeConfigGetBoolean(str(toAddress), 'mailinglist'):
            newItem.setTextColor(QtGui.QColor(137, 4, 177)) # magenta
if shared.safeConfigGetBoolean(str(toAddress), 'chan'):
newItem.setTextColor(QtGui.QColor(216, 119, 0)) # orange
self.ui.tableWidgetInbox.insertRow(0)
newItem.setIcon(avatarize(toAddress))
self.ui.tableWidgetInbox.setItem(0, 0, newItem)
if fromLabel == '':
newItem = QtGui.QTableWidgetItem(unicode(fromAddress, 'utf-8'))
newItem.setToolTip(unicode(fromAddress, 'utf-8'))
if shared.config.getboolean('bitmessagesettings', 'showtraynotifications'):
self.notifierShow(unicode(_translate("MainWindow",'New Message').toUtf8(),'utf-8'), unicode(_translate("MainWindow",'From ').toUtf8(),'utf-8') + unicode(fromAddress, 'utf-8'), self.SOUND_UNKNOWN, None)
else:
newItem = QtGui.QTableWidgetItem(unicode(fromLabel, 'utf-8'))
            newItem.setToolTip(unicode(fromLabel, 'utf-8'))
if shared.config.getboolean('bitmessagesettings', 'showtraynotifications'):
self.notifierShow(unicode(_translate("MainWindow",'New Message').toUtf8(),'utf-8'), unicode(_translate("MainWindow",'From ').toUtf8(),'utf-8') + unicode(fromLabel, 'utf-8'), self.SOUND_KNOWN, unicode(fromLabel, 'utf-8'))
newItem.setData(Qt.UserRole, str(fromAddress))
newItem.setFont(font)
newItem.setIcon(avatarize(fromAddress))
self.ui.tableWidgetInbox.setItem(0, 1, newItem)
        newItem = QtGui.QTableWidgetItem(unicode(subject, 'utf-8'))
        newItem.setToolTip(unicode(subject, 'utf-8'))
        #newItem.setData(Qt.UserRole, unicode(message, 'utf-8')) # No longer hold the message in the table; we'll use a SQL query to display it as needed.
newItem.setFont(font)
self.ui.tableWidgetInbox.setItem(0, 2, newItem)
newItem = myTableWidgetItem(l10n.formatTimestamp())
newItem.setToolTip(l10n.formatTimestamp())
newItem.setData(Qt.UserRole, QByteArray(inventoryHash))
newItem.setData(33, int(time.time()))
newItem.setFont(font)
self.ui.tableWidgetInbox.setItem(0, 3, newItem)
self.ui.tableWidgetInbox.setSortingEnabled(True)
self.ubuntuMessagingMenuUpdate(True, newItem, toLabel)
def click_pushButtonAddAddressBook(self):
self.AddAddressDialogInstance = AddAddressDialog(self)
if self.AddAddressDialogInstance.exec_():
if self.AddAddressDialogInstance.ui.labelAddressCheck.text() == _translate("MainWindow", "Address is valid."):
# First we must check to see if the address is already in the
# address book. The user cannot add it again or else it will
# cause problems when updating and deleting the entry.
address = addBMIfNotPresent(str(
self.AddAddressDialogInstance.ui.lineEditAddress.text()))
label = self.AddAddressDialogInstance.ui.newAddressLabel.text().toUtf8()
self.addEntryToAddressBook(address,label)
else:
self.statusBar().showMessage(_translate(
"MainWindow", "The address you entered was invalid. Ignoring it."))
def addEntryToAddressBook(self,address,label):
queryreturn = sqlQuery('''select * from addressbook where address=?''', address)
if queryreturn == []:
self.ui.tableWidgetAddressBook.setSortingEnabled(False)
self.ui.tableWidgetAddressBook.insertRow(0)
newItem = QtGui.QTableWidgetItem(unicode(label, 'utf-8'))
newItem.setIcon(avatarize(address))
self.ui.tableWidgetAddressBook.setItem(0, 0, newItem)
newItem = QtGui.QTableWidgetItem(address)
newItem.setFlags(
QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
self.ui.tableWidgetAddressBook.setItem(0, 1, newItem)
self.ui.tableWidgetAddressBook.setSortingEnabled(True)
sqlExecute('''INSERT INTO addressbook VALUES (?,?)''', str(label), address)
self.rerenderInboxFromLabels()
self.rerenderSentToLabels()
else:
self.statusBar().showMessage(_translate(
"MainWindow", "Error: You cannot add the same address to your address book twice. Try renaming the existing one if you want."))
def addSubscription(self, address, label):
address = addBMIfNotPresent(address)
        # This should be handled outside of this function, for error
        # displaying and such, but it must also be checked here.
        if shared.isAddressInMySubscriptionsList(address):
            return
        # Add to the UI list
        self.ui.tableWidgetSubscriptions.setSortingEnabled(False)
        self.ui.tableWidgetSubscriptions.insertRow(0)
        newItem = QtGui.QTableWidgetItem(unicode(label, 'utf-8'))
        newItem.setIcon(avatarize(address))
        self.ui.tableWidgetSubscriptions.setItem(0, 0, newItem)
        newItem = QtGui.QTableWidgetItem(address)
        newItem.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
        self.ui.tableWidgetSubscriptions.setItem(0, 1, newItem)
        self.ui.tableWidgetSubscriptions.setSortingEnabled(True)
        # Add to the database (perhaps this should be separated from the MyForm class)
        sqlExecute('''INSERT INTO subscriptions VALUES (?,?,?)''', str(label), address, True)
self.rerenderInboxFromLabels()
shared.reloadBroadcastSendersForWhichImWatching()
def click_pushButtonAddSubscription(self):
self.NewSubscriptionDialogInstance = NewSubscriptionDialog(self)
if self.NewSubscriptionDialogInstance.exec_():
if self.NewSubscriptionDialogInstance.ui.labelAddressCheck.text() != _translate("MainWindow", "Address is valid."):
self.statusBar().showMessage(_translate("MainWindow", "The address you entered was invalid. Ignoring it."))
return
address = addBMIfNotPresent(str(self.NewSubscriptionDialogInstance.ui.lineEditSubscriptionAddress.text()))
# We must check to see if the address is already in the subscriptions list. The user cannot add it again or else it will cause problems when updating and deleting the entry.
if shared.isAddressInMySubscriptionsList(address):
self.statusBar().showMessage(_translate("MainWindow", "Error: You cannot add the same address to your subsciptions twice. Perhaps rename the existing one if you want."))
return
label = self.NewSubscriptionDialogInstance.ui.newsubscriptionlabel.text().toUtf8()
self.addSubscription(address, label)
# Now, if the user wants to display old broadcasts, let's get them out of the inventory and put them
# in the objectProcessorQueue to be processed
if self.NewSubscriptionDialogInstance.ui.checkBoxDisplayMessagesAlreadyInInventory.isChecked():
status, addressVersion, streamNumber, ripe = decodeAddress(address)
shared.flushInventory()
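                # The tag identifying v4 broadcast objects is the second
                # half of the double SHA-512 of the encoded address data.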
doubleHashOfAddressData = hashlib.sha512(hashlib.sha512(encodeVarint(
addressVersion) + encodeVarint(streamNumber) + ripe).digest()).digest()
tag = doubleHashOfAddressData[32:]
queryreturn = sqlQuery(
'''select payload from inventory where objecttype=3 and tag=?''', tag)
for row in queryreturn:
payload, = row
objectType = 3
with shared.objectProcessorQueueSizeLock:
shared.objectProcessorQueueSize += len(payload)
shared.objectProcessorQueue.put((objectType,payload))
def loadBlackWhiteList(self):
# Initialize the Blacklist or Whitelist table
listType = shared.config.get('bitmessagesettings', 'blackwhitelist')
if listType == 'black':
queryreturn = sqlQuery('''SELECT label, address, enabled FROM blacklist''')
else:
queryreturn = sqlQuery('''SELECT label, address, enabled FROM whitelist''')
for row in queryreturn:
label, address, enabled = row
self.ui.tableWidgetBlacklist.insertRow(0)
newItem = QtGui.QTableWidgetItem(unicode(label, 'utf-8'))
if not enabled:
newItem.setTextColor(QtGui.QColor(128, 128, 128))
newItem.setIcon(avatarize(address))
self.ui.tableWidgetBlacklist.setItem(0, 0, newItem)
newItem = QtGui.QTableWidgetItem(address)
newItem.setFlags(
QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
if not enabled:
newItem.setTextColor(QtGui.QColor(128, 128, 128))
self.ui.tableWidgetBlacklist.setItem(0, 1, newItem)
def click_pushButtonStatusIcon(self):
print 'click_pushButtonStatusIcon'
self.iconGlossaryInstance = iconGlossaryDialog(self)
if self.iconGlossaryInstance.exec_():
pass
def click_actionHelp(self):
self.helpDialogInstance = helpDialog(self)
self.helpDialogInstance.exec_()
def click_actionAbout(self):
self.aboutDialogInstance = aboutDialog(self)
self.aboutDialogInstance.exec_()
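    # Show the Settings dialog and, if accepted, write every changed
    # option back to keys.dat (handling portable-mode moves as needed).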
def click_actionSettings(self):
self.settingsDialogInstance = settingsDialog(self)
if self.settingsDialogInstance.exec_():
shared.config.set('bitmessagesettings', 'startonlogon', str(
self.settingsDialogInstance.ui.checkBoxStartOnLogon.isChecked()))
shared.config.set('bitmessagesettings', 'minimizetotray', str(
self.settingsDialogInstance.ui.checkBoxMinimizeToTray.isChecked()))
shared.config.set('bitmessagesettings', 'showtraynotifications', str(
self.settingsDialogInstance.ui.checkBoxShowTrayNotifications.isChecked()))
shared.config.set('bitmessagesettings', 'startintray', str(
self.settingsDialogInstance.ui.checkBoxStartInTray.isChecked()))
shared.config.set('bitmessagesettings', 'willinglysendtomobile', str(
self.settingsDialogInstance.ui.checkBoxWillinglySendToMobile.isChecked()))
shared.config.set('bitmessagesettings', 'useidenticons', str(
self.settingsDialogInstance.ui.checkBoxUseIdenticons.isChecked()))
shared.config.set('bitmessagesettings', 'replybelow', str(
self.settingsDialogInstance.ui.checkBoxReplyBelow.isChecked()))
lang_ind = int(self.settingsDialogInstance.ui.languageComboBox.currentIndex())
if not languages[lang_ind] == 'other':
shared.config.set('bitmessagesettings', 'userlocale', languages[lang_ind])
if int(shared.config.get('bitmessagesettings', 'port')) != int(self.settingsDialogInstance.ui.lineEditTCPPort.text()):
if not shared.safeConfigGetBoolean('bitmessagesettings', 'dontconnect'):
QMessageBox.about(self, _translate("MainWindow", "Restart"), _translate(
"MainWindow", "You must restart Bitmessage for the port number change to take effect."))
shared.config.set('bitmessagesettings', 'port', str(
self.settingsDialogInstance.ui.lineEditTCPPort.text()))
#print 'self.settingsDialogInstance.ui.comboBoxProxyType.currentText()', self.settingsDialogInstance.ui.comboBoxProxyType.currentText()
#print 'self.settingsDialogInstance.ui.comboBoxProxyType.currentText())[0:5]', self.settingsDialogInstance.ui.comboBoxProxyType.currentText()[0:5]
if shared.config.get('bitmessagesettings', 'socksproxytype') == 'none' and self.settingsDialogInstance.ui.comboBoxProxyType.currentText()[0:5] == 'SOCKS':
if shared.statusIconColor != 'red':
QMessageBox.about(self, _translate("MainWindow", "Restart"), _translate(
"MainWindow", "Bitmessage will use your proxy from now on but you may want to manually restart Bitmessage now to close existing connections (if any)."))
if shared.config.get('bitmessagesettings', 'socksproxytype')[0:5] == 'SOCKS' and self.settingsDialogInstance.ui.comboBoxProxyType.currentText()[0:5] != 'SOCKS':
self.statusBar().showMessage('')
if self.settingsDialogInstance.ui.comboBoxProxyType.currentText()[0:5] == 'SOCKS':
shared.config.set('bitmessagesettings', 'socksproxytype', str(
self.settingsDialogInstance.ui.comboBoxProxyType.currentText()))
else:
shared.config.set('bitmessagesettings', 'socksproxytype', 'none')
shared.config.set('bitmessagesettings', 'socksauthentication', str(
self.settingsDialogInstance.ui.checkBoxAuthentication.isChecked()))
shared.config.set('bitmessagesettings', 'sockshostname', str(
self.settingsDialogInstance.ui.lineEditSocksHostname.text()))
shared.config.set('bitmessagesettings', 'socksport', str(
self.settingsDialogInstance.ui.lineEditSocksPort.text()))
shared.config.set('bitmessagesettings', 'socksusername', str(
self.settingsDialogInstance.ui.lineEditSocksUsername.text()))
shared.config.set('bitmessagesettings', 'sockspassword', str(
self.settingsDialogInstance.ui.lineEditSocksPassword.text()))
shared.config.set('bitmessagesettings', 'sockslisten', str(
self.settingsDialogInstance.ui.checkBoxSocksListen.isChecked()))
try:
# Rounding to integers just for aesthetics
shared.config.set('bitmessagesettings', 'maxdownloadrate', str(
int(float(self.settingsDialogInstance.ui.lineEditMaxDownloadRate.text()))))
shared.config.set('bitmessagesettings', 'maxuploadrate', str(
int(float(self.settingsDialogInstance.ui.lineEditMaxUploadRate.text()))))
except:
QMessageBox.about(self, _translate("MainWindow", "Number needed"), _translate(
"MainWindow", "Your maximum download and upload rate must be numbers. Ignoring what you typed."))
shared.config.set('bitmessagesettings', 'namecoinrpctype',
self.settingsDialogInstance.getNamecoinType())
shared.config.set('bitmessagesettings', 'namecoinrpchost', str(
self.settingsDialogInstance.ui.lineEditNamecoinHost.text()))
shared.config.set('bitmessagesettings', 'namecoinrpcport', str(
self.settingsDialogInstance.ui.lineEditNamecoinPort.text()))
shared.config.set('bitmessagesettings', 'namecoinrpcuser', str(
self.settingsDialogInstance.ui.lineEditNamecoinUser.text()))
shared.config.set('bitmessagesettings', 'namecoinrpcpassword', str(
self.settingsDialogInstance.ui.lineEditNamecoinPassword.text()))
# Demanded difficulty tab
if float(self.settingsDialogInstance.ui.lineEditTotalDifficulty.text()) >= 1:
shared.config.set('bitmessagesettings', 'defaultnoncetrialsperbyte', str(int(float(
self.settingsDialogInstance.ui.lineEditTotalDifficulty.text()) * shared.networkDefaultProofOfWorkNonceTrialsPerByte)))
if float(self.settingsDialogInstance.ui.lineEditSmallMessageDifficulty.text()) >= 1:
shared.config.set('bitmessagesettings', 'defaultpayloadlengthextrabytes', str(int(float(
self.settingsDialogInstance.ui.lineEditSmallMessageDifficulty.text()) * shared.networkDefaultPayloadLengthExtraBytes)))
acceptableDifficultyChanged = False
if float(self.settingsDialogInstance.ui.lineEditMaxAcceptableTotalDifficulty.text()) >= 1 or float(self.settingsDialogInstance.ui.lineEditMaxAcceptableTotalDifficulty.text()) == 0:
if shared.config.get('bitmessagesettings','maxacceptablenoncetrialsperbyte') != str(int(float(
self.settingsDialogInstance.ui.lineEditMaxAcceptableTotalDifficulty.text()) * shared.networkDefaultProofOfWorkNonceTrialsPerByte)):
# the user changed the max acceptable total difficulty
acceptableDifficultyChanged = True
shared.config.set('bitmessagesettings', 'maxacceptablenoncetrialsperbyte', str(int(float(
self.settingsDialogInstance.ui.lineEditMaxAcceptableTotalDifficulty.text()) * shared.networkDefaultProofOfWorkNonceTrialsPerByte)))
if float(self.settingsDialogInstance.ui.lineEditMaxAcceptableSmallMessageDifficulty.text()) >= 1 or float(self.settingsDialogInstance.ui.lineEditMaxAcceptableSmallMessageDifficulty.text()) == 0:
if shared.config.get('bitmessagesettings','maxacceptablepayloadlengthextrabytes') != str(int(float(
self.settingsDialogInstance.ui.lineEditMaxAcceptableSmallMessageDifficulty.text()) * shared.networkDefaultPayloadLengthExtraBytes)):
# the user changed the max acceptable small message difficulty
acceptableDifficultyChanged = True
shared.config.set('bitmessagesettings', 'maxacceptablepayloadlengthextrabytes', str(int(float(
self.settingsDialogInstance.ui.lineEditMaxAcceptableSmallMessageDifficulty.text()) * shared.networkDefaultPayloadLengthExtraBytes)))
if acceptableDifficultyChanged:
# It might now be possible to send msgs which were previously marked as toodifficult.
# Let us change them to 'msgqueued'. The singleWorker will try to send them and will again
# mark them as toodifficult if the receiver's required difficulty is still higher than
# we are willing to do.
sqlExecute('''UPDATE sent SET status='msgqueued' WHERE status='toodifficult' ''')
shared.workerQueue.put(('sendmessage', ''))
            # start: UI setting to stop trying to send messages after X days/months
            # I'm open to changing this UI to something else if someone has a better idea.
            # Special case: if both fields are blank, fall back to
            # Bitmessage's default resend behavior.
            if self.settingsDialogInstance.ui.lineEditDays.text() == '' and self.settingsDialogInstance.ui.lineEditMonths.text() == '':
shared.config.set('bitmessagesettings', 'stopresendingafterxdays', '')
shared.config.set('bitmessagesettings', 'stopresendingafterxmonths', '')
shared.maximumLengthOfTimeToBotherResendingMessages = float('inf')
try:
float(self.settingsDialogInstance.ui.lineEditDays.text())
lineEditDaysIsValidFloat = True
except:
lineEditDaysIsValidFloat = False
try:
float(self.settingsDialogInstance.ui.lineEditMonths.text())
lineEditMonthsIsValidFloat = True
except:
lineEditMonthsIsValidFloat = False
if lineEditDaysIsValidFloat and not lineEditMonthsIsValidFloat:
self.settingsDialogInstance.ui.lineEditMonths.setText("0")
if lineEditMonthsIsValidFloat and not lineEditDaysIsValidFloat:
self.settingsDialogInstance.ui.lineEditDays.setText("0")
if lineEditDaysIsValidFloat or lineEditMonthsIsValidFloat:
if (float(self.settingsDialogInstance.ui.lineEditDays.text()) >=0 and float(self.settingsDialogInstance.ui.lineEditMonths.text()) >=0):
shared.maximumLengthOfTimeToBotherResendingMessages = (float(str(self.settingsDialogInstance.ui.lineEditDays.text())) * 24 * 60 * 60) + (float(str(self.settingsDialogInstance.ui.lineEditMonths.text())) * (60 * 60 * 24 *365)/12)
                    if shared.maximumLengthOfTimeToBotherResendingMessages < 432000: # If the time period is less than 5 days (432000 seconds), we give zero values to all fields. No message will be sent again.
QMessageBox.about(self, _translate("MainWindow", "Will not resend ever"), _translate(
"MainWindow", "Note that the time limit you entered is less than the amount of time Bitmessage waits for the first resend attempt therefore your messages will never be resent."))
shared.config.set('bitmessagesettings', 'stopresendingafterxdays', '0')
shared.config.set('bitmessagesettings', 'stopresendingafterxmonths', '0')
shared.maximumLengthOfTimeToBotherResendingMessages = 0
else:
shared.config.set('bitmessagesettings', 'stopresendingafterxdays', str(float(
self.settingsDialogInstance.ui.lineEditDays.text())))
shared.config.set('bitmessagesettings', 'stopresendingafterxmonths', str(float(
self.settingsDialogInstance.ui.lineEditMonths.text())))
#end
# if str(self.settingsDialogInstance.ui.comboBoxMaxCores.currentText()) == 'All':
# shared.config.set('bitmessagesettings', 'maxcores', '99999')
# else:
# shared.config.set('bitmessagesettings', 'maxcores',
# str(self.settingsDialogInstance.ui.comboBoxMaxCores.currentText()))
with open(shared.appdata + 'keys.dat', 'wb') as configfile:
shared.config.write(configfile)
if 'win32' in sys.platform or 'win64' in sys.platform:
# Auto-startup for Windows
RUN_PATH = "HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\Run"
self.settings = QSettings(RUN_PATH, QSettings.NativeFormat)
if shared.config.getboolean('bitmessagesettings', 'startonlogon'):
self.settings.setValue("PyBitmessage", sys.argv[0])
else:
self.settings.remove("PyBitmessage")
elif 'darwin' in sys.platform:
# startup for mac
pass
elif 'linux' in sys.platform:
# startup for linux
pass
if shared.appdata != '' and self.settingsDialogInstance.ui.checkBoxPortableMode.isChecked(): # If we are NOT using portable mode now but the user selected that we should...
# Write the keys.dat file to disk in the new location
sqlStoredProcedure('movemessagstoprog')
with open('keys.dat', 'wb') as configfile:
shared.config.write(configfile)
# Write the knownnodes.dat file to disk in the new location
                with shared.knownNodesLock:
                    with open('knownnodes.dat', 'wb') as output:
                        pickle.dump(shared.knownNodes, output)
os.remove(shared.appdata + 'keys.dat')
os.remove(shared.appdata + 'knownnodes.dat')
previousAppdataLocation = shared.appdata
shared.appdata = ''
debug.restartLoggingInUpdatedAppdataLocation()
try:
os.remove(previousAppdataLocation + 'debug.log')
os.remove(previousAppdataLocation + 'debug.log.1')
except:
pass
if shared.appdata == '' and not self.settingsDialogInstance.ui.checkBoxPortableMode.isChecked(): # If we ARE using portable mode now but the user selected that we shouldn't...
shared.appdata = shared.lookupAppdataFolder()
if not os.path.exists(shared.appdata):
os.makedirs(shared.appdata)
sqlStoredProcedure('movemessagstoappdata')
# Write the keys.dat file to disk in the new location
with open(shared.appdata + 'keys.dat', 'wb') as configfile:
shared.config.write(configfile)
# Write the knownnodes.dat file to disk in the new location
                with shared.knownNodesLock:
                    with open(shared.appdata + 'knownnodes.dat', 'wb') as output:
                        pickle.dump(shared.knownNodes, output)
os.remove('keys.dat')
os.remove('knownnodes.dat')
debug.restartLoggingInUpdatedAppdataLocation()
try:
os.remove('debug.log')
os.remove('debug.log.1')
except:
pass
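    # Switch the black/whitelist mode to 'black', persist the setting to
    # keys.dat, and repopulate the table (click_radioButtonWhitelist is
    # the mirror image).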
def click_radioButtonBlacklist(self):
if shared.config.get('bitmessagesettings', 'blackwhitelist') == 'white':
shared.config.set('bitmessagesettings', 'blackwhitelist', 'black')
with open(shared.appdata + 'keys.dat', 'wb') as configfile:
shared.config.write(configfile)
# self.ui.tableWidgetBlacklist.clearContents()
self.ui.tableWidgetBlacklist.setRowCount(0)
self.loadBlackWhiteList()
self.ui.tabWidget.setTabText(6, 'Blacklist')
def click_radioButtonWhitelist(self):
if shared.config.get('bitmessagesettings', 'blackwhitelist') == 'black':
shared.config.set('bitmessagesettings', 'blackwhitelist', 'white')
with open(shared.appdata + 'keys.dat', 'wb') as configfile:
shared.config.write(configfile)
# self.ui.tableWidgetBlacklist.clearContents()
self.ui.tableWidgetBlacklist.setRowCount(0)
self.loadBlackWhiteList()
self.ui.tabWidget.setTabText(6, 'Whitelist')
def click_pushButtonAddBlacklist(self):
self.NewBlacklistDialogInstance = AddAddressDialog(self)
if self.NewBlacklistDialogInstance.exec_():
if self.NewBlacklistDialogInstance.ui.labelAddressCheck.text() == _translate("MainWindow", "Address is valid."):
address = addBMIfNotPresent(str(
self.NewBlacklistDialogInstance.ui.lineEditAddress.text()))
# First we must check to see if the address is already in the
# address book. The user cannot add it again or else it will
# cause problems when updating and deleting the entry.
t = (address,)
if shared.config.get('bitmessagesettings', 'blackwhitelist') == 'black':
sql = '''select * from blacklist where address=?'''
else:
sql = '''select * from whitelist where address=?'''
queryreturn = sqlQuery(sql,*t)
if queryreturn == []:
self.ui.tableWidgetBlacklist.setSortingEnabled(False)
self.ui.tableWidgetBlacklist.insertRow(0)
newItem = QtGui.QTableWidgetItem(unicode(
self.NewBlacklistDialogInstance.ui.newAddressLabel.text().toUtf8(), 'utf-8'))
newItem.setIcon(avatarize(address))
self.ui.tableWidgetBlacklist.setItem(0, 0, newItem)
newItem = QtGui.QTableWidgetItem(address)
newItem.setFlags(
QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
self.ui.tableWidgetBlacklist.setItem(0, 1, newItem)
self.ui.tableWidgetBlacklist.setSortingEnabled(True)
t = (str(self.NewBlacklistDialogInstance.ui.newAddressLabel.text().toUtf8()), address, True)
if shared.config.get('bitmessagesettings', 'blackwhitelist') == 'black':
sql = '''INSERT INTO blacklist VALUES (?,?,?)'''
else:
sql = '''INSERT INTO whitelist VALUES (?,?,?)'''
sqlExecute(sql, *t)
else:
self.statusBar().showMessage(_translate(
"MainWindow", "Error: You cannot add the same address to your list twice. Perhaps rename the existing one if you want."))
else:
self.statusBar().showMessage(_translate(
"MainWindow", "The address you entered was invalid. Ignoring it."))
def on_action_SpecialAddressBehaviorDialog(self):
self.dialog = SpecialAddressBehaviorDialog(self)
# For Modal dialogs
if self.dialog.exec_():
currentRow = self.ui.tableWidgetYourIdentities.currentRow()
addressAtCurrentRow = str(
self.ui.tableWidgetYourIdentities.item(currentRow, 1).text())
if shared.safeConfigGetBoolean(addressAtCurrentRow, 'chan'):
return
if self.dialog.ui.radioButtonBehaveNormalAddress.isChecked():
shared.config.set(str(
addressAtCurrentRow), 'mailinglist', 'false')
# Set the color to either black or grey
if shared.config.getboolean(addressAtCurrentRow, 'enabled'):
self.ui.tableWidgetYourIdentities.item(
currentRow, 1).setTextColor(QApplication.palette()
.text().color())
else:
self.ui.tableWidgetYourIdentities.item(
currentRow, 1).setTextColor(QtGui.QColor(128, 128, 128))
else:
shared.config.set(str(
addressAtCurrentRow), 'mailinglist', 'true')
shared.config.set(str(addressAtCurrentRow), 'mailinglistname', str(
self.dialog.ui.lineEditMailingListName.text().toUtf8()))
                self.ui.tableWidgetYourIdentities.item(currentRow, 1).setTextColor(QtGui.QColor(137, 4, 177)) # magenta
with open(shared.appdata + 'keys.dat', 'wb') as configfile:
shared.config.write(configfile)
self.rerenderInboxToLabels()
def click_NewAddressDialog(self):
self.dialog = NewAddressDialog(self)
# For Modal dialogs
if self.dialog.exec_():
# self.dialog.ui.buttonBox.enabled = False
if self.dialog.ui.radioButtonRandomAddress.isChecked():
if self.dialog.ui.radioButtonMostAvailable.isChecked():
streamNumberForAddress = 1
else:
# User selected 'Use the same stream as an existing
# address.'
streamNumberForAddress = decodeAddress(
self.dialog.ui.comboBoxExisting.currentText())[2]
shared.addressGeneratorQueue.put(('createRandomAddress', 4, streamNumberForAddress, str(
self.dialog.ui.newaddresslabel.text().toUtf8()), 1, "", self.dialog.ui.checkBoxEighteenByteRipe.isChecked()))
else:
if self.dialog.ui.lineEditPassphrase.text() != self.dialog.ui.lineEditPassphraseAgain.text():
QMessageBox.about(self, _translate("MainWindow", "Passphrase mismatch"), _translate(
"MainWindow", "The passphrase you entered twice doesn\'t match. Try again."))
elif self.dialog.ui.lineEditPassphrase.text() == "":
QMessageBox.about(self, _translate(
"MainWindow", "Choose a passphrase"), _translate("MainWindow", "You really do need a passphrase."))
else:
streamNumberForAddress = 1 # this will eventually have to be replaced by logic to determine the most available stream number.
shared.addressGeneratorQueue.put(('createDeterministicAddresses', 4, streamNumberForAddress, "unused deterministic address", self.dialog.ui.spinBoxNumberOfAddressesToMake.value(
), self.dialog.ui.lineEditPassphrase.text().toUtf8(), self.dialog.ui.checkBoxEighteenByteRipe.isChecked()))
else:
print 'new address dialog box rejected'
# Quit selected from menu or application indicator
def quit(self):
'''quit_msg = "Are you sure you want to exit Bitmessage?"
reply = QtGui.QMessageBox.question(self, 'Message',
quit_msg, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if reply is QtGui.QMessageBox.No:
return
'''
shared.doCleanShutdown()
self.tray.hide()
# unregister the messaging system
if self.mmapp is not None:
self.mmapp.unregister()
self.statusBar().showMessage(_translate(
"MainWindow", "All done. Closing user interface..."))
os._exit(0)
# window close event
def closeEvent(self, event):
self.appIndicatorHide()
minimizeonclose = False
try:
minimizeonclose = shared.config.getboolean(
'bitmessagesettings', 'minimizeonclose')
except Exception:
pass
if minimizeonclose:
# minimize the application
event.ignore()
else:
# quit the application
event.accept()
self.quit()
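    # Re-render the selected inbox message with basic HTML markup
    # (styled header line, <hr> dividers, paragraph breaks) instead of
    # the plain-text default.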
def on_action_InboxMessageForceHtml(self):
currentInboxRow = self.ui.tableWidgetInbox.currentRow()
msgid = str(self.ui.tableWidgetInbox.item(
currentInboxRow, 3).data(Qt.UserRole).toPyObject())
queryreturn = sqlQuery(
'''select message from inbox where msgid=?''', msgid)
if queryreturn != []:
for row in queryreturn:
messageText, = row
lines = messageText.split('\n')
totalLines = len(lines)
for i in xrange(totalLines):
if 'Message ostensibly from ' in lines[i]:
lines[i] = '<p style="font-size: 12px; color: grey;">%s</span></p>' % (
lines[i])
elif lines[i] == '------------------------------------------------------':
lines[i] = '<hr>'
elif lines[i] == '' and (i+1) < totalLines and \
lines[i+1] != '------------------------------------------------------':
lines[i] = '<br><br>'
content = ' '.join(lines) # To keep the whitespace between lines
content = shared.fixPotentiallyInvalidUTF8Data(content)
        content = unicode(content, 'utf-8')
self.ui.textEditInboxMessage.setHtml(QtCore.QString(content))
def on_action_InboxMarkUnread(self):
font = QFont()
font.setBold(True)
for row in self.ui.tableWidgetInbox.selectedIndexes():
currentRow = row.row()
inventoryHashToMarkUnread = str(self.ui.tableWidgetInbox.item(
currentRow, 3).data(Qt.UserRole).toPyObject())
sqlExecute('''UPDATE inbox SET read=0 WHERE msgid=?''', inventoryHashToMarkUnread)
self.ui.tableWidgetInbox.item(currentRow, 0).setFont(font)
self.ui.tableWidgetInbox.item(currentRow, 1).setFont(font)
self.ui.tableWidgetInbox.item(currentRow, 2).setFont(font)
self.ui.tableWidgetInbox.item(currentRow, 3).setFont(font)
self.changedInboxUnread()
# self.ui.tableWidgetInbox.selectRow(currentRow + 1)
# This doesn't de-select the last message if you try to mark it unread, but that doesn't interfere. Might not be necessary.
# We could also select upwards, but then our problem would be with the topmost message.
# self.ui.tableWidgetInbox.clearSelection() manages to mark the message as read again.
# Format predefined text on message reply.
def quoted_text(self, message):
if not shared.safeConfigGetBoolean('bitmessagesettings', 'replybelow'):
return '\n\n------------------------------------------------------\n' + message
quoteWrapper = textwrap.TextWrapper(replace_whitespace = False,
initial_indent = '> ',
subsequent_indent = '> ',
break_long_words = False,
break_on_hyphens = False)
def quote_line(line):
# Do quote empty lines.
if line == '' or line.isspace():
return '> '
# Quote already quoted lines, but do not wrap them.
elif line[0:2] == '> ':
return '> ' + line
# Wrap and quote lines/paragraphs new to this message.
else:
return quoteWrapper.fill(line)
return '\n'.join([quote_line(l) for l in message.splitlines()]) + '\n\n'
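    # Prepare the Send tab with quoted text, subject and addresses taken
    # from the selected inbox message.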
def on_action_InboxReply(self):
currentInboxRow = self.ui.tableWidgetInbox.currentRow()
toAddressAtCurrentInboxRow = str(self.ui.tableWidgetInbox.item(
currentInboxRow, 0).data(Qt.UserRole).toPyObject())
fromAddressAtCurrentInboxRow = str(self.ui.tableWidgetInbox.item(
currentInboxRow, 1).data(Qt.UserRole).toPyObject())
msgid = str(self.ui.tableWidgetInbox.item(
currentInboxRow, 3).data(Qt.UserRole).toPyObject())
queryreturn = sqlQuery(
'''select message from inbox where msgid=?''', msgid)
if queryreturn != []:
for row in queryreturn:
messageAtCurrentInboxRow, = row
if toAddressAtCurrentInboxRow == self.str_broadcast_subscribers:
self.ui.labelFrom.setText('')
elif not shared.config.has_section(toAddressAtCurrentInboxRow):
QtGui.QMessageBox.information(self, _translate("MainWindow", "Address is gone"), _translate(
"MainWindow", "Bitmessage cannot find your address %1. Perhaps you removed it?").arg(toAddressAtCurrentInboxRow), QMessageBox.Ok)
self.ui.labelFrom.setText('')
elif not shared.config.getboolean(toAddressAtCurrentInboxRow, 'enabled'):
QtGui.QMessageBox.information(self, _translate("MainWindow", "Address disabled"), _translate(
"MainWindow", "Error: The address from which you are trying to send is disabled. You\'ll have to enable it on the \'Your Identities\' tab before using it."), QMessageBox.Ok)
self.ui.labelFrom.setText('')
else:
self.ui.labelFrom.setText(toAddressAtCurrentInboxRow)
self.setBroadcastEnablementDependingOnWhetherThisIsAChanAddress(toAddressAtCurrentInboxRow)
self.ui.lineEditTo.setText(str(fromAddressAtCurrentInboxRow))
# If the previous message was to a chan then we should send our reply to the chan rather than to the particular person who sent the message.
if shared.config.has_section(toAddressAtCurrentInboxRow):
if shared.safeConfigGetBoolean(toAddressAtCurrentInboxRow, 'chan'):
print 'original sent to a chan. Setting the to address in the reply to the chan address.'
self.ui.lineEditTo.setText(str(toAddressAtCurrentInboxRow))
listOfAddressesInComboBoxSendFrom = [str(self.ui.comboBoxSendFrom.itemData(i).toPyObject()) for i in range(self.ui.comboBoxSendFrom.count())]
if toAddressAtCurrentInboxRow in listOfAddressesInComboBoxSendFrom:
currentIndex = listOfAddressesInComboBoxSendFrom.index(toAddressAtCurrentInboxRow)
self.ui.comboBoxSendFrom.setCurrentIndex(currentIndex)
else:
self.ui.comboBoxSendFrom.setCurrentIndex(0)
quotedText = self.quoted_text(unicode(messageAtCurrentInboxRow, 'utf-8'))
self.ui.textEditMessage.setText(quotedText)
if self.ui.tableWidgetInbox.item(currentInboxRow, 2).text()[0:3] in ['Re:', 'RE:']:
self.ui.lineEditSubject.setText(
self.ui.tableWidgetInbox.item(currentInboxRow, 2).text())
else:
self.ui.lineEditSubject.setText(
'Re: ' + self.ui.tableWidgetInbox.item(currentInboxRow, 2).text())
self.ui.radioButtonSpecific.setChecked(True)
self.ui.tabWidget.setCurrentIndex(1)
def on_action_InboxAddSenderToAddressBook(self):
currentInboxRow = self.ui.tableWidgetInbox.currentRow()
# self.ui.tableWidgetInbox.item(currentRow,1).data(Qt.UserRole).toPyObject()
addressAtCurrentInboxRow = str(self.ui.tableWidgetInbox.item(
currentInboxRow, 1).data(Qt.UserRole).toPyObject())
# Let's make sure that it isn't already in the address book
queryreturn = sqlQuery('''select * from addressbook where address=?''',
addressAtCurrentInboxRow)
if queryreturn == []:
self.ui.tableWidgetAddressBook.insertRow(0)
newItem = QtGui.QTableWidgetItem(
'--New entry. Change label in Address Book.--')
self.ui.tableWidgetAddressBook.setItem(0, 0, newItem)
newItem.setIcon(avatarize(addressAtCurrentInboxRow))
newItem = QtGui.QTableWidgetItem(addressAtCurrentInboxRow)
newItem.setFlags(
QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
self.ui.tableWidgetAddressBook.setItem(0, 1, newItem)
sqlExecute('''INSERT INTO addressbook VALUES (?,?)''',
'--New entry. Change label in Address Book.--',
addressAtCurrentInboxRow)
self.ui.tabWidget.setCurrentIndex(5)
self.ui.tableWidgetAddressBook.setCurrentCell(0, 0)
self.statusBar().showMessage(_translate(
"MainWindow", "Entry added to the Address Book. Edit the label to your liking."))
else:
self.statusBar().showMessage(_translate(
"MainWindow", "Error: You cannot add the same address to your address book twice. Try renaming the existing one if you want."))
# Send item on the Inbox tab to trash
def on_action_InboxTrash(self):
while self.ui.tableWidgetInbox.selectedIndexes() != []:
currentRow = self.ui.tableWidgetInbox.selectedIndexes()[0].row()
inventoryHashToTrash = str(self.ui.tableWidgetInbox.item(
currentRow, 3).data(Qt.UserRole).toPyObject())
sqlExecute('''UPDATE inbox SET folder='trash' WHERE msgid=?''', inventoryHashToTrash)
self.ui.textEditInboxMessage.setText("")
self.ui.tableWidgetInbox.removeRow(currentRow)
self.statusBar().showMessage(_translate(
"MainWindow", "Moved items to trash. There is no user interface to view your trash, but it is still on disk if you are desperate to get it back."))
if currentRow == 0:
self.ui.tableWidgetInbox.selectRow(currentRow)
else:
self.ui.tableWidgetInbox.selectRow(currentRow - 1)
def on_action_InboxSaveMessageAs(self):
currentInboxRow = self.ui.tableWidgetInbox.currentRow()
try:
subjectAtCurrentInboxRow = str(self.ui.tableWidgetInbox.item(currentInboxRow,2).text())
except:
subjectAtCurrentInboxRow = ''
# Retrieve the message data out of the SQL database
msgid = str(self.ui.tableWidgetInbox.item(
currentInboxRow, 3).data(Qt.UserRole).toPyObject())
queryreturn = sqlQuery(
'''select message from inbox where msgid=?''', msgid)
if queryreturn != []:
for row in queryreturn:
message, = row
defaultFilename = "".join(x for x in subjectAtCurrentInboxRow if x.isalnum()) + '.txt'
filename = QFileDialog.getSaveFileName(self, _translate("MainWindow","Save As..."), defaultFilename, "Text files (*.txt);;All files (*.*)")
if filename == '':
return
        try:
            with open(filename, 'w') as f:
                f.write(message)
        except Exception, e:
            sys.stderr.write('Write error: ' + str(e) + '\n')
self.statusBar().showMessage(_translate("MainWindow", "Write error."))
# Send item on the Sent tab to trash
def on_action_SentTrash(self):
while self.ui.tableWidgetSent.selectedIndexes() != []:
currentRow = self.ui.tableWidgetSent.selectedIndexes()[0].row()
ackdataToTrash = str(self.ui.tableWidgetSent.item(
currentRow, 3).data(Qt.UserRole).toPyObject())
sqlExecute('''UPDATE sent SET folder='trash' WHERE ackdata=?''', ackdataToTrash)
self.ui.textEditSentMessage.setPlainText("")
self.ui.tableWidgetSent.removeRow(currentRow)
self.statusBar().showMessage(_translate(
"MainWindow", "Moved items to trash. There is no user interface to view your trash, but it is still on disk if you are desperate to get it back."))
if currentRow == 0:
self.ui.tableWidgetSent.selectRow(currentRow)
else:
self.ui.tableWidgetSent.selectRow(currentRow - 1)
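    # Override the maximum-difficulty setting for messages previously
    # marked 'toodifficult' and queue them for proof of work.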
def on_action_ForceSend(self):
currentRow = self.ui.tableWidgetSent.currentRow()
addressAtCurrentRow = str(self.ui.tableWidgetSent.item(
currentRow, 0).data(Qt.UserRole).toPyObject())
toRipe = decodeAddress(addressAtCurrentRow)[3]
sqlExecute(
'''UPDATE sent SET status='forcepow' WHERE toripe=? AND status='toodifficult' and folder='sent' ''',
toRipe)
queryreturn = sqlQuery('''select ackdata FROM sent WHERE status='forcepow' ''')
for row in queryreturn:
ackdata, = row
shared.UISignalQueue.put(('updateSentItemStatusByAckdata', (
ackdata, 'Overriding maximum-difficulty setting. Work queued.')))
shared.workerQueue.put(('sendmessage', ''))
def on_action_SentClipboard(self):
currentRow = self.ui.tableWidgetSent.currentRow()
addressAtCurrentRow = str(self.ui.tableWidgetSent.item(
currentRow, 0).data(Qt.UserRole).toPyObject())
clipboard = QtGui.QApplication.clipboard()
clipboard.setText(str(addressAtCurrentRow))
# Group of functions for the Address Book dialog box
def on_action_AddressBookNew(self):
self.click_pushButtonAddAddressBook()
def on_action_AddressBookDelete(self):
while self.ui.tableWidgetAddressBook.selectedIndexes() != []:
currentRow = self.ui.tableWidgetAddressBook.selectedIndexes()[
0].row()
labelAtCurrentRow = self.ui.tableWidgetAddressBook.item(
currentRow, 0).text().toUtf8()
addressAtCurrentRow = self.ui.tableWidgetAddressBook.item(
currentRow, 1).text()
sqlExecute('''DELETE FROM addressbook WHERE label=? AND address=?''',
str(labelAtCurrentRow), str(addressAtCurrentRow))
self.ui.tableWidgetAddressBook.removeRow(currentRow)
self.rerenderInboxFromLabels()
self.rerenderSentToLabels()
def on_action_AddressBookClipboard(self):
fullStringOfAddresses = ''
listOfSelectedRows = {}
for i in range(len(self.ui.tableWidgetAddressBook.selectedIndexes())):
listOfSelectedRows[
self.ui.tableWidgetAddressBook.selectedIndexes()[i].row()] = 0
for currentRow in listOfSelectedRows:
addressAtCurrentRow = self.ui.tableWidgetAddressBook.item(
currentRow, 1).text()
if fullStringOfAddresses == '':
fullStringOfAddresses = addressAtCurrentRow
else:
fullStringOfAddresses += ', ' + str(addressAtCurrentRow)
clipboard = QtGui.QApplication.clipboard()
clipboard.setText(fullStringOfAddresses)
def on_action_AddressBookSend(self):
listOfSelectedRows = {}
for i in range(len(self.ui.tableWidgetAddressBook.selectedIndexes())):
listOfSelectedRows[
self.ui.tableWidgetAddressBook.selectedIndexes()[i].row()] = 0
for currentRow in listOfSelectedRows:
addressAtCurrentRow = self.ui.tableWidgetAddressBook.item(
currentRow, 1).text()
if self.ui.lineEditTo.text() == '':
self.ui.lineEditTo.setText(str(addressAtCurrentRow))
else:
self.ui.lineEditTo.setText(str(
self.ui.lineEditTo.text()) + '; ' + str(addressAtCurrentRow))
if listOfSelectedRows == {}:
self.statusBar().showMessage(_translate(
"MainWindow", "No addresses selected."))
else:
self.statusBar().showMessage('')
self.ui.tabWidget.setCurrentIndex(1)
def on_action_AddressBookSubscribe(self):
listOfSelectedRows = {}
for i in range(len(self.ui.tableWidgetAddressBook.selectedIndexes())):
listOfSelectedRows[self.ui.tableWidgetAddressBook.selectedIndexes()[i].row()] = 0
for currentRow in listOfSelectedRows:
addressAtCurrentRow = str(self.ui.tableWidgetAddressBook.item(currentRow,1).text())
# Then subscribe to it... provided it's not already in the address book
if shared.isAddressInMySubscriptionsList(addressAtCurrentRow):
                self.statusBar().showMessage(QtGui.QApplication.translate("MainWindow", "Error: You cannot add the same address to your subscriptions twice. Perhaps rename the existing one if you want."))
continue
labelAtCurrentRow = self.ui.tableWidgetAddressBook.item(currentRow,0).text().toUtf8()
self.addSubscription(addressAtCurrentRow, labelAtCurrentRow)
self.ui.tabWidget.setCurrentIndex(4)
def on_context_menuAddressBook(self, point):
self.popMenuAddressBook.exec_(
self.ui.tableWidgetAddressBook.mapToGlobal(point))
# Group of functions for the Subscriptions dialog box
def on_action_SubscriptionsNew(self):
self.click_pushButtonAddSubscription()
def on_action_SubscriptionsDelete(self):
print 'clicked Delete'
currentRow = self.ui.tableWidgetSubscriptions.currentRow()
labelAtCurrentRow = self.ui.tableWidgetSubscriptions.item(
currentRow, 0).text().toUtf8()
addressAtCurrentRow = self.ui.tableWidgetSubscriptions.item(
currentRow, 1).text()
sqlExecute('''DELETE FROM subscriptions WHERE label=? AND address=?''',
str(labelAtCurrentRow), str(addressAtCurrentRow))
self.ui.tableWidgetSubscriptions.removeRow(currentRow)
self.rerenderInboxFromLabels()
shared.reloadBroadcastSendersForWhichImWatching()
def on_action_SubscriptionsClipboard(self):
currentRow = self.ui.tableWidgetSubscriptions.currentRow()
addressAtCurrentRow = self.ui.tableWidgetSubscriptions.item(
currentRow, 1).text()
clipboard = QtGui.QApplication.clipboard()
clipboard.setText(str(addressAtCurrentRow))
def on_action_SubscriptionsEnable(self):
currentRow = self.ui.tableWidgetSubscriptions.currentRow()
labelAtCurrentRow = self.ui.tableWidgetSubscriptions.item(
currentRow, 0).text().toUtf8()
addressAtCurrentRow = self.ui.tableWidgetSubscriptions.item(
currentRow, 1).text()
sqlExecute(
'''update subscriptions set enabled=1 WHERE label=? AND address=?''',
str(labelAtCurrentRow), str(addressAtCurrentRow))
self.ui.tableWidgetSubscriptions.item(
currentRow, 0).setTextColor(QApplication.palette().text().color())
self.ui.tableWidgetSubscriptions.item(
currentRow, 1).setTextColor(QApplication.palette().text().color())
shared.reloadBroadcastSendersForWhichImWatching()
def on_action_SubscriptionsDisable(self):
currentRow = self.ui.tableWidgetSubscriptions.currentRow()
labelAtCurrentRow = self.ui.tableWidgetSubscriptions.item(
currentRow, 0).text().toUtf8()
addressAtCurrentRow = self.ui.tableWidgetSubscriptions.item(
currentRow, 1).text()
sqlExecute(
'''update subscriptions set enabled=0 WHERE label=? AND address=?''',
str(labelAtCurrentRow), str(addressAtCurrentRow))
self.ui.tableWidgetSubscriptions.item(
currentRow, 0).setTextColor(QtGui.QColor(128, 128, 128))
self.ui.tableWidgetSubscriptions.item(
currentRow, 1).setTextColor(QtGui.QColor(128, 128, 128))
shared.reloadBroadcastSendersForWhichImWatching()
def on_context_menuSubscriptions(self, point):
self.popMenuSubscriptions.exec_(
self.ui.tableWidgetSubscriptions.mapToGlobal(point))
# Group of functions for the Blacklist dialog box
def on_action_BlacklistNew(self):
self.click_pushButtonAddBlacklist()
def on_action_BlacklistDelete(self):
currentRow = self.ui.tableWidgetBlacklist.currentRow()
labelAtCurrentRow = self.ui.tableWidgetBlacklist.item(
currentRow, 0).text().toUtf8()
addressAtCurrentRow = self.ui.tableWidgetBlacklist.item(
currentRow, 1).text()
if shared.config.get('bitmessagesettings', 'blackwhitelist') == 'black':
sqlExecute(
'''DELETE FROM blacklist WHERE label=? AND address=?''',
str(labelAtCurrentRow), str(addressAtCurrentRow))
else:
sqlExecute(
'''DELETE FROM whitelist WHERE label=? AND address=?''',
str(labelAtCurrentRow), str(addressAtCurrentRow))
self.ui.tableWidgetBlacklist.removeRow(currentRow)
def on_action_BlacklistClipboard(self):
currentRow = self.ui.tableWidgetBlacklist.currentRow()
addressAtCurrentRow = self.ui.tableWidgetBlacklist.item(
currentRow, 1).text()
clipboard = QtGui.QApplication.clipboard()
clipboard.setText(str(addressAtCurrentRow))
def on_context_menuBlacklist(self, point):
self.popMenuBlacklist.exec_(
self.ui.tableWidgetBlacklist.mapToGlobal(point))
def on_action_BlacklistEnable(self):
currentRow = self.ui.tableWidgetBlacklist.currentRow()
addressAtCurrentRow = self.ui.tableWidgetBlacklist.item(
currentRow, 1).text()
self.ui.tableWidgetBlacklist.item(
currentRow, 0).setTextColor(QApplication.palette().text().color())
self.ui.tableWidgetBlacklist.item(
currentRow, 1).setTextColor(QApplication.palette().text().color())
if shared.config.get('bitmessagesettings', 'blackwhitelist') == 'black':
sqlExecute(
'''UPDATE blacklist SET enabled=1 WHERE address=?''',
str(addressAtCurrentRow))
else:
sqlExecute(
'''UPDATE whitelist SET enabled=1 WHERE address=?''',
str(addressAtCurrentRow))
def on_action_BlacklistDisable(self):
currentRow = self.ui.tableWidgetBlacklist.currentRow()
addressAtCurrentRow = self.ui.tableWidgetBlacklist.item(
currentRow, 1).text()
self.ui.tableWidgetBlacklist.item(
currentRow, 0).setTextColor(QtGui.QColor(128, 128, 128))
self.ui.tableWidgetBlacklist.item(
currentRow, 1).setTextColor(QtGui.QColor(128, 128, 128))
if shared.config.get('bitmessagesettings', 'blackwhitelist') == 'black':
sqlExecute(
'''UPDATE blacklist SET enabled=0 WHERE address=?''', str(addressAtCurrentRow))
else:
sqlExecute(
'''UPDATE whitelist SET enabled=0 WHERE address=?''', str(addressAtCurrentRow))
# Group of functions for the Your Identities dialog box
def on_action_YourIdentitiesNew(self):
self.click_NewAddressDialog()
def on_action_YourIdentitiesEnable(self):
currentRow = self.ui.tableWidgetYourIdentities.currentRow()
addressAtCurrentRow = str(
self.ui.tableWidgetYourIdentities.item(currentRow, 1).text())
shared.config.set(addressAtCurrentRow, 'enabled', 'true')
with open(shared.appdata + 'keys.dat', 'wb') as configfile:
shared.config.write(configfile)
self.ui.tableWidgetYourIdentities.item(
currentRow, 0).setTextColor(QApplication.palette().text().color())
self.ui.tableWidgetYourIdentities.item(
currentRow, 1).setTextColor(QApplication.palette().text().color())
self.ui.tableWidgetYourIdentities.item(
currentRow, 2).setTextColor(QApplication.palette().text().color())
if shared.safeConfigGetBoolean(addressAtCurrentRow, 'mailinglist'):
            self.ui.tableWidgetYourIdentities.item(currentRow, 1).setTextColor(QtGui.QColor(137, 4, 177)) # magenta
if shared.safeConfigGetBoolean(addressAtCurrentRow, 'chan'):
self.ui.tableWidgetYourIdentities.item(currentRow, 1).setTextColor(QtGui.QColor(216, 119, 0)) # orange
shared.reloadMyAddressHashes()
def on_action_YourIdentitiesDisable(self):
currentRow = self.ui.tableWidgetYourIdentities.currentRow()
addressAtCurrentRow = str(
self.ui.tableWidgetYourIdentities.item(currentRow, 1).text())
if shared.safeConfigGetBoolean(str(addressAtCurrentRow), 'consensus'):
QMessageBox.information( self, "Deleting a voting address", "This is a voting address. To remove it, please do so from the voting tab." )
return
shared.config.set(str(addressAtCurrentRow), 'enabled', 'false')
self.ui.tableWidgetYourIdentities.item(
currentRow, 0).setTextColor(QtGui.QColor(128, 128, 128))
self.ui.tableWidgetYourIdentities.item(
currentRow, 1).setTextColor(QtGui.QColor(128, 128, 128))
self.ui.tableWidgetYourIdentities.item(
currentRow, 2).setTextColor(QtGui.QColor(128, 128, 128))
if shared.safeConfigGetBoolean(addressAtCurrentRow, 'mailinglist'):
            self.ui.tableWidgetYourIdentities.item(currentRow, 1).setTextColor(QtGui.QColor(137, 4, 177)) # magenta
with open(shared.appdata + 'keys.dat', 'wb') as configfile:
shared.config.write(configfile)
shared.reloadMyAddressHashes()
def on_action_YourIdentitiesClipboard(self):
currentRow = self.ui.tableWidgetYourIdentities.currentRow()
addressAtCurrentRow = self.ui.tableWidgetYourIdentities.item(
currentRow, 1).text()
clipboard = QtGui.QApplication.clipboard()
clipboard.setText(str(addressAtCurrentRow))
def on_action_YourIdentitiesSetAvatar(self):
self.on_action_SetAvatar(self.ui.tableWidgetYourIdentities)
def on_action_AddressBookSetAvatar(self):
self.on_action_SetAvatar(self.ui.tableWidgetAddressBook)
def on_action_SubscriptionsSetAvatar(self):
self.on_action_SetAvatar(self.ui.tableWidgetSubscriptions)
def on_action_BlacklistSetAvatar(self):
self.on_action_SetAvatar(self.ui.tableWidgetBlacklist)
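    # Let the user pick (or clear) an avatar image for the address in
    # the given table widget; avatars are stored under appdata/avatars
    # keyed by the MD5 hash of the address.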
def on_action_SetAvatar(self, thisTableWidget):
# thisTableWidget = self.ui.tableWidgetYourIdentities
if not os.path.exists(shared.appdata + 'avatars/'):
os.makedirs(shared.appdata + 'avatars/')
currentRow = thisTableWidget.currentRow()
addressAtCurrentRow = thisTableWidget.item(
currentRow, 1).text()
hash = hashlib.md5(addBMIfNotPresent(addressAtCurrentRow)).hexdigest()
extensions = ['PNG', 'GIF', 'JPG', 'JPEG', 'SVG', 'BMP', 'MNG', 'PBM', 'PGM', 'PPM', 'TIFF', 'XBM', 'XPM', 'TGA']
# http://pyqt.sourceforge.net/Docs/PyQt4/qimagereader.html#supportedImageFormats
names = {'BMP':'Windows Bitmap', 'GIF':'Graphic Interchange Format', 'JPG':'Joint Photographic Experts Group', 'JPEG':'Joint Photographic Experts Group', 'MNG':'Multiple-image Network Graphics', 'PNG':'Portable Network Graphics', 'PBM':'Portable Bitmap', 'PGM':'Portable Graymap', 'PPM':'Portable Pixmap', 'TIFF':'Tagged Image File Format', 'XBM':'X11 Bitmap', 'XPM':'X11 Pixmap', 'SVG':'Scalable Vector Graphics', 'TGA':'Targa Image Format'}
filters = []
all_images_filter = []
current_files = []
for ext in extensions:
filters += [ names[ext] + ' (*.' + ext.lower() + ')' ]
all_images_filter += [ '*.' + ext.lower() ]
            upper = shared.appdata + 'avatars/' + avatar_hash + '.' + ext.upper()
            lower = shared.appdata + 'avatars/' + avatar_hash + '.' + ext.lower()
if os.path.isfile(lower):
current_files += [lower]
elif os.path.isfile(upper):
current_files += [upper]
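        # Prepend an aggregate 'Image files' filter and an 'All files' filter so
        # that they appear first in the file dialog's filter dropdown.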
filters[0:0] = ['Image files (' + ' '.join(all_images_filter) + ')']
filters[1:1] = ['All files (*.*)']
sourcefile = QFileDialog.getOpenFileName(self, _translate("MainWindow","Set avatar..."), filter = ';;'.join(filters))
# determine the correct filename (note that avatars don't use the suffix)
        destination = shared.appdata + 'avatars/' + avatar_hash + '.' + sourcefile.split('.')[-1]
exists = QtCore.QFile.exists(destination)
if sourcefile == '':
# ask for removal of avatar
            if exists or len(current_files) > 0:
displayMsg = _translate("MainWindow", "Do you really want to remove this avatar?")
overwrite = QtGui.QMessageBox.question(
self, 'Message', displayMsg, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
else:
overwrite = QtGui.QMessageBox.No
else:
# ask whether to overwrite old avatar
            if exists or len(current_files) > 0:
displayMsg = _translate("MainWindow", "You have already set an avatar for this address. Do you really want to overwrite it?")
overwrite = QtGui.QMessageBox.question(
self, 'Message', displayMsg, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
else:
overwrite = QtGui.QMessageBox.No
# copy the image file to the appdata folder
        if not exists or overwrite == QtGui.QMessageBox.Yes:
if overwrite == QtGui.QMessageBox.Yes:
for file in current_files:
QtCore.QFile.remove(file)
QtCore.QFile.remove(destination)
# copy it
if sourcefile != '':
copied = QtCore.QFile.copy(sourcefile, destination)
if not copied:
                    print 'Failed to copy avatar image to', destination
return False
# set the icon
thisTableWidget.item(
currentRow, 0).setIcon(avatarize(addressAtCurrentRow))
self.rerenderSubscriptions()
self.rerenderComboBoxSendFrom()
self.rerenderInboxFromLabels()
self.rerenderInboxToLabels()
self.rerenderSentFromLabels()
self.rerenderSentToLabels()
def on_context_menuYourIdentities(self, point):
self.popMenu.exec_(
self.ui.tableWidgetYourIdentities.mapToGlobal(point))
def on_context_menuInbox(self, point):
self.popMenuInbox.exec_(self.ui.tableWidgetInbox.mapToGlobal(point))
def on_context_menuSent(self, point):
self.popMenuSent = QtGui.QMenu(self)
self.popMenuSent.addAction(self.actionSentClipboard)
self.popMenuSent.addAction(self.actionTrashSentMessage)
        # Check to see if this item's status is 'toodifficult' and display an
        # additional menu option (Force Send) if it is.
currentRow = self.ui.tableWidgetSent.currentRow()
ackData = str(self.ui.tableWidgetSent.item(
currentRow, 3).data(Qt.UserRole).toPyObject())
queryreturn = sqlQuery('''SELECT status FROM sent where ackdata=?''', ackData)
for row in queryreturn:
status, = row
if status == 'toodifficult':
self.popMenuSent.addAction(self.actionForceSend)
self.popMenuSent.exec_(self.ui.tableWidgetSent.mapToGlobal(point))
def inboxSearchLineEditPressed(self):
searchKeyword = self.ui.inboxSearchLineEdit.text().toUtf8().data()
searchOption = self.ui.inboxSearchOptionCB.currentText().toUtf8().data()
self.ui.inboxSearchLineEdit.setText(QString(""))
self.ui.textEditInboxMessage.setPlainText(QString(""))
self.loadInbox(searchOption, searchKeyword)
def sentSearchLineEditPressed(self):
searchKeyword = self.ui.sentSearchLineEdit.text().toUtf8().data()
searchOption = self.ui.sentSearchOptionCB.currentText().toUtf8().data()
self.ui.sentSearchLineEdit.setText(QString(""))
self.ui.textEditInboxMessage.setPlainText(QString(""))
self.loadSent(searchOption, searchKeyword)
def tableWidgetInboxItemClicked(self):
currentRow = self.ui.tableWidgetInbox.currentRow()
if currentRow >= 0:
font = QFont()
font.setBold(False)
self.ui.textEditInboxMessage.setCurrentFont(font)
fromAddress = str(self.ui.tableWidgetInbox.item(
currentRow, 1).data(Qt.UserRole).toPyObject())
msgid = str(self.ui.tableWidgetInbox.item(
currentRow, 3).data(Qt.UserRole).toPyObject())
queryreturn = sqlQuery(
'''select message from inbox where msgid=?''', msgid)
if queryreturn != []:
for row in queryreturn:
messageText, = row
messageText = shared.fixPotentiallyInvalidUTF8Data(messageText)
            messageText = unicode(messageText, 'utf-8')
if len(messageText) > 30000:
messageText = (
messageText[:30000] + '\n' +
'--- Display of the remainder of the message ' +
'truncated because it is too long.\n' +
'--- To see the full message, right-click in the ' +
'Inbox view and select "View HTML code as formatted ' +
'text",\n' +
'--- or select "Save message as..." to save it to a ' +
'file, or select "Reply" and ' +
'view the full message in the quote.')
# If we have received this message from either a broadcast address
# or from someone in our address book, display as HTML
if decodeAddress(fromAddress)[3] in shared.broadcastSendersForWhichImWatching or shared.isAddressInMyAddressBook(fromAddress):
self.ui.textEditInboxMessage.setText(messageText)
else:
self.ui.textEditInboxMessage.setPlainText(messageText)
self.ui.tableWidgetInbox.item(currentRow, 0).setFont(font)
self.ui.tableWidgetInbox.item(currentRow, 1).setFont(font)
self.ui.tableWidgetInbox.item(currentRow, 2).setFont(font)
self.ui.tableWidgetInbox.item(currentRow, 3).setFont(font)
inventoryHash = str(self.ui.tableWidgetInbox.item(
currentRow, 3).data(Qt.UserRole).toPyObject())
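            # The UserRole data in column 3 is the msgid; it doubles as the key
            # for the Ubuntu messaging menu entry and for the read-flag update below.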
self.ubuntuMessagingMenuClear(inventoryHash)
sqlExecute('''update inbox set read=1 WHERE msgid=?''', inventoryHash)
self.changedInboxUnread()
def tableWidgetSentItemClicked(self):
currentRow = self.ui.tableWidgetSent.currentRow()
if currentRow >= 0:
ackdata = str(self.ui.tableWidgetSent.item(
currentRow, 3).data(Qt.UserRole).toPyObject())
queryreturn = sqlQuery(
'''select message from sent where ackdata=?''', ackdata)
if queryreturn != []:
for row in queryreturn:
message, = row
else:
message = "Error occurred: could not load message from disk."
            message = unicode(message, 'utf-8')
self.ui.textEditSentMessage.setPlainText(message)
def tableWidgetYourIdentitiesItemChanged(self):
currentRow = self.ui.tableWidgetYourIdentities.currentRow()
if currentRow >= 0:
addressAtCurrentRow = self.ui.tableWidgetYourIdentities.item(
currentRow, 1).text()
shared.config.set(str(addressAtCurrentRow), 'label', str(
self.ui.tableWidgetYourIdentities.item(currentRow, 0).text().toUtf8()))
with open(shared.appdata + 'keys.dat', 'wb') as configfile:
shared.config.write(configfile)
self.rerenderComboBoxSendFrom()
# self.rerenderInboxFromLabels()
self.rerenderInboxToLabels()
self.rerenderSentFromLabels()
# self.rerenderSentToLabels()
def tableWidgetAddressBookItemChanged(self):
currentRow = self.ui.tableWidgetAddressBook.currentRow()
if currentRow >= 0:
addressAtCurrentRow = self.ui.tableWidgetAddressBook.item(
currentRow, 1).text()
sqlExecute('''UPDATE addressbook set label=? WHERE address=?''',
str(self.ui.tableWidgetAddressBook.item(currentRow, 0).text().toUtf8()),
str(addressAtCurrentRow))
self.rerenderInboxFromLabels()
self.rerenderSentToLabels()
def tableWidgetSubscriptionsItemChanged(self):
currentRow = self.ui.tableWidgetSubscriptions.currentRow()
if currentRow >= 0:
addressAtCurrentRow = self.ui.tableWidgetSubscriptions.item(
currentRow, 1).text()
sqlExecute('''UPDATE subscriptions set label=? WHERE address=?''',
str(self.ui.tableWidgetSubscriptions.item(currentRow, 0).text().toUtf8()),
str(addressAtCurrentRow))
self.rerenderInboxFromLabels()
self.rerenderSentToLabels()
def writeNewAddressToTable(self, label, address, streamNumber):
self.ui.tableWidgetYourIdentities.setSortingEnabled(False)
self.ui.tableWidgetYourIdentities.insertRow(0)
newItem = QtGui.QTableWidgetItem(unicode(label, 'utf-8'))
newItem.setIcon(avatarize(address))
self.ui.tableWidgetYourIdentities.setItem(
0, 0, newItem)
newItem = QtGui.QTableWidgetItem(address)
newItem.setFlags(
QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
if shared.safeConfigGetBoolean(address, 'chan'):
newItem.setTextColor(QtGui.QColor(216, 119, 0)) # orange
self.ui.tableWidgetYourIdentities.setItem(0, 1, newItem)
newItem = QtGui.QTableWidgetItem(streamNumber)
newItem.setFlags(
QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
self.ui.tableWidgetYourIdentities.setItem(0, 2, newItem)
# self.ui.tableWidgetYourIdentities.setSortingEnabled(True)
self.rerenderComboBoxSendFrom()
def updateStatusBar(self, data):
if data != "":
with shared.printLock:
print 'Status bar:', data
self.statusBar().showMessage(data)
def init_voting(self):
self.ui.comboBoxVotingElection.currentIndexChanged.connect( self.comboBoxVotingElectionIndexChanged )
self.ui.pushButtonVotingCreateElection.clicked.connect( self.pushButtonVotingCreateElectionClicked )
self.ui.pushButtonVotingJoinElection.clicked.connect( self.pushButtonVotingJoinElectionClicked )
self.ui.pushButtonVotingDebug.clicked.connect( self.pushButtonVotingDebugClicked )
self.ui.pushButtonVotingTimestamper.clicked.connect( self.pushButtonVotingTimestamperClicked )
self.ui.pushButtonVotingDetails.clicked.connect( self.pushButtonVotingDetailsClicked )
self.ui.tableWidgetVotingAllResults.itemSelectionChanged.connect( self.tableWidgetVotingAllResultsItemChanged )
self.ui.comboBoxVotingMultipleAddresses.currentIndexChanged.connect( self.comboBoxVotingMultipleAddressesIndexChanged )
self.ui.comboBoxVotingVote.currentIndexChanged.connect( self.comboBoxVotingVoteIndexChanged )
self.ui.pushButtonVotingCastVote.clicked.connect( self.pushButtonVotingCastVoteClicked )
self.init_voting_options_button()
self.voting_phase_timer = None
self.current_election = None
self.current_election_result = None
self.current_election_voter_address = None
self.current_election_answer = None
self.refresh_elections()
# def fix_voting_widgets_stretch(self):
def refresh_elections(self, selected_election=None):
self.ui.comboBoxVotingElection.clear()
selected_election = selected_election or self.current_election
        self.elections = list( ConsensusProtocol.get_all() )
for el in self.elections:
# Don't mind the empty label, we'll set it later in refresh_election_combobox_statuses()
self.ui.comboBoxVotingElection.addItem( avatarize( el.hash ), "" )
if selected_election in self.elections:
self.ui.comboBoxVotingElection.setCurrentIndex( self.elections.index( selected_election ) )
if len( self.elections ) > 0:
self.ui.comboBoxVotingElection.setEnabled( True )
else:
self.ui.comboBoxVotingElection.setEnabled( False )
self.ui.comboBoxVotingElection.addItem( "(Create or import an election first)" )
self.refresh_election_combobox_statuses()
def refresh_election_ui(self, election ):
self.refresh_election_combobox_statuses()
if election == self.current_election:
self.refresh_election( election )
def refresh_election_combobox_statuses(self):
        for i, el in enumerate( self.elections ):
name = el.data.question if el.data is not None else el.hash[:16]
self.ui.comboBoxVotingElection.setItemText( i, "%s (%s)" % ( name, get_election_status_text( el ) ) )
def comboBoxVotingElectionIndexChanged(self, index):
if len( self.elections ) == 0:
self.current_election = None
else:
self.current_election = self.elections[index] if index >= 0 else None
self.refresh_election( self.current_election )
def format_seconds_as_time_left(self, seconds):
plural = lambda count: "s" if count != 1 else ""
if seconds < 60:
return "very soon..."
minutes, seconds = int( seconds / 60 ), seconds % 60
if minutes < 60:
return "%d minute%s" % ( minutes, plural(minutes) )
hours, minutes = int( minutes / 60 ), minutes % 60
if hours < 24:
if minutes > 0:
return "%d hour%s and %d minute%s" % ( hours, plural(hours), minutes, plural(minutes) )
else:
return "%d hour%s" % ( hours, plural(hours) )
days, hours = int( hours / 24 ), hours % 24
if hours > 0:
return "%d day%s, %d hour%s" % ( days, plural(days), hours, plural(hours) )
else:
return "%d day%s" % ( days, plural(days) )
def refresh_election(self, election):
valid_election = election is not None and election.data is not None \
and election.initialized and election.data.has_all_public_keys()
awaiting_pubkeys = False
if valid_election:
self.ui.labelVotingQuestion.setText( election.data.question )
self.ui.labelVotingStatus.setText( get_election_status_text( election ) )
status = election.get_status()
if status <= ConsensusProtocol.STATUS_COMMITMENT_PHASE:
if self.voting_phase_timer is None:
self.voting_phase_timer = QTimer( self )
self.voting_phase_timer.timeout.connect( self.voting_phase_timer_tick )
self.voting_phase_timer.setInterval( 1000 )
self.voting_phase_timer.start()
self.voting_phase_timer_tick()
self.ui.widgetVotingTimer.show()
else:
self.ui.widgetVotingTimer.hide()
else:
unknown_election = election is not None and election.data is None
if unknown_election:
self.ui.labelVotingQuestion.setText( election.hash[:32] )
self.ui.labelVotingStatus.setText( get_election_status_text( election ) )
else:
self.ui.labelVotingQuestion.setText( "No election selected" )
self.ui.labelVotingStatus.setText( "" )
if self.voting_phase_timer is not None:
self.voting_phase_timer.stop()
self.ui.widgetVotingTimer.hide()
if election is not None and election.data is not None:
self.ui.labelVotingQuestion.setText( election.data.question )
if not election.initialized:
self.ui.labelVotingStatus.setText( "Loading... Please wait" )
self.ui.widgetVotingTimer.hide()
election = None
elif not election.data.has_all_public_keys():
self.ui.labelVotingStatus.setText( "Waiting for public keys... Please wait" )
self.ui.widgetVotingTimer.hide()
election = None
awaiting_pubkeys = True
self.ui.pushButtonVotingDebug.setEnabled( valid_election )
self.ui.pushButtonVotingTimestamper.setEnabled( valid_election and status <= ConsensusProtocol.STATUS_POSTING )
self.ui.pushButtonVotingDetails.setEnabled( valid_election or awaiting_pubkeys )
self.ui.pushButtonVotingOptions.setEnabled( valid_election or awaiting_pubkeys )
self.refresh_votes_table(election)
self.refresh_pushButtonVotingTimestamperText(election)
self.refresh_result_table(election)
self.refresh_results_widget(election)
self.refresh_vote_actions(election)
def pushButtonVotingCreateElectionClicked(self):
self.dialog = NewCreateElectionDialog(self)
if self.dialog.exec_():
election = self.dialog.result
def pushButtonVotingJoinElectionClicked(self):
self.voting_join_menu = QtGui.QMenu(self)
action = QtGui.QAction( "Join by election hash", self )
action.triggered.connect( self.actionVotingJoinHash )
self.voting_join_menu.addAction( action )
self.voting_join_menu.addSeparator()
action = QtGui.QAction( "Import election from file", self )
action.triggered.connect( self.actionVotingJoinImportFile )
self.voting_join_menu.addAction( action )
        self.voting_join_menu.exec_( self.ui.pushButtonVotingJoinElection.mapToGlobal(QPoint(0,self.ui.pushButtonVotingJoinElection.size().height())))
    def actionVotingJoinHash(self):
        text, ok = QtGui.QInputDialog.getText(self, 'Join election', '<b>Enter the full election hash (64 characters):</b><br><br>Note that another peer with the election data must broadcast<br>their data after you have joined this election.')
        if ok:
            election_hash = str( text.trimmed() )
            try:
                # A valid election hash is 64 hexadecimal characters
                election_hash.decode('hex')
                assert len( election_hash ) == 64
            except Exception, e:
                QtGui.QMessageBox.warning(self, "Invalid hash", "You didn't enter a valid hash: %s" % e )
                # Let the user try again
                self.actionVotingJoinHash()
            else:
                ConsensusProtocol.join( election_hash )
def actionVotingJoinImportFile(self):
        filename = QtGui.QFileDialog.getOpenFileName(self, _translate("MainWindow", "Import election"),
            shared.appdata, _translate("MainWindow", "Election file (*.vote)") )
if filename == '':
return
election = ConsensusProtocol.read_from_file( filename )
def election_initialized(self, election):
self.refresh_elections(election)
if election.data is not None:
if election.data.settings_get_first_load():
election.data.settings_set_first_load(False)
self.show_dialog_asking_about_timestamper(election)
def show_dialog_asking_about_timestamper(self, election):
if election is None:
return
def callback():
election.settings_set_timestamper_settings( True, [] )
self.pushButtonVotingTimestamperClicked()
mb = QtGui.QMessageBox( self )
mb.setIcon( QtGui.QMessageBox.Information)
mb.setWindowTitle( "Do you want to be a timestamper?" )
mb.setTextFormat( Qt.RichText )
mb.setText( """By being a timestamper, you contribute to the execution of the election by taking part in the timestamping and results phases of the election.<br><br>
To be a timestamper, you must agree to possibly spend a very small amount of bitcoin (%.08f BTC / %d satoshi) during the election. This is used to prove that a commitment was created before the deadline and thus ensure that only votes cast before the deadline can be validated as such.<br><br>
<b>The election cannot be executed if nobody volunteers as timestamper!</b><br><br>
<b>Do you want to be a timestamper?</b>""" % ( BitcoinThread.BTC_UNSPENT_MIN_AVAILABLE, BitcoinThread.SATOSHI_UNSPENT_MIN_AVAILABLE ) )
yes_button = mb.addButton( "Yes", QMessageBox.AcceptRole )
yes_button.clicked.connect( callback )
no_button = mb.addButton( "No", QMessageBox.RejectRole )
mb.show()
def voting_phase_timer_tick(self):
if self.current_election is None:
self.voting_phase_timer.stop()
return
status = self.current_election.get_status()
seconds = self.current_election.get_time_for_next_phase( status )
if seconds is None:
self.voting_phase_timer.stop()
return
if seconds >= 60:
time_left = self.format_seconds_as_time_left( seconds )
self.ui.labelVotingPhaseTimer.setText( "Approximately %s left..." % time_left)
else:
self.ui.labelVotingPhaseTimer.setText( "Very soon... %d" % seconds )
def pushButtonVotingDebugClicked(self):
if self.current_election is None:
return
self.voting_debug_menu = QtGui.QMenu(self)
action = QtGui.QAction( "Trigger posting phase", self )
action.triggered.connect( self.actionVotingDebugTriggerPostingPhase )
action.setEnabled( self.current_election.get_status() == ConsensusProtocol.STATUS_NOT_OPEN_YET )
self.voting_debug_menu.addAction( action )
action = QtGui.QAction( "Trigger posting phase (broadcast)", self )
action.triggered.connect( self.actionVotingDebugTriggerPostingPhaseBroadcast )
action.setEnabled( self.current_election.get_status() == ConsensusProtocol.STATUS_NOT_OPEN_YET )
self.voting_debug_menu.addAction( action )
action = QtGui.QAction( "Revoke posting phase", self )
action.triggered.connect( self.actionVotingDebugRevokePostingPhase )
action.setEnabled( self.current_election.get_status() == ConsensusProtocol.STATUS_POSTING )
self.voting_debug_menu.addAction( action )
self.voting_debug_menu.addSeparator()
action = QtGui.QAction( "Trigger timestamping phase", self )
action.triggered.connect( self.actionVotingDebugTriggerCommitmentPhase )
action.setEnabled( self.current_election.get_status() == ConsensusProtocol.STATUS_POSTING )
self.voting_debug_menu.addAction( action )
action = QtGui.QAction( "Trigger timestamping phase (broadcast)", self )
action.triggered.connect( self.actionVotingDebugTriggerCommitmentPhaseBroadcast )
action.setEnabled( self.current_election.get_status() == ConsensusProtocol.STATUS_POSTING )
self.voting_debug_menu.addAction( action )
action = QtGui.QAction( "Revoke timestamping phase", self )
action.triggered.connect( self.actionVotingDebugRevokeCommitmentPhase )
action.setEnabled( self.current_election.get_status() == ConsensusProtocol.STATUS_COMMITMENT_PHASE )
self.voting_debug_menu.addAction( action )
self.voting_debug_menu.addSeparator()
action = QtGui.QAction( "Trigger results phase", self )
action.triggered.connect( self.actionVotingDebugTriggerResultsPhase )
action.setEnabled( self.current_election.get_status() == ConsensusProtocol.STATUS_COMMITMENT_PHASE )
self.voting_debug_menu.addAction( action )
action = QtGui.QAction( "Trigger results phase (broadcast)", self )
action.triggered.connect( self.actionVotingDebugTriggerResultsPhaseBroadcast )
action.setEnabled( self.current_election.get_status() == ConsensusProtocol.STATUS_COMMITMENT_PHASE )
self.voting_debug_menu.addAction( action )
action = QtGui.QAction( "Revoke results phase", self )
action.triggered.connect( self.actionVotingDebugRevokeResultsPhase )
action.setEnabled( self.current_election.get_status() >= ConsensusProtocol.STATUS_RESULTS_PHASE )
self.voting_debug_menu.addAction( action )
self.voting_debug_menu.addSeparator()
action = QtGui.QAction( "Clear all messages", self )
action.triggered.connect( self.actionVotingDebugClearMessages )
self.voting_debug_menu.addAction( action )
        self.voting_debug_menu.exec_( self.ui.pushButtonVotingDebug.mapToGlobal(QPoint(0,self.ui.pushButtonVotingDebug.size().height())))
def actionVotingDebugTriggerPostingPhase(self):
if self.current_election is None:
return
self.current_election.debug_trigger_posting_phase()
def actionVotingDebugTriggerPostingPhaseBroadcast(self):
if self.current_election is None:
return
self.current_election.debug_trigger_posting_phase(True)
def actionVotingDebugRevokePostingPhase(self):
if self.current_election is None:
return
self.current_election.debug_revoke_posting_phase()
self.current_election.data.clear_settings()
self.refresh_election( self.current_election )
def actionVotingDebugTriggerCommitmentPhase(self):
if self.current_election is None:
return
self.current_election.debug_trigger_commitment_phase()
def actionVotingDebugTriggerCommitmentPhaseBroadcast(self):
if self.current_election is None:
return
self.current_election.debug_trigger_commitment_phase(True)
def actionVotingDebugRevokeCommitmentPhase(self):
if self.current_election is None:
return
self.current_election.debug_revoke_commitment_phase()
self.refresh_election( self.current_election )
def actionVotingDebugTriggerResultsPhase(self):
if self.current_election is None:
return
self.current_election.debug_trigger_results_phase()
def actionVotingDebugTriggerResultsPhaseBroadcast(self):
if self.current_election is None:
return
self.current_election.debug_trigger_results_phase(True)
def actionVotingDebugRevokeResultsPhase(self):
if self.current_election is None:
return
self.current_election.debug_revoke_results_phase()
self.refresh_election( self.current_election )
def actionVotingDebugClearMessages(self):
if self.current_election is None:
return
self.current_election.debug_clear_messages()
self.current_election.data.clear_settings()
self.refresh_election( self.current_election )
self.refresh_election_combobox_statuses()
def pushButtonVotingTimestamperClicked(self):
if self.current_election is None:
return
self.dialog = NewTimestamperSettingsDialog(self, self.current_election)
if self.dialog.exec_():
enabled = self.dialog.is_timestamper
if enabled:
# ( bm_address, private_key, btc_address ) tuples
addresses = map( lambda addr: ( addr[1], addr[2], addr[3] ), self.dialog.result )
else:
addresses = []
self.current_election.settings_set_timestamper_settings( enabled, addresses )
self.refresh_pushButtonVotingTimestamperText( self.current_election )
def refresh_pushButtonVotingTimestamperText(self, election):
if election is None or election.data is None:
return
settings = election.settings_get_timestamper_settings()
if settings is None:
settings = ( False, [] )
enabled, addresses = settings
if not enabled:
text = "Timestamper disabled"
elif len( addresses ) == 1:
text = "Timestamper enabled"
else:
text = "Timestamper enabled (%d)" % len( addresses )
self.ui.pushButtonVotingTimestamper.setText( text )
def pushButtonVotingDetailsClicked(self):
if self.current_election is None:
return
self.dialog = NewElectionDetailsDialog(self, self.current_election)
self.dialog.exec_()
def init_voting_options_button(self):
self.ui.pushButtonVotingOptions.clicked.connect( self.pushButtonVotingOptionsClicked )
self.voting_options_menu = QtGui.QMenu(self)
action = QtGui.QAction( "Broadcast election data", self )
action.triggered.connect( self.actionVotingBroadcastElectionData )
self.voting_options_menu.addAction( action )
self.voting_options_menu.addSeparator()
action = QtGui.QAction( "Export election to file", self )
action.triggered.connect( self.actionVotingExportTriggered )
self.voting_options_menu.addAction( action )
self.voting_options_menu.addSeparator()
action = QtGui.QAction( "Remove election", self )
action.triggered.connect( self.actionVotingRemoveTriggered )
self.voting_options_menu.addAction( action )
def pushButtonVotingOptionsClicked(self):
        self.voting_options_menu.exec_( self.ui.pushButtonVotingOptions.mapToGlobal(QPoint(0,self.ui.pushButtonVotingOptions.size().height())))
def actionVotingBroadcastElectionData(self):
if self.current_election is None:
return
if not self.current_election.data.has_all_public_keys():
QMessageBox.about(self, _translate("MainWindow", "Not all public keys available"), _translate(
"MainWindow", "We don't have all the public keys for this election. Please wait until they have arrived."))
return
self.current_election.broadcast_consensus_metadata()
def actionVotingExportTriggered(self):
if self.current_election is None:
return
if not self.current_election.data.has_all_public_keys():
QMessageBox.about(self, _translate("MainWindow", "Not all public keys available"), _translate(
"MainWindow", "We don't have all the public keys for this election. Please wait until they have arrived."))
return
filename = QtGui.QFileDialog.getSaveFileName(self, _translate("MainWindow", "Export election"),
shared.appdata, _translate("MainWindow", "Election file (*.vote)") )
if filename == '':
return
self.current_election.saveToFile( filename )
def actionVotingRemoveTriggered(self):
if self.current_election is None:
return
result = QtGui.QMessageBox.question(self, _translate("MainWindow", "Remove election"),
_translate("MainWindow", "Remove election %s?" % self.current_election.data.question), QtGui.QMessageBox.Ok |
QtGui.QMessageBox.Cancel, QtGui.QMessageBox.Cancel)
if result == QtGui.QMessageBox.Ok:
self.current_election.delete()
self.refresh_elections()
def refresh_votes_table(self, election):
"""
Show individual votes
"""
self.ui.tableWidgetVotingVotes.setRowCount( 0 )
if election is None or election.data is None:
return
answers_dict = election.data.get_answers_dict()
for vote in election.data.get_individual_votes_with_validity():
            row = self.ui.tableWidgetVotingVotes.rowCount()
self.ui.tableWidgetVotingVotes.insertRow( row )
time, tag, answer, vote_hash, previous_vote_hash, valid = vote
color = Qt.black if valid else Qt.gray
# Time
item = QTableWidgetItem( format_time( int( time ) ) )
item.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
item.setForeground( color )
self.ui.tableWidgetVotingVotes.setItem( row, 0, item )
# Vote
item = QTableWidgetItem( answers_dict[ answer ] )
item.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
item.setForeground( color )
self.ui.tableWidgetVotingVotes.setItem( row, 1, item )
# Tag
item = QTableWidgetItem( str( tag.encode_binary().encode('hex')[:32] ) )
item.setIcon(avatarize(tag))
item.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
item.setForeground( color )
self.ui.tableWidgetVotingVotes.setItem( row, 2, item )
# Vote hash
item = QTableWidgetItem( str( vote_hash.encode('hex')[:32] ) )
item.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
item.setForeground( color )
self.ui.tableWidgetVotingVotes.setItem( row, 3, item )
# Previous vote hash
item = QTableWidgetItem( str( previous_vote_hash.encode('hex')[:32] ) if previous_vote_hash is not None else "" )
item.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
item.setForeground( color )
self.ui.tableWidgetVotingVotes.setItem( row, 4, item )
def refresh_result_table(self, election):
"""
Show our own current results
"""
self.ui.tableWidgetVotingResults.setRowCount( 0 )
if election is None or election.data is None:
return
answers_and_votes = election.data.get_answers_and_votes()
answers_and_votes.sort( key=lambda (a_no, a, votes): votes )
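        # Sorting ascending by vote count and then inserting each row at index 0
        # leaves the table ordered with the highest vote count first.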
for _, answer, votes in answers_and_votes:
self.ui.tableWidgetVotingResults.insertRow(0)
newItem = QTableWidgetItem( answer )
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
self.ui.tableWidgetVotingResults.setItem(0, 0, newItem)
newItem = QTableWidgetItem( str( votes ) )
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
newItem.setTextAlignment( Qt.AlignRight | Qt.AlignVCenter )
self.ui.tableWidgetVotingResults.setItem(0, 1, newItem)
def refresh_results_widget(self, election):
if election is None or election.get_status() != ConsensusProtocol.STATUS_RESULTS_PHASE:
self.ui.widgetVotingAllResults.hide()
return
self.ui.widgetVotingAllResults.show()
self.refresh_all_voting_results(election)
def refresh_all_voting_results(self, election):
selected_result = self.current_election_result
self.ui.tableWidgetVotingAllResults.setRowCount( 0 )
# Find all results messages
result_messages = election.filter_messages( message_type=ConsensusProtocol.MESSAGE_RESULTS )
# Transform into ( message_hash, ( list_hash, results_json ) ) tuples
self.election_results = map( lambda msg: ( msg[3], ConsensusProtocol.unpack_results_message( msg[2] ) ), result_messages )
for message_hash, _ in self.election_results:
self.ui.tableWidgetVotingAllResults.insertRow(0)
newItem = QTableWidgetItem( str( message_hash.encode('hex') ) )
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
self.ui.tableWidgetVotingAllResults.setItem(0, 0, newItem)
if selected_result in self.election_results:
self.ui.tableWidgetVotingAllResults.setCurrentRow( self.election_results.index( selected_result ) )
def tableWidgetVotingAllResultsItemChanged(self):
self.ui.tableWidgetVotingResultDetails.setRowCount( 0 )
index = self.ui.tableWidgetVotingAllResults.currentRow()
if index < 0 or index >= len( self.election_results ) :
return
_, (_, results_json ) = self.current_election_result = self.election_results[ index ]
import json
results = json.loads( results_json )
results.sort( key=lambda (a_no, a, votes): votes )
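        # As in refresh_result_table: ascending sort plus insertRow(0) shows the
        # highest vote counts first.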
self.ui.tableWidgetVotingResultDetails.setRowCount( 0 )
for _, answer, votes in results:
self.ui.tableWidgetVotingResultDetails.insertRow(0)
newItem = QTableWidgetItem( answer )
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
self.ui.tableWidgetVotingResultDetails.setItem(0, 0, newItem)
newItem = QTableWidgetItem( str( votes ) )
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
newItem.setTextAlignment( Qt.AlignRight | Qt.AlignVCenter )
self.ui.tableWidgetVotingResultDetails.setItem(0, 1, newItem)
def refresh_vote_actions(self, election):
"""
Show voting panel
"""
if election is None or election.get_status() != ConsensusProtocol.STATUS_POSTING:
self.current_election_voter_address = None
self.current_election_answer = None
self.ui.widgetVotingActions.hide()
return
# self.voting_addresses is a list of ( address, previous_vote_hash ) -tuples
self.voting_addresses = election.data.get_my_voter_addresses()
# Check if user has any addresses to vote from, and if we are in posting phase
if len( self.voting_addresses ) == 0 or election.get_status() != ConsensusProtocol.STATUS_POSTING:
self.ui.widgetVotingActions.hide()
return
self.ui.widgetVotingActions.show()
selected_address, _ = self.current_election_voter_address or ( None, None )
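        # Remember the previously selected address (if any) so the selection can
        # be restored after the combobox is rebuilt below.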
# Single address, just show it on a label
if len( self.voting_addresses ) == 1:
address, previous_vote_hash = self.voting_addresses[0]
self.ui.comboBoxVotingMultipleAddresses.hide()
self.ui.labelVotingVoteSingleAddress.show()
address_text = self.get_address_text(address, previous_vote_hash)
self.ui.labelVotingVoteSingleAddress.setText( address_text )
self.current_election_voter_address = ( address, previous_vote_hash )
# Multiple addresses, show the choices in a combobox
else:
self.ui.labelVotingVoteSingleAddress.hide()
self.ui.comboBoxVotingMultipleAddresses.show()
self.ui.comboBoxVotingMultipleAddresses.clear()
selected_index = i = 0
for address, previous_vote_hash in self.voting_addresses:
address_text = self.get_address_text(address, previous_vote_hash)
self.ui.comboBoxVotingMultipleAddresses.addItem( address_text )
if address == selected_address:
selected_index = i
i += 1
self.ui.comboBoxVotingMultipleAddresses.setCurrentIndex( selected_index )
# Show vote possibilities
selected_answer = self.current_election_answer
self.ui.comboBoxVotingVote.clear()
self.voting_vote_answers = election.data.get_answers_dict().items()
for answer_no, answer in self.voting_vote_answers:
self.ui.comboBoxVotingVote.addItem( answer, answer_no )
if selected_answer is not None:
if selected_answer in self.voting_vote_answers:
self.ui.comboBoxVotingVote.setCurrentIndex( self.voting_vote_answers.index( selected_answer ) )
def get_address_text(self, address, previous_vote_hash):
if previous_vote_hash is None:
return address
elif previous_vote_hash == VotingData.COMPUTING_VOTE:
return "%s (casting vote...)" % address
else:
return "%s (already voted)" % address
def comboBoxVotingMultipleAddressesIndexChanged(self, index):
# ( address, previous_vote_hash ) -tuple
self.current_election_voter_address = self.voting_addresses[index] if index >= 0 else None
def comboBoxVotingVoteIndexChanged(self, index):
self.current_election_answer = self.voting_vote_answers[index] if index >= 0 else None
def pushButtonVotingCastVoteClicked(self):
if self.current_election_voter_address is None or self.current_election_answer is None:
return
if self.current_election_voter_address[1] == VotingData.COMPUTING_VOTE:
QMessageBox.information( self, "Casting vote", "A vote is currently being cast from this address. Please wait..." )
return
answer_no = self.current_election_answer[0]
vote_address, previous_vote_hash = self.current_election_voter_address
cp = self.current_election
cp.data.cast_vote( vote_address, answer_no, previous_vote_hash )
# Casting a vote removes the address from the list of addresses,
# so we should update the voting actions
self.refresh_vote_actions( self.current_election )
class helpDialog(QtGui.QDialog):
def __init__(self, parent):
QtGui.QWidget.__init__(self, parent)
self.ui = Ui_helpDialog()
self.ui.setupUi(self)
self.parent = parent
self.ui.labelHelpURI.setOpenExternalLinks(True)
QtGui.QWidget.resize(self, QtGui.QWidget.sizeHint(self))
class connectDialog(QtGui.QDialog):
def __init__(self, parent):
QtGui.QWidget.__init__(self, parent)
self.ui = Ui_connectDialog()
self.ui.setupUi(self)
self.parent = parent
QtGui.QWidget.resize(self, QtGui.QWidget.sizeHint(self))
class aboutDialog(QtGui.QDialog):
def __init__(self, parent):
QtGui.QWidget.__init__(self, parent)
self.ui = Ui_aboutDialog()
self.ui.setupUi(self)
self.parent = parent
self.ui.labelVersion.setText('version ' + shared.softwareVersion)
class regenerateAddressesDialog(QtGui.QDialog):
def __init__(self, parent):
QtGui.QWidget.__init__(self, parent)
self.ui = Ui_regenerateAddressesDialog()
self.ui.setupUi(self)
self.parent = parent
QtGui.QWidget.resize(self, QtGui.QWidget.sizeHint(self))
class settingsDialog(QtGui.QDialog):
def __init__(self, parent):
QtGui.QWidget.__init__(self, parent)
self.ui = Ui_settingsDialog()
self.ui.setupUi(self)
self.parent = parent
self.ui.checkBoxStartOnLogon.setChecked(
shared.config.getboolean('bitmessagesettings', 'startonlogon'))
self.ui.checkBoxMinimizeToTray.setChecked(
shared.config.getboolean('bitmessagesettings', 'minimizetotray'))
self.ui.checkBoxShowTrayNotifications.setChecked(
shared.config.getboolean('bitmessagesettings', 'showtraynotifications'))
self.ui.checkBoxStartInTray.setChecked(
shared.config.getboolean('bitmessagesettings', 'startintray'))
self.ui.checkBoxWillinglySendToMobile.setChecked(
shared.safeConfigGetBoolean('bitmessagesettings', 'willinglysendtomobile'))
self.ui.checkBoxUseIdenticons.setChecked(
shared.safeConfigGetBoolean('bitmessagesettings', 'useidenticons'))
self.ui.checkBoxReplyBelow.setChecked(
shared.safeConfigGetBoolean('bitmessagesettings', 'replybelow'))
global languages
languages = ['system','en','eo','fr','de','es','ru','no','ar','zh_cn','ja','nl','en_pirate','other']
user_countrycode = str(shared.config.get('bitmessagesettings', 'userlocale'))
if user_countrycode in languages:
curr_index = languages.index(user_countrycode)
else:
curr_index = languages.index('other')
self.ui.languageComboBox.setCurrentIndex(curr_index)
if shared.appdata == '':
self.ui.checkBoxPortableMode.setChecked(True)
if 'darwin' in sys.platform:
self.ui.checkBoxStartOnLogon.setDisabled(True)
self.ui.checkBoxStartOnLogon.setText(_translate(
"MainWindow", "Start-on-login not yet supported on your OS."))
self.ui.checkBoxMinimizeToTray.setDisabled(True)
self.ui.checkBoxMinimizeToTray.setText(_translate(
"MainWindow", "Minimize-to-tray not yet supported on your OS."))
self.ui.checkBoxShowTrayNotifications.setDisabled(True)
self.ui.checkBoxShowTrayNotifications.setText(_translate(
"MainWindow", "Tray notifications not yet supported on your OS."))
elif 'linux' in sys.platform:
self.ui.checkBoxStartOnLogon.setDisabled(True)
self.ui.checkBoxStartOnLogon.setText(_translate(
"MainWindow", "Start-on-login not yet supported on your OS."))
self.ui.checkBoxMinimizeToTray.setDisabled(True)
self.ui.checkBoxMinimizeToTray.setText(_translate(
"MainWindow", "Minimize-to-tray not yet supported on your OS."))
# On the Network settings tab:
self.ui.lineEditTCPPort.setText(str(
shared.config.get('bitmessagesettings', 'port')))
self.ui.checkBoxAuthentication.setChecked(shared.config.getboolean(
'bitmessagesettings', 'socksauthentication'))
self.ui.checkBoxSocksListen.setChecked(shared.config.getboolean(
'bitmessagesettings', 'sockslisten'))
if str(shared.config.get('bitmessagesettings', 'socksproxytype')) == 'none':
self.ui.comboBoxProxyType.setCurrentIndex(0)
self.ui.lineEditSocksHostname.setEnabled(False)
self.ui.lineEditSocksPort.setEnabled(False)
self.ui.lineEditSocksUsername.setEnabled(False)
self.ui.lineEditSocksPassword.setEnabled(False)
self.ui.checkBoxAuthentication.setEnabled(False)
self.ui.checkBoxSocksListen.setEnabled(False)
elif str(shared.config.get('bitmessagesettings', 'socksproxytype')) == 'SOCKS4a':
self.ui.comboBoxProxyType.setCurrentIndex(1)
self.ui.lineEditTCPPort.setEnabled(False)
elif str(shared.config.get('bitmessagesettings', 'socksproxytype')) == 'SOCKS5':
self.ui.comboBoxProxyType.setCurrentIndex(2)
self.ui.lineEditTCPPort.setEnabled(False)
self.ui.lineEditSocksHostname.setText(str(
shared.config.get('bitmessagesettings', 'sockshostname')))
self.ui.lineEditSocksPort.setText(str(
shared.config.get('bitmessagesettings', 'socksport')))
self.ui.lineEditSocksUsername.setText(str(
shared.config.get('bitmessagesettings', 'socksusername')))
self.ui.lineEditSocksPassword.setText(str(
shared.config.get('bitmessagesettings', 'sockspassword')))
QtCore.QObject.connect(self.ui.comboBoxProxyType, QtCore.SIGNAL(
"currentIndexChanged(int)"), self.comboBoxProxyTypeChanged)
self.ui.lineEditMaxDownloadRate.setText(str(
shared.config.get('bitmessagesettings', 'maxdownloadrate')))
self.ui.lineEditMaxUploadRate.setText(str(
shared.config.get('bitmessagesettings', 'maxuploadrate')))
# Demanded difficulty tab
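        # Difficulty is displayed as a multiple of the network default, so the
        # stored nonce trials / extra bytes values are divided by the network
        # defaults before being shown.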
self.ui.lineEditTotalDifficulty.setText(str((float(shared.config.getint(
'bitmessagesettings', 'defaultnoncetrialsperbyte')) / shared.networkDefaultProofOfWorkNonceTrialsPerByte)))
self.ui.lineEditSmallMessageDifficulty.setText(str((float(shared.config.getint(
'bitmessagesettings', 'defaultpayloadlengthextrabytes')) / shared.networkDefaultPayloadLengthExtraBytes)))
# Max acceptable difficulty tab
self.ui.lineEditMaxAcceptableTotalDifficulty.setText(str((float(shared.config.getint(
'bitmessagesettings', 'maxacceptablenoncetrialsperbyte')) / shared.networkDefaultProofOfWorkNonceTrialsPerByte)))
self.ui.lineEditMaxAcceptableSmallMessageDifficulty.setText(str((float(shared.config.getint(
'bitmessagesettings', 'maxacceptablepayloadlengthextrabytes')) / shared.networkDefaultPayloadLengthExtraBytes)))
# Namecoin integration tab
nmctype = shared.config.get('bitmessagesettings', 'namecoinrpctype')
self.ui.lineEditNamecoinHost.setText(str(
shared.config.get('bitmessagesettings', 'namecoinrpchost')))
self.ui.lineEditNamecoinPort.setText(str(
shared.config.get('bitmessagesettings', 'namecoinrpcport')))
self.ui.lineEditNamecoinUser.setText(str(
shared.config.get('bitmessagesettings', 'namecoinrpcuser')))
self.ui.lineEditNamecoinPassword.setText(str(
shared.config.get('bitmessagesettings', 'namecoinrpcpassword')))
if nmctype == "namecoind":
self.ui.radioButtonNamecoinNamecoind.setChecked(True)
elif nmctype == "nmcontrol":
self.ui.radioButtonNamecoinNmcontrol.setChecked(True)
self.ui.lineEditNamecoinUser.setEnabled(False)
self.ui.labelNamecoinUser.setEnabled(False)
self.ui.lineEditNamecoinPassword.setEnabled(False)
self.ui.labelNamecoinPassword.setEnabled(False)
else:
assert False
QtCore.QObject.connect(self.ui.radioButtonNamecoinNamecoind, QtCore.SIGNAL(
"toggled(bool)"), self.namecoinTypeChanged)
QtCore.QObject.connect(self.ui.radioButtonNamecoinNmcontrol, QtCore.SIGNAL(
"toggled(bool)"), self.namecoinTypeChanged)
QtCore.QObject.connect(self.ui.pushButtonNamecoinTest, QtCore.SIGNAL(
"clicked()"), self.click_pushButtonNamecoinTest)
#Message Resend tab
self.ui.lineEditDays.setText(str(
shared.config.get('bitmessagesettings', 'stopresendingafterxdays')))
self.ui.lineEditMonths.setText(str(
shared.config.get('bitmessagesettings', 'stopresendingafterxmonths')))
#'System' tab removed for now.
"""try:
maxCores = shared.config.getint('bitmessagesettings', 'maxcores')
except:
maxCores = 99999
if maxCores <= 1:
self.ui.comboBoxMaxCores.setCurrentIndex(0)
elif maxCores == 2:
self.ui.comboBoxMaxCores.setCurrentIndex(1)
elif maxCores <= 4:
self.ui.comboBoxMaxCores.setCurrentIndex(2)
elif maxCores <= 8:
self.ui.comboBoxMaxCores.setCurrentIndex(3)
elif maxCores <= 16:
self.ui.comboBoxMaxCores.setCurrentIndex(4)
else:
self.ui.comboBoxMaxCores.setCurrentIndex(5)"""
QtGui.QWidget.resize(self, QtGui.QWidget.sizeHint(self))
def comboBoxProxyTypeChanged(self, comboBoxIndex):
if comboBoxIndex == 0:
self.ui.lineEditSocksHostname.setEnabled(False)
self.ui.lineEditSocksPort.setEnabled(False)
self.ui.lineEditSocksUsername.setEnabled(False)
self.ui.lineEditSocksPassword.setEnabled(False)
self.ui.checkBoxAuthentication.setEnabled(False)
self.ui.checkBoxSocksListen.setEnabled(False)
self.ui.lineEditTCPPort.setEnabled(True)
elif comboBoxIndex == 1 or comboBoxIndex == 2:
self.ui.lineEditSocksHostname.setEnabled(True)
self.ui.lineEditSocksPort.setEnabled(True)
self.ui.checkBoxAuthentication.setEnabled(True)
self.ui.checkBoxSocksListen.setEnabled(True)
if self.ui.checkBoxAuthentication.isChecked():
self.ui.lineEditSocksUsername.setEnabled(True)
self.ui.lineEditSocksPassword.setEnabled(True)
self.ui.lineEditTCPPort.setEnabled(False)
# Check status of namecoin integration radio buttons and translate
# it to a string as in the options.
def getNamecoinType(self):
if self.ui.radioButtonNamecoinNamecoind.isChecked():
return "namecoind"
if self.ui.radioButtonNamecoinNmcontrol.isChecked():
return "nmcontrol"
assert False
# Namecoin connection type was changed.
def namecoinTypeChanged(self, checked):
nmctype = self.getNamecoinType()
assert nmctype == "namecoind" or nmctype == "nmcontrol"
isNamecoind = (nmctype == "namecoind")
self.ui.lineEditNamecoinUser.setEnabled(isNamecoind)
self.ui.labelNamecoinUser.setEnabled(isNamecoind)
self.ui.lineEditNamecoinPassword.setEnabled(isNamecoind)
self.ui.labelNamecoinPassword.setEnabled(isNamecoind)
if isNamecoind:
self.ui.lineEditNamecoinPort.setText(shared.namecoinDefaultRpcPort)
else:
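            # Pre-fill the port that nmcontrol is expected to listen on (9000)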
self.ui.lineEditNamecoinPort.setText("9000")
# Test the namecoin settings specified in the settings dialog.
def click_pushButtonNamecoinTest(self):
self.ui.labelNamecoinTestResult.setText(_translate(
"MainWindow", "Testing..."))
options = {}
options["type"] = self.getNamecoinType()
options["host"] = self.ui.lineEditNamecoinHost.text()
options["port"] = self.ui.lineEditNamecoinPort.text()
options["user"] = self.ui.lineEditNamecoinUser.text()
options["password"] = self.ui.lineEditNamecoinPassword.text()
nc = namecoinConnection(options)
response = nc.test()
        responseStatus, responseText = response
        self.ui.labelNamecoinTestResult.setText(responseText)
        if responseStatus == 'success':
self.parent.ui.pushButtonFetchNamecoinID.show()
class SpecialAddressBehaviorDialog(QtGui.QDialog):
def __init__(self, parent):
QtGui.QWidget.__init__(self, parent)
self.ui = Ui_SpecialAddressBehaviorDialog()
self.ui.setupUi(self)
self.parent = parent
currentRow = parent.ui.tableWidgetYourIdentities.currentRow()
addressAtCurrentRow = str(
parent.ui.tableWidgetYourIdentities.item(currentRow, 1).text())
if not shared.safeConfigGetBoolean(addressAtCurrentRow, 'chan'):
if shared.safeConfigGetBoolean(addressAtCurrentRow, 'mailinglist'):
self.ui.radioButtonBehaviorMailingList.click()
else:
self.ui.radioButtonBehaveNormalAddress.click()
try:
mailingListName = shared.config.get(
addressAtCurrentRow, 'mailinglistname')
except:
mailingListName = ''
self.ui.lineEditMailingListName.setText(
unicode(mailingListName, 'utf-8'))
else: # if addressAtCurrentRow is a chan address
self.ui.radioButtonBehaviorMailingList.setDisabled(True)
self.ui.lineEditMailingListName.setText(_translate(
"MainWindow", "This is a chan address. You cannot use it as a pseudo-mailing list."))
QtGui.QWidget.resize(self, QtGui.QWidget.sizeHint(self))
class AddAddressDialog(QtGui.QDialog):
def __init__(self, parent):
QtGui.QWidget.__init__(self, parent)
self.ui = Ui_AddAddressDialog()
self.ui.setupUi(self)
self.parent = parent
QtCore.QObject.connect(self.ui.lineEditAddress, QtCore.SIGNAL(
"textChanged(QString)"), self.addressChanged)
def addressChanged(self, QString):
status, a, b, c = decodeAddress(str(QString))
if status == 'missingbm':
self.ui.labelAddressCheck.setText(_translate(
"MainWindow", "The address should start with ''BM-''"))
elif status == 'checksumfailed':
self.ui.labelAddressCheck.setText(_translate(
"MainWindow", "The address is not typed or copied correctly (the checksum failed)."))
elif status == 'versiontoohigh':
self.ui.labelAddressCheck.setText(_translate(
"MainWindow", "The version number of this address is higher than this software can support. Please upgrade Bitmessage."))
elif status == 'invalidcharacters':
self.ui.labelAddressCheck.setText(_translate(
"MainWindow", "The address contains invalid characters."))
elif status == 'ripetooshort':
self.ui.labelAddressCheck.setText(_translate(
"MainWindow", "Some data encoded in the address is too short."))
elif status == 'ripetoolong':
self.ui.labelAddressCheck.setText(_translate(
"MainWindow", "Some data encoded in the address is too long."))
elif status == 'varintmalformed':
self.ui.labelAddressCheck.setText(_translate(
"MainWindow", "Some data encoded in the address is malformed."))
elif status == 'success':
self.ui.labelAddressCheck.setText(
_translate("MainWindow", "Address is valid."))
class NewSubscriptionDialog(QtGui.QDialog):
def __init__(self, parent):
QtGui.QWidget.__init__(self, parent)
self.ui = Ui_NewSubscriptionDialog()
self.ui.setupUi(self)
self.parent = parent
QtCore.QObject.connect(self.ui.lineEditSubscriptionAddress, QtCore.SIGNAL(
"textChanged(QString)"), self.addressChanged)
self.ui.checkBoxDisplayMessagesAlreadyInInventory.setText(
_translate("MainWindow", "Enter an address above."))
def addressChanged(self, QString):
self.ui.checkBoxDisplayMessagesAlreadyInInventory.setEnabled(False)
self.ui.checkBoxDisplayMessagesAlreadyInInventory.setChecked(False)
status, addressVersion, streamNumber, ripe = decodeAddress(str(QString))
if status == 'missingbm':
self.ui.labelAddressCheck.setText(_translate(
"MainWindow", "The address should start with ''BM-''"))
elif status == 'checksumfailed':
self.ui.labelAddressCheck.setText(_translate(
"MainWindow", "The address is not typed or copied correctly (the checksum failed)."))
elif status == 'versiontoohigh':
self.ui.labelAddressCheck.setText(_translate(
"MainWindow", "The version number of this address is higher than this software can support. Please upgrade Bitmessage."))
elif status == 'invalidcharacters':
self.ui.labelAddressCheck.setText(_translate(
"MainWindow", "The address contains invalid characters."))
elif status == 'ripetooshort':
self.ui.labelAddressCheck.setText(_translate(
"MainWindow", "Some data encoded in the address is too short."))
elif status == 'ripetoolong':
self.ui.labelAddressCheck.setText(_translate(
"MainWindow", "Some data encoded in the address is too long."))
elif status == 'varintmalformed':
self.ui.labelAddressCheck.setText(_translate(
"MainWindow", "Some data encoded in the address is malformed."))
elif status == 'success':
self.ui.labelAddressCheck.setText(
_translate("MainWindow", "Address is valid."))
if addressVersion <= 3:
self.ui.checkBoxDisplayMessagesAlreadyInInventory.setText(
_translate("MainWindow", "Address is an old type. We cannot display its past broadcasts."))
else:
shared.flushInventory()
doubleHashOfAddressData = hashlib.sha512(hashlib.sha512(encodeVarint(
addressVersion) + encodeVarint(streamNumber) + ripe).digest()).digest()
tag = doubleHashOfAddressData[32:]
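                # The tag is the second half (bytes 32..63) of the double
                # SHA-512 of the encoded address version, stream number and
                # ripe hash; v4 broadcasts carry this tag so they can be looked
                # up in the inventory without revealing the address itself.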
queryreturn = sqlQuery(
'''select hash from inventory where objecttype=3 and tag=?''', tag)
if len(queryreturn) == 0:
self.ui.checkBoxDisplayMessagesAlreadyInInventory.setText(
_translate("MainWindow", "There are no recent broadcasts from this address to display."))
elif len(queryreturn) == 1:
self.ui.checkBoxDisplayMessagesAlreadyInInventory.setEnabled(True)
self.ui.checkBoxDisplayMessagesAlreadyInInventory.setText(
_translate("MainWindow", "Display the %1 recent broadcast from this address.").arg(str(len(queryreturn))))
else:
self.ui.checkBoxDisplayMessagesAlreadyInInventory.setEnabled(True)
self.ui.checkBoxDisplayMessagesAlreadyInInventory.setText(
_translate("MainWindow", "Display the %1 recent broadcasts from this address.").arg(str(len(queryreturn))))
class NewAddressDialog(QtGui.QDialog):
def __init__(self, parent):
QtGui.QWidget.__init__(self, parent)
self.ui = Ui_NewAddressDialog()
self.ui.setupUi(self)
self.parent = parent
row = 1
# Let's fill out the 'existing address' combo box with addresses from
# the 'Your Identities' tab.
while self.parent.ui.tableWidgetYourIdentities.item(row - 1, 1):
self.ui.radioButtonExisting.click()
# print
# self.parent.ui.tableWidgetYourIdentities.item(row-1,1).text()
self.ui.comboBoxExisting.addItem(
self.parent.ui.tableWidgetYourIdentities.item(row - 1, 1).text())
row += 1
self.ui.groupBoxDeterministic.setHidden(True)
QtGui.QWidget.resize(self, QtGui.QWidget.sizeHint(self))
class newChanDialog(QtGui.QDialog):
def __init__(self, parent):
QtGui.QWidget.__init__(self, parent)
self.ui = Ui_newChanDialog()
self.ui.setupUi(self)
self.parent = parent
self.ui.groupBoxCreateChan.setHidden(True)
QtGui.QWidget.resize(self, QtGui.QWidget.sizeHint(self))
class iconGlossaryDialog(QtGui.QDialog):
def __init__(self, parent):
QtGui.QWidget.__init__(self, parent)
self.ui = Ui_iconGlossaryDialog()
self.ui.setupUi(self)
self.parent = parent
self.ui.labelPortNumber.setText(_translate(
"MainWindow", "You are using TCP port %1. (This can be changed in the settings).").arg(str(shared.config.getint('bitmessagesettings', 'port'))))
QtGui.QWidget.resize(self, QtGui.QWidget.sizeHint(self))
class NewCreateElectionDialog(QtGui.QDialog):
BLOCKCHAINS = [ ConsensusProtocol.BLOCKCHAIN_BITCOIN,
ConsensusProtocol.BLOCKCHAIN_BITCOIN_TESTNET ]
def __init__(self, parent):
QtGui.QWidget.__init__(self, parent, Qt.WindowTitleHint | Qt.WindowSystemMenuHint)
self.ui = Ui_CreateElectionDialog()
self.ui.setupUi(self)
self.setWindowTitle( "Create election" )
self.parent = parent
self.question = "Best programming language?"
self.answers = ["Java", "Python", "Ruby", "C#", "Haskell", "ML", "Erlang", "C"]
self.voters = []
self.blockchain = ConsensusProtocol.BLOCKCHAIN_BITCOIN
self.start_time = 0
self.deadline = 0
self.commitment_phase_deadline = 0
self.result = None
self.ui.lineEditQuestion.textChanged.connect( self.question_changed )
self.ui.pushButtonAddAnswer.clicked.connect( self.add_answer )
self.ui.pushButtonAddVoter.clicked.connect( self.add_voter )
self.init_answer_popup_menu()
self.init_voter_popup_menu()
self.ui.lineEditQuestion.setText( self.question )
self.refresh_answers()
self.refresh_blockchain()
# self.popMenuElections.exec_( self.ui.tableWidgetElections.mapToGlobal(point))
self.ui.dateTimeStart.setDate( QtCore.QDate.currentDate() )
self.ui.dateTimeStart.setTime( QtCore.QTime(0,0,0) )
self.ui.dateTimeStop.setDate( QtCore.QDate.currentDate().addDays(1) )
self.ui.dateTimeStop.setTime( QtCore.QTime(0,0,0) )
self.ui.dateTimeCommitmentPhaseDeadline.setDate( QtCore.QDate.currentDate().addDays(1) )
self.ui.dateTimeCommitmentPhaseDeadline.setTime( QtCore.QTime(6,0,0) )
def init_answer_popup_menu(self):
self.ui.electionAnswerContextMenuToolbar = QtGui.QToolBar()
self.actionAnswerRemove = self.ui.electionAnswerContextMenuToolbar.addAction(
_translate( "MainWindow", "Remove" ), self.on_action_answerRemove)
self.ui.listWidgetAnswers.setContextMenuPolicy( QtCore.Qt.CustomContextMenu )
self.ui.listWidgetAnswers.customContextMenuRequested.connect( lambda point: self.popMenuElectionAnswers.exec_(QCursor.pos()) )
self.popMenuElectionAnswers = QtGui.QMenu(self)
self.popMenuElectionAnswers.addAction(self.actionAnswerRemove)
def init_voter_popup_menu(self):
self.ui.electionVoterContextMenuToolbar = QtGui.QToolBar()
self.actionVoterRemove = self.ui.electionVoterContextMenuToolbar.addAction(
_translate( "MainWindow", "Remove" ), self.on_action_voterRemove)
self.ui.listWidgetVoters.setContextMenuPolicy( QtCore.Qt.CustomContextMenu )
self.ui.listWidgetVoters.customContextMenuRequested.connect( lambda point: self.popMenuElectionVoters.exec_(QCursor.pos()) )
self.popMenuElectionVoters = QtGui.QMenu(self)
self.popMenuElectionVoters.addAction(self.actionVoterRemove)
def on_action_answerRemove(self):
currentRow = self.ui.listWidgetAnswers.currentRow()
if currentRow >= 0:
self.answers.remove( self.answers[currentRow] )
self.refresh_answers()
def on_action_voterRemove(self):
currentRow = self.ui.listWidgetVoters.currentRow()
if currentRow >= 0:
self.voters.remove( self.voters[currentRow] )
self.refresh_voters()
def accept(self):
self.blockchain = NewCreateElectionDialog.BLOCKCHAINS[ self.ui.comboBoxBlockchain.currentIndex() ]
# We want the timestamps in seconds, so we divide by 1000
self.start_time = int( self.ui.dateTimeStart.dateTime().toMSecsSinceEpoch() / 1000 )
self.deadline = int( self.ui.dateTimeStop.dateTime().toMSecsSinceEpoch() / 1000 )
self.commitment_phase_deadline = int( self.ui.dateTimeCommitmentPhaseDeadline.dateTime().toMSecsSinceEpoch() / 1000 )
if len( self.question ) < 1:
QMessageBox.about(self, _translate("MainWindow", "No question"), _translate(
"MainWindow", "You need to enter a question."))
elif len( self.answers ) <= 1:
QMessageBox.about(self, _translate("MainWindow", "Not enough answers"), _translate(
"MainWindow", "You need to enter at least two answers."))
elif len( self.voters ) <= 2:
QMessageBox.about(self, _translate("MainWindow", "Not enough voters"), _translate(
"MainWindow", "You need to enter at least three voter addresses."))
elif self.start_time >= self.deadline:
QMessageBox.about(self, _translate("MainWindow", "Wrong deadline"), _translate(
"MainWindow", "Election deadline must be after election start."))
elif self.deadline >= self.commitment_phase_deadline:
QMessageBox.about(self, _translate("MainWindow", "Wrong deadline"), _translate(
"MainWindow", "Timestamping phase deadline must be after election deadline."))
else:
# First convert from QStrings to normal Python strings
question = str( self.question )
answers = map( lambda s: str( s ), self.answers )
voters = map( lambda s: str( s ), self.voters )
# Time should be changed to use the blockchain
time_data = ConsensusTimeData( self.start_time, self.deadline, self.commitment_phase_deadline )
data = VotingData( self.blockchain, time_data, question, answers, voters)
time_data.data = data
ConsensusProtocol.create( data )
QtGui.QDialog.accept(self)
def refresh_answers(self):
self.ui.listWidgetAnswers.clear()
for i in range( len( self.answers ) ):
self.ui.listWidgetAnswers.addItem( "%d. %s" % ( i+1, self.answers[i] ) )
def refresh_voters(self):
self.ui.listWidgetVoters.clear()
for voter in self.voters:
item = QListWidgetItem()
item.setText( voter )
if not helper_keys.has_pubkey_for(voter, decodeAddress( voter ) ):
item.setForeground( Qt.gray )
self.ui.listWidgetVoters.addItem( item )
def refresh_blockchain(self):
self.ui.comboBoxBlockchain.setCurrentIndex( NewCreateElectionDialog.BLOCKCHAINS.index( self.blockchain ) )
def question_changed(self):
self.question = self.ui.lineEditQuestion.text()
def add_answer(self):
text, ok = QtGui.QInputDialog.getText(self, 'Add answer', 'Enter answer:')
if ok and text.trimmed() != "":
self.answers.append( text.trimmed() )
self.refresh_answers()
def add_voter(self):
text, ok = QtGui.QInputDialog.getText(self, 'Add voter', 'Enter one or more (comma-separated) Bitmessage addresses:')
if ok:
changed_anything = False
invalid_addresses = []
addresses = map( lambda a: a.strip(), str( text ).split( "," ) )
for address in addresses:
if address != '' and not address in self.voters:
status, addressVersionNumber, streamNumber, ripe = decodeAddress( address )
if status == "success":
changed_anything = True
self.voters.append( address )
else:
invalid_addresses.append( address )
if len( invalid_addresses ) > 0:
QMessageBox.about( self, _translate("MainWindow", "%1 invalid addresses").arg( len(invalid_addresses) ),
_translate("MainWindow", "The following addresses were not valid:\n%1").arg( "\n".join( invalid_addresses ) ) )
if changed_anything:
self.voters.sort()
self.refresh_voters()
class NewElectionDetailsDialog(QtGui.QDialog):
def __init__(self, parent, election):
QtGui.QWidget.__init__(self, parent, Qt.WindowTitleHint | Qt.WindowSystemMenuHint )
self.ui = Ui_ElectionDetailsDialog()
self.ui.setupUi(self)
self.setWindowTitle( "Election details" )
self.parent = parent
self.election = election
QtCore.QObject.connect(parent.UISignalThread, QtCore.SIGNAL(
"refresh_election_ui(PyQt_PyObject)"), self.refresh_details)
self.refresh_details( election )
def refresh_details(self, election):
status = election.get_status()
self.ui.labelQuestion.setText( election.data.question )
self.ui.labelStatus.setText( get_election_status_text( election ) )
self.ui.labelHash.setText( election.hash )
self.ui.labelChanAddress.setText( election.chan_address )
self.ui.labelStartTime.setText( format_time( election.data.time_data.start * 1000 ) )
self.ui.labelDeadline.setText( format_time( election.data.time_data.post_deadline * 1000 ) )
self.ui.labelCommitmentPhaseDeadline.setText( format_time( election.data.time_data.commitment_phase_deadline * 1000 ) )
if election.data.blockchain in ConsensusProtocol.BLOCKCHAIN_NAMES:
self.ui.labelBlockchain.setText( ConsensusProtocol.BLOCKCHAIN_NAMES[election.data.blockchain] )
else:
self.ui.labelBlockchain.setText( "N/A" )
self.ui.labelAmountAddresses.setText( str( len( election.data.addresses ) ) )
self.ui.labelAmountPublicKeys.setText( "%d/%d" % ( election.data.get_amount_public_keys(), len( election.data.addresses ) ) )
if status >= ConsensusProtocol.STATUS_POSTING:
self.ui.labelAmountVotes.setText( str( len( election.filter_messages( ConsensusProtocol.MESSAGE_MESSAGE, ConsensusProtocol.MESSAGE_STATE_ACCEPTED ) ) ) )
else:
self.ui.labelAmountVotes.setText( "N/A" )
commits_processed = len( election.filter_messages(ConsensusProtocol.MESSAGE_COMMITMENT, ConsensusProtocol.MESSAGE_STATE_PROCESSED ) )
total_commits = len( election.filter_messages(ConsensusProtocol.MESSAGE_COMMITMENT ) )
amount_commits_processed = "%d/%d" % ( commits_processed, total_commits )
self.ui.labelAmountCommitments.setText( amount_commits_processed )
self.ui.labelAmountResults.setText( str( len( election.filter_messages( ConsensusProtocol.MESSAGE_RESULTS ) ) ) )
if status >= ConsensusProtocol.STATUS_COMMITMENT_PHASE:
self.ui.labelAmountVotesTooLate.setText( str( len( election.filter_messages( ConsensusProtocol.MESSAGE_MESSAGE, ConsensusProtocol.MESSAGE_STATE_RECEIVED_AFTER_DEADLINE ) ) ) )
else:
self.ui.labelAmountVotesTooLate.setText( "N/A" )
if status >= ConsensusProtocol.STATUS_RESULTS_PHASE:
self.ui.labelAmountMissingValidVotes.setText( str( len( election.settings_get_missing_accepted_message_hashes() ) ) )
self.ui.labelAmountVotesValidatedFromCommitments.setText( str( election.settings_get_messages_accepted_by_commitments() ) )
self.ui.labelAmountInvalidCommitments.setText( str( len( election.filter_messages(ConsensusProtocol.MESSAGE_COMMITMENT, ConsensusProtocol.MESSAGE_STATE_INVALID_COMMITMENT) ) ) )
else:
self.ui.labelAmountMissingValidVotes.setText( "N/A" )
self.ui.labelAmountVotesValidatedFromCommitments.setText( "N/A" )
self.ui.labelAmountInvalidCommitments.setText( "N/A" )
"""
Address list
"""
self.ui.listWidgetAddresses.clear()
for address in election.data.addresses:
self.ui.listWidgetAddresses.addItem( QtGui.QListWidgetItem( avatarize( address ), address ) )
class NewTimestamperSettingsDialog(QtGui.QDialog):
def __init__(self, parent, election, selected_private_keys=[]):
QtGui.QWidget.__init__(self, parent, Qt.WindowTitleHint | Qt.WindowSystemMenuHint)
self.ui = Ui_TimestamperSettingsDialog()
self.ui.setupUi(self)
self.setWindowTitle( "Timestamper settings" )
self.parent = parent
self.election = election
self.testnet = election.data.is_testnet_blockchain()
self.is_timestamper, addresses_initially_selected = election.settings_get_timestamper_settings()
# List of [ type, bm_address, private_key, btc_address, balance ]-sublists
# type is either "bm-derived" or "imported"
self.addresses = []
self.addresses_selected = []
self.initialize_addresses( addresses_initially_selected )
self.result = None
self.ui.checkBoxTimestamper.stateChanged.connect( self.checkBoxTimestamperStateChanged )
self.ui.pushButtonImportBitcoinAddress.clicked.connect( self.pushButtonImportBitcoinAddressClicked )
self.ui.pushButtonRefreshBalances.clicked.connect( self.pushButtonRefreshBalancesClicked )
self.ui.tableWidgetAddresses.itemSelectionChanged.connect( self.tableWidgetAddressesItemSelectionChanged )
self.init_table_popup()
self.refresh_dialog_state()
self.refresh_address_list()
self.refresh_balances()
QtCore.QObject.connect(parent.UISignalThread, QtCore.SIGNAL(
"refreshBitcoinAddresses()"), self.refresh_address_list)
def initialize_addresses(self, addresses):
# Expecting addresses to be a list of ( bm_address=None, private_key, btc_address )-tuples
bm_addresses = []
for bm_address, private_key, btc_address in addresses:
type = "imported" if bm_address == None else "bm-derived"
addr_tuple = [ type, bm_address, private_key, btc_address, -1.0 ]
self.addresses.append( addr_tuple )
self.addresses_selected.append( addr_tuple )
if bm_address is not None:
bm_addresses.append( bm_address )
# Filter out addresses already added above.
derived_addresses = filter( lambda addr: addr[1] not in bm_addresses, self.get_bm_derived_addresses() )
self.addresses.extend( derived_addresses )
def checkBoxTimestamperStateChanged(self, state):
self.is_timestamper = state == Qt.Checked
self.refresh_dialog_state()
def refresh_dialog_state(self):
if self.ui.checkBoxTimestamper.isChecked() != self.is_timestamper:
self.ui.checkBoxTimestamper.setChecked( self.is_timestamper )
self.ui.pushButtonImportBitcoinAddress.setEnabled( self.is_timestamper )
self.ui.pushButtonRefreshBalances.setEnabled( self.is_timestamper )
self.ui.tableWidgetAddresses.setEnabled( self.is_timestamper )
self.refresh_ok_button_state()
def refresh_ok_button_state(self):
if self.is_timestamper:
enabled = len( self.addresses_selected ) > 0 or ConsensusProtocol.DISABLE_COMMITMENTS_ON_BLOCKCHAIN
else:
enabled = True
self.ui.buttonBox.button( QDialogButtonBox.Ok ).setEnabled( enabled )
def pushButtonImportBitcoinAddressClicked(self):
text, ok = QtGui.QInputDialog.getText(self, 'Import bitcoin address', 'Enter a private key either hex-encoded (64 chars) or as Wallet Import Format (WIF)')
text = str( text.trimmed() )
if ok and text != "":
if len( text ) == 64:
# Private key in hex-encoding
private_key = text.decode('hex')
elif len( text ) >= 50 and len( text ) < 55:
# Wallet Import format
# Two WIF examples had 51 and 52 bytes length, so [50,55] is just a guess
private_key = shared.decodeWalletImportFormat( text )
if private_key is None or private_key == "":
QtGui.QMessageBox.information( self, "Invalid key", "Couldn't import WIF" )
return
else:
QtGui.QMessageBox.information( self, "Invalid key", "Couldn't decode private key" )
return
public_key = arithmetic.privtopub( private_key.encode('hex') ).decode('hex')
btc_address = BitcoinThread.get_address( self.testnet, public_key )
if private_key in map( lambda addr: addr[2], self.addresses ):
QtGui.QMessageBox.information( self, "Address already exists", "The address %s is already in the list" % btc_address )
return
self.addresses.append( ["imported", None, private_key, btc_address, -1.0 ] )
self.refresh_balances()
self.refresh_address_list()
def pushButtonRefreshBalancesClicked(self):
for i in range( len( self.addresses ) ):
self.addresses[i][4] = -1.0
self.refresh_balances()
self.refresh_address_list()
def get_bm_derived_addresses(self):
result = []
for address in helper_keys.getMyAddresses():
# [ type, bm_address, private_key, btc_address, balance ]
privSigningKey, pubSigningKey = helper_keys.getPrivateSigningKey( address ), helper_keys.getPublicSigningKey( address )
bitcoin_address = BitcoinThread.get_corresponding_address( self.testnet, None, pubSigningKey )
result.append( [ "bm-derived", address, privSigningKey, bitcoin_address, -1.0 ] )
return result
def refresh_address_list(self):
selected_addresses = self.addresses_selected
selected_btc_addresses = map( lambda a: a[3], selected_addresses )
selected_rows = [i for i in range( len( self.addresses ) ) if self.addresses[i][3] in selected_btc_addresses]
self.ui.tableWidgetAddresses.setRowCount( 0 )
for _, bm_address, _, btc_address, balance in reversed( self.addresses ):
self.ui.tableWidgetAddresses.insertRow( 0 )
color = Qt.black if balance >= BitcoinThread.BTC_UNSPENT_MIN_AVAILABLE else Qt.gray
flags = Qt.ItemIsSelectable | Qt.ItemIsEnabled
item = QTableWidgetItem( bm_address if bm_address is not None else "(Imported address)" )
if bm_address is not None:
item.setIcon(avatarize( bm_address ))
item.setFlags( flags )
item.setForeground( color )
self.ui.tableWidgetAddresses.setItem( 0, 0, item )
item = QTableWidgetItem( btc_address )
item.setFlags( flags )
item.setForeground( color )
self.ui.tableWidgetAddresses.setItem( 0, 1, item )
balance_str = ( "%.8f BTC" % balance ) if balance >= 0 else "Refreshing..."
item = QTableWidgetItem( balance_str )
item.setFlags( flags )
item.setForeground( color )
self.ui.tableWidgetAddresses.setItem( 0, 2, item )
# We'll have to temporarily set the selection mode to MultiSelection
# to programmatically select multiple rows (without using the QItemSelectionModel)
self.ui.tableWidgetAddresses.setSelectionMode( QAbstractItemView.MultiSelection )
for row in selected_rows:
self.ui.tableWidgetAddresses.selectRow( row )
self.ui.tableWidgetAddresses.setSelectionMode( QAbstractItemView.ExtendedSelection )
self.addresses_selected = selected_addresses
def refresh_ui(self):
self.refresh_address_list()
def init_table_popup(self):
self.ui.tableWidgetAddresses.setContextMenuPolicy( QtCore.Qt.CustomContextMenu )
self.ui.tableWidgetAddresses.customContextMenuRequested.connect( self.tableWidgetAddressContentMenuRequested )
def get_selected_address(self):
row = self.ui.tableWidgetAddresses.currentRow()
if row < 0 or row >= len( self.addresses ):
return None
btc_address = self.ui.tableWidgetAddresses.item( row, 1 ).text()
btc_addresses = map( lambda a: a[3], self.addresses )
if btc_address in btc_addresses:
return self.addresses[ btc_addresses.index( btc_address ) ]
else:
return None
def tableWidgetAddressContentMenuRequested(self, point):
address = self.get_selected_address()
if address is None:
return
menu = QtGui.QMenu(self)
action = QtGui.QAction( "Copy bitcoin address to clipboard", self )
action.triggered.connect( self.actionCopyAddressToClipboard )
menu.addAction( action )
menu.addSeparator()
action = QtGui.QAction( "Remove address", self )
action.triggered.connect( self.actionRemoveAddress )
action.setEnabled( address[0] == 'imported' ) # address[0] holds the type; comparing the builtin `type` was always False
menu.addAction( action )
menu.exec_( self.ui.tableWidgetAddresses.mapToGlobal( point ) )
def actionCopyAddressToClipboard(self):
address = self.get_selected_address()
if address is None:
return
_, _, _, btc_address, _ = address
cb = QtGui.QApplication.clipboard()
cb.clear(mode=cb.Clipboard )
cb.setText(btc_address, mode=cb.Clipboard)
def actionRemoveAddress(self):
address = self.get_selected_address()
if address is None:
return
self.addresses.remove( address )
self.refresh_address_list()
def unspent_transactions_received(self, result):
if result is None:
return
if type( result ) != type( [] ):
result = [result]
for obj in result:
balance_btc = 0.0
# Calculate balance
for unspent in obj["unspent"]:
balance_btc += float( unspent["amount"] )
# Find address
for i in range( len( self.addresses ) ):
_, _, _, btc_address, _ = self.addresses[i]
if btc_address == obj["address"]:
self.addresses[i][4] = balance_btc
shared.UISignalQueue.put( ( "refreshBitcoinAddresses", [] ) )
def refresh_balances(self):
addresses = map( lambda addr: addr[3], self.addresses )
BitcoinThread.enqueue( self, "getUnspentTransactions", ( self.testnet, addresses ), self.unspent_transactions_received )
def tableWidgetAddressesItemSelectionChanged(self):
selection_model = self.ui.tableWidgetAddresses.selectionModel()
selected_rows = selection_model.selectedRows()
self.addresses_selected = [ self.addresses[i.row()] for i in selected_rows ]
self.refresh_ok_button_state()
def accept(self):
if not ConsensusProtocol.DISABLE_COMMITMENTS_ON_BLOCKCHAIN:
empty_addresses = [addr for addr in self.addresses_selected if addr[4] < BitcoinThread.BTC_UNSPENT_MIN_AVAILABLE]
if len( empty_addresses ) > 0:
QtGui.QMessageBox.information( self, "Empty address(es) selected", "You cannot choose addresses with balance lower than %.8f BTC\nThe following addresses don't have enough in their balance:\n%s" % ( BitcoinThread.BTC_UNSPENT_MIN_AVAILABLE, ", ".join( map( lambda addr: addr[3], empty_addresses ) ) ) )
for addr in empty_addresses:
self.addresses_selected.remove( addr )
self.refresh_address_list()
return False
self.result = [addr for addr in self.addresses_selected if addr[4] >= BitcoinThread.BTC_UNSPENT_MIN_AVAILABLE]
QtGui.QDialog.accept( self )
def format_time( t ):
return unicode( strftime( shared.config.get('bitmessagesettings', 'timeformat'), localtime( t/1000 ) ),'utf-8')
def get_election_status_text(election):
status = election.get_status()
if status == ConsensusProtocol.STATUS_UNKNOWN:
return "Waiting for metadata"
if status == ConsensusProtocol.STATUS_NOT_OPEN_YET:
return "Not yet open"
elif status == ConsensusProtocol.STATUS_POSTING:
return "Election is open"
elif status == ConsensusProtocol.STATUS_COMMITMENT_PHASE:
return "Timestamping phase"
elif status == ConsensusProtocol.STATUS_RESULTS_PHASE:
return "Election over"
# In order for the time columns on the Inbox and Sent tabs to be sorted
# correctly (rather than alphabetically), we need to overload the <
# operator and use this class instead of QTableWidgetItem.
class myTableWidgetItem(QTableWidgetItem):
def __lt__(self, other):
return int(self.data(33).toPyObject()) < int(other.data(33).toPyObject())
class UISignaler(QThread):
def __init__(self, parent=None):
QThread.__init__(self, parent)
def run(self):
while True:
command, data = shared.UISignalQueue.get()
if command == 'writeNewAddressToTable':
label, address, streamNumber = data
self.emit(SIGNAL(
"writeNewAddressToTable(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"), label, address, str(streamNumber))
elif command == 'updateStatusBar':
self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"), data)
elif command == 'updateSentItemStatusByHash':
hash, message = data
self.emit(SIGNAL(
"updateSentItemStatusByHash(PyQt_PyObject,PyQt_PyObject)"), hash, message)
elif command == 'updateSentItemStatusByAckdata':
ackData, message = data
self.emit(SIGNAL(
"updateSentItemStatusByAckdata(PyQt_PyObject,PyQt_PyObject)"), ackData, message)
elif command == 'displayNewInboxMessage':
inventoryHash, toAddress, fromAddress, subject, body = data
self.emit(SIGNAL(
"displayNewInboxMessage(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"),
inventoryHash, toAddress, fromAddress, subject, body)
elif command == 'displayNewSentMessage':
toAddress, fromLabel, fromAddress, subject, message, ackdata = data
self.emit(SIGNAL(
"displayNewSentMessage(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"),
toAddress, fromLabel, fromAddress, subject, message, ackdata)
elif command == 'updateNetworkStatusTab':
self.emit(SIGNAL("updateNetworkStatusTab()"))
elif command == 'updateNumberOfMessagesProcessed':
self.emit(SIGNAL("updateNumberOfMessagesProcessed()"))
elif command == 'updateNumberOfPubkeysProcessed':
self.emit(SIGNAL("updateNumberOfPubkeysProcessed()"))
elif command == 'updateNumberOfBroadcastsProcessed':
self.emit(SIGNAL("updateNumberOfBroadcastsProcessed()"))
elif command == 'setStatusIcon':
self.emit(SIGNAL("setStatusIcon(PyQt_PyObject)"), data)
elif command == 'changedInboxUnread':
self.emit(SIGNAL("changedInboxUnread(PyQt_PyObject)"), data)
elif command == 'rerenderInboxFromLabels':
self.emit(SIGNAL("rerenderInboxFromLabels()"))
elif command == 'rerenderSentToLabels':
self.emit(SIGNAL("rerenderSentToLabels()"))
elif command == 'rerenderAddressBook':
self.emit(SIGNAL("rerenderAddressBook()"))
elif command == 'rerenderSubscriptions':
self.emit(SIGNAL("rerenderSubscriptions()"))
elif command == 'removeInboxRowByMsgid':
self.emit(SIGNAL("removeInboxRowByMsgid(PyQt_PyObject)"), data)
elif command == 'alert':
title, text, exitAfterUserClicksOk = data
self.emit(SIGNAL("displayAlert(PyQt_PyObject, PyQt_PyObject, PyQt_PyObject)"), title, text, exitAfterUserClicksOk)
elif command == 'refresh_election_ui':
election, = data
self.emit(SIGNAL("refresh_election_ui(PyQt_PyObject)"), election )
elif command == 'election_initialized':
election, = data
self.emit(SIGNAL("election_initialized(PyQt_PyObject)"), election )
elif command == "refreshBitcoinAddresses":
self.emit(SIGNAL("refreshBitcoinAddresses()"))
else:
sys.stderr.write(
'Command sent to UISignaler not recognized: %s\n' % command)
def run():
app = QtGui.QApplication(sys.argv)
translator = QtCore.QTranslator()
translationpath = os.path.join(
getattr(sys, '_MEIPASS', ''),
'translations',
'bitmessage_' + l10n.getTranslationLanguage()
)
translator.load(translationpath)
QtGui.QApplication.installTranslator(translator)
app.setStyleSheet("QStatusBar::item { border: 0px solid black }")
myapp = MyForm()
if not shared.config.getboolean('bitmessagesettings', 'startintray'):
myapp.show()
myapp.appIndicatorInit(app)
myapp.ubuntuMessagingMenuInit()
myapp.notifierInit()
if shared.safeConfigGetBoolean('bitmessagesettings', 'dontconnect'):
myapp.showConnectDialog() # ask the user if we may connect
sys.exit(app.exec_())
```
#### File: consensus/ec/bignum.py
```python
from pyelliptic.openssl import OpenSSL
from echelper import ECHelper
class BigNum:
'''
classdocs
'''
def __init__(self, os_bn=None,decval=None,binval=None):
"""
Constructs a new BN object
and fills it with the value given.
"""
if os_bn is not None:
self.bn = os_bn
self.__created_bn = False
else:
self.bn = OpenSSL.BN_new()
self.__created_bn = True
if decval is None and binval is None:
decval = 0
if decval is not None:
binval = ECHelper.int2bin( decval )
if binval is not None:
OpenSSL.BN_bin2bn( binval, len( binval ), self.bn )
def get_value(self):
binary = OpenSSL.malloc(0, OpenSSL.BN_num_bytes( self.bn ) )
OpenSSL.BN_bn2bin( self.bn, binary )
return int( binary.raw.encode('hex') or '0', 16 )
def __del__(self):
if self.__created_bn:
OpenSSL.BN_free( self.bn )
def __str__(self):
return "BigNum<0x%X>" % self.get_value()
__repr__ = __str__
```
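A minimal usage sketch of the `BigNum` wrapper above (Python 2; an installed pyelliptic with OpenSSL bindings and the module name `bignum` are assumptions):

```python
# Hypothetical usage of BigNum; the import path is assumed.
from bignum import BigNum

a = BigNum(decval=1234567890)
print a.get_value()  # 1234567890
print a              # BigNum<0x499602D2>
```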
#### File: consensus/ec/curve.py
```python
import ctypes
import math
import hashlib
from pyelliptic.openssl import OpenSSL
from echelper import ECHelper
from asnhelper import ASNHelper
import point as ec_point
class Curve:
'''
classdocs
'''
def __init__(self, curvename=None, curveid=None, openssl_group=None):
'''
Constructor
'''
if curvename != None:
curve = OpenSSL.get_curve( curvename )
self.os_group = OpenSSL.EC_GROUP_new_by_curve_name( curve )
elif curveid != None:
self.os_group = OpenSSL.EC_GROUP_new_by_curve_name( curveid )
elif openssl_group != None:
self.os_group = openssl_group
else:
raise Exception('No curve provided')
self.__set_parameters()
self.__set_base_point()
def __set_parameters(self):
size = OpenSSL.i2d_ECPKParameters(self.os_group, 0)
mb = ctypes.create_string_buffer(size)
OpenSSL.i2d_ECPKParameters(self.os_group, ctypes.byref(ctypes.pointer(mb)))
asntree = [x for x in ASNHelper.consume( mb.raw )][0]
self.ver, self.field, self.curve, self.G_raw, self.order, self.h = asntree
if self.field[0] == '1.2.840.10045.1.1': # Prime field
self.field_type = 'prime'
self.p = self.field[1]
self.bitlength = int( math.ceil( math.log( self.p, 2 ) ) )
self.a = self.curve[0]
self.b = self.curve[1]
self.f = lambda x: x**3 + self.a*x + self.b
elif self.field[0] == '1.2.840.10045.1.2': # Characteristic two field
self.field_type = 'power-of-two'
self.m = self.field[1][0]
# Maybe bitlength below is not correct..?
self.bitlength = self.m + 1
if self.field[1][1] == '1.2.840.10045.1.2.3.2': # Only one coefficient (trinomial basis)
self.poly_coeffs = [self.field[1][2]]
elif self.field[1][1] == '1.2.840.10045.1.2.3.3': # Several coefficients (pentanomial basis)
self.poly_coeffs = self.field[1][2]
else:
raise Exception('Unknown field OID %s' % self.field[1][1])
self.a = self.curve[0]
self.b = self.curve[1]
else:
raise Exception( 'Unknown curve field' )
def __set_base_point(self):
self.G = ec_point.Point( self, openssl_point=OpenSSL.EC_GROUP_get0_generator( self.os_group ) )
def hash_to_field(self, in_str):
return int( hashlib.sha512( in_str ).hexdigest()[:self.bitlength//4], 16 )
def hash_to_point(self, in_str):
return self.find_point_try_and_increment( self.hash_to_field( in_str ) )
def find_point_try_and_increment(self, x):
if self.field_type != 'prime':
raise Exception( "find_point_try_and_increment is only implemented for curves over prime fields")
## Remember to free the BIGNUMs
found = False
x -= 1
while not found:
x += 1
f_x = self.f( x )
y = ECHelper.modular_sqrt( f_x, self.p )
if y != 0:
return ec_point.Point( self, x=x, y=y )
def __eq__(self, other):
if type(other) is type(self):
return self.ver == other.ver and \
self.field == other.field and \
self.curve == other.curve and \
self.G_raw == other.G_raw and \
self.order == other.order and \
self.h == other.h
return False
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
if self.field_type == 'prime':
field = "Prime field, p: 0x%X" % self.p
equation = "y^2 = x^3"
if self.a != 0:
equation += "%+dx" % ( self.a )
if self.b != 0:
equation += "%+d" % ( self.b )
equation += " (mod p)"
elif self.field_type == 'power-of-two':
field = "Power-of-two field, f(x): x^%d+%s1" % ( self.m, "".join( map( lambda x: "x^%d+" % x, reversed( self.poly_coeffs ) ) ) )
equation = "y^2+xy = x^3"
if self.a != 0:
equation += "%+dx" % ( self.a )
if self.b != 0:
equation += "%+d" % ( self.b )
return "Curve<Equation: %s, Field: %s>" % ( equation, field )
__repr__ = __str__
```
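A short sketch of how the `Curve` class above might be used (Python 2; the import path `ec.curve` and an installed pyelliptic are assumptions):

```python
# Hypothetical usage: build secp256k1 and hash a string onto the curve.
from ec.curve import Curve

curve = Curve(curvename='secp256k1')
print curve.field_type            # 'prime'
print curve.bitlength             # 256
P = curve.hash_to_point('hello')  # try-and-increment from the field hash
print P
```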
#### File: src/consensus/helper_keys.py
```python
import hashlib, struct
import debug, highlevelcrypto, shared
from helper_sql import sqlQuery
def getMyAddresses():
"""
Generator which returns all your addresses.
"""
configSections = shared.config.sections()
for addressInKeysFile in configSections:
if addressInKeysFile != 'bitmessagesettings' and not shared.safeConfigGetBoolean(addressInKeysFile, 'chan'):
isEnabled = shared.config.getboolean(
addressInKeysFile, 'enabled') # I realize that this is poor programming practice but I don't care. It's easier for others to read.
if isEnabled:
yield addressInKeysFile
def getPrivateSigningKey( address ):
try:
privSigningKeyBase58 = shared.config.get( address, 'privsigningkey' )
except:
return None
return shared.decodeWalletImportFormat( privSigningKeyBase58 )
def getPublicSigningKey( address ):
privSigningKey = getPrivateSigningKey( address )
if privSigningKey is None:
return None
return highlevelcrypto.privToPub( privSigningKey.encode('hex') ).decode( 'hex' )
def has_pubkey_for(address, decodedAddress):
# Can return True or False
# If we need the public key for our own address or a chan,
# we can compute it from the private key
if shared.config.has_section( address ):
return True
# This is not an address where we know the private key.
# See if we already have the public key in our database:
_, toAddressVersion, toStreamNumber, toRipe = decodedAddress
queryReturn = sqlQuery( "SELECT hash FROM pubkeys WHERE hash=? AND addressversion=?", toRipe, toAddressVersion)
if queryReturn != []:
return True
if toAddressVersion >= 4: # If we are trying to send to address version >= 4 then the needed pubkey might be encrypted in the inventory.
# If we have it we'll need to decrypt it and put it in the pubkeys table.
_, toTag = compute_priv_encryption_key_and_tag(toAddressVersion, toStreamNumber, toRipe)
queryreturn = sqlQuery(
'''SELECT payload FROM inventory WHERE objecttype='pubkey' and tag=? ''', toTag)
if queryreturn != []: # if there was a pubkey in our inventory with the correct tag, we need to try to decrypt it.
for row in queryreturn:
data, = row
if shared.decryptAndCheckPubkeyPayload(data, address) == 'successful':
return True
with shared.inventoryLock:
for hash, storedValue in shared.inventory.items():
objectType, streamNumber, payload, receivedTime, tag = storedValue
if objectType == 'pubkey' and tag == toTag:
result = shared.decryptAndCheckPubkeyPayload(payload, address) #if valid, this function also puts it in the pubkeys table.
if result == 'successful':
return True
# We don't have the public key in our database.
return False
def get_pubkey_for(address, decodedAddress=None):
"""
Retrieve public key for an address.
Provide the decodedAddress if you already have it. No need to decode it more than once.
Returns None if pubkey not found, otherwise the following tuple:
( pubEncryptionKey, pubSigningKey, requiredAvgPOWNonceTrialsPerByte,
requiredPayloadLengthExtraBytes, behaviourBitfield )
The keys returned are in binary format.
"""
# Can return None, "mobile-user-disallowed", or
# ( pubEncryptionKeyBase256, pubsigningKeyBase256,
# requiredAverageProofOfWorkNonceTrialsPerByte,
# requiredPayloadLengthExtraBytes,
# behaviourBitfield )
if decodedAddress is None:
decodedAddress = shared.decodeAddress( address )
requiredAverageProofOfWorkNonceTrialsPerByte = shared.networkDefaultProofOfWorkNonceTrialsPerByte
requiredPayloadLengthExtraBytes = shared.networkDefaultPayloadLengthExtraBytes
# If we need the public key for our own address or a chan,
# we can compute it from the private key
if shared.config.has_section( address ):
try:
privSigningKeyBase58 = shared.config.get(
address, 'privsigningkey')
privEncryptionKeyBase58 = shared.config.get(
address, 'privencryptionkey')
except:
debug.logger.error( tr.translateText("MainWindow", "Error! Could not find sender address (your address) in the keys.dat file." ) )
return None
privSigningKeyHex = shared.decodeWalletImportFormat(
privSigningKeyBase58).encode('hex')
privEncryptionKeyHex = shared.decodeWalletImportFormat(
privEncryptionKeyBase58).encode('hex')
pubSigningKey = highlevelcrypto.privToPub(
privSigningKeyHex).decode('hex')[1:]
pubEncryptionKey = highlevelcrypto.privToPub(
privEncryptionKeyHex).decode('hex')[1:]
return ( pubEncryptionKey, pubSigningKey,
requiredAverageProofOfWorkNonceTrialsPerByte,
requiredPayloadLengthExtraBytes,
"\x00\x00\x00\x01" )
# This is not an address where we know the private key.
# See if we already have the public key in our database:
_, addressVersion, streamNumber, ripe = decodedAddress
queryReturn = sqlQuery( "SELECT transmitdata FROM pubkeys WHERE hash=? AND addressversion=?", ripe, addressVersion)
if queryReturn != []:
pubkeyPayload = queryReturn[0][0]
return decode_pubkey_payload( pubkeyPayload, addressVersion )
# The pubkey message is stored the way we originally received it
# which means that we need to read beyond things like the nonce and
# time to get to the actual public keys.
# We don't have the public key in our database.
return None
def decode_pubkey_payload(pubkeyPayload, addressVersion):
"""
Returns a tuple ( pubEncryptionKey, pubsigningKey,
requiredAverageProofOfWorkNonceTrialsPerByte,
requiredPayloadLengthExtraBytes,
behaviorBitfield )
by decoding the payload of a pubkey message.
Can also return "mobile-user-disallowed"
The keys are in binary format (base 256)
"""
requiredAverageProofOfWorkNonceTrialsPerByte = shared.networkDefaultProofOfWorkNonceTrialsPerByte
requiredPayloadLengthExtraBytes = shared.networkDefaultPayloadLengthExtraBytes
if addressVersion <= 3:
readPosition = 8 # to bypass the nonce
elif addressVersion >= 4:
readPosition = 0 # the nonce is not included here so we don't need to skip over it.
pubkeyEmbeddedTime, = struct.unpack(
'>I', pubkeyPayload[readPosition:readPosition + 4])
# This section is used for the transition from 32 bit time to 64
# bit time in the protocol.
if pubkeyEmbeddedTime == 0:
pubkeyEmbeddedTime, = struct.unpack(
'>Q', pubkeyPayload[readPosition:readPosition + 8])
readPosition += 8
else:
readPosition += 4
readPosition += 1 # to bypass the address version whose length is definitely 1
_, streamNumberLength = shared.decodeVarint(
pubkeyPayload[readPosition:readPosition + 10])
readPosition += streamNumberLength
behaviorBitfield = pubkeyPayload[readPosition:readPosition + 4]
# Mobile users may ask us to include their address's RIPE hash on a message
# unencrypted. Before we actually do it the sending human must check a box
# in the settings menu to allow it.
if shared.isBitSetWithinBitfield(behaviorBitfield,30): # if receiver is a mobile device who expects that their address RIPE is included unencrypted on the front of the message..
if not shared.safeConfigGetBoolean('bitmessagesettings','willinglysendtomobile'): # if we are Not willing to include the receiver's RIPE hash on the message..
# logger.info('The receiver is a mobile user but the sender (you) has not selected that you are willing to send to mobiles. Aborting send.')
# shared.UISignalQueue.put(('updateSentItemStatusByAckdata',(ackdata,tr.translateText("MainWindow",'Problem: Destination is a mobile device who requests that the destination be included in the message but this is disallowed in your settings. %1').arg(unicode(strftime(shared.config.get('bitmessagesettings', 'timeformat'),localtime(int(time.time()))),'utf-8')))))
# if the human changes their setting and then sends another message or restarts their client, this one will send at that time.
return "mobile-user-disallowed"
readPosition += 4 # to bypass the bitfield of behaviors
pubSigningKeyBase256 = pubkeyPayload[readPosition:readPosition+64]
readPosition += 64
pubEncryptionKeyBase256 = pubkeyPayload[readPosition:readPosition+64]
readPosition += 64
# Let us fetch the amount of work required by the recipient.
if addressVersion >= 3:
requiredAverageProofOfWorkNonceTrialsPerByte, varintLength = shared.decodeVarint(
pubkeyPayload[readPosition:readPosition + 10])
readPosition += varintLength
requiredPayloadLengthExtraBytes, varintLength = shared.decodeVarint(
pubkeyPayload[readPosition:readPosition + 10])
readPosition += varintLength
if requiredAverageProofOfWorkNonceTrialsPerByte < shared.networkDefaultProofOfWorkNonceTrialsPerByte: # We still have to meet a minimum POW difficulty regardless of what they say is allowed in order to get our message to propagate through the network.
requiredAverageProofOfWorkNonceTrialsPerByte = shared.networkDefaultProofOfWorkNonceTrialsPerByte
if requiredPayloadLengthExtraBytes < shared.networkDefaultPayloadLengthExtraBytes:
requiredPayloadLengthExtraBytes = shared.networkDefaultPayloadLengthExtraBytes
return ( pubEncryptionKeyBase256, pubSigningKeyBase256,
requiredAverageProofOfWorkNonceTrialsPerByte,
requiredPayloadLengthExtraBytes,
behaviorBitfield )
def compute_priv_encryption_key_and_tag( addressVersionNumber, streamNumber, ripe ):
doubleHashOfAddressData = hashlib.sha512(hashlib.sha512(shared.encodeVarint(
addressVersionNumber) + shared.encodeVarint(streamNumber) + ripe).digest()).digest()
privEncryptionKey = doubleHashOfAddressData[:32] # Note that this is the first half of the sha512 hash.
tag = doubleHashOfAddressData[32:] # Note that this is the second half of the sha512 hash.
return privEncryptionKey, tag
```
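For reference, the version-4 tag derivation in `compute_priv_encryption_key_and_tag` can be reproduced with plain hashlib; the `encodeVarint` stub below is a simplified stand-in for Bitmessage's varint encoder (single-byte case only), and the RIPE hash is a placeholder:

```python
# Standalone sketch (Python 2) of the v4 address tag derivation.
import hashlib

def encodeVarint(n):
    assert n < 0xfd  # enough for small address versions / stream numbers
    return chr(n)

ripe = '\x00' * 20  # placeholder 20-byte RIPE hash
double_hash = hashlib.sha512(hashlib.sha512(
    encodeVarint(4) + encodeVarint(1) + ripe).digest()).digest()
privEncryptionKey, tag = double_hash[:32], double_hash[32:]
print tag.encode('hex')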
#### File: src/consensus/ringsignature.py
```python
from random import randint
from pyelliptic import arithmetic
from ec import curve as ec_curve, point as ec_point
class RingSignature:
MAGIC_ID = "RINGSIGNATURE"
CURVE_NAME = 'secp256k1'
DEBUG = False
@staticmethod
def sign_message( message, pubkeys, private_key, signer_index ):
key_count = len( pubkeys )
curve = ec_curve.Curve( RingSignature.CURVE_NAME )
# Retrieve all public keys and their coordinates
public_keys = map( lambda x: RingSignature.pubkey_to_point( curve, x ), pubkeys )
public_keys_coords = map( lambda point: (point.x, point.y), public_keys )
# Make room for c_i, s_i, z'_i, and z''_i variables
cs = [0] * key_count
ss = [0] * key_count
z_s = [0] * key_count
z__s = [0] * key_count
# Step 1
public_keys_hash = curve.hash_to_field( "%s" % public_keys_coords )
H = RingSignature.H2( curve, public_keys_coords )
print "privkey: %s, %s, H: %s, %s" % ( type( private_key ), repr( private_key ), type( H ), repr( H ) )
Y_tilde = private_key * H
# Step 2
u = randint( 0, curve.order )
pi_plus_1 = (signer_index+1) % key_count
cs[pi_plus_1] = RingSignature.H1( curve, public_keys_hash, Y_tilde, message,
u * curve.G, u * H )
# Step 3
for i in range( signer_index+1, key_count ) + range( signer_index ):
ss[i] = randint( 0, curve.order )
next_i = (i+1) % key_count
z_s[i] = ss[i] * curve.G + cs[i] * public_keys[i]
z__s[i] = ss[i] * H + cs[i] * Y_tilde
cs[next_i] = RingSignature.H1( curve, public_keys_hash, Y_tilde, message, z_s[i], z__s[i] )
# Step 4
ss[signer_index] = ( u - private_key * cs[signer_index] ) % curve.order
return ( cs[0], ss, Y_tilde )
@staticmethod
def verify_message(message, pubkeys, c_0, ss, Y_tilde):
curve = ec_curve.Curve( RingSignature.CURVE_NAME )
public_keys = map( lambda x: RingSignature.pubkey_to_point( curve, x ), pubkeys )
public_keys_coords = map( lambda point: (point.x, point.y), public_keys )
n = len( public_keys )
cs = [c_0] + [0] * ( n - 1 )
z_s = [0] * n
z__s = [0] * n
# Step 1
public_keys_hash = curve.hash_to_field( "%s" % public_keys_coords )
H = RingSignature.H2( curve, public_keys_coords )
for i in range( n ):
z_s[i] = ss[i] * curve.G + cs[i] * public_keys[i]
z__s[i] = ss[i] * H + cs[i] * Y_tilde
if i < n - 1:
cs[i+1] = RingSignature.H1( curve, public_keys_hash, Y_tilde, message, z_s[i], z__s[i] )
print "Verify: n: %d, len(z_s): %d, len(z__s): %d" % ( n, len(z_s), len( z__s) )
H1_ver = RingSignature.H1( curve, public_keys_hash, Y_tilde, message, z_s[n-1], z__s[n-1] )
return cs[0] == H1_ver
@staticmethod
def H2( curve, in_str ):
"""
Hash the input as a string and return the hash as a point on the curve.
"""
return curve.hash_to_point( "H2_salt%s" % in_str )
@staticmethod
def H1( curve, keys, Y_tilde, message, P1, P2):
"""
The H1 function that hashes a lot of variables
and returns the hash as an integer.
"""
string_to_hash = "%s,%s,%s,%X,%X,%X,%X" % ( keys, Y_tilde, message,
P1.x, P1.y, P2.x, P2.y)
return curve.hash_to_field( "H1_salt%s" % string_to_hash )
@staticmethod
def pubkey_to_point(curve, pubkey):
assert len( pubkey ) == 64
return ec_point.Point( curve,
x=arithmetic.decode( pubkey[:32], 256 ),
y=arithmetic.decode( pubkey[32:], 256 ) )
```
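A hypothetical sign/verify roundtrip (Python 2). The int-times-`Point` multiplication and `arithmetic` helpers follow the repo's own usage above; the key generation and import paths here are assumptions for illustration:

```python
from random import randint
from pyelliptic import arithmetic
from ringsignature import RingSignature  # module path assumed
from ec import curve as ec_curve

curve = ec_curve.Curve(RingSignature.CURVE_NAME)
privkeys = [randint(1, curve.order - 1) for _ in range(3)]

def point_to_pubkey(P):
    # 64-byte x||y encoding expected by RingSignature.pubkey_to_point
    enc = lambda v: arithmetic.encode(v, 256).rjust(32, '\x00')
    return enc(P.x) + enc(P.y)

pubkeys = [point_to_pubkey(k * curve.G) for k in privkeys]
c_0, ss, Y_tilde = RingSignature.sign_message('ballot', pubkeys, privkeys[1], 1)
print RingSignature.verify_message('ballot', pubkeys, c_0, ss, Y_tilde)  # True
```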
|
{
"source": "JesperBry/-course-TDT4120---Algorithms",
"score": 4
}
|
#### File: -course-TDT4120---Algorithms/Algorithms/Radix_Sort.py
```python
from math import floor, log
from Counting_Sort_2 import countingSort
# A = array/list
# radix is the base of the number system
def radixSort(A, radix):
k = max(A)
out = A
d = int(floor(log(k, radix) + 1))
for i in range(d):
out = countingSort(out, i, radix)
return out
A = [9,3,1,4,5,7,7,2,20,55]
r = radixSort(A, max(A))
print(r)
```
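`Counting_Sort_2.countingSort` is not included above; a self-contained stand-in with the signature assumed from the call site (stable sort of `A` by digit `i` in the given base) could look like this:

```python
# Hypothetical stand-in for Counting_Sort_2.countingSort.
def countingSort(A, i, base):
    digit = lambda x: (x // base**i) % base
    count = [0] * base
    for x in A:                # histogram of the i-th digits
        count[digit(x)] += 1
    for d in range(1, base):   # prefix sums -> final positions
        count[d] += count[d - 1]
    out = [0] * len(A)
    for x in reversed(A):      # walk backwards to keep the sort stable
        count[digit(x)] -= 1
        out[count[digit(x)]] = x
    return out

print(countingSort([9, 3, 1, 4, 5, 7, 7, 2, 20, 55], 0, 10))
# [20, 1, 2, 3, 4, 5, 55, 7, 7, 9]
```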
|
{
"source": "jesperdj9/FantasyFootballProj",
"score": 3
}
|
#### File: jesperdj9/FantasyFootballProj/able_players.py
```python
import numpy as np
import pandas as pd
import random as rd
import random
from random import randint
from random import sample
import matplotlib.pyplot as plt
import requests
pd.options.display.max_rows = None
pd.options.display.max_columns = None
import itertools
class get_able_players():
def __init__(self, player_predictions):
self.player_predictions = player_predictions
self.retrieve_players()
self.add_cost_chances()
self.select_able_players()
self.organise_tuples()
def retrieve_players(self):
#Method to retrieve the live data of player status
#@params None
#@return None
url = 'https://fantasy.premierleague.com/api/bootstrap-static/'
r = requests.get(url)
json = r.json()
stats_df = pd.DataFrame(json['element_stats'])
self.elements_df = pd.DataFrame(json['elements'])
def add_cost_chances(self):
#Method to add the cost and chances of playing to the dataframe for each player
#@params None
#@return None
player_predictions = self.player_predictions
for index,row in player_predictions.iterrows():
name = player_predictions["web_name"][index]
for _index,_row in self.elements_df.iterrows():
second_name = self.elements_df["web_name"][_index]
if name == second_name:
player_predictions.at[index, "now_cost"] = self.elements_df["now_cost"][_index]
player_predictions.at[index, "chances"] = self.elements_df["chance_of_playing_next_round"][_index]
player_predictions.at[index, "exp_points"] = self.elements_df["ep_next"][_index]
self.player_predictions = player_predictions
def select_able_players(self):
#Method to remove players that have little chance of playing or are predicted to have less than 2 points
#@params None
#@return None
player_predictions = self.player_predictions
player_predictions = player_predictions[player_predictions["chances"]!= 0.0]
player_predictions = player_predictions[player_predictions["chances"]!= 25.0]
player_predictions = player_predictions[player_predictions["chances"]!= 50.0]
for index,row in player_predictions.iterrows():
player_predictions.at[index, "exp_points"] = float(player_predictions.at[index, "exp_points"])
player_predictions = player_predictions[player_predictions["exp_points"] > 2.0]
player_predictions["now_cost"] = (player_predictions["now_cost"] / 10)
self.player_predictions = player_predictions
def organise_tuples(self):
#Method to organise player rows for the hill climber algorithm, converting the dataframe into a list of [name, element, team, predicted_points, now_cost] entries
#@params None
#@return None
players = []
player_predictions = self.player_predictions
for index,row in player_predictions.iterrows():
name = player_predictions["web_name"][index]
element = player_predictions["element_type"][index]
team = player_predictions["team"][index]
predicted_points = player_predictions["predicted_points"][index]
now_cost = player_predictions["now_cost"][index]
players.append([name,element,team,predicted_points,now_cost])
self.players = players
```
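A usage sketch, assuming a predictions dataframe with the columns referenced above (`web_name`, `element_type`, `team`, `predicted_points`) and network access to the FPL API:

```python
# Hypothetical usage; the CSV file name is a placeholder.
import pandas as pd

predictions = pd.read_csv('player_predictions.csv')
able = get_able_players(predictions)
print(able.players[:5])  # [name, element, team, predicted_points, now_cost]
```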
#### File: jesperdj9/FantasyFootballProj/train_fixture_model.py
```python
import numpy as np
import pandas as pd
pd.options.display.max_columns = None
import requests
np.set_printoptions(suppress=True)
from numpy import loadtxt
from tensorflow import keras
from tensorflow.python.keras.layers import Input, Dense
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.wrappers.scikit_learn import KerasClassifier
from tensorflow.python.keras.utils import np_utils
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
from tensorflow.python.keras.models import load_model
from tensorflow.python.keras.callbacks import EarlyStopping
import matplotlib as plt
from matplotlib import pyplot as plt
class trainrf():
def __init__(self,training_dataset,gameweek_id):
#training_dataset = historcal data used for training
#gameweek_id = to see which data to predict
self.training_dataset = training_dataset
self.gameweek_chosen = gameweek_id
self.fixture_prediction()
self.gameweek_static = self.gameweek.copy()
self.gameweek = self.gameweek.copy()
self.predictions_r = {}
self.train()
def train(self):
self.X,self.y = self.prepare_data(self.dataset)
self.X_test, self.y_test = self.prepare_data(self.gameweek)
self.randomforest()
def fixture_prediction(self):
#Method to see which matches are played in the chosen gameweek
#@params None
#@return None
partial_dataset = self.training_dataset.copy()
#Prepare gameweek data to see which matches are being played in gameweek n
url = 'https://fantasy.premierleague.com/api/fixtures?event='+ str(self.gameweek_chosen)
r = requests.get(url)
json_new = r.json()
fixtures_df = pd.DataFrame(json_new)
#Retrieve home and away team ids for the first and last fixture of gameweek n
home_team = fixtures_df.iloc[0]["team_h"]
away_team = fixtures_df.iloc[0]["team_a"]
home_team_last = int(fixtures_df.iloc[-1:]["team_h"])
away_team_last = int(fixtures_df.iloc[-1:]["team_a"])
#Create a new dataset - up to the first fixture of the gameweek n chosen.
#Does not include the gameweek n in dataset as this is what is being predicted
for index,row in partial_dataset.iterrows():
teamh = partial_dataset["Team A"][index]
teama = partial_dataset["Team B"][index]
if int(teamh) == int(home_team) and int(teama) == int(away_team):
#index represents the fixture row which is the first fixture of gameweek n.
self.dataset = partial_dataset[:index]
first = index
if teamh == home_team_last and teama == away_team_last:
#get the index of the dataset corresponding to the last fixture of gameweek n
last = index
#Using the first and last fixtures of gameweek n, we can grab a snippet of the training dataset
self.gameweek = partial_dataset[first:last+1]
def prepare_data(self, dataset):
#Method to load the dataframe into the appropriate format
#@params None
#@return X, Y = Our X data and Y label data
dataset["labels"] = dataset["result"]
del dataset["Team A"]
del dataset["Team B"]
del dataset["Fixture"]
del dataset["result"]
dataset.to_csv(r'datasetML.csv',header = False, index = False)
dataset = loadtxt(r'datasetML.csv', delimiter=',')
X = dataset[:,0:25].astype(float)
Y = dataset[:,25]
return X,Y
def scale(self,X):
#Method to scale down data
#@params X
#@return X
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X.astype(np.float64))
return X_scaled
def randomforest(self, iterations = 15, leaf_iter = 12):
#Method to train a random forest on ideal parameters with scaled and non-scaled data
#@params iterations = range bound for number of trees, leaf_iter = iterations for number of leaf nodes
#@return None
y_train = self.y
for run in range(0,2):
if run == 1:
X_train = self.scale(self.X)
self.X_test = self.scale(self.X_test)
else:
X_train = self.X
overall_r = []
for e in range(7,iterations+1):
for j in range(1,leaf_iter):
leafnodes = 24*(2*j)
classifier = RandomForestClassifier(n_estimators = 10 * e, max_leaf_nodes = leafnodes, criterion = 'entropy', random_state = 42)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(self.X_test)
#add each prediction
r_pred = []
for i in y_pred.tolist():
r_pred.append(i)
overall_r.append(r_pred)
#run this only the first time so the dictionaries are initialised
if run == 0:
for i,pred_list in enumerate(overall_r):
for j,each in enumerate(pred_list):
self.predictions_r[j] = {}
for i,pred_list in enumerate(overall_r):
for j,each in enumerate(pred_list):
self.predictions_r[j][each] = self.predictions_r[j].get(each,0)+1
def post_prep_rf(self):
#Method to prepare the results with the most predicted label for each match chosen
#@params None
#@return gameweek predictions, accuracies
#grab the best prediction
prediction = []
second_pred = []
import operator
for key,values in self.predictions_r.items():
prediction.append(max(values.items(), key=operator.itemgetter(1))[0])
del values[(max(values.items(), key=operator.itemgetter(1))[0])]
i = 0
for key,values in self.predictions_r.items():
try:
second_pred.append(max(values.items(), key=operator.itemgetter(1))[0])
except ValueError:
second_pred.append(prediction[i])
i += 1
gameweek_forPM_df = pd.DataFrame()
result_score = {1.0:[3,0], 2.0:[2,1], 3.0:[1,0], 4.0:[2,2], 5.0:[1,1], 6.0:[0,0], 7.0:[0,1], 8.0:[1,2], 9.0:[0,3]}
gameweek_forPM_df["team H"] = self.gameweek_static["Team A"]
gameweek_forPM_df["team A"] = self.gameweek_static["Team B"]
i = 0
for index, row in gameweek_forPM_df.iterrows():
gameweek_forPM_df.at[index,"team_h_score"] = result_score[prediction[i]][0]
gameweek_forPM_df.at[index,"team_a_score"] = result_score[prediction[i]][1]
i+=1
accuracies =[]
full_acc= 0
second_acc = 0
no_hit = 0
for i,value in enumerate(prediction):
if value == self.y_test[i]:
full_acc += 1
elif second_pred[i] == self.y_test[i]:
second_acc += 1
else:
no_hit+=1
accuracies.append([full_acc/len(prediction),second_acc/len(prediction),no_hit/len(prediction)])
#return the gameweek for Player Model, and the accuracies of labels from the Fixture prediction matches.
return gameweek_forPM_df,accuracies
```
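A usage sketch, assuming a training CSV with the `Team A` / `Team B` / `Fixture` / `result` columns consumed above:

```python
# Hypothetical usage; the file name and gameweek are placeholders.
import pandas as pd

history = pd.read_csv('dataset_training.csv')
model = trainrf(history, gameweek_id=30)
gameweek_df, accuracies = model.post_prep_rf()
print(gameweek_df.head())
print(accuracies)  # [exact-label rate, second-choice rate, miss rate]
```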
|
{
"source": "JesperDramsch/CNN-for-ASI",
"score": 3
}
|
#### File: JesperDramsch/CNN-for-ASI/tensorboard.py
```python
try:
import tensorflow as tf
except:
print 'Tensorflow could not be imported, therefore tensorboard cannot be used.'
from StringIO import StringIO
import matplotlib.pyplot as plt
import numpy as np
import torch
import datetime
class TBLogger(object):
def __init__(self, log_dir, folder_name = '' ):
self.log_dir = log_dir+ '/' + folder_name + ' ' + datetime.datetime.now().strftime("%I:%M%p, %B %d, %Y") + '/'
self.log_dir = self.log_dir.replace('//','/')
self.writer = tf.summary.FileWriter(self.log_dir)
#Add scalar
def log_scalar(self, tag, value, step=0):
summary = tf.Summary(value=[tf.Summary.Value(tag=tag,
simple_value=value)])
self.writer.add_summary(summary, step)
def make_list_of_2D_array(self, im):
if type(im) == type([]):
return im
ims = []
if len(im.shape) == 2:
ims.append(im)
elif len(im.shape) == 3:
for i in range(im.shape[0]):
ims.append(np.squeeze(im[i,:,:]))
elif len(im.shape) == 4:
for i in range(im.shape[0]):
ims.append(np.squeeze(im[i, 0, :, :]))
return ims
def log_images(self, tag, images, step=0, dim = 2, max_imgs = 50,cm='jet'):
#Make sure images are on numpy format in case the input is a Torch-variable
images = self.convert_to_numpy(images)
try:
if len(images.shape)>2:
dim = 3
except:
pass
#Make list of images
if dim == 2:
images = self.make_list_of_2D_array(images)
#If 3D we make one list for each slice-type
if dim == 3:
new_images_ts, new_images_il, new_images_cl = self.get_slices_from_3D(images)
self.log_images(tag + '_timeslice', new_images_ts, step, 2, max_imgs)
self.log_images(tag + '_inline', new_images_il, step, 2, max_imgs)
self.log_images(tag + '_crossline', new_images_cl, step, 2, max_imgs)
return
im_summaries = []
for nr, img in enumerate(images):
#Grayscale
if cm == 'gray' or cm == 'grey':
img = img.astype('float')
img = np.repeat(np.expand_dims(img,2),3,2)
img -= img.min()
img /= img.max()
img *= 255
img = img.astype('int8')
# Write the image to a string
s = StringIO()
plt.imsave(s, img, format='png')
# Create an Image object
img_sum = tf.Summary.Image(encoded_image_string=s.getvalue(),
height=img.shape[0],
width=img.shape[1])
# Create a Summary value
im_summaries.append(tf.Summary.Value(tag='%s/%d' % (tag, nr),
image=img_sum))
#if nr == max_imgs-1:
# break
# Create and write Summary
summary = tf.Summary(value=im_summaries)
self.writer.add_summary(summary, step)
# Cuts out middle slices from image
def get_slices_from_3D(self, img):
new_images_ts = []
new_images_il = []
new_images_cl = []
if len(img.shape) == 3:
new_images_ts.append(np.squeeze(img[img.shape[0] / 2, :, :]))
new_images_il.append(np.squeeze(img[:, img.shape[1] / 2, :]))
new_images_cl.append(np.squeeze(img[:, :, img.shape[2] / 2]))
elif len(img.shape) == 4:
for i in range(img.shape[0]):
new_images_ts.append(np.squeeze(img[i, img.shape[1] / 2, :, :]))
new_images_il.append(np.squeeze(img[i, :, img.shape[2] / 2, :]))
new_images_cl.append(np.squeeze(img[i, :, :, img.shape[3] / 2]))
elif len(img.shape) == 5:
for i in range(img.shape[0]):
new_images_ts.append(np.squeeze(img[i, 0, img.shape[2] / 2, :, :]))
new_images_il.append(np.squeeze(img[i, 0, :, img.shape[3] / 2, :]))
new_images_cl.append(np.squeeze(img[i, 0, :, :, img.shape[4] / 2]))
return new_images_ts, new_images_il, new_images_cl
#Convert torch to numpy
def convert_to_numpy(self,im):
if type(im) == torch.autograd.variable.Variable:
#Put on CPU
im = im.cpu()
#Get np-data
im = im.data.numpy()
return im
```
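A usage sketch (Python 2, assuming a TensorFlow 1.x install that still provides `tf.summary.FileWriter`, plus the era-appropriate PyTorch the file imports):

```python
# Hypothetical usage; log directory and tags are placeholders.
import numpy as np

logger = TBLogger('./logs', folder_name='train_run')
logger.log_scalar('train/loss', 0.42, step=1)
logger.log_images('val/volume', np.random.rand(4, 1, 32, 32), step=1)
# inspect with: tensorboard --logdir ./logs
```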
|
{
"source": "jesper-friis/soft7",
"score": 3
}
|
#### File: soft7/dataspace/entitystore.py
```python
from pymongo import MongoClient
from pymongo.errors import ConnectionFailure
from urllib.parse import quote_plus
import os
class MongoStore:
"""
Storing soft entities
"""
def __init__(self, client=None):
if not client:
MONGODB_HOST = os.environ.get('MONGODB_HOST')
MONGODB_PORT = int(os.environ.get('MONGODB_PORT', '27017'))
MONGODB_USER = os.environ.get('MONGODB_USER')
MONGODB_PASSWORD = os.environ.get('MONGODB_PASSWORD')
uri = f"mongodb://{quote_plus(MONGODB_USER)}:{quote_plus(MONGODB_PASSWORD)}@{quote_plus(MONGODB_HOST)}:{MONGODB_PORT}"
client = MongoClient(uri)
try:
client.admin.command('ismaster')
except ConnectionFailure:
print ("Cannot connect to server")
self._client = client
self._db = self._client.soft
self._coll = self._db.entities
def __del__(self):
self._client.close()
def read(self, uriref=None):
if not uriref:
return [e['uri'] for e in self._coll.find({})]
entity = self._coll.find_one({"uri": uriref}, {'_id': False})
return entity
def create(self, entity):
assert 'uri' in entity
uriref = entity['uri']
if self._coll.find_one({"uri": uriref}) is not None:
raise Exception(f"Entity with uri: `{uriref}` already exists. Use update() to modify.")
self._coll.insert_one(entity)
def update(self, entity):
assert 'uri' in entity
uriref = entity['uri']
self._coll.replace_one({"uri": uriref}, entity)
class EntityStore:
def __init__(self, client=None):
self.store = MongoStore(client)
def __enter__(self):
return self.store
def __exit__(self, exc_type, exc_val, exc_tb):
del self.store
```
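A usage sketch, assuming a reachable MongoDB and the `MONGODB_*` environment variables set (or an explicit pymongo client passed in):

```python
# Hypothetical usage; the entity URI is a placeholder.
with EntityStore() as store:
    store.create({'uri': 'http://example.com/entity/0.1/Atom'})
    print(store.read())                                      # all stored URIs
    print(store.read('http://example.com/entity/0.1/Atom'))  # one entity
```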
#### File: soft7/soft7/rdf.py
```python
from jinja2 import Template
import uuid
__ttl_template__ = """
@prefix : <{{ base }}#> .
@prefix owl: <http://www.w3.org/2002/07/owl#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix xml: <http://www.w3.org/XML/1998/namespace> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix soft: <http://www.quaat.com/ontologies#> .
@base <{{ base }}> .
{% set entity_uuid = uuid() %}
{% set uri_uuid = uuid() %}
:Entity_{{ entity_uuid }} rdf:type owl:NamedIndividual ,
soft:Entity ;
soft:uri "{{ dict['uri'] }}"^^xsd:anyURI ;
rdfs:label "{{ name }}"@en .
#################################################################
# Dimensions
#################################################################
{% set dim_uuid = {} %}
{% for dimension in dict['dimensions'] %}
{% set _dummy = dim_uuid.update( {dimension: uuid() }) %}
:Dimension_{{ dim_uuid[dimension] }} rdf:type owl:NamedIndividual ,
soft:Dimension ;
:dp_dimension_id "{{ dimension }}"^^xsd:string ,
"{{ dict['dimensions'][dimension] }}"^^xsd:string ;
rdfs:label "{{ dimension }}"@en .
:Entity_{{ entity_uuid }} soft:dimension :Dimension_{{ dim_uuid[dimension] }} .
{% endfor %}
#################################################################
# Properties
#################################################################
{% set prop_uuid = {} %}
{% for property in dict['properties'] %}
{% set _dummy = prop_uuid.update( {property: uuid() }) %}
{% if 'shape' in dict['properties'][property] %}
####################
# Property shapes
####################
{% set shape_uuid = {} %}
{% for idx, shape in enumerate(dict['properties'][property]['shape']) | reverse %}
{% set _dummy = shape_uuid.update( {shape: uuid() }) %}
:Shape_{{ shape_uuid[shape]}} rdf:type owl:NamedIndividual ,
soft:Shape ;
soft:hasDimension :Dimension_{{dim_uuid[shape]}} ;
{% if idx < (len(dict['properties'][property]['shape']) - 1) %}
soft:hasShape :Shape{{ shape_uuid[(dict['properties'][property]['shape'][idx+1])] }} ;{% endif %}
rdfs:label "{{ shape }}"@en .
{% endfor %}
{% endif %}
############## Property: {{ property }}
:Property_{{ prop_uuid[property] }} rdf:type owl:NamedIndividual ,
soft:Property ;
{% if 'shape' in dict['properties'][property] %}
soft:hasShape :Shape_{{shape_uuid[(dict['properties'][property]['shape'][0])]}} ;
{% endif %}
soft:property_description "{{ dict['properties'][property]['description'] }}"^^xsd:string ;
soft:property_label "{{ dict['properties'][property]['label'] }}"^^xsd:string ;
soft:property_type "{{ dict['properties'][property]['type'] }}"^^xsd:string ;
soft:property_unit "{{ dict['properties'][property]['unit'] }}"^^xsd:string ;
rdfs:label "{{ property }}"@en .
:Entity_{{ entity_uuid }} soft:property :Property_{{ prop_uuid[property] }} .
{% endfor %}
"""
class Turtle:
"""
Turtle RDF format writer
"""
@staticmethod
def dumps(dict):
"""
"""
template = Template(__ttl_template__)
output = template.render(base="http://example.com/entity",
name='Entity',
len=len,
enumerate=enumerate,
dict=dict,
uuid=lambda : str(uuid.uuid4()).replace('-','_'))
return output
@staticmethod
def dump(dict, file):
"""
"""
with open(file, "w") as ttl:
ttl.write(Turtle.dumps(dict))
```
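A sketch of an entity dict carrying the keys the template reads (`uri`, `dimensions`, and `properties` entries with `description`/`label`/`type`/`unit` and an optional `shape`); all values are placeholders:

```python
# Hypothetical entity for Turtle.dumps.
entity = {
    'uri': 'http://example.com/entity/0.1/Particle',
    'dimensions': {'n': 'number of particles'},
    'properties': {
        'mass': {
            'description': 'particle mass',
            'label': 'mass',
            'type': 'float',
            'unit': 'kg',
            'shape': ['n'],
        }
    },
}
print(Turtle.dumps(entity))  # Turtle/RDF serialization of the entity
```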
|
{
"source": "JesperGlas/mini_pyprojects",
"score": 4
}
|
#### File: src/hello/test_hello.py
```python
import unittest
from hello_world import hello
class TestHello(unittest.TestCase):
def test_empty(self):
self.assertEqual(hello(), "Hello you!")
def test_one_name(self):
self.assertEqual(hello(["Jesper"]), "Hello Jesper!")
def test_many_name(self):
self.assertEqual(hello(["Kasper", "Jesper", "Jonathan"]), "Hello Kasper, <NAME>!")
if __name__ == '__main__':
unittest.main()
```
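The `hello_world` module under test is not shown; an implementation passing the first two tests might look like the sketch below. The expected multi-name string is masked as `<NAME>` above, so the joining format here is a guess:

```python
# Hypothetical hello_world.hello; the multi-name join format is assumed.
def hello(names=None):
    if not names:
        return 'Hello you!'
    if len(names) == 1:
        return 'Hello %s!' % names[0]
    return 'Hello %s and %s!' % (', '.join(names[:-1]), names[-1])
```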
|
{
"source": "JesperGlas/pyart",
"score": 3
}
|
#### File: pyart/src/pixelate.py
```python
from PIL import Image, ImageDraw
from typing import Tuple, List
from random import randint
from math import gcd, floor
from statistics import mean
WIDTH = 1920
HEIGHT = 1080
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
PATH_IN = 'in.jpg'
PATH_OUT = 'out/pixelate.jpg'
def main():
print('Hello World!')
new_image = Image.open(PATH_IN)
sized_image: Image = transform_size(new_image)
canvas = ImageDraw.Draw(sized_image)
mesh: List[Tuple[int, int, int]] = generate_mesh()
for poly in mesh:
canvas.polygon(poly, fill=average_color(sized_image, poly, 2))
sized_image.save(PATH_OUT)
def generate_mesh(detail: int = 5) -> List[Tuple[int, int, int]]:
poly_list: List[Tuple[int]] = []
x: float = 0.0
y: float = 0.0
max_size: float = gcd(WIDTH, HEIGHT)
size: float = max_size / detail
while y < HEIGHT:
y_next: float = y+size
while x < WIDTH:
x_next: float = x+size
poly_list.append((
(x, y),
(x_next, y),
(x_next, y_next),
(x, y_next)
))
x += size
x = 0
y += size
return poly_list
def transform_size(image, new_width: int=WIDTH, new_height: int=HEIGHT) -> Image:
new_size: Tuple = (new_width, new_height)
return image.resize(new_size)
def randomColor() -> Tuple[int, int, int]:
return (randint(0, 255), randint(0, 255), randint(0, 255))
def average_color(img: Image, square: Tuple, step: int=1) -> Tuple[int, int, int]:
rgb_img = img.convert('RGB')
R: List[int] = []
G: List[int] = []
B: List[int] = []
x_start: int = floor(square[0][0])
x_end: int = floor(square[2][0])
y_start: int = floor(square[0][1])
y_end: int = floor(square[2][1])
for y in range(y_start, y_end, step):
for x in range(x_start, x_end, step):
rp, gp, bp = rgb_img.getpixel((x, y))
R.append(rp)
G.append(gp)
B.append(bp)
return floor(mean(R)), floor(mean(G)), floor(mean(B))
if __name__ == '__main__':
main()
```
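A quick check of the mesh granularity used above: with the default detail of 5, the square size is `gcd(WIDTH, HEIGHT) / detail`:

```python
# gcd(1920, 1080) = 120, so detail = 5 gives 24 px squares in an 80 x 45 grid.
from math import gcd

size = gcd(1920, 1080) / 5
print(size, 1920 / size, 1080 / size)  # 24.0 80.0 45.0
```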
|
{
"source": "jesperiksson/SoccermaticsForPython",
"score": 3
}
|
#### File: jesperiksson/SoccermaticsForPython/Club_Performance_BenjaminMeco_Final.py
```python
import matplotlib.pyplot as plt
import numpy as np
import json
import pandas as pd
from pandas import json_normalize
from FCPython import createPitch
import statsmodels.formula.api as smf
def factorial(n):
if(n == 0):
return 1
else:
return n*factorial(n-1)
def pois(l,k):
return (l**k)*np.exp(-l)/factorial(k)
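# Quick sanity check for pois (the Poisson pmf P(X = k) for rate l):
# pois(1.5, 1) = 1.5 * exp(-1.5) ~ 0.3347, and the values
# pois(1.5, 0), pois(1.5, 1), ... sum to 1.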
# helper for looking up a team's index by name in the lists below
def indexOf(team_name,team_list):
index = -1
for element in team_list:
index = index + 1
if(element[0] == team_name):
return index
return -1
# for getting the distributions:
def getWeights(arr,size):
weights = np.zeros(size)
W = 0
for k in range(0,size):
W = W + arr[k]
for k in range(0,size):
weights[k] = arr[k]/W
return weights
def outcomeWeights(r,probabilities):
s = probabilities[0]
count = 0
for p in probabilities:
if(s > r):
return count
else:
count = count + 1
s = s + p
return count
# this makes a simulation using weights as the probability distribution
def simulate(team_list):
points = np.zeros(len(team_list))
for i in range(0,len(team_list)):
for j in range(1,len(team_list)):
t_1 = team_list[i]
t_2 = team_list[(i+j)%len(team_list)]
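            # Expected goals for each side: the mean of its scoring rate and the opponent's concession rate.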
lambda_1 = (t_1[1] + t_2[2])/2
lambda_2 = (t_1[2] + t_2[1])/2
g_1 = int(np.random.poisson(lambda_1))
g_2 = int(np.random.poisson(lambda_2))
if(g_1 > g_2):
points[i] = points[i] + 3
elif(g_1 < g_2):
points[(i+j)%len(team_list)] = points[(i+j)%len(team_list)] + 3
else:
points[i] = points[i] + 1
points[(i+j)%len(team_list)] = points[(i+j)%len(team_list)] + 1
result = []
for i in range(0,len(team_list)):
result = result + [[points[i],team_list[i][0]]]
return result
def simulMany(team_list,N):
team_placements = []
for t in team_list:
team_placements = team_placements + [[t[0],np.zeros(21)]]
for n in range(N):
# do a simulation:
s = sorted(simulate(team_list))
# get the placements:
for i in range(0,len(s)):
e = s[i]
index = indexOf(e[1],team_list)
team_placements[index][1][20-i] = team_placements[index][1][20-i] + 1
for t in team_placements:
t[1] = getWeights(t[1],21)[1:]
return team_placements
#Load the data
with open('Wyscout/matches/matches_England.json') as data_file:
data = json.load(data_file)
df = json_normalize(data, sep = "_")
# first we extract the relevant bits of the matches:
matches = []
for i,game in df.iterrows():
label = game['label']
dash = label.find(" -")
comma = label.find(",")
team_1 = label[0:dash]
team_2 = label[dash+3:comma]
score_1 = label[comma+2:comma+3]
score_2 = label[comma+6:]
matches = matches + [[team_1,score_1,team_2,score_2]]
# now we make the distributions for each team:
teamList = []
for m in matches:
index_1 = indexOf(m[0],teamList)
index_2 = indexOf(m[2],teamList)
# update the data for the first team
if(index_1 == -1):
new_team = [m[0],0,0]
new_team[1] = int(m[1])
new_team[2] = int(m[3])
teamList = teamList + [new_team]
else:
teamList[index_1][1] = teamList[index_1][1] + int(m[1])
teamList[index_1][2] = teamList[index_1][2] + int(m[3])
# update the data for the second team
if(index_2 == -1):
new_team = [m[2],0,0]
new_team[1] = int(m[3])
new_team[2] = int(m[1])
teamList = teamList + [new_team]
else:
teamList[index_2][1] = teamList[index_2][1] + int(m[3])
teamList[index_2][2] = teamList[index_2][2] + int(m[1])
teamList.sort()
# now we get the desired data for the weights and the poisson distributions:
teamPoisson = []
for t in teamList:
teamPoisson = teamPoisson + [[t[0],t[1]/38,t[2]/38]]
# finally some simulations, first with the Poisson distributions:
N = 10000
alph = 0.8
W = 0.25
team_placements = simulMany(teamPoisson,N)
col = (1,0,0)
c = -1
plt.figure(dpi = 160)
for t in team_placements:
if(t[0] == "Liverpool" or t[0] == "Manchester City" or t[0] == "Manchester United"):
plt.bar([k+c*(W + 0.03) for k in range(1,21)],t[1], color = col, label = t[0],alpha = 0.9,width = W)
c = c+1
if(col == (1,0,0)):
col = (0,1,0)
else:
col = (0,0,1)
plt.xlabel("Placement")
plt.ylabel("Probability of placement")
plt.xticks(range(1,21))
plt.xlim(0,8)
plt.legend()
plt.show()
# next we look at how the performance of liverpool changes when they
# improve the offence/defence or both. We do this by changing their parameters in the
# poisson distribution.
lambda_off = teamPoisson[indexOf("Liverpool",teamPoisson)][1]
lambda_def = teamPoisson[indexOf("Liverpool",teamPoisson)][2]
# first we look at improving offence:
plt.figure(dpi = 160)
c = -1
for d in np.linspace(20,10,2):
print(str(d))
# make the modifications:
teamPoisson[indexOf("Liverpool",teamPoisson)][1] = lambda_off + d/38
# simulate and plot the distributions of Liverpool:
T = simulMany(teamPoisson,N)
t = T[indexOf("Liverpool",T)]
plt.bar([k+c*(W + 0.03) for k in range(1,21)],t[1], color = (0.5-c*0.5,0,0),width = W, label = "Scoring " +str(d) + " more goals", alpha = alph)
c = c+1
plt.bar([k+c*(W + 0.03) for k in range(1,21)],team_placements[indexOf("Liverpool",team_placements)][1],color = "black", width = W, label = "Baseline")
plt.xlabel("Placement")
plt.ylabel("Probability of placement for Liverpool\n with improved offence")
plt.xticks(range(1,21))
plt.xlim(0,10)
plt.legend()
plt.show()
plt.figure(dpi = 160)
# secondly we look at improving defence (restoring the baseline offence first):
teamPoisson[indexOf("Liverpool",teamPoisson)][1] = lambda_off
c = -1
for d in np.linspace(20,10,2):
print(str(d))
# make the modifications:
teamPoisson[indexOf("Liverpool",teamPoisson)][2] = lambda_def-d/38
# simulate and plot the distributions of Liverpool:
T = simulMany(teamPoisson,N)
t = T[indexOf("Liverpool",T)]
plt.bar([k+c*(W + 0.03) for k in range(1,21)],t[1], color = (0,0,0.5-c*0.5),width = W, label = "Conceding " +str(d) + " fewer goals",alpha = alph)
c = c+1
plt.bar([k+c*(W + 0.03) for k in range(1,21)],team_placements[indexOf("Liverpool",team_placements)][1],color = "black",width = W, label = "Baseline")
plt.xlabel("Placement")
plt.ylabel("Probability of placement for Liverpool\n with defence")
plt.xticks(range(1,21))
plt.xlim(0,10)
plt.legend()
plt.show()
c = -1
col = (1,0,0)
plt.figure(dpi = 160)
for t in T:
if(t[0] == "Liverpool" or t[0] == "Manchester City" or t[0] == "Manchester United"):
plt.bar([k+c*(W + 0.03) for k in range(1,21)],t[1], color = col, label = t[0],alpha = 0.9,width = W)
c = c+1
if(col == (1,0,0)):
col = (0,1,0)
else:
col = (0,0,1)
plt.xlabel("Placement")
plt.ylabel("Probability of placement")
plt.xticks(range(1,21))
plt.xlim(0,8)
plt.legend()
plt.show()
```
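As a quick illustration of the season model above (a sketch, with made-up team parameters in the same `[name, goals scored per game, goals conceded per game]` format as `teamPoisson`):
```python
# Three hypothetical teams; simulate() returns [points, name] pairs for one season.
toy_teams = [["Alpha", 1.8, 0.9], ["Beta", 1.2, 1.2], ["Gamma", 0.9, 1.7]]
print(sorted(simulate(toy_teams), reverse=True))  # e.g. [[12.0, 'Alpha'], ...]
```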
|
{
"source": "jesperjoachim/JupyterWorkflow",
"score": 4
}
|
#### File: JupyterWorkflow/jupyter_workflow/data.py
```python
import pandas as pd
import os
from urllib.request import urlretrieve
def get_url_rawdata(name_rawdata_file=None, url=None, force_url_download=False):
"""Download and cache data
PARAMETERS
----------
name_rawdata_file : string
location and name to save the data file
url : string
        location to download the data
force_url_download : boolean (optional)
option to force downloading the raw data
Returns
-------
rawdata : file, type as input in name_rawdata_file (e.g. .csv etc)
"""
if force_url_download or not os.path.exists(name_rawdata_file):
rawdata, _ = urlretrieve(url, name_rawdata_file)
else:
rawdata = name_rawdata_file
return rawdata
def read_csvdata_to_df_with_dateindex(
rawdata_filename, column_names=None, parse_dates=True
):
"""Reading a rawdata file into pandas dataframe
OPTIONINAL
----------
column_names :
Pass a list of column names to substitute the current names (from left to right)
parse_dates :
Transforming the string dates in raw data to pandas DateIndex type
PARAMETERS
----------
name_rawdata_file : string
location and name to save the data file
url : string
loacation to download the data
force_url_download : boolean (optional)
option to force downloading the raw data
Returns
-------
data : pandas.Dataframe
"""
data = pd.read_csv(
rawdata_filename, index_col="Date", parse_dates=parse_dates
) # Note the parse_dates=True 'magically' convert the date format to pandas datetime
if column_names:
data.columns = column_names
return data
# Testing
test_rawdata = get_url_rawdata(
name_rawdata_file="fremont.csv",
url="https://data.seattle.gov/api/views/65db-xm6k/rows.csv?accessType=DOWNLOAD",
)
print(test_rawdata)
# df = read_csvdata_to_df_with_dateindex(
# rawdata_filename=test_rawdata, column_names=["Total", "East", "West"]
# )
# print(df)
```
|
{
"source": "JesperK123456/msafuse",
"score": 3
}
|
#### File: JesperK123456/msafuse/eval.py
```python
import jams
from joblib import Parallel, delayed
import logging
import mir_eval
import numpy as np
import os
import pandas as pd
import six
# Local stuff
import msaf
from msaf.exceptions import NoReferencesError
import msaf.input_output as io
from msaf import utils
def print_results(results):
"""Print all the results.
Parameters
----------
results: pd.DataFrame
Dataframe with all the results
"""
if len(results) == 0:
logging.warning("No results to print!")
return
res = results.mean()
logging.info("Results:\n%s" % res)
def compute_results(ann_inter, est_inter, ann_labels, est_labels, bins,
est_file, weight=0.58):
"""Compute the results using all the available evaluations.
Parameters
----------
ann_inter : np.array
Annotated intervals in seconds.
est_inter : np.array
Estimated intervals in seconds.
ann_labels : np.array
Annotated labels.
est_labels : np.array
Estimated labels
bins : int
Number of bins for the information gain.
est_file : str
Path to the output file to store results.
weight: float
Weight the Precision and Recall values of the hit rate boundaries
differently (<1 will weight Precision higher, >1 will weight Recall
higher).
The default parameter (0.58) is the one proposed in (Nieto et al. 2014)
Return
------
results : dict
Contains the results of all the evaluations for the given file.
Keys are the following:
track_id: Name of the track
HitRate_3F: F-measure of hit rate at 3 seconds
HitRate_3P: Precision of hit rate at 3 seconds
HitRate_3R: Recall of hit rate at 3 seconds
HitRate_0.5F: F-measure of hit rate at 0.5 seconds
HitRate_0.5P: Precision of hit rate at 0.5 seconds
HitRate_0.5R: Recall of hit rate at 0.5 seconds
HitRate_w3F: F-measure of hit rate at 3 seconds weighted
HitRate_w0.5F: F-measure of hit rate at 0.5 seconds weighted
HitRate_wt3F: F-measure of hit rate at 3 seconds weighted and
trimmed
HitRate_wt0.5F: F-measure of hit rate at 0.5 seconds weighted
and trimmed
HitRate_t3F: F-measure of hit rate at 3 seconds (trimmed)
HitRate_t3P: Precision of hit rate at 3 seconds (trimmed)
            HitRate_t3R: Recall of hit rate at 3 seconds (trimmed)
HitRate_t0.5F: F-measure of hit rate at 0.5 seconds (trimmed)
HitRate_t0.5P: Precision of hit rate at 0.5 seconds (trimmed)
HitRate_t0.5R: Recall of hit rate at 0.5 seconds (trimmed)
            DevR2E: Median deviation of reference to estimation
            DevE2R: Median deviation of estimation to reference
            DevtR2E: Median deviation of reference to estimation (trimmed)
            DevtE2R: Median deviation of estimation to reference (trimmed)
D: Information gain
PWF: F-measure of pair-wise frame clustering
PWP: Precision of pair-wise frame clustering
PWR: Recall of pair-wise frame clustering
Sf: F-measure normalized entropy score
So: Oversegmentation normalized entropy score
Su: Undersegmentation normalized entropy score
"""
res = {}
# --Boundaries-- #
# Hit Rate standard
res["HitRate_3P"], res["HitRate_3R"], res["HitRate_3F"] = \
mir_eval.segment.detection(ann_inter, est_inter, window=3, trim=False)
res["HitRate_0.5P"], res["HitRate_0.5R"], res["HitRate_0.5F"] = \
mir_eval.segment.detection(ann_inter, est_inter, window=.5, trim=False)
# Hit rate trimmed
res["HitRate_t3P"], res["HitRate_t3R"], res["HitRate_t3F"] = \
mir_eval.segment.detection(ann_inter, est_inter, window=3, trim=True)
res["HitRate_t0.5P"], res["HitRate_t0.5R"], res["HitRate_t0.5F"] = \
mir_eval.segment.detection(ann_inter, est_inter, window=.5, trim=True)
# Hit rate weighted
_, _, res["HitRate_w3F"] = mir_eval.segment.detection(
ann_inter, est_inter, window=3, trim=False, beta=weight)
_, _, res["HitRate_w0.5F"] = mir_eval.segment.detection(
ann_inter, est_inter, window=.5, trim=False, beta=weight)
# Hit rate weighted and trimmed
_, _, res["HitRate_wt3F"] = mir_eval.segment.detection(
ann_inter, est_inter, window=3, trim=True, beta=weight)
_, _, res["HitRate_wt0.5F"] = mir_eval.segment.detection(
ann_inter, est_inter, window=.5, trim=True, beta=weight)
# Information gain
res["D"] = compute_information_gain(ann_inter, est_inter, est_file,
bins=bins)
# Median Deviations
res["DevR2E"], res["DevE2R"] = mir_eval.segment.deviation(
ann_inter, est_inter, trim=False)
res["DevtR2E"], res["DevtE2R"] = mir_eval.segment.deviation(
ann_inter, est_inter, trim=True)
# --Labels-- #
if est_labels is not None and ("-1" in est_labels or "@" in est_labels):
est_labels = None
if est_labels is not None and len(est_labels) != 0:
# Align labels with intervals
ann_labels = list(ann_labels)
est_labels = list(est_labels)
ann_inter, ann_labels = mir_eval.util.adjust_intervals(ann_inter,
ann_labels)
est_inter, est_labels = mir_eval.util.adjust_intervals(
est_inter, est_labels, t_min=0.0, t_max=ann_inter.max())
# Pair-wise frame clustering
res["PWP"], res["PWR"], res["PWF"] = mir_eval.segment.pairwise(
ann_inter, ann_labels, est_inter, est_labels)
# Normalized Conditional Entropies
res["So"], res["Su"], res["Sf"] = mir_eval.segment.nce(
ann_inter, ann_labels, est_inter, est_labels)
# Names
base = os.path.basename(est_file)
res["track_id"] = base[:-5]
res["ds_name"] = base.split("_")[0]
return res
def compute_gt_results(est_file, ref_file, boundaries_id, labels_id, config,
bins=251, annotator_id=0):
"""Computes the results by using the ground truth dataset identified by
the annotator parameter.
Return
------
results : dict
Dictionary of the results (see function compute_results).
"""
if config["hier"]:
ref_times, ref_labels, ref_levels = \
msaf.io.read_hier_references(
ref_file, annotation_id=annotator_id,
exclude_levels=["segment_salami_function"])
else:
jam = jams.load(ref_file, validate=False)
ann = jam.search(namespace='segment_.*')[annotator_id]
ref_inter, ref_labels = ann.to_interval_values()
# Read estimations with correct configuration
est_inter, est_labels = io.read_estimations(est_file, boundaries_id,
labels_id, **config)
# Compute the results and return
logging.info("Evaluating %s" % os.path.basename(est_file))
if config["hier"]:
# Hierarchical
assert len(est_inter) == len(est_labels), "Same number of levels " \
"are required in the boundaries and labels for the hierarchical " \
"evaluation."
est_times = []
est_labels = []
# Sort based on how many segments per level
est_inter = sorted(est_inter, key=lambda level: len(level))
for inter in est_inter:
est_times.append(msaf.utils.intervals_to_times(inter))
# Add fake labels (hierarchical eval does not use labels --yet--)
est_labels.append(np.ones(len(est_times[-1]) - 1) * -1)
# Align the times
utils.align_end_hierarchies(est_times, ref_times, thres=1)
# To intervals
est_hier = [utils.times_to_intervals(times) for times in est_times]
ref_hier = [utils.times_to_intervals(times) for times in ref_times]
# Compute evaluations
res = {}
res["t_recall10"], res["t_precision10"], res["t_measure10"] = \
mir_eval.hierarchy.tmeasure(ref_hier, est_hier, window=10)
res["t_recall15"], res["t_precision15"], res["t_measure15"] = \
mir_eval.hierarchy.tmeasure(ref_hier, est_hier, window=15)
res["track_id"] = os.path.basename(est_file)[:-5]
return res
else:
# Flat
return compute_results(ref_inter, est_inter, ref_labels, est_labels,
bins, est_file)
def compute_information_gain(ann_inter, est_inter, est_file, bins):
"""Computes the information gain of the est_file from the annotated
intervals and the estimated intervals."""
ann_times = utils.intervals_to_times(ann_inter)
est_times = utils.intervals_to_times(est_inter)
return mir_eval.beat.information_gain(ann_times, est_times, bins=bins)
def process_track(file_struct, boundaries_id, labels_id, config,
annotator_id=0):
"""Processes a single track.
Parameters
----------
file_struct : object (FileStruct) or str
File struct or full path of the audio file to be evaluated.
boundaries_id : str
Identifier of the boundaries algorithm.
labels_id : str
Identifier of the labels algorithm.
config : dict
Configuration of the algorithms to be evaluated.
annotator_id : int
        Number identifying the annotator.
Returns
-------
one_res : dict
Dictionary of the results (see function compute_results).
"""
# Convert to file_struct if string is passed
if isinstance(file_struct, six.string_types):
file_struct = io.FileStruct(file_struct)
est_file = file_struct.est_file
ref_file = file_struct.ref_file
# Sanity check
assert os.path.basename(est_file)[:-4] == \
os.path.basename(ref_file)[:-4], "File names are different %s --- %s" \
% (os.path.basename(est_file)[:-4], os.path.basename(ref_file)[:-4])
if not os.path.isfile(ref_file):
raise NoReferencesError("Reference file %s does not exist. You must "
"have annotated references to run "
"evaluations." % ref_file)
one_res = compute_gt_results(est_file, ref_file, boundaries_id, labels_id,
config, annotator_id=annotator_id)
return one_res
def get_results_file_name(boundaries_id, labels_id, config,
annotator_id):
"""Based on the config and the dataset, get the file name to store the
results."""
utils.ensure_dir(msaf.config.results_dir)
file_name = os.path.join(msaf.config.results_dir, "results")
file_name += "_boundsE%s_labelsE%s" % (boundaries_id, labels_id)
file_name += "_annotatorE%d" % (annotator_id)
sorted_keys = sorted(config.keys(), key=str.lower)
for key in sorted_keys:
file_name += "_%sE%s" % (key, str(config[key]).replace("/", "_"))
# Check for max file length
if len(file_name) > 255 - len(msaf.config.results_ext):
file_name = file_name[:255 - len(msaf.config.results_ext)]
return file_name + msaf.config.results_ext
def process(in_path, boundaries_id=msaf.config.default_bound_id,
labels_id=msaf.config.default_label_id, annot_beats=False,
framesync=False, feature="pcp", hier=False, save=False,
out_file=None, n_jobs=4, annotator_id=0, config=None,
feat_def=None):
"""Main process to evaluate algorithms' results.
Parameters
----------
in_path : str
Path to the dataset root folder.
boundaries_id : str
Boundaries algorithm identifier (e.g. siplca, cnmf)
labels_id : str
Labels algorithm identifier (e.g. siplca, cnmf)
annot_beats : boolean
Whether to use the annotated beats or not.
framesync: str
Whether to use framesync features or not (default: False -> beatsync)
feature: str
String representing the feature to be used (e.g. pcp, mfcc, tonnetz)
hier : bool
Whether to compute a hierarchical or flat segmentation.
save: boolean
Whether to save the results into the `out_file` csv file.
out_file: str
Path to the csv file to save the results (if `None` and `save = True`
it will save the results in the default file name obtained by
calling `get_results_file_name`).
n_jobs: int
Number of processes to run in parallel. Only available in collection
mode.
annotator_id : int
        Number identifying the annotator.
config: dict
Dictionary containing custom configuration parameters for the
algorithms. If None, the default parameters are used.
Return
------
results : pd.DataFrame
DataFrame containing the evaluations for each file.
"""
# Set up configuration based on algorithms parameters
if config is None:
config = io.get_configuration(feature, annot_beats, framesync,
boundaries_id, labels_id, feat_def)
if isinstance(feature, list):
feature.sort()
# Hierarchical segmentation
config["hier"] = hier
# Remove actual features
config.pop("features", None)
# Get out file in case we want to save results
if out_file is None:
out_file = get_results_file_name(boundaries_id, labels_id, config,
annotator_id)
# If out_file already exists, read and return them
if os.path.exists(out_file):
logging.warning("Results already exists, reading from file %s" %
out_file)
results = pd.read_csv(out_file)
print_results(results)
return results
# Perform actual evaluations
if os.path.isfile(in_path):
# Single File mode
evals = [process_track(in_path, boundaries_id, labels_id, config,
annotator_id=annotator_id)]
else:
# Collection mode
# Get files
file_structs = io.get_dataset_files(in_path)
# Evaluate in parallel
logging.info("Evaluating %d tracks..." % len(file_structs))
evals = Parallel(n_jobs=n_jobs)(delayed(process_track)(
file_struct, boundaries_id, labels_id, config,
annotator_id=annotator_id) for file_struct in file_structs[:])
# Aggregate evaluations in pandas format
results = pd.DataFrame()
for e in evals:
if e != []:
results = results.append(e, ignore_index=True)
logging.info("%d tracks analyzed" % len(results))
# Print results
print_results(results)
# Save all results
if save:
logging.info("Writing results in %s" % out_file)
results.to_csv(out_file)
return results
```
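A typical invocation of this module might look like the sketch below; the dataset path is hypothetical, and `sf`/`fmc2d` are standard msaf boundary/label algorithm identifiers.
```python
# Evaluate a whole dataset folder and save the aggregated scores to csv.
results = process("datasets/MyCollection", boundaries_id="sf",
                  labels_id="fmc2d", feature="pcp", save=True, n_jobs=4)
print_results(results)
```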
|
{
"source": "JesperK123456/spotifyy",
"score": 3
}
|
#### File: spotifyy/spotifyy/extensions.py
```python
import os
import os.path
import sys
import time
import pickle
from .sessions import SpotifySession
from programy.extensions.base import Extension
from programy.utils.logging.ylogger import YLogger
from spotipy.client import SpotifyException
class SpotifyExtension(Extension):
spotify_exception_handlers = {
"Player command failed: No active device found" : "NODEVICE",
"Player command failed: Restriction violated" : "RESTRICTIONVIOLATED",
"invalid request" : "INVALIDREQUEST",
"The access token expired" : "TOKENEXPIRED"
}
def __init__(self):
if os.path.isfile('session.pickle'):
with open('session.pickle', 'rb') as file:
self.session = pickle.load(file)
else:
self.session = SpotifySession()
def execute(self, context, data):
params = [x.strip().lower() for x in data.split(',')]
YLogger.debug(context, f"Received command {' '.join(params)}")
if params[0] == "close":
os.remove('session.pickle')
return "EXIT"
retry = False
if params[0] == "retry":
retry = True
params = params[1:]
if ((not self.session.is_logged_in) or self.session.is_token_expired()) and params[0] != "login":
if os.path.isfile('uname.txt'):
self.session.login()
else:
return "LOGINFIRST"
cmd = 'self.session.' + params[0] + '(' + ','.join(params[1:]) + ')'
succeeded = False
try:
result = eval(cmd)
succeeded = True
except SpotifyException as ex:
YLogger.exception(context, "Spotify exception raised", ex)
message = str(ex).splitlines()[1].strip()
#YLogger.exception_nostack(context, "Spotify exception raised", message)
if message == "Device not found":
self.session.reset_device()
return self.execute(context, data)
code = self.spotify_exception_handlers.get(message, "GENERIC")
fail = f"PYFAIL SPOTIFYY {code} {params[0].replace('_', '').upper()}"
except (NameError, AttributeError, SyntaxError) as ex:
YLogger.exception(context, f"Couldn't evaluate command {cmd}", ex)
fail = f"PYFAIL INVALIDCOMMAND {params[0].replace('_', '').upper()}"
except Exception as ex:
YLogger.exception(context, f"Spotify extension fail with {cmd}", ex)
fail = "PYFAIL ERROR"
with open('session.pickle', 'wb') as file:
pickle.dump(self.session, file, pickle.HIGHEST_PROTOCOL)
if retry:
if os.path.isfile('latestfail.txt'):
with open('latestfail.txt', 'r') as file:
cmd = file.read()
os.remove('latestfail.txt')
try:
return eval(cmd)
except:
pass
if succeeded:
return result
else:
with open("latestfail.txt", 'w') as file:
file.write(cmd)
return fail
```
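To make the dispatch above concrete (a hypothetical illustration; in practice the command string comes from the AIML layer):
```python
# "set_volume, 50" is split on commas and rebuilt into a method call expression.
data = "set_volume, 50"
params = [x.strip().lower() for x in data.split(',')]
cmd = 'self.session.' + params[0] + '(' + ','.join(params[1:]) + ')'
assert cmd == "self.session.set_volume(50)"
```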
#### File: spotifyy/spotifyy/sessions.py
```python
import os
import sys
import time
import random
import datetime
import spotipy
import spotipy.util as util
from spotipy.oauth2 import SpotifyClientCredentials
from programy.utils.logging.ylogger import YLogger
class SpotifySession():
########
# INIT #
########
def __init__(self):
self._sp = None
self._token_time = None
self._test = 0
self._query_limit = 20
self._query_results = None
self._query_index = None
self._query_kind = None
self._query_buffer = str()
self._query_nresults = None
self._query_page = None
self.is_logged_in = False
self._username = None
self._device = None
self._device_id = None
self._available_devices = None
########
# HELP #
########
def help(self):
print("Hello I am SpotBot.")
print("I am able to have a conversation with you and control your Spotify.")
print("In all the help functions I will refer to commands you can give to me between brackets, like [pause].")
print("")
print("For me to work optimally I want you to be a friendly person and always speak in multiple words, except when I ask very specific questions like yes/no-questions.")
print("If I still don't understand you, you may need to be more specific in your answer, e.g.:")
print("me: Do you have a specific genre in mind?")
print("you: [The genre is called [name of the genre]]")
print("")
print("You can use [what can I do with [name of topic]] for certain topics to get more information about that topic. I can help you with the following topics:")
print("-[functions]: This shows you information about most of my commands.")
print("-[playback]: This shows you information about my playback functions.")
print("-[volume]: This shows you information about my volume functions.")
print("-[shuffle]: This shows my shuffle options.")
print("-[repeat]: This shows my repeat options.")
print("-[current]: This shows my functions regarding your current playback.")
print("-[find]: This shows information about how to use my find function.")
print("-[play]: This shows information about how to use my play function.")
print("-[enqueue]: This shows information about how to enqueue items to your queue.")
print("-[device] This shows information about how to select your playback device. This however does not work always as good as I want but I blame Spotify for this.")
print("")
print("I can understand many versions of everything you tell me, but bear with me if I do not understand you right away.")
print("")
print("Before you can use my full capabilities you first need to login.")
def help_login(self):
print("I you have not logged in yet, you can either type in \"login\",")
print("I will then ask for you Spotify username with which you can reply: \"my username is [username]\" or \"my spotify username is [username]\".")
print("Or you spare yourself some typing and type: \"login [spotify username]\".")
print("If the username provided is already registered with me I will take care of the rest.")
print("If not, I will direct you to your browser in which you need to give me permission to control your Spotify.")
print("When you have given the permission, you will be directed to a webpage that does not seem to work, however I only need the link of that page.")
print("You will be prompted to give me an url, it is this url you need to give.")
print("When this is done, all should be ok and I will be able to control your Spotify for you.")
def help_functions(self):
print("My main aim is to hold a conversation with you to assist you with Spotify.")
print("If you are logged in I can control your Spotify. I can...")
print("- [play] / [resume] / [pause] your Spotify.")
print("- [skip] to the [next track].")
print("- Go back to the [previous track].")
print("- [rewind] to the start of the track.")
print("- Turn [shuffle on] and [shuffle off].")
print("- Set [repeat] to track, context or off.")
print("- Ask for the [currently playing] track and ask [is current track saved].")
print("- You can then [add current to saved] or [remove current from saved]")
print("- I can [find track [name of track]], find you an album, artist or playlist in the same way.")
print("- I can also [play track [name of track]], or play an album, artist or playlist in the same way too.")
print("- You can also [enqueue [name of track]], however this only works with tracks.")
print("- Setting your playback device is also possible, but Spotify does not quite like that sometime.")
print(" This can be done by asking your [current device] or [select device]")
print("")
print("Each time I show you a list of items you can type [item [item number]] to select an item.")
print("You can often ask for [more items].")
print("You can also [query nextpage] or [query prevpage] to navigate the pages.")
print("")
print("Since I am a very sentimental bot, you can also talk about your feelings with me.")
print("I can also make you recomendations, this can be done in several ways.")
print("[can you make me a recommendation] is my preferred way of giving you recommendations.")
print("Or you can ask [play me a recommendation] if you want to listen to a recommendation right away.")
def help_play_find(self):
print("You can use the [play [query]] function to immediately play the top result found with your provided [query].")
print("You can use the [find [query]] function to let me find you some results for the given query.")
print("You will then able to pick the item you want using [item [item_number]]")
print("I default to finding tracks unless you specify this earlier.")
print("You can specify this using [play [item_type] [query]], [find [item_type] [query]], or [set type [item_type]]")
print("The possible item types are:")
print("- track")
print("- album")
print("- artist")
print("- playlist")
print("You can also enqueue items by typing [enqueue item [name of the item]], however this only works with tracks.")
#########
# LOGIN #
#########
def login(self, uname=""):
'''
Used to login a user into the Spotify API.
Parameters:
- uname - User name to log in, if not specified this function tries to login an already saved username.
Returns:
- "PYFAIL LOGIN NONAME" - No username was provided and no username was saved.
- "PYFAIL LOGIN" - Something went wrong trying to log the user in.
- "PYOK LOGIN" - User is logged in successfully.
'''
try:
if len(uname) == 0:
try:
with open('uname.txt', 'r') as file:
uname=file.read()
except FileNotFoundError:
return "PYFAIL LOGIN NONAME"
            if self.is_logged_in and self._username == uname:
return "PYOK LOGIN"
token = util.prompt_for_user_token( username=uname
, scope='ugc-image-upload user-read-playback-state user-modify-playback-state user-read-currently-playing streaming app-remote-control playlist-read-collaborative playlist-modify-public playlist-read-private playlist-modify-private user-library-modify user-library-read user-top-read user-read-playback-position user-read-recently-played user-follow-read user-follow-modify'
, client_id='b1559f5b27ff48a09d34e95c68c4a95d'
, client_secret='5c17274ad83843259af8bd4e62b4a354'
, redirect_uri='http://localhost/'
)
            if token:
                self._sp = spotipy.Spotify(auth=token)
                self._token_time = datetime.datetime.now()
                self._username = uname
                self.is_logged_in = True
                with open("uname.txt", "w") as file:
                    file.write(uname)
                return "PYOK LOGIN"
else:
return "PYFAIL LOGIN The authentication procedure was interrupted"
except Exception as fail:
return "PYFAIL LOGIN"
def logout(self, uname, all=0):
'''
Used to logout a user from the Spotify API.
Parameters:
- uname - The username to logout.
        - all - 0 to just remove the user locally, 1 to also open Spotify in the browser so the user can remove SpotBot as an authorized app.
Returns:
- "PYFAIL" - Something went wrong.
- "PYOK LOGOUT" - User was logged out successfully.
'''
saved_uname = "_"
try:
with open('uname.txt', 'r') as file:
saved_uname = file.read()
except:
pass
if (uname == saved_uname):
try:
os.remove('uname.txt')
os.remove('.cache-' + uname.strip())
except Exception as fail:
return "PYFAIL " + fail
if (all == 1):
try:
import webbrowser
webbrowser.open("https://www.spotify.com/nl/account/apps/")
print("Login on the user you want to remove, and remove me from the authorized apps.")
return "PYOK LOGOUT ALL"
except Exception as fail:
return "PYFAIL LOGOUT BROWSER https://www.spotify.com/nl/account/apps/"
return "PYOK LOGOUT"
else:
return "PYOK LOGOUT ALREADYLOGGEDOUT"
def is_token_expired(self):
'''
Checks if the current token used to interact with the Spotify is still useful.
(A token expires after 3600 seconds)
Returns:
- True - Token is expired.
- False - Token is still valid.
'''
        res = (datetime.datetime.now() - self._token_time).total_seconds() >= 3600
if res:
self.is_logged_in = False
return res
##############################
# DEVICE CONTROL & SELECTION #
##############################
def current_device(self):
'''
Returns:
- "PYOK CURRDEVICE NONE" - No current device detected.
- "PYOK CURRDEVICE [device_name]" - Current device detected and the name of it.
'''
if not self._device:
return "PYOK CURRDEVICE NONE"
return f"PYOK CURRDEVICE {self._device['name']}"
def refresh_print_devices(self):
'''
Refreshes the available playback devices and prints them.
Returns:
- "PYOK DEVICESREFRESH [number of availble devices]" - Devices refreshed successfully and the number of currently available devices.
'''
self._available_devices = self._sp.devices()['devices']
n = len(self._available_devices)
if n > 1:
print('\n'.join([f"{index+1}: {x['name']} ({x['type']})" for (index, x) in enumerate(self._available_devices)]))
return f"PYOK DEVICESREFRESH {n}"
def set_device(self, index=-1):
'''
Used to set the playback device for Spotify.
Parameters:
        - index - The index of the available playback devices to set as active.
Returns:
- "PYOK SETDEVICE NONE" - Current active device set to no device.
- "PYFAIL SETDEVICE OUTOFRANGE [number of available devices]" - The item to select is not in the rang of the amount of available devices.
- "PYOK SETDEVICE [device name]" - The currently active playback device is successfully set and its name.
'''
if index < 0:
self._device = None
self._device_id = None
return "PYOK SETDEVICE NONE"
index-=1 #Humans are 1-based
self._available_devices = self._sp.devices()['devices']
if index >= len(self._available_devices):
return f"PYFAIL SETDEVICE OUTOFRANGE {len(self._available_devices)}"
else:
self._device = self._available_devices[index]
self._device_id = self._device['id']
return f"PYOK SETDEVICE {self._device['name']}"
def reset_device(self):
'''
Sets the playback device to the default.
Returns:
- The return value of set_device() -
'''
return self.set_device()
#########################
# BASIC SPOTIFY CONTROL #
#########################
def play(self):
'''
Resumes the Spotify playback.
Returns:
- "PYOK PLAYB" - Playback successfully changed.
'''
self._sp.start_playback(self._device_id)
return "PYOK PLAYB"
def pause(self):
'''
Pauses the Spotify playback.
Returns:
- "PYOK PAUSE" - Playback successfully paused.
'''
self._sp.pause_playback(self._device_id)
return "PYOK PAUSE"
def next_track(self):
'''
Sets the Spotify playback to the next track.
Returns:
- "PYOK PLAYB" - Playback successfully changed.
'''
self._sp.next_track(self._device_id)
return "PYOK PLAYB"
def prev_track(self):
'''
Sets the Spotify playback to the previous track.
Returns:
- "PYOK PLAYB" - Playback successfully changed.
'''
self._sp.previous_track(self._device_id)
return "PYOK PLAYB"
def rewind(self):
'''
Sets the Spotify playback to the start of the current playing track.
Returns:
- "PYOK PLAYB" - Playback successfully changed.
'''
self._sp.seek_track(0)
return "PYOK PLAYB"
def shuffle(self, state):
'''
        Sets the shuffle state of Spotify to a new state.
Parameters:
- state - "on" or "off"
Returns:
- "PYOK SHUFFLE [new state]" - Shuffle state successfully changed to new state and the new state name.
'''
stb = state == "on"
self._sp.shuffle(stb, self._device_id)
return "PYOK SHUFFLE " + state.upper()
def repeat(self, state):
'''
Sets the repeat state of Spotify to a new state.
Parameters:
- state - "track", "context" or "off"
Returns:
- "PYOK PLAYB" - Playback successfully changed.
'''
self._sp.repeat(state, self._device_id)
return "PYOK REPEAT " + state.upper()
def current_volume(self):
'''
Returns the current volume.
Returns:
- "PYOK PLAYB" - Playback successfully changed.
'''
volume = self._sp.current_playback()['device']['volume_percent']
return "PYOK VOLUME " + str(volume)
def change_volume(self, increase=0, step=10):
'''
Used to change the Spotify playback volume with a certain value.
Parameters:
- increase - 0 to decrease the volume, 1 to increase the volume.
- step - percentage (in full percent or as floating point value between 0-1) to de- or increase the volume with.
Returns:
- "PYOK VOLUME [new volume value]" - Volume successfully changed and the new volume percentage.
'''
        step = float(str(step).strip())
if 0 < step < 1:
step = step * 100
if step > 100:
step = 100
elif step < 0:
step = 0
curr_playback = self._sp.current_playback()
new_volume = curr_playback['device']['volume_percent']
new_volume = new_volume - step if increase == 0 else new_volume + step
if new_volume > 100:
new_volume = 100
elif new_volume < 0:
new_volume = 0
return(self.set_volume(new_volume))
def set_volume(self, volume):
'''
Used to set the Spotify playback volume to a certain value.
Parameters:
- volume - The new volume percentage (in full percent or as floating point value between 0-1).
Returns:
- "PYOK VOLUME [new volume value]" - Volume successfully set and the new volume percentage.
'''
        try:
            volume = float(volume)
        except (TypeError, ValueError):
            return "PYFAIL INVALID VOLUME"
if 0 < volume < 1:
volume = volume * 100
if volume > 100:
volume = 100
elif volume < 0:
volume = 0
volume = int(volume)
self._sp.volume(volume, self._device_id)
return "PYOK VOLUME " + str(volume)
###################################
# CURRENT PLAYBACK & SAVED TRACKS #
###################################
def current_playback(self):
'''
Gets the currently playing track on Spotify.
Returns:
- "PYOK CURRPLAYB [name of track] by [name of artist]" -
'''
playing = self._sp.current_playback()
name = playing['item']['name']
artist = playing['item']['artists'][0]['name']
return "PYOK CURRPLAYB " + name + " by " + artist
def is_curr_on_saved(self):
'''
        Gets whether the currently playing track is saved in the user's Spotify Music Library.
        Returns:
        - "PYOK ISONSAVED YES" - The currently playing track is saved in the user's Spotify Music Library.
        - "PYOK ISONSAVED NO" - The currently playing track is not saved in the user's Spotify Music Library.
        '''
        curr_track = self._sp.current_playback()
        # current_user_saved_tracks_contains returns a list of booleans, one per track queried.
        is_on_saved = self._sp.current_user_saved_tracks_contains([curr_track['item']['uri']])
        if is_on_saved[0]:
return "PYOK ISONSAVED YES"
else:
return "PYOK ISONSAVED NO"
def add_curr_to_saved(self):
'''
        Adds the currently playing track to the user's Spotify Music Library.
        Returns:
        - "PYOK ADDTOSAVED [name of track] by [name of artist]" - The currently playing track was successfully added to the user's Spotify Music Library.
'''
curr_track = self._sp.current_playback()
self._sp.current_user_saved_tracks_add([curr_track['item']['uri']])
return "PYOK ADDTOSAVED " + curr_track['item']['name'] + " by " + curr_track['item']['artists'][0]['name']
def remove_curr_from_saved(self):
'''
        Removes the currently playing track from the user's Spotify Music Library.
        Returns:
        - "PYOK REMOVEFROMSAVED [name of track] by [name of artist]" - The currently playing track was successfully removed from the user's Spotify Music Library.
'''
curr_track = self._sp.current_playback()
self._sp.current_user_saved_tracks_delete([curr_track['item']['uri']])
return "PYOK REMOVEFROMSAVED " + curr_track['item']['name'] + " by " + curr_track['item']['artists'][0]['name']
######################
# FIND & PLAY TRACKS #
######################
def play_from_query(self, index=-1):
'''
Plays an item from the currently saved query results.
To prevent the Spotify of stopping playback after playing one track,
this function enqueues a track to the users queue and immediately skips to it.
If the index is greater than the amount of available items, the playback is just set to the next track.
Parameters:
- index - The index of the item to play.
Returns:
- "PYOK PLAY [name of item] by [name of owner of item]" - successfully started playback of the requested item.
'''
if index > 0:
self._query_index = int(index) - 1 # Humans are 1-based.
if self._query_index >= self._query_nresults:
return self.next_track()
if self._query_kind == "track":
self._sp.add_to_queue(device_id=self._device_id, uri=self._query_results[self._query_index]['uri'])
self.next_track()
else:
self._sp.start_playback(device_id=self._device_id, context_uri=self._query_results[self._query_index]['uri'])
name = self._query_results[self._query_index]['name']
if self._query_kind == "artist":
by = " "
elif self._query_kind == "playlist":
by = f", owned by {self._query_results[self._query_index]['owner']['id']}"
else:
by = f" by {self._query_results[self._query_index]['artists'][0]['name']}"
return "PYOK PLAY " + name + by
def enqueue_from_query(self, index=-1, play=0):
'''
Adds an item to the playback queue of the user.
Parameters:
- index - The index of the track to enqueue.
- play - 0 to just enqueue the track, 1 to immediately start playing this track.
Returns:
- "PYOK ENQUEUE [name of track] by [name of artist]" - successfully enqueued the track.
- "PYOK PLAY [name of track] by [name of artist]" - successfully started playback of the track.
!!! This function can only enqueue tracks due to the Spotify API !!!
'''
if self._query_kind != "track":
return "PYFAIL ENQUEUE INVALIDTYPE"
if index > 0:
self._query_index = int(index) - 1
track = self._query_results[self._query_index]
self._sp.add_to_queue(device_id=self._device_id, uri=track['uri'])
name = track['name']
artist = track['artists'][0]['name']
if play:
self.next_track()
return "PYOK PLAY " + name + " by " + artist
return "PYOK ENQUEUE " + name + " by " + artist
def play_next_from_query(self):
'''
Plays the next item from the currently saved query results.
Returns:
- The return value of play_from_query() -
'''
return self.play_from_query(index=self._query_index+2)
def find(self, query, kind, offset=0, limit=10, play=0, enqueue=0):
'''
Uses Spotify's search function to find items from a query.
A kind is needed for the search function to specify the kind of items to search for.
Find can either return a list of the results, enqueue the result (if it is a track), or immediately start playing the results.
Parameters:
- query - The search query.
- kind - The type of item to search for. "track", "album", "artist" or "playlist".
- offset - The index of the first item to return.
- limit - The amount of items to search for.
- play - 0 to do nothing, 1 to immediately start playing the first result.
        - enqueue - 0 to do nothing, 1 to enqueue the top result (if it is a track).
Returns:
- "PYFAIL FIND INVALIDTYPE [provided type]" - Provided type is not a valid item type.
- "PYFAIL FIND NORESULTS" - No results were found.
- "PYOK FIND [number of results found]" -
        - A return value of play_from_query() - If play == 1.
        - A return value of enqueue_from_query() - If enqueue == 1.
'''
kind = kind.strip()
if not (kind in ["track", "album", "artist", "playlist"]):
return "PYFAIL FIND INVALIDTYPE " + kind
self._query_limit = limit
self._query_index = 0
self._query_kind = kind
self._query = query
self._offset = offset
self._query_page = 0
q = self._sp.search(query, type=kind, limit=self._query_limit)
self._query_results = q[kind+'s']['items']
self._query_nresults = len(self._query_results)
if self._query_nresults == 0:
return "PYFAIL FIND NORESULTS"
if play and kind == "track":
return self.enqueue_from_query(play=1)
elif play:
return self.play_from_query()
elif enqueue:
return self.enqueue_from_query()
else:
return "PYOK FIND " + str(self._query_nresults)
def print_query_result(self, page=-1):
'''
Prints 5 query results, the indices of these results are [page*5:(page+1)*5]
Parameters:
- page - Which 'page' of results to show. If not provided the last printed page will be printed again.
Returns:
- "PYOK NOMORERESULTS" - No more results to show on the provided page.
- "PYOK PRINTRESULTS" - All results from provided page are printed.
'''
if page == -1:
page = self._query_page
start = page*5
end = start + 5
if end > self._query_nresults:
end = self._query_nresults
if start == end:
return "PYOK NOMORERESULTS"
if self._query_kind == "playlist":
print('\n'.join([str((index+1)+(page*5)) + ': ' + x['name'] + ", owned by " + x['owner']['display_name']
for (index, x) in enumerate(self._query_results[start:end])]))
elif self._query_kind == "artist":
print('\n'.join([str((index+1)+(page*5)) + ': ' + x['name']
for (index, x) in enumerate(self._query_results[start:end])]))
else:
print('\n'.join([str((index+1)+(page*5)) + ': ' + x['name'] + " by " + x['artists'][0]['name']
for (index, x) in enumerate(self._query_results[start:end])]))
return "PYOK PRINTRESULTS"
def print_next_query_page(self):
'''
Prints the next page of results.
Returns:
- A return value of print_query_result() -
'''
self._query_page += 1
return self.print_query_result()
def print_prev_query_page(self):
'''
Prints the previous page of results.
Returns:
- A return value of print_query_result() -
'''
self._query_page -= 1
if self._query_page < 0:
self._query_page = 0
return "PYFAIL NEGATIVEPAGE"
return self.print_query_result()
#####################
# EMOTION FUNCTIONS #
#####################
def calm_down(self):
'''
        Called when the user needs to calm down because they used profanity or other bad language.
        To cool the user down, a random song from the chill playlist is played.
        Returns:
        - "PYOK COOLDOWN" - Playback of a calming song was successful.
'''
self._sp.start_playback(context_uri='spotify:playlist:37i9dQZF1DWSf2RDTDayIx')
self.shuffle("on")
self.next_track()
return "PYOK COOLDOWN"
def play_track_emotion(self, emotion):
'''
Takes as input a string from one of the emotions.
Parameters:
- emotion - The emotion of the user, used to pick a suitable playlist.
Returns:
- "PYFAIL EMOTION" - Emotion is not a valid emotion.
- "PYOK EMOTION" - Emotion was found and the playback of a suitable playlist has been started.
'''
emotion = str(emotion).upper()
emotion_list = ["HAPPY", "SAD", "RELAX", "ANGRY", "SLEEP", "ENERGETIC", "STUDY", "PARTY", "CHILL", "LOVESICK", "HOLIDAY", "ROADTRIP" ]
if emotion not in emotion_list:
return "PYFAIL EMOTION"
else:
options = {"HAPPY" : ("self._sp.start_playback(self._device_id, 'spotify:playlist:37i9dQZF1DWSf2RDTDayIx')", "What do you think of this track?")
,"SAD" : ("self._sp.start_playback(self._device_id, 'spotify:playlist:54ozEbxQMa0OeozoSoRvcL')", "What do you think of this track?")
,"RELAX" : ("self._sp.start_playback(self._device_id, 'spotify:playlist:0RD0iLzkUjrjFKNUHu2cle')", "What do you think of this track?")
,"ANGRY" : ("self._sp.start_playback(self._device_id, 'spotify:playlist:6ft4ijUITtTeVC0dUCDdvH')", "What do you think of this track?")
,"SLEEP" : ("self._sp.start_playback(self._device_id, 'spotify:playlist:37i9dQZF1DWStLt4f1zJ6I')", "What do you think of this track?")
,"ENERGETIC" : ("self._sp.start_playback(self._device_id, 'spotify:playlist:0gFLYrJoh1tLxJvlKcd5Lv')", "What do you think of this track?")
,"STUDY" : ("self._sp.start_playback(self._device_id, 'spotify:playlist:37i9dQZF1DX9sIqqvKsjG8')", "What do you think of this track?")
,"PARTY" : ("self._sp.start_playback(self._device_id, 'spotify:playlist:37i9dQZF1DX0IlCGIUGBsA')", "What do you think of this track?")
,"CHILL" : ("self._sp.start_playback(self._device_id, 'spotify:playlist:37i9dQZF1DX4WYpdgoIcn6')", "What do you think of this track?")
,"LOVESICK" : ("self._sp.start_playback(self._device_id, 'spotify:playlist:6dm9jZ2p8iGGTLre7nY4hf')", "What do you think of this track?")
,"HOLIDAY" : ("self._sp.start_playback(self._device_id, 'spotify:playlist:1KFOvnwqjeCpYTSC91wM4U')", "What do you think of this track?")
, "ROADTRIP": ("self._sp.start_playback(self._device_id, 'spotify:playlist:27LXgC5xD1s1vpB7E0pA3W')", "What do you think of this track?")
}
cmd, mess = options[emotion]
exec(cmd)
self.shuffle("on")
self.next_track()
return "PYOK EMOTION"
def play_track_positivity(self, score):
'''
Play a suitable track based on the postivity score measured by the sentiment module in the NLTK module.
Parameters:
- score - The positivity score of the conversation with the user.
Returns:
- "PYOK POSITIVITY" - A suitable playlist was found and the playback has been started.
'''
score = float(score)
        if score < -0.9:
            mood = 'EXTREMELY NEGATIVE'
        elif score <= -0.7:
            mood = 'VERY NEGATIVE'
        elif score <= -0.5:
            mood = 'QUITE NEGATIVE'
        elif score <= -0.3:
            mood = 'NEGATIVE'
        elif score <= -0.1:
            mood = 'SOMEWHAT NEGATIVE'
        elif score <= 0.1:
            mood = 'NEUTRAL'
        elif score <= 0.3:
            mood = 'SOMEWHAT POSITIVE'
        elif score <= 0.5:
            mood = 'POSITIVE'
        elif score <= 0.7:
            mood = 'QUITE POSITIVE'
        elif score <= 0.9:
            mood = 'VERY POSITIVE'
        else:
            mood = 'EXTREMELY POSITIVE'
print('You seem {}'.format(mood))
if score < -0.1:
self._sp.shuffle(True, device_id=None)
self._sp.start_playback(self._device_id, 'spotify:playlist:7HCXp5mTEkbwb9hYq2JTmO') # starts playing a track from a negative playlist
print('This is a track from a Sad-playlist')
return "PYOK POSITIVITY"
elif score > 0.1:
self._sp.shuffle(True, device_id=None)
self._sp.start_playback(self._device_id, 'spotify:playlist:37i9dQZF1DWUAZoWydCivZ') # starts playing a track from a positive
print('This is a track from a Positive-playlist')
return "PYOK POSITIVITY"
else:
self._sp.shuffle(True, device_id=None)
self._sp.start_playback(self._device_id, 'spotify:playlist:0RD0iLzkUjrjFKNUHu2cle') # starts playing a track from the Relax playlist
print('This is a track from a Relax-playlist')
return "PYOK POSITIVITY"
###################
# RECOMMENDATIONS #
###################
def recommend(self, query="", kind="track", limit=20, play=0):
'''
Finds recommended tracks based on the query and kind provided.
It can either print the results to let the user pick a track or start playing the results immediately.
Parameters:
- query - A 'track', an 'artist' or a 'playlist' to use as reference for the recommendation function.
- kind - The type of query that is given.
- limit - The amount of items to recommend.
- play - 0 to print the results. 1 to start playing them immediately.
Returns:
- "PYFAIL RECOMMEND INVALIDTYPE" - The type of recommendation reference is not a valid type.
- "PYFAIL RECOMMEND NORESULTS" - No recommendations could be found. Make sure the reference is not too specific.
- "PYOK PLAY [name of track] by [name of artist]" - Playback of first recommendation has been started successfully.
- "PYOK FIND [number of recommendations]" - Recommendations have been successfully found and the number of recommendations.
'''
kind = kind.strip()
if not (kind in ["track", "artist", "genre"]):
return "PYFAIL RECOMMEND INVALIDTYPE " + kind
self._query_limit = limit
self._query_index = 0
self._query_kind = "track"
self._query = ""
self._offset = 0
self._query_page = 0
if query == "":
top_tracks = self._sp.current_user_top_tracks(limit=5, time_range="short_term")
tracks = self._sp.recommendations(seed_tracks=[x['uri'] for x in top_tracks['items'][:5]], limit=self._query_limit)
elif kind == "artist":
found_artist = self._sp.search(query.strip(), limit=1, type="artist")
if len(found_artist['artists']['items']) > 0:
tracks = self._sp.recommendations(seed_artists=[found_artist['artists']['items'][0]['id']], limit=self._query_limit)
else:
return "PYFAIL RECOMMEND NORESULTS"
elif kind == "genre":
possible_genres = self._sp.recommendation_genre_seeds()['genres']
if query.strip() in possible_genres:
tracks = self._sp.recommendations(seed_genres=[query.strip()], limit=self._query_limit)
else:
return "PYFAIL RECOMMEND GENREUNKNOWN"
elif kind == "track":
found_track = self._sp.search(query.strip(), limit=1, type="track")
if len(found_track['tracks']['items']) > 0:
tracks = self._sp.recommendations(seed_tracks=[found_track['tracks']['items'][0]['id']], limit=self._query_limit)
else:
return "PYFAIL RECOMMEND NORESULTS"
else:
return "PYFAIL RECOMMEND NORESULTS"
self._query_results = tracks['tracks']
self._query_nresults = len(self._query_results)
if self._query_nresults == 0:
return "PYFAIL RECOMMEND NORESULTS"
elif play:
self._sp.start_playback(self._device_id, uris=[x['uri'] for x in self._query_results])
return "PYOK PLAY " + self._query_results[0]['name'] + " by " + self._query_results[0]['artists'][0]['name']
else:
return "PYOK FIND " + str(self._query_nresults)
def get_recommended_artists(self, ref_artist, play=0):
'''
        This function returns a list of recommended artists for a certain artist.
        These artists correspond to the artists shown in "Fans Also Like" when viewing an artist in Spotify.
        Most of the time this is 20 artists.
A random related artist can be played immediately or all results can be returned.
Parameters:
- ref_artist - The reference artist to get related artists from.
- play - 0 to print results, 1 to immediately start playing a random related artist.
Returns:
- "PYFAIL RECOMMENDARTIST ARTISTNOTFOUND [name of reference artist]" - The reference artist could not be found.
- "PYFAIL RECOMMENDARTIST NORELATEDFOUND [name of reference artist]" - No related artists could be found for this reference artist.
- "PYOK FIND [number of results]" - Related artists have been found successfully and the number of results.
- Return value from play_from_query() - When play == 1.
'''
ref_artist = str(ref_artist).strip()
if ref_artist == "":
return "PYFAIL RECOMMENDARTIST ARTISTNOTFOUND" + ref_artist
found_artist = self._sp.search(ref_artist, limit=1, type="artist")
        if len(found_artist['artists']['items']) == 0:
return "PYFAIL RECOMMENDARTIST ARTISTNOTFOUND " + ref_artist
self._query_index = 0
self._query_kind = "artist"
self._query = ""
self._offset = 0
self._query_page = 0
found_artist = found_artist['artists']['items'][0]
related_artists = self._sp.artist_related_artists(found_artist['id'])
if len(related_artists['artists']) == 0:
return "PYFAIL RECOMMENDARTIST NORELATEDFOUND " + ref_artist
        random.shuffle(related_artists['artists'])
self._query_results = related_artists['artists']
self._query_nresults = len(self._query_results)
if play:
return self.play_from_query()
else:
return "PYOK FIND " + str(self._query_nresults)
########
# TEST #
########
def test(self):
'''
For testing purposes only
!!! DO NOT USE UNLESS YOU KNOW WHAT YOU ARE DOING !!!
!!! ALSO, DO NOT DELETE, THIS WILL BREAK THE CODE !!!
'''
return "PYOK"
```
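A minimal interactive sketch of the session API above (assuming valid Spotify credentials and an active playback device):
```python
session = SpotifySession()
print(session.login("my_spotify_username"))        # "PYOK LOGIN" on success
print(session.find("bohemian rhapsody", "track"))  # e.g. "PYOK FIND 10"
session.print_query_result()                       # list the first page of hits
print(session.play_from_query(1))                  # play the first result
```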
|
{
"source": "JesperKauppinen/DefectiveDickDriver",
"score": 3
}
|
#### File: JesperKauppinen/DefectiveDickDriver/menu.py
```python
from ursina import *
import json
class Menu:
entities = []
def __init__(self, player, player_car):
self.player = player
self.player_car = player_car
self.e = self.entities.append
self.show_main_menu()
@classmethod
def clear_menu(cls):
while cls.entities:
destroy(cls.entities.pop(), delay=0)
def show_main_menu(self):
camera.rotation = Vec3(0, 0, 0)
camera.position = Vec3(0, 0, -20)
self.clear_menu()
main_menu = Entity(scale=Vec2(12, 12),
billboard=True,
position=self.player.position)
self.e(main_menu)
self.e(Entity(parent=main_menu, model="plane", color=color.gray, scale=10, rotation=(90, 90, 90),
position=(2, 2, 2)))
self.e(Text(parent=main_menu, position=(-.2, .5), text="Defective:", scale=4.4))
self.e(Text(parent=main_menu, position=(-.1, .4), text="Dick", scale=4.4*.67))
self.e(Text(parent=main_menu, position=(-.03, .33), text="Driver", scale=4.4*.67))
self.e(Button(parent=main_menu, text='Play', color=color.black10, scale=(0.5, 0.08),
position=(0, 0.1), on_click=self.player_car.pause, tooltip=Tooltip('to game')))
self.e(Button(parent=main_menu, text='Scoreboard', color=color.black10, scale=(0.5, 0.08),
position=(0, 0), on_click=self.show_scoreboard_menu, tooltip=Tooltip('Show High Scores')))
self.e(Button(parent=main_menu, text='Controls', color=color.black10, scale=(0.5, 0.08),
position=(0, -0.1), on_click=self.show_keyboard_menu, tooltip=Tooltip('Show Controls')))
self.e(Button(parent=main_menu, text='Quit!', color=color.black10, scale=(0.5, 0.08),
position=(0, -0.2), on_click=application.quit, tooltip=Tooltip('exit')))
self.e(Button(parent=main_menu, text='Story', color=color.black10, scale=(0.5, 0.08),
position=(0, -0.4), on_click=self.show_story_menu, tooltip=Tooltip('Read story')))
def show_scoreboard_menu(self):
self.clear_menu()
main_menu = Entity(scale=Vec2(12, 12), billboard=True, position=self.player.position)
self.e(main_menu)
def delete_scores():
with open('scores.json', 'w') as f:
f.write("")
self.show_main_menu()
try:
with open('scores.json', 'r') as f:
data = json.load(f)
except:
data = {}
self.e(Entity(parent=main_menu, model="plane", color=color.gray, scale=10, rotation=(90, 90, 90),
position=(2, 2, 2)))
self.e(Button(parent=main_menu, text=f'High Scores', scale=(0.5, 0.08),
position=(0, .35), highlight_color=color.rgba(0, 0, 0, 0),
color=color.rgba(0, 0, 0, 0), pressed_color=color.rgba(0, 0, 0, 0)))
for idx, (date, score) in enumerate(data.items()):
if idx == 5:
break
temp = time.strptime(date, "%X %x")
self.e(Button(parent=main_menu, text=f'{time.strftime("%x", temp)} {round(score)}',
color=color.black10, scale=(0.5, 0.08),
position=(0, 0.2 - idx * 0.1), on_click=self._pass,
tooltip=Tooltip(time.strftime("%X on %A %B %d %Y", temp))))
self.e(Button(parent=main_menu, text='Back!', color=color.black10, scale=(0.5, 0.08),
position=(0, -0.35), on_click=self.show_main_menu, tooltip=Tooltip('Back to Main menu')))
self.e(Button(parent=main_menu, text='Clear', color=color.black10, scale=(0.5, 0.08),
position=(0, -0.50), on_click=delete_scores,
                      tooltip=Tooltip('Clear all High Scores, cannot be undone.')))
# def show_options_menu(self):
# self.clear_menu()
# main_menu = Entity(scale=Vec2(12, 12), billboard=True, position=self.player.position)
# self.e(main_menu)
# self.e(Entity(parent=main_menu, model="plane", color=color.gray, scale=10, rotation=(90, 90, 90),
# position=(2, 2, 2)))
#
# self.e(Text(parent=main_menu, origin=(0, -10), text="Options"))
#
# self.e(Button(parent=main_menu, text='Mouse & Keys', color=color.black10, scale=(0.5, 0.08),
# position=(0, 0.1), on_click=self.show_keyboard_menu,
# tooltip=Tooltip('Mouse sensitivity & keybindings')))
# self.e(Button(parent=main_menu, text='Graphics', color=color.black10, scale=(0.5, 0.08),
# position=(0, 0), on_click=self.show_graphic_options_menu, tooltip=Tooltip('Graphic settings')))
# self.e(Button(parent=main_menu, text='Other', color=color.black10, scale=(0.5, 0.08),
# position=(0, -0.1), on_click=self.show_other_options_menu, tooltip=Tooltip('Other settings')))
# self.e(Button(parent=main_menu, text='Back!', color=color.black10, scale=(0.5, 0.08),
# position=(0, -0.2), on_click=self.show_main_menu, tooltip=Tooltip('Back to Main menu')))
def show_keyboard_menu(self):
self.clear_menu()
main_menu = Entity(scale=Vec2(12, 12), billboard=True, position=self.player.position)
self.e(main_menu)
self.e(Entity(parent=main_menu, model="plane", color=color.gray, scale=10, rotation=(90, 90, 90),
position=(2, 2, 2)))
self.e(Text(parent=main_menu, origin=(0, -8), text="Controls", scale=(1.5, 1.5)))
# Keybindings
all_keys = ["forward", "siren", "backwards", "brake", "left", "Menu", "Right", "Quit"]
key_bindings = ["W", "E", "S", "SPACE", "A", "Esc", "D", "Shift+q"]
for key in range(len(all_keys)):
if key % 2 == 0:
Button(parent=main_menu, color=color.black10, scale=(0.2, 0.08), position=(-.3, .175 - .05 * key))
# on_click=(Func(get_input_and_send, key)))
Text(parent=main_menu, text=key_bindings[key].upper(), scale=(1, 1),
position=(.325 - .7, .1875 - .05 * key))
Text(parent=main_menu, text=all_keys[key].lower(), scale=(1.2, 1.2),
position=(-0.07 - 0.1, .1875 - .05 * key))
else:
Button(parent=main_menu, color=color.black10, scale=(0.2, 0.08), position=(+.2, .225 - .05 * key))
# on_click=(Func(get_input_and_send, key)))
Text(parent=main_menu, text=key_bindings[key].upper(), scale=(1, 1), position=((.13), .23 - .05 * key))
Text(parent=main_menu, text=all_keys[key].lower(), scale=(1.2, 1.2),
position=(+0.33, .23 - .05 * key))
# Buttons
# self.e(Button(parent=main_menu, text='Save!', color=color.black10, scale=(0.5, 0.08),
# position=(0, -0.2), on_click=save_values,
# tooltip=Tooltip('Save Changes')))
self.e(Button(parent=main_menu, text='Back!', color=color.black10, scale=(0.6, 0.08),
position=(0, -0.3), on_click=self.show_main_menu,
tooltip=Tooltip('Back to main menu')))
# def show_other_options_menu(self):
# self.clear_menu()
# main_menu = Entity(scale=Vec2(12, 12), billboard=True, position=self.player.position)
# self.e(main_menu)
#
# def goback():
# save_values()
# self.show_options_menu()
#
# def on_value_changed():
# global hints
# update_value("settings", "hints", "".join(on_off_switch.value))
# save_values()
#
# self.e(Entity(parent=main_menu, model="plane", color=color.gray, scale=10, rotation=(90, 90, 90),
# position=(2, 2, 2)))
#
# self.e(Text(parent=main_menu, origin=(0, -10), text="Other Settings"))
#
# # Hints
# self.e(Text(parent=main_menu, position=(-.025, .19), scale=1, text="Tips"))
# on_off_switch = ButtonGroup(('off', 'on'), parent=main_menu, min_selection=1, position=(.05, .2),
# default=f"{self.hints}", selected_color=color.red)
# self.e(on_off_switch)
# on_off_switch.on_value_changed = on_value_changed
#
# # Buttons
# self.e(Button(parent=main_menu, text='Back!', color=color.black10, scale=(0.5, 0.08),
# position=(0, -0.2), on_click=goback, tooltip=Tooltip('Back to Options menu')))
def show_score_menu(self, new_hs, txt):
camera.rotation = Vec3(0, 0, 0)
camera.position = Vec3(0, 0, -20)
self.clear_menu()
main_menu = Entity(scale=Vec2(12, 12),
billboard=True,
position=self.player.position)
self.e(main_menu)
self.e(Entity(parent=main_menu, model="plane", color=color.gray, scale=10, rotation=(90, 90, 90),
position=(2, 2, 2)))
self.e(Text(parent=main_menu, origin=(0, -8), scale=2, text=txt[:], color=color.black33))
if new_hs:
score_text = f'New High Score!\n {round(self.player_car.score)}\nNew High Score!'
else:
score_text = f"Final Score: {round(self.player_car.score)}"
self.e(Text(parent=main_menu, origin=(0, 0), scale=4, text=score_text))
self.e(Button(parent=main_menu, text='Back', color=color.black10, scale=(0.5, 0.08),
position=(0, -0.40), on_click=self.show_main_menu,
tooltip=Tooltip('Back to Main menu or just press [ESC] to replay')))
def show_story_menu(self):
camera.rotation = Vec3(0, 0, 0)
camera.position = Vec3(0, 0, -20)
self.clear_menu()
main_menu = Entity(scale=Vec2(12, 12),
billboard=True,
position=self.player.position)
self.e(main_menu)
self.e(Entity(parent=main_menu, model="plane", color=color.gray, scale=10, rotation=(90, 90, 90),
position=(2, 2, 2)))
self.e(Text(parent=main_menu, origin=(0, -6.0), scale=2,
text="<NAME> always knew he was destined for greatness."))
self.e(Text(parent=main_menu, origin=(0, -4.8), scale=2,
text="Having quickly climbed the ranks to Detective in the Shitty City Police Department,"))
self.e(Text(parent=main_menu, origin=(0, -3.6), scale=2,
text="Richard thought he could change the world."))
self.e(Text(parent=main_menu, origin=(0, -2.4), scale=2,
text="Unfortunately for Richard, the city had other plans."))
self.e(Text(parent=main_menu, origin=(0, -1.2), scale=2,
text="20 years of Detective work for the SCPD took its toll,"))
self.e(Text(parent=main_menu, origin=(0, 0), scale=2,
text="and it wasn't long before Dick emerged, hardened by the streets."))
self.e(Text(parent=main_menu, origin=(0, 1.2), scale=2,
text="Until one fateful morning when the Chief came by with a file,"))
self.e(Text(parent=main_menu, origin=(0, 2.4), scale=2,
text='"Close this case and you earn your stripes," he said with a grin'))
self.e(Text(parent=main_menu, origin=(0, 3.6), scale=2,
text="Help Dick fulfill his destiny."))
self.e(Button(parent=main_menu, text='Drive!', color=color.black10, scale=(0.5, 0.08),
position=(0, -0.40), on_click=self.player_car.pause,
tooltip=Tooltip('Back to Main menu or just press [ESC] to replay')))
# def show_graphic_options_menu(self):
# self.clear_menu()
# main_menu = Entity(scale=Vec2(12, 12), billboard=True, position=self.player.position)
# self.e(main_menu)
#
# self.e(Entity(parent=main_menu, model="plane", color=color.gray, scale=10, rotation=(90, 90, 90),
# position=(2, 2, 2)))
#
# self.e(Text(parent=main_menu, origin=(0, -10), text="Graphic Settings"))
#
# # Buttons
# self.e(Button(parent=main_menu, text='Back!', color=color.black10, scale=(0.5, 0.08),
# position=(0, -0.2), on_click=self.show_options_menu, tooltip=Tooltip('Back to Options menu')))
def _pass(self):
pass
if __name__ == '__main__':
app = Ursina()
screen = None # for global statement
menu = Menu()
menu.show_main_menu()
window.exit_button.visible = True
window.fps_counter.visible = True
app.run()
```
#### File: JesperKauppinen/DefectiveDickDriver/run_game.py
```python
from ursina import *
from ursina.prefabs.first_person_controller import FirstPersonController
from ursina.prefabs import health_bar
from classes import TheCar, CheckPoint, Lighting, Obstacle, Arrow
from utils import make_walls, make_floor, reset_game
from constants import COLOR_RUST, COLOR_RUST_2X
from menu import Menu
from story import new_story
from sys import argv
import sys
# The minimum (major, minor) Python version required
MIN_VER = (3, 7)
if sys.version_info[:2] < MIN_VER:
sys.exit(
"This game requires Python {}.{}.".format(*MIN_VER)
)
app = Ursina()
window.show_ursina_splash = True
window.icon = "assets/icon/icon.ico"
window.fullscreen = False
window.title = "Defective: Dick Driver"
window.icon = 'assets/icon/icon'
if len(argv) > 1:
try:
scale = int(argv[1])
resolution = (scale / 9 * 16, scale, 32)
window.fullscreen_size = resolution
window.windowed_size = resolution
if len(argv) > 2:
window.fullscreen = int(argv[2])
    except Exception as e:
        print(f'correct usage is ``run_game.py height fullscreen``; height should be in pixels, '
              f'1 for fullscreen, 0 for windowed ({e})')
window.vsync = True
scene.fog_color = COLOR_RUST
scene.fog_density = (10, 60)
player = FirstPersonController(gravity=0)
camera.position = Vec3(0, 1, -20)
camera.rotation = Vec3(15, 0, 0)
player.cursor.enabled = False
walls = make_walls(450)
floor = make_floor(9, 60)
lower_floor = Entity(model='cube', color=COLOR_RUST, position=(0, -2, 0),
scale=(10000, 1, 10000),
rotation=(0, 0, 0)
)
siren_light = Lighting(player, player.position + Vec3(1, 7, 0), color.black, rotation=player.down)
CheckPoint.init_light(Entity(model='cube', color=color.rgba(255, 5, 5, 128), scale=(25, 25, 25)))
city = Entity(model='assets/models/city800', color=COLOR_RUST, position =(0, .1, 0), collider='mesh', reload=True)
car = Entity(model='assets/models/80scop',
texture='assets/models/cars',
position = (0, 0, 4),
scale=1,
collider='box',
name='player_car'
)
CheckPoint.init_car(car)
Obstacle.init_car(car)
CheckPoint.spawn_new()
arrow = Arrow()
player_car = TheCar(car)
menu = Menu(player, player_car)
cars = [player_car]
camera.parent = player_car.ent
speed_text = Text(text=f"", position=(0, -.4), color=color.white66)
pos_text = Text(text=f"", position=(.3, .5), color=color.black)
score_text = Text(text=f"", position=(-.8, -.35), color=COLOR_RUST_2X)
story_text = Text(text=f"", position=(-.3, .2), color=COLOR_RUST_2X)
health_bar_1 = health_bar.HealthBar(bar_color=COLOR_RUST_2X, roundness=.1, value=100, position=(-.8, -.40), animation_duration=0)
siren_bar_1 = health_bar.HealthBar(bar_color=color.rgb(40, 40, 70), roundness=.1, value=100, position=(-.8, -.4375), animation_duration=0)
ignore_list = [player, car]
inMenu = False
mouse.visible = False
# Audio
music = Audio('assets/music/backaround_music', pitch=1, loop=True, autoplay=True, volume=.1)
siren_audio = Audio('assets/music/siren', pitch=1, loop=True, autoplay=False, volume=.1)
# Lights
driving_light1 = PointLight(shadows=True, color=color.rgb(196, 196, 196))
driving_light2 = PointLight(shadows=True, color=color.rgb(128, 128, 128))
driving_light3 = PointLight(shadows=True, color=color.rgb(64, 64, 64))
menu_light = AmbientLight(position=camera.position, shadows=True)
def update():
# Main Loop - Game Paused
if player_car.paused:
menu_light.color = color.rgb(100, 50, 50)
driving_light1.color = color.black
driving_light2.color = color.black
driving_light3.color = color.black
# Entity(billboard=True, scale=Vec3(10, 10, 10), color=color.black, model="plane", rotation=(-90, 0, 0))
if not inMenu:
invoke(menu.show_main_menu)
dis_able_menu()
# Main Loop - Game Running
else:
camera.rotation = Vec3(25, 0, 0)
camera.position = Vec3(0, 10, -10)
menu_light.color = color.black
driving_light1.color = color.rgb(196, 196, 196)
driving_light2.color = color.rgb(128, 128, 128)
driving_light3.color = color.rgb(64, 64, 64)
driving_light1.position = player_car.ent.position
driving_light1.rotation_x = -90
driving_light2.rotation_x = -90
driving_light3.rotation_x = -90
driving_light2.position = player_car.ent.position + player_car.ent.forward * 15 + Vec3(0, 5, 0)
driving_light3.position = player_car.ent.position + player_car.ent.forward * 40 + Vec3(0, 5, 0)
if inMenu:
dis_able_menu()
Menu.clear_menu()
if player_car.new_game:
while player_car.audio_list:
print(player_car.audio_list)
player_car.audio_list.pop().stop(destroy=True)
player_car.ent.position = Vec3(0, 0, 0)
player_car.new_game = False
player_car.story = new_story()
player_car.story_time = time.time() + 10
player_car.score = 0
# HUD
speed_text.text = f"{round(abs(player_car.speed) * 80, 1)} km/h"
# pos_text.text = f"Pos: {round(player.position[0], 2), round(player.position[1], 2), round(player.position[2], 2)}"
score_text.text = f"SCORE {round(player_car.score)}"
if player_car.story:
if time.time() < player_car.story_time:
story_text.text = f"Solve the case : {player_car.story[0]}, follow the clues..."
story_text.position = (-.8, .45)
story_text.scale = 1.5
else:
story_text.text = ''
health_bar_1.value = round(player_car.hp)
siren_bar_1.value = round(player_car.light_time)
# Arrow
arrow.position = player.position + Vec3(0, 5, 0)
arrow.rotation = arrow.look_at(CheckPoint.checkpoints[0], axis="forward")
if held_keys['w']:
for car in cars:
car.w()
elif held_keys['s']:
for car in cars:
car.s()
if held_keys['space']:
for car in cars:
car.brake(False)
if held_keys['a'] and held_keys['d']:
player_car.steering = None
elif not (held_keys['a'] or held_keys['d']):
player_car.steering = 0
elif held_keys['d']:
for car in cars:
car.d()
elif held_keys['a']:
for car in cars:
car.a()
if player_car.lights:
player_car.light_time -= 1
if player_car.light_time < 0:
player_car.lights = False
siren_audio.stop()
else:
if player_car.light_time < 100:
player_car.light_time += .1
crash_speed = player_car.move([*ignore_list, *CheckPoint.checkpoints])
if crash_speed:
if player_car.hp < 1:
print("end crash")
if not player_car.audio_list:
if crash_speed < (10 / 80):
player_car.audio_list.append(Audio('assets/sfx/slow_crash_end'))
else:
player_car.audio_list.append(Audio('assets/sfx/fast_crash_end'))
player_car.audio_list[-1].play()
else:
if crash_speed > (10 / 80):
print("big crash", crash_speed)
Audio('assets/sfx/short_crash')
player_car.rotate()
            if not (held_keys['w'] or held_keys['s']):
                player_car.speed = 0
for checkpoint in CheckPoint.checkpoints:
if checkpoint.is_cleared([]):
player_car.score += checkpoint.lastpoint
Obstacle.shuffle()
player.position = player_car.ent.position
if player_car.lights:
if int(time.time() * 5) % 2 == 0:
siren_light.color = color.red
else:
siren_light.color = color.blue
else:
siren_light.color = color.black33
if player_car.hp <= 0:
player_car.paused = True
reset_game(player_car, Obstacle, CheckPoint, menu)
siren_audio.stop()
dis_able_menu()
def dis_able_menu():
global inMenu
inMenu = not inMenu
for i in range(len(floor)):
floor[i].enabled = not floor[i].enabled
for i in range(len(walls)):
walls[i].enabled = not walls[i].enabled
for i in range(len(Obstacle.obstacles)):
Obstacle.obstacles[i].enabled = not Obstacle.obstacles[i].enabled
arrow.enabled = not arrow.enabled
lower_floor.enabled = not lower_floor.enabled
# menu_light.visible = not menu_light.visible
CheckPoint.checkpoints[0].enabled = not CheckPoint.checkpoints[0].enabled
player.enabled = not player.enabled
mouse.visible = not mouse.visible
mouse.locked = not mouse.locked
player_car.ent.visible = not player_car.ent.visible
pos_text.enabled = not pos_text.enabled
speed_text.enabled = not speed_text.enabled
score_text.enabled = not score_text.enabled
story_text.enabled = not story_text.enabled
siren_bar_1.enabled = not siren_bar_1.enabled
health_bar_1.enabled = not health_bar_1.enabled
city.enabled = not city.enabled
def input(key):
# toggle pause menu
if key == 'escape':
player_car.paused = not player_car.paused
# respawn 0, 0
if held_keys['control'] and key == 'r':
player_car.ent.position = Vec3(0, 0, 0)
player_car.speed = None
# EMS lights toggle
if key == "e":
player_car.lights = not player_car.lights
if player_car.lights:
siren_audio.play()
else:
siren_audio.stop()
# music toggle
if key == "m":
if music.playing:
music.pause()
else:
music.resume()
Sky(texture='night_sky_red_blur')
# EditorCamera()
if __name__ == '__main__':
app.run()
# basic_lighting_shader -- no colored light
# colored_lights_shader -- just white
# fading_shadows_shader -- doesnt exist
# fresnel_shader -- doesnt exist
# lit_with_shadows_shader -- apply color existing white
# matcap_shader -- mirror finish
# normals_shader -- rainbow
# texture_blend_shader -- doesnt exist
# triplanar_shader -- car .png colors
# unlit_shader -- no colored light
```
|
{
"source": "JesperKauppinen/paint-webapp",
"score": 3
}
|
#### File: paint-webapp/api/images.py
```python
from flask import Blueprint, request, jsonify
from flask_login import current_user, login_required
from models.artwork import Artwork
from models import db
# TODO: Check if this is used
api_images = Blueprint('images', __name__)
@api_images.route('/add', methods=['GET', 'POST'])
@login_required
def add_img():
    user_id = current_user.id
    url = request.args.get('url', "", type=str)
    title = request.args.get('title', "Unknown artwork", type=str)
    artwork = Artwork.query.filter_by(filename=url).first()
    if artwork:
        return jsonify(False)
    else:
        new_artwork = Artwork(owner_id=user_id, filename=url, title=title)
        db.session.add(new_artwork)
        db.session.commit()
        return jsonify(True)
@api_images.route('/get', methods=['GET'])
def get_img():
return "success"
# return Artwork.query.all()
```
#### File: paint-webapp/api/likes.py
```python
from flask import Blueprint, request, jsonify
from flask_login import current_user, login_required
from models.likes import Likes
from models import db
api_likes = Blueprint('likes', __name__)
@api_likes.route('/add', methods=['POST'])
@login_required
def add_like():
user_id = current_user.id
artwork_id = request.args.get('artwork_id', 0, type=int)
user_likes = Likes.query.filter_by(user_id=user_id, artwork_id=artwork_id).first()
if user_likes:
return jsonify(False)
else:
new_likes = Likes(artwork_id=artwork_id, user_id=user_id)
db.session.add(new_likes)
db.session.commit()
return jsonify(True)
@api_likes.route('/remove', methods=['POST'])
@login_required
def remove_like():
user_id = current_user.id
artwork_id = request.args.get('artwork_id', 0, type=int)
user_likes = Likes.query.filter_by(user_id=user_id, artwork_id=artwork_id).first()
if user_likes:
db.session.delete(user_likes)
db.session.commit()
return jsonify(True)
else:
return jsonify(False)
@api_likes.route('/get/count', methods=['GET'])
def get_like_count():
artwork_id = request.args.get('artwork_id', 0, type=int)
likes_count = Likes.query.filter_by(artwork_id=artwork_id).count()
if likes_count:
return jsonify(likes_count)
else:
return jsonify(False)
```
#### File: paint-webapp/sockets/timer.py
```python
from threading import Thread
from typing import Callable
from time import sleep
class TimerThread(Thread):
def __init__(self, seconds: int, callback: Callable, *args, **kwargs):
self.default_seconds = seconds
self.seconds = seconds
self.callback = callback
self.paused = False
self.__stop = False
super().__init__(*args, **kwargs)
def resume(self):
self.paused = False
def pause(self):
self.paused = True
def reset(self):
self.seconds = self.default_seconds
self.paused = False
def stop(self):
self.__stop = True
def run(self):
while self.seconds >= 0:
if self.__stop:
break
sleep(1)
if not self.paused:
self.seconds -= 1
if not self.__stop:
self.callback()
```
#### File: paint-webapp/views/gallery.py
```python
from flask import Blueprint, render_template
from flask_login import current_user
from models.artwork import Artwork
from models.user import User
from models.likes import Likes
gallery = Blueprint('gallery', __name__)
@gallery.get('/')
def index():
all_artworks = Artwork.query.all()
all_artwork_details = []
for artwork_obj in all_artworks:
if Likes.query.filter_by(artwork_id=artwork_obj.id, user_id=artwork_obj.owner_id).first():
like_status = True
else:
like_status = False
artwork_details = {
"artwork_id": artwork_obj.id,
"href": artwork_obj.filename,
"date": artwork_obj.date,
"title": artwork_obj.title,
"owner_id": artwork_obj.owner_id,
"owner_name": User.query.filter_by(id=artwork_obj.owner_id).first().username,
"like_count": Likes.query.filter_by(artwork_id=artwork_obj.id).count(),
"like_status": like_status,
}
all_artwork_details.append(artwork_details)
return render_template('gallery/index.html', user=current_user, artworks=all_artwork_details)
@gallery.get('/<int:id_>')
def single(id_):
artwork_obj = Artwork.query.filter_by(id=id_).first()
if Likes.query.filter_by(artwork_id=id_, user_id=artwork_obj.owner_id).first():
like_status = True
else:
like_status = False
artwork_details = {
"artwork_id": id_,
"href": artwork_obj.filename,
"date": artwork_obj.date,
"title": artwork_obj.title,
"owner_id": artwork_obj.owner_id,
"owner_name": User.query.filter_by(id=artwork_obj.owner_id).first().username,
"like_count": Likes.query.filter_by(artwork_id=artwork_obj.id).count(),
"like_status": like_status,
}
return render_template('gallery/single.html', user=current_user, artworks=artwork_details)
```
#### File: paint-webapp/views/home.py
```python
from flask import Blueprint, redirect, url_for
home = Blueprint('index', __name__)
@home.get('/')
def index():
return redirect(url_for('artworks.draw'))
```
|
{
"source": "JesperKauppinen/tic-tac-toe",
"score": 3
}
|
#### File: api/resources/register.py
```python
from flask_restful import Resource
from flask_pydantic import validate
from app.api.schemas import ErrorResponse
from app.api.schemas.user import UserCreate
from app.api.schemas.auth import AccountResponse
from app.models.users import User
from app.api.jwt import create_tokens_pair, set_refresh_token
class RegisterResource(Resource):
@validate()
def post(self, body: UserCreate):
print("request recived", body.username, body.password)
"""
Create new User object
"""
# Try get user by username, if username already exist return
if User.query.filter_by(username=body.username).first() is not None:
print("User creation failed")
return ErrorResponse(error=f"Username ({body.username}) already exist in database.").dict(), 400
# Create new user object by calling create function from User class
user = User.create(username=body.username, password=<PASSWORD>)
print("User created")
access_token, refresh_token = create_tokens_pair(user.username)
set_refresh_token(refresh_token)
return AccountResponse(access_token=access_token, user_id=user.id).dict(), 201
```
#### File: app/models/users.py
```python
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from . import db
# User object
class User(db.Model, UserMixin):
__tablename__ = 'user'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(32), nullable=False)
password_hash = db.Column(db.String(200))
def set_password(self, password):
self.password_hash = generate_password_hash(password, method='sha256')
def check_password(self, password):
return check_password_hash(self.password_hash, password)
@classmethod
def create(cls, username: str, password: str) -> "User":
hashed_password = generate_password_hash(password, method='sha256')
user = cls(
username=username,
password_hash=<PASSWORD>
)
db.session.add(user)
db.session.commit()
return user
```
|
{
"source": "jesperkha/DownloadCleanup",
"score": 4
}
|
#### File: DownloadCleanup/lib/smartdict.py
```python
class smartdict(dict):
def __init__(self):
pass
def add_attribute(self, key, value):
self.__setattr__(key, value)
self.__setitem__(key, value)
def convert(obj: list) -> smartdict:
"""
Converts list to dict. Format is expected as a list of lists with either another list as a key and/or value. If both parameters is a list, the values for each key will be asigned the list from the value index.
Allowed formats:
```
["key", "value"]
["key", ["value1", "value2"]]
[["key1", "key2"], "value"]
[["key1", "key2"], ["value1", "value2"]]
# dict.key -> value
# dict["key"] -> value
```
"""
if not isinstance(obj, list):
return None
sm = smartdict()
for elem in obj:
if not isinstance(elem, list):
return None
key, value = elem
if isinstance(key, list):
for e in key:
sm.add_attribute(e, value)
else:
sm.add_attribute(key, value)
return sm
```
|
{
"source": "jesperkha/Fizz",
"score": 3
}
|
#### File: Fizz/lib/autodocs.py
```python
import os
from genericpath import isdir
# Generates documentation for libraries from the function docstrings
# and definitions. The documentation is created as a markdown document.
# Also creates a function 'dump' file, which is used when running the fizz
# 'docs' subcommand.
def main():
libraries = os.listdir("./lib")
for foldername in libraries:
# Check for file not directory
if len(foldername.split(".")) != 1:
continue
# Get library files in folder
path = f"./lib/{foldername}"
files = os.listdir(path)
total = []
for file in files:
# Must be Go file
if not file.endswith(".go"):
continue
# Read file and add formatted comments to total
content = open(f"{path}/{file}").readlines()
total.extend(add_doc(content))
if len(total) != 0:
# Write doc content to md file
create_file(path, foldername, total)
# Returns formatted Go comments as markdown sections
def add_doc(c: list[str]):
# Total comments/messages
docs = []
start = 0
for idx, l in enumerate(c):
# Remove tabs
raw = l.replace("\t", "")
# One liner comment
if raw.startswith("/*") and l.count("*/") == 1:
            # Append the content between the comment markers
docs.append(("", f"{raw[3:len(raw)-3]}\n"))
continue
# Multiline comment, get content between pairs
if l.startswith("/*"):
start = idx
if l.startswith("*/"):
interval = c[start+1:idx]
# Remove tabs
doc = "".join(interval[:len(interval)-1]).replace("\t", "")
func = interval[len(interval)-1].replace("\t", "")
docs.append((doc, func))
return docs
# Write docs to file (create file)
def create_file(path: str, libname: str, docs: list[tuple[str, str]]):
    # Create the dump directory if it doesn't exist
dump = "lib/_libdump"
if not isdir(dump):
print("[INFO] _libdump was not present, creating it now")
os.mkdir(dump)
filename = f"{dump}/{libname}.txt"
os.open(filename, os.O_CREAT)
# Write function dump
with open(filename, "w+") as f:
for func in docs:
f.write(f"{func[1]}\t{func[0]}\n")
    # Create the file if it doesn't exist already
filename = f"{path}/{libname}_docs.md"
os.open(filename, os.O_CREAT)
# Open and write formatted with markdown
with open(filename, "w+") as f:
# Write formatted function documentation
f.write(f"# Methods in {libname} library\n\n")
for func in docs:
name = func[1].split(" ")[1].split("(")[0]
f.write(f"## **`{name}`**\n\n")
f.write(f"{func[0]}\n")
f.write(f"```go\n{func[1]}```\n\n")
f.write("<br>\n\n")
if __name__ == "__main__":
main()
```
|
{
"source": "JesperLundbeerg/Slutprojekt_Programmering",
"score": 4
}
|
#### File: JesperLundbeerg/Slutprojekt_Programmering/main.py
```python
from typing import List
from resources import LetterCondition, Wordle
from colorama import Fore
import random
import os
def main():
word_machine = load_word_machine("words.txt")
secretw = random.choice(list(word_machine))
wordle = Wordle(secretw)
while wordle.can_attempt:
print("\nInstruktioner: Detta är ett spel där du ska gissa dig fram till ett hemligt ord.")
print("""Ordet ska vara 5 bokstäver långt och just nu används en ordlista på engelska,
så endast ord på engelska är tillåtna ;)
""")
print("Lycka till!")
x = input("\nSkriv in din gissning: ")
x = x.upper()
if len(x) != wordle.Word_length:
print(Fore.RED + f"Ordet måste vara {wordle.Word_length} bokstäver långt! Testa igen." + Fore.RESET)
continue
if not x in word_machine:
print(Fore.RED + f"{x} är inte ett riktigt ord! Testa igen." + Fore.RESET)
continue
wordle.attempt(x)
display_results(wordle)
if wordle.correct_guess:
print("Du gissade rätt!")
else:
print("Du klarade tyvärr inte att lösa ut det ord som söktes.")
print(f"Ordet som söktes var: {wordle.secretw}")
def display_results(wordle: Wordle):
print("\nDina resultat hittills...")
print(f"Du har {wordle.attempts_left} försök kvar.\n")
lines = []
for word in wordle.attempts:
result = wordle.guess(word)
colored_result_str = convert_result_to_color(result)
lines.append(colored_result_str)
for _ in range(wordle.attempts_left):
#print("_ " * wordle.Word_length)
lines.append(" ".join(["_"] * wordle.Word_length))
border_around_wordle(lines)
def load_word_machine(path : str):
word_machine = set()
with open(path, "r", encoding="utf8") as f:
for line in f.readlines():
word = line.strip().upper()
word_machine.add(word)
return word_machine
def convert_result_to_color(result: List[LetterCondition]):
"""_summary_
Args:
result (List[LetterCondition]): _description_
Returns:
_type_: _description_
"""
result_w_color = []
for letter in result:
if letter.in_position:
color = Fore.GREEN
elif letter.in_word:
color = Fore.YELLOW
else:
color = Fore.RED
colored_letter = color + letter.character + Fore.RESET
result_w_color.append(colored_letter)
return " ".join(result_w_color)
def border_around_wordle(lines : List[str], size: int=9, pad : int=1):
content_length = size + pad * 2
top_border = "┌" + "─" * content_length + "┐"
bottom_border = "└" + "─" * content_length + "┘"
space = " " * pad
print(top_border)
for line in lines:
print("│" + space + line + space + "│")
print(bottom_border)
def clear():
command = "clear"
if os.name in ("nt", "dos"):
command = "cls"
os.system(command)
if __name__ == "__main__":
clear()
enter = input("Välkommen till Wordle! Tryck enter för att starta spelet: ")
if "" in enter:
pass
while True:
main()
choice = input("Vill du spela igen? [ja, nej]: ")
if "ja".casefold() in choice.casefold():
clear()
continue
elif "nej".casefold() in choice.casefold():
break
else:
break
clear()
print("Tack för att du spelat!")
```
#### File: JesperLundbeerg/Slutprojekt_Programmering/resources.py
```python
class Wordle:
Max_attempts = 6
Word_length = 5
def __init__(self, secretw : str):
"""_summary_
Args:
secretw (str): secretw är förkortning för secretword och
är variabeln för ett slumpat ord från ordlistan "wordlist.txt".
"""
self.secretw = secretw.upper()
self.attempts = []
pass
def attempt(self, word : str):
word = word.upper()
self.attempts.append(word)
def guess(self, word : str):
word = word.upper()
result = []
for i in range(self.Word_length):
character = word[i]
letter = LetterCondition(character)
letter.in_word = character in self.secretw
letter.in_position = character == self.secretw[i]
result.append(letter)
return result
    # Tells whether you have won or not
@property
def correct_guess(self):
return len(self.attempts) > 0 and self.attempts[-1] == self.secretw
    # Tells how many attempts you have left
@property
def attempts_left(self) -> int:
return self.Max_attempts - len(self.attempts)
    # Tells whether you may guess again
@property
def can_attempt(self):
return self.attempts_left > 0 and not self.correct_guess
class LetterCondition:
def __init__(self, character : str):
"""_summary_
Args:
character (str): Är variabeln för bokstäverna.
Med hjälp av denna klass kan vi ta reda på om bokstäverna finns i ordet eller inte.
"""
self.character = character
self.in_word: bool = False
self.in_position: bool = False
    # Tells whether the letter is in the word
def __repr__(self):
return f"[{self.character} in_word: {self.in_word} in_position: {self.in_position}]"
```
|
{
"source": "JesperLundberg/EbookPresenter",
"score": 3
}
|
#### File: EBookPresenter/docker/runDocker.py
```python
import os
from dockerUtilities import get_value
from dockerUtilities import container_exist
from dockerUtilities import remove_container
def run_docker(container_name, release, path_to_bind):
command = "docker run -d -p 5000:80 --mount type=bind,source=" +\
path_to_bind +\
",target=/books,readonly --name " + container_name + " " +\
container_name + ":" + release
result = os.system(command)
return result
if(container_exist(get_value("name"))):
remove_container(get_value("name"))
run_docker(get_value("name"), get_value("release"), get_value("mount"))
```
|
{
"source": "JesperLundberg/s3sync",
"score": 2
}
|
#### File: JesperLundberg/s3sync/buildDocker.py
```python
import os
import subprocess
import dockerUtilities
def build_docker():
print("Building docker file")
command = "docker build -t " + dockerUtilities.get_value("name") + ":" +\
dockerUtilities.get_value("release") + " ."
os.system(command)
# Actual script
build_docker()
```
|
{
"source": "JesperMjornman/FlakyReporter",
"score": 2
}
|
#### File: FlakyReporter/pytest-trace/pytest_trace.py
```python
import pytest, sys, traceback, linecache, inspect, json, collections, os
from flakyreporter.loghandler import LogHandler
def pytest_addoption(parser):
group = parser.getgroup('flakytrace')
group.addoption(
'--flakytrace',
action='store_true',
dest='counter',
default=False,
help='enable tracing for FlakyReporter'
)
try:
    to_trace = [line.rstrip() for line in open('./tracelist.lst')]
except OSError:
    to_trace = []
logger = LogHandler()
def _trace_lines(frame, event, arg):
"""
Function used for tracing executed lines.
Inserts into log: lineno : Tuple[line_str, locals]
"""
global logger
co = frame.f_code
parent = frame.f_back.f_code
func_name = co.co_name
if event == 'line':
if func_name.split('::')[-1] in to_trace:
logger.log_trace(frame, event, arg)
elif parent.co_name in to_trace:
logger.log_trace(frame, event, arg, True)
elif parent.co_name.split('::')[-1] in to_trace and (event == 'call' or event == 'return'):
if '/usr/lib/' in co.co_filename \
or '_pytest' in co.co_filename \
or 'Python/Python/' in co.co_filename \
or 'Python\\Python' in co.co_filename:
return
logger.log_trace(frame, event, arg)
return _trace_lines
@pytest.hookimpl
def pytest_runtest_call(item):
"""
Hooks pytest oncall function.
Runs when a test is called.
Args:
item - pytest item (provided by pytest)
"""
if item.name in to_trace:
global logger
logger.logs[item.name] = {
'lines' : dict()
}
sys.settrace(_trace_lines)
@pytest.hookimpl
def pytest_runtest_teardown(item, nextitem):
sys.settrace(None)
@pytest.hookimpl
def pytest_terminal_summary(terminalreporter, exitstatus, config):
"""
Hooks writing summary to terminal from pytest.
This enables us to catch explanation on why the assert failed.
"""
for report in terminalreporter.getreports("failed"):
try:
report_json = report._to_json()
func_name = report_json['nodeid'].split("::")[1]
if func_name in to_trace:
logger.log_pytest_fail(func_name, report_json)
except Exception as e:
print(e)
_print_logs()
@pytest.hookimpl
def pytest_assertion_pass(item, lineno, orig, expl):
"""
Experimental hook from pytest.
Hooks passing assertments giving the content to why it passed.
"""
if item.name in to_trace:
logger.log_pytest_pass(item, lineno, orig, expl)
def _print_logs():
"""
Writes the logs to their respective files into either tracelogs/passed or tracelogs/failed.
"""
global logger
logger.save_logs()
# print(logger.dump_logs())
```
#### File: src/flakyreporter/loghandler.py
```python
from flakyreporter.util import bcolors as Color
import sys, inspect, collections, os, traceback, linecache
import json, pytest, platform, datetime, threading, re, glob
class LogHandler():
"""
Represents a logging object that handles logging the tracing and test results.
"""
def __init__(self, meta = None):
self.logs = self._init_log_structure()
self.err = None
self.t_lock = threading.Lock()
self.iteration_info = dict()
def read_logs(self, targets):
"""
Read all logs from ./tracelogs folder.
Note that each log is of different format in comparison to the writing of logs.
Each log is now differentiated by self.logs[func_name][passed/failed][iteration].
This allows for comparisons between several logs.
"""
self.logs = dict()
self.log_meta(None, True)
t_passing = threading.Thread(target = self._async_read_log, args=('passed', targets, ))
t_failing = threading.Thread(target = self._async_read_log, args=('failed', targets, ))
t_passing.start()
t_failing.start()
t_passing.join()
t_failing.join()
if self.err:
raise Exception(self.err)
def log_meta(self, meta, load_cpu_info = True)->None:
"""
Log the meta data of the computer, i.e. version, OS etc.
Also logs meta of Python version etc.
"""
if meta is not None:
self.logs['meta'] = meta
else:
if load_cpu_info:
try:
from cpuinfo import get_cpu_info
cpu = get_cpu_info()['brand']
except:
cpu = platform.processor()
else:
cpu = platform.processor()
self.logs['meta'] = {
'OS' : {
'platform' : platform.platform().__str__(),
'system' : platform.system().__str__(),
'release' : platform.release().__str__(),
'version' : platform.version().__str__(),
'machine' : platform.machine().__str__(),
'processor' : cpu,
'pyversion' : sys.version
},
'DATE' : datetime.datetime.now().strftime("%Y-%d-%m %H:%M:%S")
}
def dump_logs(self)->str:
"""
Dumps logs
"""
return json.dumps(self.logs, indent=4)
def dump_meta(self)->str:
"""
Dumps meta information
"""
return json.dumps(self.logs['meta'], indent=4)
def log_time(self, func_name, t_type, s = None, e = None)->None:
start = ''
end = ''
if t_type == 'start':
start = datetime.datetime.now().strftime("%H:%M:%S:%f")
elif t_type == 'end':
end = datetime.datetime.now().strftime("%H:%M:%S:%f")
        elif t_type is None:
start = s
end = e
if func_name not in self.logs:
self.logs[func_name] = dict()
self.logs[func_name]['time'] = {
'start' : start,
'end' : end
}
def save_logs(self)->None:
"""
Store all logs in the ./tracelogs folder
"""
for k, v in self.logs.items():
self.write_log(k)
def log_pytest_fail(self, func_name, report_json):
"""
Logs failed pytest summary.
"""
self._log_function_def(func_name)
try:
self.logs[func_name]['result'] = {
'res' : 'failed',
'line' : report_json['longrepr']['reprcrash']['lineno'],
'expl' : report_json['longrepr']['reprcrash']['message']
}
except Exception as e:
print(e)
def log_pytest_pass(self, item, lineno, orig, expl):
"""
Logs passed pytest summary.
"""
self._log_function_def(item.name)
if item.name in self.logs:
self.logs[item.name]['result'] = {
'res' : 'passed',
'line' : lineno,
'expl' : expl
}
def log_trace(self, frame, event, arg, call = False):
"""
Logs the current trace.
"""
co = frame.f_code
parent = frame.f_back.f_code
filename = co.co_filename
func_name = co.co_name
line_no = frame.f_lineno
f_locals = frame.f_locals
if event == 'line':
if not call:
try:
if func_name not in self.logs:
self.logs[func_name] = {
'lines' : dict(),
'call' : dict()
}
if 'filename' not in self.logs[func_name]:
self.logs[func_name]['filename'] = filename
self.logs[func_name]['lines'][line_no] = {
'str' : str(linecache.getline(filename, line_no).rstrip()),
'locals' : str(f_locals)
}
except Exception as e:
                    print('Trace line exception, {}\nin file {}'.format(e, filename))
else:
try:
self.logs[parent.co_name]['call'][frame.f_back.f_lineno][func_name]['lines'][line_no] = {
'str' : str(linecache.getline(filename, line_no).rstrip()),
'locals' : str(f_locals)
}
except Exception as e:
print('Trace line exception, {}\nin file {}'.format(e, filename))
elif event == 'call':
try:
if parent.co_name in self.logs:
if 'call' not in self.logs[parent.co_name]:
self.logs[parent.co_name]['call'] = dict()
if func_name not in self.logs[parent.co_name]['call']:
self.logs[parent.co_name]['call'][frame.f_back.f_lineno] = {
func_name : dict()
}
self.logs[parent.co_name]['call'][frame.f_back.f_lineno][func_name] = {
'lines' : dict(),
'filename' : filename,
'return' : None
}
self.logs[parent.co_name]['call'][frame.f_back.f_lineno][func_name]['lines'][line_no] = {
'str' : str(linecache.getline(filename, line_no).rstrip())
}
except Exception as e:
print('Trace call exception, {}\nin file {}'.format(e, filename))
elif event == 'return':
try:
if parent.co_name in self.logs:
self.logs[parent.co_name]['call'][frame.f_back.f_lineno][func_name]['return'] = arg
except Exception as e:
print('Trace return exception, {}\nin file {}'.format(e, filename))
def write_log(self, func_name):
"""
Writes a log to its respective folder.
"""
log = self.logs[func_name]
try:
f = open("./tracelogs/{}/{}.txt".format(log['result']['res'], func_name), "a+")
for k, v in collections.OrderedDict(sorted(log['lines'].items())).items():
f.write("___line {} {}\n".format(k, v['str']))
if 'call' in log and k in log['call']:
f.write(self._get_call(log['call'][k])) # Add C-> locals
if 'result' in log and log['result']['line'] == k:
f.write(">\t({})\n".format(log['result']['expl']))
else:
f.write(self._get_locals(log['lines'], k))
except Exception as e:
print('Writing log failed, {}'.format(e))
try:
f.write("{}\n\n".format("="*20))
f.close()
except:
pass
def _get_locals(self, trace, lineno)->str:
"""
Places locals fetched from sys below the correct line.
If a line contains a variable assigned a value the value of the variable will be printed below it
Args:
func_name - function name
lineno - line number
Returns
string containing that line's locals
"""
if lineno + 1 not in trace \
or trace[lineno + 1]['locals'] == "{{}}":
return ''
result = ''
try:
iterator = self._cast_json(trace[lineno + 1]['locals'])
line = trace[lineno]['str']
for k, v in iterator.items():
if k in line:
result += "<\t({} = {})\n".format(k, v)
except Exception as e:
print('Locals Failed: {}'.format(e))
return result
def _get_call(self, log)->str:
"""
Get a string representation of the currently called function
Args:
log - log of called function
Returns:
string representation of trace
"""
result = ''
try:
for k, v in log.items():
result += 'Call-> {} : {}\n'.format(k, v['filename'])
for lineno, line in v['lines'].items():
result += "C->\t\t___line {} {}\n".format(lineno, line['str'])
local_vars = self._get_locals(v['lines'], lineno)
if local_vars != '':
result += "C->\t{}".format(local_vars)
if 'return' in line['str']:
result += "C->\t\tret {}\n".format(v['return'])
except Exception as e:
print('Get call trace failed, {}'.format(e))
return result
def _cast_json(self, local_vars)->dict:
"""
        Converts the system locals string to a dictionary object.
        Used because converting the string directly to JSON raises an exception.

        Args:
            local_vars - string of locals fetched from sys

        Returns:
            dictionary object containing all locals from the string.
"""
result = dict()
try:
formatted = local_vars.strip("{ }").replace(" ", "").replace("\'", "")
formatted = formatted.split(",")
for pair in formatted:
keyval = pair.split(":")
result[keyval[0]] = keyval[1]
except IndexError:
pass
except Exception as e:
print('Failed to cast to json, {}'.format(e))
return result
def _log_function_def(self, func_name)->None:
"""
Logs the function line which is skipped from the tracing due to it not being a "line" event.
Args:
func_name - function name
"""
try:
line_no = min(self.logs[func_name]['lines'], key=int) - 1
self.logs[func_name]['lines'][line_no] = {
"str" : linecache.getline(self.logs[func_name]['filename'], line_no).rstrip().__str__(),
"locals" : '{{}}'
}
except Exception as e:
print('Failed log function definition: {}'.format(e))
def _read_locals(self, line_split, start_index):
line_split[start_index] = line_split[start_index].strip("(")
line_split[-1] = line_split[-1].replace(")\n", "")
kvpair = ''.join(line_split[start_index:]).split("=")
return kvpair
def _read_init_dict(self, func_name, result, iteration):
"""
Init dictionary for reading logs.
"""
if iteration not in self.logs[func_name][result]:
self.t_lock.acquire()
self.logs[func_name][result][iteration] = {
"lines" : dict(),
"result" : {
"lineno" : -1,
"expl" : ""
}
}
self.t_lock.release()
if 'call' not in self.logs[func_name][result][iteration]:
self.t_lock.acquire()
self.logs[func_name][result][iteration]['call'] = dict()
self.t_lock.release()
def _read_locals_(self, line, entry):
"""
        Read the locals of a given line.

        Args:
            line - the locals string to append
            entry - parsed event list containing `[event_type, depth, keys...]`
"""
try:
if entry[1] == 9:
self.logs\
[entry[2]]\
[entry[3]]\
[entry[4]]\
[entry[5]]\
[entry[6]]\
[entry[7]]\
[entry[8]]\
[entry[9]]\
[entry[10]] += line
elif entry[1] == 6:
self.logs\
[entry[2]]\
[entry[3]]\
[entry[4]]\
[entry[5]]\
[entry[6]]\
[entry[7]] += line
except Exception as e:
print(self.logs)
print('Reading locals failed, \"%s\"' % e)
def _read_returns(self, line, entry):
"""
        Read the return statement of a call.

        Args:
            line - the return value string to append
            entry - parsed event list containing `[event_type, depth, keys...]`
"""
try:
self.logs\
[entry[2]]\
[entry[3]]\
[entry[4]]\
[entry[5]]\
[entry[6]]\
[entry[7]]\
[entry[8]] += line
except Exception as e:
print('Reading return failed, \"%s\"' % e)
def _async_read_log(self, result, targets)->None:
"""
Read a log file.
Is called by read_logs() using this function as async.
Args:
result - string representing if 'passing' or 'failing'
"""
for filename in os.listdir('{}/tracelogs/{}/'.format(os.getcwd(), result)):
iteration = 0
if targets is not None and filename[:-4] not in targets:
continue
with open(os.path.join('{}/tracelogs/{}/'.format(os.getcwd(), result), filename), 'r') as f:
func_name = filename.split(".")[0]
if func_name not in self.iteration_info:
self.iteration_info[func_name] = dict()
self.t_lock.acquire()
if func_name not in self.logs:
self.logs[func_name] = dict()
self.logs[func_name][result] = {
iteration : {
"lines" : dict(),
"result" : {
"lineno" : -1,
"expl" : ""
}
}
}
self.t_lock.release()
try:
lines = f.readlines()
except UnicodeDecodeError:
self.err = (Color.FAIL + 'Unsupported Unicode format.' + Color.ENDC)
return
lineno = 0
call_lineno = 0
expl = False
call_name = ''
event = None
for line in lines:
try:
if iteration not in self.logs[func_name][result]:
self.t_lock.acquire()
self.logs[func_name][result][iteration] = {
"lines" : dict(),
"result" : {
"lineno" : -1,
"expl" : "",
"str" : ""
}
}
self.t_lock.release()
if 'call' not in self.logs[func_name][result][iteration]:
self.t_lock.acquire()
self.logs[func_name][result][iteration]['call'] = dict()
self.t_lock.release()
line_split = [x for x in (line.replace("\t", " ").split(" ")) if x != '' and x != '\"']
if line_split[0] == '___line':
event = None
lineno = int(line_split[1])
if lineno < 0:
continue
line_str = ' '.join(line_split[2:])
self.logs[func_name][result][iteration]['lines'][lineno] = {
"str" : line_str,
"locals" : ""
}
elif line_split[0] == 'Call->':
event = None
call_name = line_split[1]
self.logs[func_name][result][iteration]['call'][lineno] = dict()
self.logs[func_name][result][iteration]['call'][lineno][call_name] = {
'lines' : dict(),
'filename' : line_split[3].rstrip("\n"),
'return' : ""
}
elif line_split[0] == 'C->':
event = None
if line_split[1] == 'ret':
event = [2, -1, func_name, result, iteration, 'call', lineno, call_name, 'return']
self._read_returns(
' '.join(line_split[2:]).replace("\n", ""),
event
)
elif line_split[1] == '<':
event = [2, 9, func_name, result, iteration, 'call', lineno, call_name, 'lines', call_lineno, 'locals']
self._read_locals_(
' '.join(line_split[2:]).replace("\n", "").replace("=", ":"),
event
)
else:
call_lineno = int(line_split[2])
if call_lineno not in self.logs[func_name][result][iteration]['call'][lineno][call_name]['lines']:
self.logs[func_name][result][iteration]['call'][lineno][call_name]['lines'][call_lineno] = {
"str" : "",
"locals" : ""
}
self.logs[func_name][result][iteration]['call'][lineno][call_name]['lines'][call_lineno]['str'] = ' '.join(line_split[3:]).rstrip("\n")
elif line_split[0] == "<":
event = [1, 6, func_name, result, iteration, 'lines', lineno, "locals"]
self._read_locals_(
' '.join(line_split[1:]).replace("\n", "").replace("=", ":"),
event
)
elif line_split[0] == ">":
event = None
expl = True
self.logs[func_name][result][iteration]['result'] = {
"lineno" : lineno,
"expl" : ' '.join(line_split[1:]).rstrip("\n"),
"str" : self.logs[func_name][result][iteration]['lines'][lineno]['str']
}
elif event is not None:
if event[1] == -1:
self._read_returns(
line,
event
)
else:
self._read_locals_(
line,
event
)
elif line.replace("\n", "") == '====================':
iteration += 1
expl = False
event = None
elif expl:
self.logs[func_name][result][iteration]['result']['expl'] += line
except Exception as e:
self.err = ('Reading logs exception, \"%s\"' % e)
return
self.t_lock.acquire()
self.iteration_info[func_name][result] = iteration
self.t_lock.release()
def _init_log_structure(self)->dict:
structure = dict()
return structure
```
|
{
"source": "jesper-olsen/PChess",
"score": 2
}
|
#### File: jesper-olsen/PChess/chesslib.py
```python
openings="""
#Spansk_v1.
e2e4 e7e5
g1f3 b8c6
f1b5 a7a6
b5a4 g8f6
e1g1 f8e7
f1e1 b7b5
a4b3 d7d6
c2c3 e8g8
-
#Spansk_v2.
e2e4 e7e5
g1f3 b8c6
f1b5 a7a6
b5a4 g8f6
e1g1 f6e4
-
#Spansk_v3.
e2e4 e7e5
g1f3 b8c6
f1b5 a7a6
b5c6 d7c6
-
#Philidors_Forsvar_v1.
e2e4 e7e5
g1f3 d7d6
d2d4 e5d4
-
#Philidors_Forsvar_v2.
e2e4 e7e5
g1f3 d7d6
d2d4 b8d7
-
#Fransk.
e2e4 e7e6
d2d4 d7d5
b1c3 g8f6
g1f3 f8e7
-
#Caro-Kann.
e2e4 c7c6
d2d4 d7d5
b1c3 d5e4
c3e4 c8f5
-
#Siciliansk.
e2e4 c7c5
g1f3 d7d6
d2d4 c5d4
f3d4 g8f6
-
#Dronninggambit.
d2d4 d7d5
c2c4 e7e6
b1c3 g8f6
c1g5 f8e7
-
#Nimzo-Indisk.
d2d4 g8f6
c2c4 e7e6
b1c3 f8b4
d1c2 b8c6
-
#Dronningeindisk.
d2d4 g8f6
c2c4 e7e6
g1f3 b7b6
g2g3 c8b7
f1g2 f8e7
.
"""
def read_openings():
seq=[]
for line in openings.split('\n'):
if line=='' or line[0]=="#": continue
if line[0] in "-." and seq!=[]:
yield seq
seq=[]
else:
seq+=[((w[0],w[1]),(w[2],w[3])) for w in line.split()]
if __name__=="__main__":
for i,seq in enumerate(read_openings()):
print("Opening",i)
for mv in seq:
print("{}->{}".format(mv[0], mv[1]))
```
|
{
"source": "jesper-raemaekers/polarion-test-result-doc",
"score": 3
}
|
#### File: jesper-raemaekers/polarion-test-result-doc/polarion_helpers.py
```python
from polarion.polarion import Polarion
import progressbar
import logging
def getTestRuns(polarion_config):
test_runs = {}
try:
pol = Polarion(polarion_config['url'], polarion_config['username'], polarion_config['password'])
except Exception as e:
logging.error(f"Connection to polarion ({polarion_config['url']} {polarion_config['username']}) failed with the following error: {e}")
print(f"Connection to polarion ({polarion_config['url']} {polarion_config['username']}) failed with the following error: {e}")
try:
project = pol.getProject(polarion_config['project'])
except Exception as e:
logging.error(f"Opening the project {polarion_config[' project ']} failed with the following error: {e}")
print(f"Opening the project {polarion_config[' project ']} failed with the following error: {e}")
print(f'Loading test runs from Polarion')
records_sum = 0
for run in progressbar.progressbar(polarion_config['test_runs'], redirect_stdout=True):
try:
test_runs[run] = project.getTestRun(run)
records_sum += len(test_runs[run].records)
except Exception as e:
logging.error(f'Opening test run {run} failed with the following error: {e}')
print(f'Opening test run {run} failed with the following error: {e}')
logging.info(f"Found {records_sum} test records in polarion test runs: {polarion_config['test_runs']}")
return test_runs
```
|
{
"source": "jesper-raemaekers/python-polarion",
"score": 3
}
|
#### File: python-polarion/polarion/record.py
```python
from enum import Enum
from .factory import createFromUri
import os
import requests
class Record(object):
"""
    Create a Polarion test record.
:param polarion: Polarion client object
:param test_run: Test run instance
:param polarion_record: The data from Polarion of this testrun
:param index: The index of this record in the test run
"""
class ResultType(Enum):
"""
Record result enum
"""
No = None
PASSED = 'passed'
FAILED = 'failed'
BLOCKED = 'blocked'
def __init__(self, polarion, test_run, polarion_record, index):
self._polarion = polarion
self._test_run = test_run
self._polarion_record = polarion_record
self._index = index
self._buildWorkitemFromPolarion()
def _buildWorkitemFromPolarion(self):
# parse all polarion attributes to this class
for attr, value in self._polarion_record.__dict__.items():
for key in value:
setattr(self, key, value[key])
self._testcase = self._polarion_record.testCaseURI
self._testcase_name = self._testcase.split('}')[1]
self._defect = self._polarion_record.defectURI
def _reloadFromPolarion(self):
service = self._polarion.getService('TestManagement')
self._polarion_record = service.getTestCaseRecords(self._test_run.uri, self._testcase)[0]
self._buildWorkitemFromPolarion()
# self._original_polarion_test_run = copy.deepcopy(self._polarion_test_run)
def setTestStepResult(self, step_number, result: ResultType, comment=None):
""""
Set the result of a test step
:param step_number: Step number
:param result: The result fo the test step
:param comment: An optional comment
"""
if self.testStepResults is None:
            # get the number of test steps in the test case
service = self._polarion.getService('TestManagement')
test_steps = service.getTestSteps(self.testCaseURI)
number_of_steps = 0
if test_steps.steps is not None:
number_of_steps = len(test_steps.steps.TestStep)
self.testStepResults = self._polarion.ArrayOfTestStepResultType()
for _i in range(number_of_steps):
self.testStepResults.TestStepResult.append(
self._polarion.TestStepResultType())
if step_number < len(self.testStepResults.TestStepResult):
self.testStepResults.TestStepResult[step_number].result = self._polarion.EnumOptionIdType(
id=result.value)
if comment is not None:
self.testStepResults.TestStepResult[step_number].comment = self._polarion.TextType(
content=comment, type='text/html', contentLossy=False)
self.save()
def getResult(self):
"""
Get the test result of this record
:return: The test case result
:rtype: ResultType
"""
if self.result is not None:
return self.ResultType(self.result.id)
return self.ResultType.No
def getComment(self):
"""
Get a comment if available. The comment may contain HTML if edited in Polarion!
:return: Get the comment, may contain HTML
:rtype: string
"""
if self.comment is not None:
return self.comment.content
return None
@property
def testcase_id(self):
"""
The test case name including prefix
"""
return self._testcase_name
def getTestCaseName(self):
"""
Get the test case name including prefix
:return: The name
:rtype: string
"""
return self._testcase_name
def setComment(self, comment):
"""
        Set the comment of this test record.
:param comment: Comment string, may contain HTML
"""
self.comment = self._polarion.TextType(
content=comment, type='text/html', contentLossy=False)
def setResult(self, result: ResultType = ResultType.FAILED, comment=None):
"""
Set the result of this record and save it.
:param result: The result of this record
:param comment: Comment string, may contain HTML
"""
if comment is not None:
self.setComment(comment)
if self.result is not None:
self.result.id = result.value
else:
self.result = self._polarion.EnumOptionIdType(
id=result.value)
self.save()
def getExecutingUser(self):
"""
Gets the executing user if the test was executed
:return: The user
:rtype: User/None
"""
if self.executedByURI is not None:
return createFromUri(self._polarion, None, self.executedByURI)
return None
def hasAttachment(self):
"""
Checks if the Record has attachments
:return: True/False
:rtype: boolean
"""
if self.attachments is not None:
return True
return False
def getAttachment(self, file_name):
"""
Get the attachment data
:param file_name: The attachment file name
:return: list of bytes
:rtype: bytes[]
"""
# find the file
url = None
for attachment in self.attachments.TestRunAttachment:
if attachment.fileName == file_name:
url = attachment.url
if url is not None:
resp = requests.get(url, auth=(self._polarion.user, self._polarion.password))
if resp.ok:
return resp.content
else:
raise Exception(f'Could not download attachment {file_name}')
else:
raise Exception(f'Could not find attachment with name {file_name}')
def saveAttachmentAsFile(self, file_name, file_path):
"""
Save an attachment to file.
:param file_name: The attachment file name
:param file_path: File where to save the attachment
"""
        data = self.getAttachment(file_name)
        with open(file_path, "wb") as file:
            file.write(data)
def deleteAttachment(self, file_name):
"""
Delete an attachment.
:param file_name: The attachment file name
"""
service = self._polarion.getService('TestManagement')
service.deleteAttachmentFromTestRecord(self._test_run.uri, self._index, file_name)
self._reloadFromPolarion()
def addAttachment(self, file_path, title):
"""
Upload an attachment
:param file_path: Source file to upload
:param title: The title of the attachment
"""
service = self._polarion.getService('TestManagement')
file_name = os.path.split(file_path)[1]
with open(file_path, "rb") as file_content:
service.addAttachmentToTestRecord(self._test_run.uri, self._index, file_name, title, file_content.read())
self._reloadFromPolarion()
def testStepHasAttachment(self, step_index):
"""
        Checks if a test step has attachments
:param step_index: The test step index
:return: True/False
:rtype: boolean
"""
if self.testStepResults is None:
return False
if self.testStepResults.TestStepResult[step_index].attachments is not None:
return True
return False
def getAttachmentFromTestStep(self, step_index, file_name):
"""
Get the attachment data from a test step
:param step_index: The test step index
:param file_name: The attachment file name
:return: list of bytes
:rtype: bytes[]
"""
# find the file
url = None
for attachment in self.testStepResults.TestStepResult[step_index].attachments.TestRunAttachment:
if attachment.fileName == file_name:
url = attachment.url
if url is not None:
resp = requests.get(url, auth=(self._polarion.user, self._polarion.password))
if resp.ok:
return resp.content
else:
raise Exception(f'Could not download attachment {file_name}')
else:
raise Exception(f'Could not find attachment with name {file_name}')
def saveAttachmentFromTestStepAsFile(self, step_index, file_name, file_path):
"""
Save an attachment to file from a test step
:param step_index: The test step index
:param file_name: The attachment file name
:param file_path: File where to save the attachment
"""
        data = self.getAttachmentFromTestStep(step_index, file_name)
        with open(file_path, "wb") as file:
            file.write(data)
def deleteAttachmentFromTestStep(self, step_index, file_name):
"""
Delete an attachment from a test step
:param step_index: The test step index
:param file_name: The attachment file name
"""
service = self._polarion.getService('TestManagement')
service.deleteAttachmentFromTestStep(self._test_run.uri, self._index, step_index, file_name)
self._reloadFromPolarion()
def addAttachmentToTestStep(self, step_index, file_path, title):
"""
Upload an attachment to a test step
:param step_index: The test step index
:param file_path: Source file to upload
:param title: The title of the attachment
"""
service = self._polarion.getService('TestManagement')
file_name = os.path.split(file_path)[1]
with open(file_path, "rb") as file_content:
service.addAttachmentToTestStep(self._test_run.uri, self._index, step_index, file_name, title, file_content.read())
self._reloadFromPolarion()
def save(self):
"""
Saves the current test record
"""
new_item = {}
for attr, value in self.__dict__.items():
if not attr.startswith('_'):
# only add if public value
new_item[attr] = value
service = self._polarion.getService('TestManagement')
service.executeTest(
self._test_run.uri, new_item)
self._reloadFromPolarion()
def __repr__(self):
return f'{self._testcase_name} in {self._test_run.id} ({self.getResult()} on {self.executed})'
def __str__(self):
return f'{self._testcase_name} in {self._test_run.id} ({self.getResult()} on {self.executed})'
```
#### File: python-polarion/tests/test_polarion_client.py
```python
import unittest
from polarion.polarion import Polarion
from keys import polarion_user, polarion_password, polarion_url
from time import sleep
import mock
class TestPolarionClient(unittest.TestCase):
def test_wrong_url(self):
url = polarion_url + '/wrong'
self.assertRaises(Exception, Polarion.__init__, url,
polarion_user, polarion_password)
def test_wrong_user(self):
user = polarion_user + 'incorrect'
self.assertRaises(Exception, Polarion.__init__, polarion_url,
user, polarion_password)
def test_wrong_password(self):
password = polarion_password + 'incorrect'
self.assertRaises(Exception, Polarion.__init__, polarion_url,
polarion_user, password)
def test_available_services_static(self):
known_services = ['Session', 'Project', 'Tracker',
'Builder', 'Planning', 'TestManagement', 'Security']
pol = Polarion(polarion_url, polarion_user,
polarion_password, static_service_list=True)
for service in known_services:
self.assertTrue(pol.hasService(service),
msg='Service should exist')
self.assertFalse(pol.hasService('made_up'),
msg='Service should not exist')
def test_available_services(self):
known_services = ['Session', 'Project', 'Tracker',
'Builder', 'Planning', 'TestManagement', 'Security']
pol = Polarion(polarion_url, polarion_user, polarion_password)
for service in known_services:
self.assertTrue(pol.hasService(service),
msg='Service should exist')
self.assertFalse(pol.hasService('made_up'),
msg='Service should not exist')
def test_services(self):
known_services = ['Tracker', 'TestManagement']
pol = Polarion(polarion_url, polarion_user, polarion_password)
for service in known_services:
s = pol.getService(service)
# print(s)
self.assertGreater(len(s.__dir__()), 10)
def test_types(self):
pol = Polarion(polarion_url, polarion_user, polarion_password)
self.assertIn('EnumOptionId', str(type(pol.EnumOptionIdType)))
self.assertIn('Text', str(type(pol.TextType)))
self.assertIn('ArrayOfTestStepResult',
str(type(pol.ArrayOfTestStepResultType)))
self.assertIn('TestStepResult', str(type(pol.TestStepResultType)))
def test_type_wrong_service(self):
pol = Polarion(polarion_url, polarion_user, polarion_password)
self.assertRaises(Exception, pol.getTypeFromService, 'made_up',
'dont care')
def test_string(self):
pol = Polarion(polarion_url, polarion_user, polarion_password)
self.assertIn(polarion_url, pol.__str__())
self.assertIn(polarion_user, pol.__str__())
self.assertIn(polarion_url, pol.__repr__())
self.assertIn(polarion_user, pol.__repr__())
@mock.patch('polarion.project.Project.__init__')
def test_project_creation(self, mock_project):
mock_project.return_value = None
pol = Polarion(polarion_url, polarion_user, polarion_password)
project = pol.getProject('Random_id')
mock_project.assert_called_with(pol, 'Random_id')
project = pol.getProject('other_project')
mock_project.assert_called_with(pol, 'other_project')
```
|
{
"source": "jesperrix/rixstribute",
"score": 3
}
|
#### File: rixstribute/rixtribute/ssh.py
```python
import os
import shlex
import subprocess
from typing import List, Optional
import tempfile
def generate_scp_command(source_files :List[str],
destination :str,
recursive :bool=False,
port :int=22,
key_path :str=None,
skip_host_check :bool=False,
no_verbose :bool=True):
# TODO support for more custom args
# If key is specified
if key_path:
cmd = ['scp', '-i', key_path]
else:
cmd = ['scp']
if port != 22:
cmd = cmd + ['-P', str(port)]  # scp takes the port as uppercase -P (lowercase -p preserves file times)
if recursive is True:
cmd = cmd + ['-r']
if skip_host_check is True:
cmd = cmd + ['-o', 'StrictHostKeyChecking=no']
if no_verbose is True:
cmd = cmd + ['-q']
# Add source
cmd = cmd + source_files
# add dest
cmd = cmd + [destination]
return cmd
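# Example of the list this builder produces (hypothetical paths/host):
# generate_scp_command(['a.txt'], 'user@host:/tmp', key_path='key.pem', skip_host_check=True)
# -> ['scp', '-i', 'key.pem', '-o', 'StrictHostKeyChecking=no', '-q', 'a.txt', 'user@host:/tmp']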
def generate_ssh_command(host :str,
user :str,
port :int=22,
key_path :str=None,
command :str=None,
skip_host_check :bool=False,
no_verbose :bool=True):
# If key is specified
if key_path:
cmd = ['ssh', '-ti', key_path]
else:
cmd = ['ssh', '-t']
if skip_host_check is True:
cmd = cmd + ['-o', 'StrictHostKeyChecking=no']
if port != 22:
cmd = cmd + ['-p', str(port)]
if no_verbose is True:
cmd = cmd + ['-q']
# TODO support for more custom args
cmd = cmd + [f'{user}@{host}']
if command:
cmd.append(f"'{command}'")
return cmd
def attach_tmux_session_and_run_command(session_name :str, command :str) -> str:
# -s = session name, -n = window name
tmux_init_cmd = f'tmux new-session -d -s {session_name} -n main'
# tmux select-window -t "$SESSION":model \;
# tmux attach-session -t "$SESSION" \;
tmux_create_cmd = (
f'if ! tmux has-session -t {session_name} > /dev/null 2>&1; then '
f'{tmux_init_cmd}; '
f'fi'
)
tmux_run_command = (
f'tmux send-keys -t "{session_name}":main \"{command}\" Enter \; '
)
tmux_attach_cmd = f"tmux attach-session -t {session_name}"
tmux_command = f"{tmux_create_cmd} && {tmux_run_command}"
return tmux_command
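# For session 'work' and command 'make' (hypothetical values) the returned shell string is:
# if ! tmux has-session -t work > /dev/null 2>&1; then tmux new-session -d -s work -n main; fi
# && tmux send-keys -t "work":main "make" Enter \;
# Note that tmux_attach_cmd is built above but is not part of the returned string.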
def ssh_command_tmux(host :str,
user :str,
command :str,
port :int=22,
key_path :str=None,
key_str :str=None):
remote_cmd = attach_tmux_session_and_run_command("automated-session", command)
if key_str is not None:
f = tempfile.NamedTemporaryFile(suffix='_temp', prefix='rxtb_', delete=True)
f.write(key_str.encode("utf8"))
f.flush()
key_path = f.name
cmd = generate_ssh_command(host=host,
user=user,
port=port,
key_path=key_path,
skip_host_check=True,
command=remote_cmd)
subprocess.call(" ".join(cmd), shell=True)
def ssh_command(host :str,
user :str,
command :Optional[str],
port :int=22,
key :str=None,
print_output :bool=False):
cmd = generate_ssh_command(host=host,
user=user,
port=port,
key_path=key,
skip_host_check=True,
command=command)
cmd_str = ' '.join(cmd)
rc, output = subprocess.getstatusoutput(cmd_str)
if rc != 0:
print(output)
return False
if print_output:
print(f"\n{output}")
return True
# subprocess.check_output(cmd)
def scp(source :List[str],
dest :str,
recursive :bool=False,
port :int=22,
key_path :str=None,
key_str :str=None) -> bool:
if key_str is not None:
f = tempfile.NamedTemporaryFile(suffix='_temp', prefix='rxtb_', delete=True)
f.write(key_str.encode("utf8"))
f.flush()
key_path = f.name
# Generate scp command
cmd = generate_scp_command(source_files=source,
destination=dest,
recursive=recursive,
port=port,
key_path=key_path,
skip_host_check=True)
cmd_str = ' '.join(cmd)
rc, output = subprocess.getstatusoutput(cmd_str)
if rc != 0:
print(output)
return False
return True
def ssh(host :str, user :str, port :int=22, key_path :str=None, key_str :str=None):
print(f"SSHing into: {host}")
if key_str is not None:
f = tempfile.NamedTemporaryFile(suffix='_temp', prefix='rxtb_', delete=True)
f.write(key_str.encode("utf8"))
f.flush()
key_path = f.name
command = generate_ssh_command(host=host, user=user, command=None, port=port, key_path=key_path)
ssh = subprocess.Popen(' '.join(command), shell=True, env=os.environ)
# WORKING FOR INITIAL CONNECT
ssh.wait()
if __name__ == "__main__":
host = "ec2-52-214-34-243.eu-west-1.compute.amazonaws.com"
# cmd = ssh_command(host, 22, 'ec2-user', '/home/jri/.ssh/jesper_ssh.pem', 'ls -la')
# subprocess.check_output(cmd)
ssh(host, 'ec2-user', 22, '/home/jri/.ssh/jesper_ssh.pem')
# import datetime
# cmd = f'sleep 2; echo "{str(datetime.datetime.now())}" >> /tmp/testing/hello.txt'
# run_remote_command(host, 22, 'ec2-user', key='/home/jri/.ssh/jesper_ssh.pem', command=cmd)
```
|
{
"source": "jespersundin/pandera",
"score": 3
}
|
#### File: pandera/pandera/schema_components.py
```python
from copy import copy
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import pandas as pd
from . import errors
from .dtypes import PandasDtype
from .schemas import (
CheckList,
DataFrameSchema,
PandasDtypeInputTypes,
SeriesSchemaBase,
)
def _is_valid_multiindex_tuple_str(x: Tuple[Any, ...]) -> bool:
"""Check that a multi-index tuple key has all string elements"""
return isinstance(x, tuple) and all(isinstance(i, str) for i in x)
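# Examples: ("a", "b") -> True; ("a", 1) -> False; "a" -> False (not a tuple)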
class Column(SeriesSchemaBase):
"""Validate types and properties of DataFrame columns."""
def __init__(
self,
pandas_dtype: PandasDtypeInputTypes = None,
checks: CheckList = None,
nullable: bool = False,
allow_duplicates: bool = True,
coerce: bool = False,
required: bool = True,
name: str = None,
regex: bool = False,
) -> None:
"""Create column validator object.
:param pandas_dtype: datatype of the column. A ``PandasDtype`` for
type-checking dataframe. If a string is specified, then assumes
one of the valid pandas string values:
http://pandas.pydata.org/pandas-docs/stable/basics.html#dtypes
:param checks: checks to verify validity of the column
:param nullable: Whether or not column can contain null values.
:param allow_duplicates: Whether or not column can contain duplicate
values.
:param coerce: If True, when schema.validate is called the column will
be coerced into the specified dtype.
:param required: Whether or not column is allowed to be missing
:param name: column name in dataframe to validate.
:param regex: whether the ``name`` attribute should be treated as a
regex pattern to apply to multiple columns in a dataframe.
:raises SchemaInitError: if impossible to build schema from parameters
:example:
>>> import pandas as pd
>>> import pandera as pa
>>>
>>>
>>> schema = pa.DataFrameSchema({
... "column": pa.Column(pa.String)
... })
>>>
>>> schema.validate(pd.DataFrame({"column": ["foo", "bar"]}))
column
0 foo
1 bar
See :ref:`here<column>` for more usage details.
"""
super().__init__(
pandas_dtype, checks, nullable, allow_duplicates, coerce
)
if (
name is not None
and not isinstance(name, str)
and not _is_valid_multiindex_tuple_str(name)
and regex
):
raise ValueError(
"You cannot specify a non-string name when setting regex=True"
)
self.required = required
self._name = name
self._regex = regex
if coerce and self._pandas_dtype is None:
raise errors.SchemaInitError(
"Must specify dtype if coercing a Column's type"
)
@property
def regex(self) -> bool:
"""True if ``name`` attribute should be treated as a regex pattern."""
return self._regex
@property
def _allow_groupby(self) -> bool:
"""Whether the schema or schema component allows groupby operations."""
return True
@property
def properties(self) -> Dict[str, Any]:
"""Get column properties."""
return {
"pandas_dtype": self._pandas_dtype,
"checks": self._checks,
"nullable": self._nullable,
"allow_duplicates": self._allow_duplicates,
"coerce": self._coerce,
"required": self.required,
"name": self._name,
"regex": self._regex,
}
def set_name(self, name: str):
"""Used to set or modify the name of a column object.
:param str name: the name of the column object
"""
if (
not isinstance(name, str)
and not _is_valid_multiindex_tuple_str(name)
and self.regex
):
raise ValueError(
"You cannot specify a non-string name when setting regex=True"
)
self._name = name
return self
def validate(
self,
check_obj: pd.DataFrame,
head: Optional[int] = None,
tail: Optional[int] = None,
sample: Optional[int] = None,
random_state: Optional[int] = None,
lazy: bool = False,
inplace: bool = False,
) -> pd.DataFrame:
"""Validate a Column in a DataFrame object.
:param check_obj: pandas DataFrame to validate.
:param head: validate the first n rows. Rows overlapping with `tail` or
`sample` are de-duplicated.
:param tail: validate the last n rows. Rows overlapping with `head` or
`sample` are de-duplicated.
:param sample: validate a random sample of n rows. Rows overlapping
with `head` or `tail` are de-duplicated.
:param random_state: random seed for the ``sample`` argument.
:param lazy: if True, lazily evaluates dataframe against all validation
checks and raises a ``SchemaErrors``. Otherwise, raise
``SchemaError`` as soon as one occurs.
:param inplace: if True, applies coercion to the object of validation,
otherwise creates a copy of the data.
:returns: validated DataFrame.
"""
if not inplace:
check_obj = check_obj.copy()
if self._name is None:
raise errors.SchemaError(
self,
check_obj,
"column name is set to None. Pass the ``name` argument when "
"initializing a Column object, or use the ``set_name`` "
"method.",
)
column_keys_to_check = (
self.get_regex_columns(check_obj.columns)
if self._regex
else [self._name]
)
check_results = []
for column_name in column_keys_to_check:
if self.coerce:
check_obj[column_name] = self.coerce_dtype(
check_obj[column_name]
)
check_results.append(
isinstance(
super(Column, copy(self).set_name(column_name)).validate(
check_obj, head, tail, sample, random_state, lazy
),
pd.DataFrame,
)
)
assert all(check_results)
return check_obj
def get_regex_columns(
self, columns: Union[pd.Index, pd.MultiIndex]
) -> Union[pd.Index, pd.MultiIndex]:
"""Get matching column names based on regex column name pattern.
:param columns: columns to regex pattern match
:returns: matching columns
"""
if isinstance(self.name, tuple):
# handle MultiIndex case
if len(self.name) != columns.nlevels:
raise IndexError(
"Column regex name='%s' is a tuple, expected a MultiIndex "
"columns with %d number of levels, found %d level(s)"
% (self.name, len(self.name), columns.nlevels)
)
matches = np.ones(len(columns)).astype(bool)
for i, name in enumerate(self.name):
matched = pd.Index(
columns.get_level_values(i).str.match(name)
).fillna(False)
matches = matches & np.array(matched.tolist())
column_keys_to_check = columns[matches]
else:
if isinstance(columns, pd.MultiIndex):
raise IndexError(
"Column regex name %s is a string, expected a dataframe "
"where the index is a pd.Index object, not a "
"pd.MultiIndex object" % (self.name)
)
column_keys_to_check = columns[
# str.match will return nan values when the index value is
# not a string.
pd.Index(columns.str.match(self.name))
.fillna(False)
.tolist()
]
if column_keys_to_check.shape[0] == 0:
raise errors.SchemaError(
self,
columns,
"Column regex name='%s' did not match any columns in the "
"dataframe. Update the regex pattern so that it matches at "
"least one column:\n%s" % (self.name, columns.tolist()),
)
return column_keys_to_check
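# Illustration (hypothetical column names): a Column with name="num_.+" and regex=True
# matched against pd.Index(["num_a", "num_b", "other"]) returns Index(["num_a", "num_b"]),
# since str.match is applied to every column label.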
def __repr__(self):
if isinstance(self._pandas_dtype, PandasDtype):
dtype = self._pandas_dtype.value
else:
dtype = self._pandas_dtype
return f"<Schema Column: '{self._name}' type={dtype}>"
def __eq__(self, other):
def _compare_dict(obj):
return {
k: v if k != "_checks" else set(v)
for k, v in obj.__dict__.items()
}
return _compare_dict(self) == _compare_dict(other)
class Index(SeriesSchemaBase):
"""Validate types and properties of a DataFrame Index."""
def coerce_dtype(self, series_or_index: pd.Index) -> pd.Index:
"""Coerce type of a pd.Index by type specified in pandas_dtype.
:param pd.Index series: One-dimensional ndarray with axis labels
(including time series).
:returns: ``Index`` with coerced data type
"""
if self._pandas_dtype is PandasDtype.Str:
# only coerce non-null elements to string
return series_or_index.where(
series_or_index.isna(), series_or_index.astype(str)
)
return series_or_index.astype(self.dtype)
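# Sketch of the Str special case (hypothetical data): pd.Index([1.0, None]) coerces to
# Index(['1.0', None]) because only the non-null elements are cast; a plain astype(str)
# would also stringify the missing values.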
@property
def _allow_groupby(self) -> bool:
"""Whether the schema or schema component allows groupby operations."""
return False
def validate(
self,
check_obj: Union[pd.DataFrame, pd.Series],
head: Optional[int] = None,
tail: Optional[int] = None,
sample: Optional[int] = None,
random_state: Optional[int] = None,
lazy: bool = False,
inplace: bool = False,
) -> Union[pd.DataFrame, pd.Series]:
"""Validate DataFrameSchema or SeriesSchema Index.
:param check_obj: pandas DataFrame or Series containing the index to validate.
:param head: validate the first n rows. Rows overlapping with `tail` or
`sample` are de-duplicated.
:param tail: validate the last n rows. Rows overlapping with `head` or
`sample` are de-duplicated.
:param sample: validate a random sample of n rows. Rows overlapping
with `head` or `tail` are de-duplicated.
:param random_state: random seed for the ``sample`` argument.
:param lazy: if True, lazily evaluates dataframe against all validation
checks and raises a ``SchemaErrors``. Otherwise, raise
``SchemaError`` as soon as one occurs.
:param inplace: if True, applies coercion to the object of validation,
otherwise creates a copy of the data.
:returns: validated DataFrame or Series.
"""
if self.coerce:
check_obj.index = self.coerce_dtype(check_obj.index)
# handles case where pandas native string type is not supported
# by index.
obj_to_validate = pd.Series(check_obj.index).astype(self.dtype)
else:
obj_to_validate = pd.Series(check_obj.index)
assert isinstance(
super().validate(
obj_to_validate,
head,
tail,
sample,
random_state,
lazy,
inplace,
),
pd.Series,
)
return check_obj
def __repr__(self):
if self._name is None:
return "<Schema Index>"
return f"<Schema Index: '{self._name}'>"
def __eq__(self, other):
return self.__dict__ == other.__dict__
class MultiIndex(DataFrameSchema):
"""Validate types and properties of a DataFrame MultiIndex.
Because `MultiIndex.__call__` converts the index to a dataframe via
`to_frame()`, each index is treated as a series and it makes sense to
inherit the `__call__` and `validate` methods from DataFrameSchema.
"""
def __init__(
self,
indexes: List[Index],
coerce: bool = False,
strict: bool = False,
name: str = None,
) -> None:
"""Create MultiIndex validator.
:param indexes: list of Index validators for each level of the
MultiIndex index.
:param coerce: Whether or not to coerce the MultiIndex to the
specified pandas_dtypes before validation
:param strict: whether or not to accept columns in the MultiIndex that
aren't defined in the ``indexes`` argument.
:param name: name of schema component
:example:
>>> import pandas as pd
>>> import pandera as pa
>>>
>>>
>>> schema = pa.DataFrameSchema(
... columns={"column": pa.Column(pa.Int)},
... index=pa.MultiIndex([
... pa.Index(pa.String,
... pa.Check(lambda s: s.isin(["foo", "bar"])),
... name="index0"),
... pa.Index(pa.Int, name="index1"),
... ])
... )
>>>
>>> df = pd.DataFrame(
... data={"column": [1, 2, 3]},
... index=pd.MultiIndex.from_arrays(
... [["foo", "bar", "foo"], [0, 1, 2]],
... names=["index0", "index1"],
... )
... )
>>>
>>> schema.validate(df)
column
index0 index1
foo 0 1
bar 1 2
foo 2 3
See :ref:`here<multiindex>` for more usage details.
"""
self.indexes = indexes
super().__init__(
columns={
i
if index._name is None
else index._name: Column(
pandas_dtype=index._pandas_dtype,
checks=index.checks,
nullable=index._nullable,
allow_duplicates=index._allow_duplicates,
)
for i, index in enumerate(indexes)
},
coerce=coerce,
strict=strict,
name=name,
)
@property
def coerce(self):
return self._coerce or any(index.coerce for index in self.indexes)
def coerce_dtype(self, multi_index: pd.MultiIndex) -> pd.MultiIndex:
"""Coerce type of a pd.Series by type specified in pandas_dtype.
:param multi_index: multi-index to coerce.
:returns: ``MultiIndex`` with coerced data type
"""
_coerced_multi_index = []
if multi_index.nlevels != len(self.indexes):
raise errors.SchemaError(
self,
multi_index,
"multi_index does not have equal number of levels as "
"MultiIndex schema %d != %d."
% (multi_index.nlevels, len(self.indexes)),
)
for level_i, index in enumerate(self.indexes):
index_array = multi_index.get_level_values(level_i)
if index.coerce or self.coerce:
index_array = index.coerce_dtype(index_array)
_coerced_multi_index.append(index_array)
return pd.MultiIndex.from_arrays(
_coerced_multi_index, names=multi_index.names
)
def validate(
self,
check_obj: Union[pd.DataFrame, pd.Series],
head: Optional[int] = None,
tail: Optional[int] = None,
sample: Optional[int] = None,
random_state: Optional[int] = None,
lazy: bool = False,
inplace: bool = False,
) -> Union[pd.DataFrame, pd.Series]:
"""Validate DataFrame or Series MultiIndex.
:param check_obj: pandas DataFrame or Series to validate.
:param head: validate the first n rows. Rows overlapping with `tail` or
`sample` are de-duplicated.
:param tail: validate the last n rows. Rows overlapping with `head` or
`sample` are de-duplicated.
:param sample: validate a random sample of n rows. Rows overlapping
with `head` or `tail` are de-duplicated.
:param random_state: random seed for the ``sample`` argument.
:param lazy: if True, lazily evaluates dataframe against all validation
checks and raises a ``SchemaErrors``. Otherwise, raise
``SchemaError`` as soon as one occurs.
:param inplace: if True, applies coercion to the object of validation,
otherwise creates a copy of the data.
:returns: validated DataFrame or Series.
"""
if self.coerce:
check_obj.index = self.coerce_dtype(check_obj.index)
try:
validation_result = super().validate(
check_obj.index.to_frame(),
head,
tail,
sample,
random_state,
lazy,
inplace,
)
except errors.SchemaErrors as err:
# This is a hack to re-raise the SchemaErrors exception and change
# the schema context to MultiIndex. This should be fixed with
# a more principled schema class hierarchy.
schema_error_dicts = []
# pylint: disable=protected-access
for schema_error_dict in err._schema_error_dicts:
error = schema_error_dict["error"]
error = errors.SchemaError(
self,
check_obj,
error.args[0],
error.failure_cases.assign(column=error.schema.name),
error.check,
error.check_index,
)
schema_error_dict["error"] = error
schema_error_dicts.append(schema_error_dict)
raise errors.SchemaErrors(schema_error_dicts, check_obj)
assert isinstance(validation_result, pd.DataFrame)
return check_obj
def __repr__(self):
return f"<Schema MultiIndex: '{list(self.columns)}'>"
def __eq__(self, other):
return self.__dict__ == other.__dict__
```
|
{
"source": "jesperswillem/protwis",
"score": 2
}
|
#### File: protwis/ligand/views.py
```python
from django.db.models import Count, Avg, Min, Max
from collections import defaultdict
from django.shortcuts import render
from django.http import HttpResponse
from django.views.generic import TemplateView, View
from common.models import ReleaseNotes
from common.phylogenetic_tree import PhylogeneticTreeGenerator
from common.selection import Selection, SelectionItem
from ligand.models import Ligand, AssayExperiment, LigandProperities, LigandVendorLink
from protein.models import Protein, Species, ProteinFamily
from copy import deepcopy
import itertools
import json
class LigandBrowser(TemplateView):
"""
Per target summary of ligands.
"""
template_name = 'ligand_browser.html'
def get_context_data (self, **kwargs):
context = super(LigandBrowser, self).get_context_data(**kwargs)
ligands = AssayExperiment.objects.values(
'protein__entry_name',
'protein__species__common_name',
'protein__family__name',
'protein__family__parent__name',
'protein__family__parent__parent__name',
'protein__family__parent__parent__parent__name',
'protein__species__common_name'
).annotate(num_ligands=Count('ligand', distinct=True))
context['ligands'] = ligands
return context
def LigandDetails(request, ligand_id):
"""
The details of a ligand record. Lists all the assay experiments for a given ligand.
"""
ligand_records = AssayExperiment.objects.filter(
ligand__properities__web_links__index=ligand_id
).order_by('protein__entry_name')
record_count = ligand_records.values(
'protein',
).annotate(num_records = Count('protein__entry_name')
).order_by('protein__entry_name')
ligand_data = []
for record in record_count:
per_target_data = ligand_records.filter(protein=record['protein'])
protein_details = Protein.objects.get(pk=record['protein'])
"""
A dictionary of dictionaries with a list of values.
Assay_type
|
-> Standard_type [list of values]
"""
tmp = defaultdict(lambda: defaultdict(list))
tmp_count = 0
for data_line in per_target_data:
tmp[data_line.assay_type][data_line.standard_type].append(data_line.standard_value)
tmp_count += 1
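# Example of the resulting structure (hypothetical values):
# tmp == {'b': {'IC50': [120.0, 45.0]}, 'f': {'EC50': [3.5]}}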
#Flattened list of lists of dict values
values = list(itertools.chain(*[itertools.chain(*tmp[x].values()) for x in tmp.keys()]))
ligand_data.append({
'protein_name': protein_details.entry_name,
'receptor_family': protein_details.family.parent.name,
'ligand_type': protein_details.get_protein_family(),
'class': protein_details.get_protein_class(),
'record_count': tmp_count,
'assay_type': ', '.join(tmp.keys()),
#Flattened list of lists of dict keys:
'value_types': ', '.join(itertools.chain(*(list(tmp[x]) for x in tmp.keys()))),
'low_value': min(values),
'average_value': sum(values)/len(values),
'standard_units': ', '.join(list(set([x.standard_units for x in per_target_data])))
})
context = {'ligand_data': ligand_data, 'ligand':ligand_id}
return render(request, 'ligand_details.html', context)
def TargetDetailsCompact(request, **kwargs):
if 'slug' in kwargs:
slug = kwargs['slug']
if slug.count('_') == 0 :
ps = AssayExperiment.objects.filter(protein__family__parent__parent__parent__slug=slug, ligand__properities__web_links__web_resource__slug = 'chembl_ligand')
elif slug.count('_') == 1 and len(slug) == 7:
ps = AssayExperiment.objects.filter(protein__family__parent__parent__slug=slug, ligand__properities__web_links__web_resource__slug = 'chembl_ligand')
elif slug.count('_') == 2:
ps = AssayExperiment.objects.filter(protein__family__parent__slug=slug, ligand__properities__web_links__web_resource__slug = 'chembl_ligand')
#elif slug.count('_') == 3:
elif slug.count('_') == 1 and len(slug) != 7:
ps = AssayExperiment.objects.filter(protein__entry_name = slug, ligand__properities__web_links__web_resource__slug = 'chembl_ligand')
if slug.count('_') == 1 and len(slug) == 7:
f = ProteinFamily.objects.get(slug=slug)
else:
f = slug
context = {
'target':f
}
else:
simple_selection = request.session.get('selection', False)
selection = Selection()
if simple_selection:
selection.importer(simple_selection)
if selection.targets != []:
prot_ids = [x.item.id for x in selection.targets]
ps = AssayExperiment.objects.filter(protein__in=prot_ids, ligand__properities__web_links__web_resource__slug = 'chembl_ligand')
context = {
'target': ', '.join([x.item.entry_name for x in selection.targets])
}
ps = ps.prefetch_related('protein','ligand__properities__web_links__web_resource','ligand__properities__vendors__vendor')
d = {}
for p in ps:
if p.ligand not in d:
d[p.ligand] = {}
if p.protein not in d[p.ligand]:
d[p.ligand][p.protein] = []
d[p.ligand][p.protein].append(p)
ligand_data = []
for lig, records in d.items():
links = lig.properities.web_links.all()
chembl_id = [x for x in links if x.web_resource.slug=='chembl_ligand'][0].index
vendors = lig.properities.vendors.all()
purchasability = 'No'
for v in vendors:
if v.vendor.name not in ['ZINC', 'ChEMBL', 'BindingDB', 'SureChEMBL', 'eMolecules', 'MolPort', 'PubChem']:
purchasability = 'Yes'
for record, vals in records.items():
per_target_data = vals
protein_details = record
"""
A dictionary of dictionaries with a list of values.
Assay_type
|
-> Standard_type [list of values]
"""
tmp = defaultdict(list)
tmp_count = 0
for data_line in per_target_data:
tmp["Bind" if data_line.assay_type == 'b' else "Funct"].append(data_line.pchembl_value)
tmp_count += 1
values = list(itertools.chain(*tmp.values()))
ligand_data.append({
'ligand_id': chembl_id,
'protein_name': protein_details.entry_name,
'species': protein_details.species.common_name,
'record_count': tmp_count,
'assay_type': ', '.join(tmp.keys()),
'purchasability': purchasability,
#Flattened list of lists of dict keys:
'low_value': min(values),
'average_value': sum(values)/len(values),
'high_value': max(values),
'standard_units': ', '.join(list(set([x.standard_units for x in per_target_data]))),
'smiles': lig.properities.smiles,
'mw': lig.properities.mw,
'rotatable_bonds': lig.properities.rotatable_bonds,
'hdon': lig.properities.hdon,
'hacc': lig.properities.hacc,
'logp': lig.properities.logp,
})
context['ligand_data'] = ligand_data
return render(request, 'target_details_compact.html', context)
def TargetDetails(request, **kwargs):
if 'slug' in kwargs:
slug = kwargs['slug']
if slug.count('_') == 0 :
ps = AssayExperiment.objects.filter(protein__family__parent__parent__parent__slug=slug, ligand__properities__web_links__web_resource__slug = 'chembl_ligand')
elif slug.count('_') == 1 and len(slug) == 7:
ps = AssayExperiment.objects.filter(protein__family__parent__parent__slug=slug, ligand__properities__web_links__web_resource__slug = 'chembl_ligand')
elif slug.count('_') == 2:
ps = AssayExperiment.objects.filter(protein__family__parent__slug=slug, ligand__properities__web_links__web_resource__slug = 'chembl_ligand')
#elif slug.count('_') == 3:
elif slug.count('_') == 1 and len(slug) != 7:
ps = AssayExperiment.objects.filter(protein__entry_name = slug, ligand__properities__web_links__web_resource__slug = 'chembl_ligand')
if slug.count('_') == 1 and len(slug) == 7:
f = ProteinFamily.objects.get(slug=slug)
else:
f = slug
context = {
'target':f
}
else:
simple_selection = request.session.get('selection', False)
selection = Selection()
if simple_selection:
selection.importer(simple_selection)
if selection.targets != []:
prot_ids = [x.item.id for x in selection.targets]
ps = AssayExperiment.objects.filter(protein__in=prot_ids, ligand__properities__web_links__web_resource__slug = 'chembl_ligand')
context = {
'target': ', '.join([x.item.entry_name for x in selection.targets])
}
ps = ps.values('standard_type',
'standard_relation',
'standard_value',
'assay_description',
'assay_type',
#'standard_units',
'pchembl_value',
'ligand__id',
'ligand__properities_id',
'ligand__properities__web_links__index',
#'ligand__properities__vendors__vendor__name',
'protein__species__common_name',
'protein__entry_name',
'ligand__properities__mw',
'ligand__properities__logp',
'ligand__properities__rotatable_bonds',
'ligand__properities__smiles',
'ligand__properities__hdon',
'ligand__properities__hacc','protein'
).annotate(num_targets = Count('protein__id', distinct=True))
for record in ps:
record['purchasability'] = 'Yes' if len(LigandVendorLink.objects.filter(lp=record['ligand__properities_id']).exclude(vendor__name__in=['ZINC', 'ChEMBL', 'BindingDB', 'SureChEMBL', 'eMolecules', 'MolPort', 'PubChem'])) > 0 else 'No'
context['proteins'] = ps
return render(request, 'target_details.html', context)
def TargetPurchasabilityDetails(request, **kwargs):
simple_selection = request.session.get('selection', False)
selection = Selection()
if simple_selection:
selection.importer(simple_selection)
if selection.targets != []:
prot_ids = [x.item.id for x in selection.targets]
ps = AssayExperiment.objects.filter(protein__in=prot_ids, ligand__properities__web_links__web_resource__slug = 'chembl_ligand')
context = {
'target': ', '.join([x.item.entry_name for x in selection.targets])
}
ps = ps.values('standard_type',
'standard_relation',
'standard_value',
'assay_description',
'assay_type',
'standard_units',
'pchembl_value',
'ligand__id',
'ligand__properities_id',
'ligand__properities__web_links__index',
'ligand__properities__vendors__vendor__id',
'ligand__properities__vendors__vendor__name',
'protein__species__common_name',
'protein__entry_name',
'ligand__properities__mw',
'ligand__properities__logp',
'ligand__properities__rotatable_bonds',
'ligand__properities__smiles',
'ligand__properities__hdon',
'ligand__properities__hacc','protein'
).annotate(num_targets = Count('protein__id', distinct=True))
purchasable = []
for record in ps:
try:
if record['ligand__properities__vendors__vendor__name'] in ['ZINC', 'ChEMBL', 'BindingDB', 'SureChEMBL', 'eMolecules', 'MolPort', 'PubChem', 'IUPHAR/BPS Guide to PHARMACOLOGY']:
continue
tmp = LigandVendorLink.objects.filter(vendor=record['ligand__properities__vendors__vendor__id'], lp=record['ligand__properities_id'])[0]
record['vendor_id'] = tmp.vendor_external_id
record['vendor_link'] = tmp.url
purchasable.append(record)
except:
continue
context['proteins'] = purchasable
return render(request, 'target_purchasability_details.html', context)
class LigandStatistics(TemplateView):
"""
Per class statistics of known ligands.
"""
template_name = 'ligand_statistics.html'
def get_context_data (self, **kwargs):
context = super().get_context_data(**kwargs)
assays = AssayExperiment.objects.all().prefetch_related('protein__family__parent__parent__parent', 'protein__family')
classes = ProteinFamily.objects.filter(slug__in=['001', '002', '003', '004', '005', '006']) #ugly but fast
proteins = Protein.objects.all().prefetch_related('family__parent__parent__parent')
ligands = []
for fam in classes:
lig_count = len(assays.filter(protein__family__parent__parent__parent=fam).distinct('ligand'))
prot_count = len(proteins.filter(family__parent__parent__parent=fam).distinct('family'))
target_count = len(assays.filter(protein__family__parent__parent__parent=fam).distinct('protein__family'))
ligands.append({
'name': fam.name,
'num_ligands': lig_count,
'avg_num_ligands': lig_count/prot_count,
'target_percentage': target_count/prot_count*100,
'target_count': target_count
})
lig_count_total = sum([x['num_ligands'] for x in ligands])
prot_count_total = len(proteins.distinct('family'))
target_count_total = sum([x['target_count'] for x in ligands])
lig_total = {
'num_ligands': lig_count_total,
'avg_num_ligands': lig_count_total/prot_count_total,
'target_percentage': target_count_total/prot_count_total*100,
'target_count': target_count_total
}
#Elegant solution but kinda slow (6s querries):
"""
ligands = AssayExperiment.objects.values(
'protein__family__parent__parent__parent__name',
'protein__family__parent__parent__parent',
).annotate(num_ligands=Count('ligand', distinct=True))
for prot_class in ligands:
class_subset = AssayExperiment.objects.filter(
id=prot_class['protein__family__parent__parent__parent']).values(
'protein').annotate(
avg_num_ligands=Avg('ligand', distinct=True),
p_count=Count('protein')
)
prot_class['avg_num_ligands']=class_subset[0]['avg_num_ligands']
prot_class['p_count']=class_subset[0]['p_count']
"""
context['ligands_total'] = lig_total
context['ligands_by_class'] = ligands
context['release_notes'] = ReleaseNotes.objects.all()[0]
tree = PhylogeneticTreeGenerator()
class_a_data = tree.get_tree_data(ProteinFamily.objects.get(name='Class A (Rhodopsin)'))
context['class_a_options'] = deepcopy(tree.d3_options)
context['class_a_options']['anchor'] = 'class_a'
context['class_a_options']['leaf_offset'] = 50
context['class_a_options']['label_free'] = []
context['class_a'] = json.dumps(class_a_data.get_nodes_dict('ligands'))
class_b1_data = tree.get_tree_data(ProteinFamily.objects.get(name__startswith='Class B1 (Secretin)'))
context['class_b1_options'] = deepcopy(tree.d3_options)
context['class_b1_options']['anchor'] = 'class_b1'
context['class_b1_options']['branch_trunc'] = 60
context['class_b1_options']['label_free'] = [1,]
context['class_b1'] = json.dumps(class_b1_data.get_nodes_dict('ligands'))
class_b2_data = tree.get_tree_data(ProteinFamily.objects.get(name__startswith='Class B2 (Adhesion)'))
context['class_b2_options'] = deepcopy(tree.d3_options)
context['class_b2_options']['anchor'] = 'class_b2'
context['class_b2_options']['label_free'] = [1,]
context['class_b2'] = json.dumps(class_b2_data.get_nodes_dict('ligands'))
class_c_data = tree.get_tree_data(ProteinFamily.objects.get(name__startswith='Class C (Glutamate)'))
context['class_c_options'] = deepcopy(tree.d3_options)
context['class_c_options']['anchor'] = 'class_c'
context['class_c_options']['branch_trunc'] = 50
context['class_c_options']['label_free'] = [1,]
context['class_c'] = json.dumps(class_c_data.get_nodes_dict('ligands'))
class_f_data = tree.get_tree_data(ProteinFamily.objects.get(name__startswith='Class F (Frizzled)'))
context['class_f_options'] = deepcopy(tree.d3_options)
context['class_f_options']['anchor'] = 'class_f'
context['class_f_options']['label_free'] = [1,]
context['class_f'] = json.dumps(class_f_data.get_nodes_dict('ligands'))
class_t2_data = tree.get_tree_data(ProteinFamily.objects.get(name='Taste 2'))
context['class_t2_options'] = deepcopy(tree.d3_options)
context['class_t2_options']['anchor'] = 'class_t2'
context['class_t2_options']['label_free'] = [1,]
context['class_t2'] = json.dumps(class_t2_data.get_nodes_dict('ligands'))
return context
```
#### File: protwis/seqsign/sequence_signature.py
```python
from django.conf import settings
#from django.core import exceptions
from alignment.functions import strip_html_tags, get_format_props, prepare_aa_group_preference
Alignment = getattr(__import__(
'common.alignment_' + settings.SITE_NAME,
fromlist=['Alignment']
), 'Alignment')
from common.definitions import AA_ZSCALES, AMINO_ACIDS, AMINO_ACID_GROUPS, AMINO_ACID_GROUP_NAMES, AMINO_ACID_GROUP_PROPERTIES, ZSCALES
from protein.models import Protein, ProteinConformation
from residue.models import Residue
from collections import OrderedDict
from copy import deepcopy
import numpy as np
from operator import itemgetter
import re
from scipy.stats import t
import time
class SequenceSignature:
"""
A class handling the sequence signature.
"""
def __init__(self):
self.aln_pos = Alignment()
self.aln_neg = Alignment()
self.features_normalized_pos = OrderedDict()
self.features_normalized_neg = OrderedDict()
self.features_frequency_difference = OrderedDict()
self.features_frequency_diff_display = []
self.features_consensus_pos = OrderedDict()
self.features_consensus_neg = OrderedDict()
self.freq_cutoff = 30
self.common_gn = OrderedDict()
self.common_segments = OrderedDict()
self.common_schemes = {}
self.signature = OrderedDict()
self.zscales_signature = OrderedDict()
self.feature_preference = prepare_aa_group_preference()
self.group_lengths = dict([
(x, len(y)) for x,y in enumerate(AMINO_ACID_GROUPS.values())
])
self.default_column = np.array([((y == '-') and 100) or 0 for y in AMINO_ACID_GROUPS.keys()])
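# default_column is a 100% "gap" frequency column: 100 for the gap group ('-') and 0 for
# every other amino acid group. calculate_signature() inserts it at generic positions
# that are present in only one of the two protein sets.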
def _assign_preferred_features(self, signature, segment, ref_matrix):
new_signature = []
for pos, argmax in enumerate(signature):
new_signature.append(self._calculate_best_feature(pos, segment, argmax, ref_matrix))
return new_signature
def _calculate_best_feature(self, pos, segment, argmax, ref_matrix):
tmp = self.feature_preference[argmax]
equiv_feat = np.where(np.isin(ref_matrix[segment][:, pos], ref_matrix[segment][argmax][pos]))[0]
pref_feat = argmax
min_len = self.group_lengths[argmax]
for efeat in equiv_feat:
if efeat in tmp and self.group_lengths[efeat] < min_len:
pref_feat = efeat
min_len = self.group_lengths[efeat]
# when two features have the same aa count, take the one from positive set
elif efeat in tmp and self.group_lengths[efeat] == min_len:
if ref_matrix[segment][pref_feat][pos] < 0 and ref_matrix[segment][efeat][pos] > 0:
pref_feat = efeat
return pref_feat
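# Example of the tie-breaking above (hypothetical groups): if a 6-residue group and a
# 1-residue group show the same frequency difference at a position, the 1-residue group
# is preferred; on an exact length tie, the feature enriched in the positive set wins.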
def setup_alignments(self, segments, protein_set_positive=None, protein_set_negative=None):
"""Setup (fetch and normalize) the data necessary for calculation of the signature.
Arguments:
segments {list} -- List of segments to calculate the signature from
Keyword Arguments:
protein_set_positive {list} -- list of Protein objects - a positive (reference) set (default: {None})
protein_set_negative {list} -- list of Protein objects - a negative set (default: {None})
"""
if protein_set_positive:
self.aln_pos.load_proteins(protein_set_positive)
if protein_set_negative:
self.aln_neg.load_proteins(protein_set_negative)
# In case positive and negative sets come from different classes
# unify the numbering schemes
self.common_schemes = self.merge_numbering_schemes()
self.aln_pos.numbering_schemes = self.common_schemes
self.aln_neg.numbering_schemes = self.common_schemes
# now load the segments and generic numbers
self.aln_pos.load_segments(segments)
self.aln_neg.load_segments(segments)
self.aln_pos.build_alignment()
self.aln_neg.build_alignment()
self.common_gn = deepcopy(self.aln_pos.generic_numbers)
for scheme in self.aln_neg.numbering_schemes:
for segment in self.aln_neg.segments:
for pos in self.aln_neg.generic_numbers[scheme[0]][segment].items():
if pos[0] not in self.common_gn[scheme[0]][segment].keys():
self.common_gn[scheme[0]][segment][pos[0]] = pos[1]
self.common_gn[scheme[0]][segment] = OrderedDict(sorted(
self.common_gn[scheme[0]][segment].items(),
key=lambda x: x[0].split('x')
))
self.common_segments = OrderedDict([
(x, sorted(list(set(self.aln_pos.segments[x]) | set(self.aln_neg.segments[x])), key=lambda x: x.split('x'))) for x in self.aln_neg.segments
])
# tweaking alignment
self.aln_pos.calculate_statistics()
self._update_alignment(self.aln_pos)
# tweaking consensus seq
self._update_consensus_sequence(self.aln_pos)
# tweaking negative alignment
self.aln_neg.calculate_statistics()
self._update_alignment(self.aln_neg)
# tweaking consensus seq
self._update_consensus_sequence(self.aln_neg)
def _update_alignment(self, alignment):
for prot in alignment.proteins:
for seg, resi in prot.alignment.items():
consensus = []
aln_list = [x[0] for x in resi]
aln_dict = dict([
(x[0], x) for x in resi
])
for pos in self.common_segments[seg]:
if pos not in aln_list:
consensus.append([pos, False, '_', 0])
else:
consensus.append(aln_dict[pos])
prot.alignment[seg] = consensus
def _update_consensus_sequence(self, alignment):
for seg, resi in alignment.consensus.items():
consensus = OrderedDict()
aln_list = [x for x in resi.keys()]
aln_dict = dict([
(x, resi[x]) for x in resi.keys()
])
for pos in self.common_segments[seg]:
if pos not in aln_list:
consensus[pos] = ['-', 0, 100]
else:
consensus[pos] = aln_dict[pos]
alignment.consensus[seg] = consensus
def _convert_feature_stats(self, fstats, aln):
tmp_fstats = []
for row in range(len(AMINO_ACID_GROUPS.keys())):
tmp_row = []
for segment in self.common_segments:
tmp_row.append([[
str(x),
str(int(x/10)), #if x != 0 else -1,
] for x in fstats[segment][row]])
tmp_fstats.append(tmp_row)
aln.feature_stats = tmp_fstats
def setup_alignments_from_selection(self, positive_selection, negative_selection):
"""
The function gathers necessary information from provided selections
and runs the calculations of the sequence alignments independently for
both protein sets. It also finds the common set of residue positions.
Arguments:
positive_selection {Selection} -- selection containing first group of proteins
negative_selection {Selection} -- selection containing second group of proteins along with the user-selected sequence segments for the alignment
"""
self.aln_pos.load_proteins_from_selection(positive_selection)
self.aln_neg.load_proteins_from_selection(negative_selection)
# local segment list
segments = []
# read selection
for segment in negative_selection.segments:
segments.append(segment)
self.setup_alignments(segments)
def calculate_signature(self):
"""
Calculates the feature frequency difference between two protein sets.
Generates the full differential matrix as well as maximum difference for a position (for scatter plot).
"""
for sid, segment in enumerate(self.aln_neg.segments):
self.features_normalized_pos[segment] = np.array(
[[x[0] for x in feat[sid]] for feat in self.aln_pos.feature_stats],
dtype='int'
)
self.features_normalized_neg[segment] = np.array(
[[x[0] for x in feat[sid]] for feat in self.aln_neg.feature_stats],
dtype='int'
)
for segment in self.aln_neg.segments:
#TODO: get the correct default numering scheme from settings
for idx, res in enumerate(self.common_gn[self.common_schemes[0][0]][segment].keys()):
if res not in self.aln_pos.generic_numbers[self.common_schemes[0][0]][segment].keys():
self.features_normalized_pos[segment] = np.insert(self.features_normalized_pos[segment], idx, self.default_column, axis=1)
elif res not in self.aln_neg.generic_numbers[self.common_schemes[0][0]][segment].keys():
self.features_normalized_neg[segment] = np.insert(self.features_normalized_neg[segment], idx, self.default_column, axis=1)
# now the difference
self.features_frequency_difference[segment] = np.subtract(
self.features_normalized_pos[segment],
self.features_normalized_neg[segment]
)
self._convert_feature_stats(self.features_normalized_pos, self.aln_pos)
self._convert_feature_stats(self.features_normalized_neg, self.aln_neg)
# Version with display data
for row in range(len(AMINO_ACID_GROUPS.keys())):
tmp_row = []
for segment in self.aln_neg.segments:
#first item is the real value,
# second is the assignment of color (via css)
# 0 - red, 5 - yellow, 10 - green
#third item is a tooltip
tmp_row.append([[
x,
int(x/20)+5 if x!= 0 else -1,
"{} - {}".format(
self.features_normalized_pos[segment][row][y],
self.features_normalized_neg[segment][row][y]
)
] for y, x in enumerate(self.features_frequency_difference[segment][row])])
self.features_frequency_diff_display.append(tmp_row)
self.signature = OrderedDict([(x, []) for x in self.aln_neg.segments])
for segment in self.aln_neg.segments:
tmp = np.array(self.features_frequency_difference[segment])
signature_map = np.absolute(tmp).argmax(axis=0)
#signature_map = tmp.argmax(axis=0)
# Update mapping to prefer features with fewer amino acids
signature_map = self._assign_preferred_features(signature_map, segment, self.features_frequency_difference)
self.signature[segment] = []
for col, pos in enumerate(list(signature_map)):
self.signature[segment].append([
list(AMINO_ACID_GROUP_PROPERTIES.values())[pos]['display_name_short'] if self.features_frequency_difference[segment][pos][col] > 0 else '-' + list(AMINO_ACID_GROUP_PROPERTIES.values())[pos]['display_name_short'], # latest implementation of NOT... properties
list(AMINO_ACID_GROUP_NAMES.values())[pos] if self.features_frequency_difference[segment][pos][col] > 0 else "Not " + list(AMINO_ACID_GROUP_NAMES.values())[pos], # latest implementation of NOT... properties
self.features_frequency_difference[segment][pos][col],
int(self.features_frequency_difference[segment][pos][col]/20)+5,
list(AMINO_ACID_GROUP_PROPERTIES.values())[pos]['length'],
list(AMINO_ACID_GROUPS.keys())[pos],
list(AMINO_ACID_GROUP_PROPERTIES.values())[pos]['display_name_short']
])
features_pos = OrderedDict()
features_neg = OrderedDict()
self.features_consensus_pos = OrderedDict([(x, []) for x in self.aln_neg.segments])
self.features_consensus_neg = OrderedDict([(x, []) for x in self.aln_neg.segments])
for sid, segment in enumerate(self.aln_neg.segments):
features_pos[segment] = np.array(
[[x[0] for x in feat[sid]] for feat in self.aln_pos.feature_stats],
dtype='int'
)
features_neg[segment] = np.array(
[[x[0] for x in feat[sid]] for feat in self.aln_neg.feature_stats],
dtype='int'
)
features_cons_pos = features_pos[segment].argmax(axis=0)
features_cons_pos = self._assign_preferred_features(features_cons_pos, segment, features_pos)
features_cons_neg = features_neg[segment].argmax(axis=0)
features_cons_neg = self._assign_preferred_features(features_cons_neg, segment, features_neg)
for col, pos in enumerate(list(features_cons_pos)):
self.features_consensus_pos[segment].append([
# list(AMINO_ACID_GROUPS.keys())[pos],
list(AMINO_ACID_GROUP_PROPERTIES.values())[pos]['display_name_short'],
list(AMINO_ACID_GROUP_NAMES.values())[pos],
features_pos[segment][pos][col],
int(features_pos[segment][pos][col]/20)+5,
list(AMINO_ACID_GROUP_PROPERTIES.values())[pos]['length'],
list(AMINO_ACID_GROUPS.keys())[pos]
])
for col, pos in enumerate(list(features_cons_neg)):
self.features_consensus_neg[segment].append([
# list(AMINO_ACID_GROUPS.keys())[pos],
list(AMINO_ACID_GROUP_PROPERTIES.values())[pos]['display_name_short'],
list(AMINO_ACID_GROUP_NAMES.values())[pos],
features_neg[segment][pos][col],
int(features_neg[segment][pos][col]/20)+5,
list(AMINO_ACID_GROUP_PROPERTIES.values())[pos]['length'],
list(AMINO_ACID_GROUPS.keys())[pos]
])
self._convert_feature_stats(self.features_normalized_pos, self.aln_pos)
self._convert_feature_stats(self.features_normalized_neg, self.aln_neg)
def calculate_zscales_signature(self):
"""
Calculates the Z-scales (Z1-Z5) difference between two protein sets for each GN residue position
Generates the full difference matrix and calculates the relevance (P-value) for each z-scale & position combination.
"""
# Prepare zscales for both sets
self.aln_pos.calculate_zscales()
self.aln_neg.calculate_zscales()
# Difference + p-value calculation for shared residues
ZSCALES.sort()
for zscale in ZSCALES:
self.zscales_signature[zscale] = OrderedDict()
for segment in self.aln_pos.zscales[ZSCALES[0]].keys():
self.zscales_signature[zscale][segment] = OrderedDict()
all_keys = set(self.aln_pos.zscales[zscale][segment].keys()).union(self.aln_neg.zscales[zscale][segment].keys())
shared_keys = set(self.aln_pos.zscales[zscale][segment].keys()).intersection(self.aln_neg.zscales[zscale][segment].keys())
for entry in sorted(all_keys):
if entry in shared_keys:
var1 = self.aln_pos.zscales[zscale][segment][entry]
var2 = self.aln_neg.zscales[zscale][segment][entry]
# Welch's t-test
# One-liner alternative : sed = np.sqrt(var1[1]**2.0/var1[2] + var2[1]**2.0/var2[2])
# se1 = var1[1]/np.sqrt(var1[2])
# se2 = var2[1]/np.sqrt(var2[2])
# sed = np.sqrt(se1**2.0 + se2**2.0)
# Student t-test assuming similar variance different sample sizes
sed = 0
if var1[2] > 0 and var2[2] > 0 and (var1[2]+var2[2] - 2) > 0:
sed = np.sqrt(((var1[2] - 1) * var1[1]**2.0 + (var2[2]-1)*var2[1]**2.0)/(var1[2]+var2[2]-2)) * np.sqrt(1/var1[2] + 1/var2[2])
t_value = 1
p = 100
color = -1
if sed != 0:
mean_diff = var1[0] - var2[0]
t_value = mean_diff / sed
# Grab P-value
df = var1[2] + var2[2] - 2
p = (1.0 - t.cdf(abs(t_value), df)) * 2.0
# Coloring based on statistical significance
#if p <= 0.05:
# color = int(round(10 - 9 * p/0.05, 0))
#else:
# color = 0
# Coloring difference Z-scale means when statistically significant
if p <= 0.05 and abs(mean_diff) > 0.6:
color = round(mean_diff / 4 * 5, 0)
if abs(color) > 5:
color = color/abs(color) * 5
color = int(color + 5)
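# e.g. mean_diff = 2.0 -> round(2.0 / 4 * 5) = 2 -> final color 7 on the 0-10 scale
# (values are clamped to +/-5 before the +5 shift, so the scale runs 0-10 with 5 in the middle)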
tooltip = entry + " ("+ zscale + ")<br/>" + \
"Set 1: " + str(round(var1[0], 2)) + " ± " + str(round(var1[1], 2)) + " (" + str(var1[2]) + ")</br>" + \
"Set 2: " + str(round(var2[0], 2)) + " ± " + str(round(var2[1], 2)) + " (" + str(var2[2]) + ")</br>"
if p > 0.001:
tooltip += "P-value: {0:.3f}".format(p)
else:
tooltip += "P-value: {0:.2E}".format(p)
self.zscales_signature[zscale][segment][entry] = [round(var1[0]-var2[0],1), color, tooltip] # diff, P-value, tooltip
else:
tooltip = entry + "<br/>Set 1: GAP<br/>"
if entry in self.aln_pos.zscales[zscale][segment]:
var1 = self.aln_pos.zscales[zscale][segment][entry]
tooltip = entry + "<br/>Set 1: " + str(round(var1[0], 2)) + " ± " + str(round(var1[1], 2)) + " (" + str(var1[2]) + ")</br>"
if entry in self.aln_neg.zscales[zscale][segment]:
var2 = self.aln_neg.zscales[zscale][segment][entry]
tooltip += "Set 2: " + str(round(var2[0], 2)) + " ± " + str(round(var2[1], 2)) + " (" + str(var2[2]) + ")</br>"
else:
tooltip += "Set 2: GAP<br/>"
self.zscales_signature[zscale][segment][entry] = ["-", -1, tooltip] # diff, P-value, tooltip
def prepare_display_data(self):
options = {
'num_residue_columns': len(sum([[x for x in self.common_gn[self.common_schemes[0][0]][segment]] for segment in self.aln_neg.segments], [])),
'num_of_sequences_pos': len(self.aln_pos.proteins),
'num_residue_columns_pos': len(self.aln_pos.positions),
'num_of_sequences_neg': len(self.aln_neg.proteins),
'num_residue_columns_neg': len(self.aln_neg.positions),
'common_segments': self.common_segments,
'common_generic_numbers': self.common_gn,
'feats_signature': self.features_frequency_diff_display,
'signature_consensus': self.signature,
'zscales_signature': self.zscales_signature,
'feats_cons_pos': self.features_consensus_pos,
'feats_cons_neg': self.features_consensus_neg,
'a_pos': self.aln_pos,
'a_neg': self.aln_neg,
}
return options
def prepare_session_data(self):
session_signature = {
'common_positions': self.common_gn,
'diff_matrix': self.features_frequency_difference,
'numbering_schemes': self.common_schemes,
'common_segments': self.common_segments,
}
return session_signature
def merge_numbering_schemes(self):
"""
Extract all of the numbering schemes used for a set of proteins.
Arguments:
proteins {selection} -- A set of proteins to analyze
"""
numbering_schemes = {}
for prot in self.aln_pos.proteins + self.aln_neg.proteins:
if prot.protein.residue_numbering_scheme.slug not in numbering_schemes:
rnsn = prot.protein.residue_numbering_scheme.name
try:
#New way of breaking down the numbering scheme
rnsn_parent = prot.protein.residue_numbering_scheme.parent.short_name
except Exception as msg:
rnsn_parent = ''
numbering_schemes[prot.protein.residue_numbering_scheme.slug] = (rnsn, rnsn_parent)
# order and convert numbering scheme dict to tuple
return sorted([(x[0], x[1][0], x[1][1]) for x in numbering_schemes.items()], key=itemgetter(0))
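# Returned shape, sorted by slug (slugs/names here are hypothetical):
# [('bw', 'Ballesteros-Weinstein', ''), ('gpcrdb', 'GPCRdb(A)', 'GPCRdb')]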
# def apply_cutoff(self, cutoff=0):
# matrix_consensus = OrderedDict()
# for segment in self.segments:
# # print(segment)
# segment_consensus = []
# signature_map = np.absolute(self.features_frequency_difference[segment]).argmax(axis=0)
# for col, pos in enumerate(list(signature_map)):
# if abs(self.features_frequency_difference[segment][pos][col]) > self.cutoff:
# segment_consensus.append(self.features_frequency_difference[segment][ : , col])
# for scheme in self.schemes:
# gnum = list(self.common_gn[scheme[0]][segment].items())[col]
# try:
# self.relevant_gn[scheme[0]][segment][gnum[0]] = gnum[1]
# except:
# self.relevant_gn[scheme[0]][segment] = OrderedDict()
# self.relevant_gn[scheme[0]][segment][gnum[0]] = gnum[1]
# segment_consensus = np.array(segment_consensus).T
# if segment_consensus != []:
# matrix_consensus[segment] = segment_consensus
# self.signature_matrix_filtered = matrix_consensus
# self.relevant_segments = OrderedDict([
# (
# x[0],
# self.relevant_gn[self.schemes[0][0]][x[0]].keys()
# ) for x in self.signature_matrix_filtered.items()
# ])
# signature = OrderedDict([(x[0], []) for x in matrix_consensus.items()])
# for segment in self.relevant_segments:
# signature_map = np.absolute(self.signature_matrix_filtered[segment]).argmax(axis=0)
# tmp = np.array(self.signature_matrix_filtered[segment])
# for col, pos in enumerate(list(signature_map)):
# signature[segment].append([
# list(AMINO_ACID_GROUPS.keys())[pos],
# list(AMINO_ACID_GROUP_NAMES.values())[pos],
# tmp[pos][col],
# int(tmp[pos][col]/20)+5
# ])
# self.signature_consensus = signature
def prepare_excel_worksheet(self, workbook, worksheet_name, aln='positive', data='alignment'):
"""
A function saving an alignment data subset into the excel spreadsheet.
It adds a worksheet to an existing workbook and saves only a selected subset of alignment data.
For a complete save of the alignment it needs to be wrapped with additional code.
The outline of the excel worksheet is similar to that of the html page.
First column shows numbering schemes, protein list, etc
The frequency data start from column B
Arguments:
workbook {xlsxwriter.Workbook} -- object to add worksheet to
worksheet_name {string} -- name for the new worksheet
Keyword Arguments:
aln {string} -- alignment to extract data from.
Possible choices: positive, negative, signature
data {string} -- data type to save to worksheet: 'alignment' or 'features' frequencies
"""
props = AMINO_ACID_GROUP_NAMES.values()
worksheet = workbook.add_worksheet(worksheet_name)
if aln == 'positive':
alignment = self.aln_pos
if data == 'features':
data_block = self.aln_pos.feature_stats
feat_consensus = self.features_consensus_pos
elif aln == 'negative':
alignment = self.aln_neg
if data == 'features':
data_block = self.aln_neg.feature_stats
feat_consensus = self.features_consensus_neg
else:
if data == 'features':
data_block = self.features_frequency_diff_display
feat_consensus = self.signature
numbering_schemes = self.common_schemes
generic_numbers_set = self.common_gn
# First column, numbering schemes
for row, scheme in enumerate(numbering_schemes):
# worksheet.write(1 + 3*row, 0, scheme[1])
worksheet.write(1 + 3 * row, 0, 'Residue number')
worksheet.write(2 + 3 * row, 0, 'Sequence-based ({})'.format(scheme[2]))
worksheet.write(3 + 3*row, 0, 'Structure-based (GPCRdb)')
# First column, stats
if data == 'features':
for offset, prop in enumerate(props):
worksheet.write(1 + 3 * len(numbering_schemes) + offset, 0, prop)
if aln == 'signature':
worksheet.write(
1 + 3 * len(numbering_schemes) + len(props),
0,
'Signature consensus'
)
else:
worksheet.write(
1 + 3 * len(numbering_schemes) + len(props),
0,
'Prop/AA consensus'
)
worksheet.write(
2 + 3 * len(numbering_schemes) + len(props),
0,
'Length'
)
# First column, protein list (for alignment) and line for consensus sequence
else:
for offset, prot in enumerate(alignment.proteins):
worksheet.write(
1 + 3 * len(numbering_schemes) + offset,
0,
prot.protein.entry_name
)
worksheet.write(
1 + 3 * len(numbering_schemes) + len(alignment.proteins),
0,
'Seq consensus'
)
# Second column and on
# Segments
offset = 0
for segment in generic_numbers_set[numbering_schemes[0][0]].keys():
worksheet.merge_range(
0,
1 + offset,
0,
len(generic_numbers_set[numbering_schemes[0][0]][segment]) + offset - 1,
segment
)
offset += len(generic_numbers_set[numbering_schemes[0][0]][segment])
# Generic numbers
# for row, item in enumerate(generic_numbers_set.items()):
for row, item in enumerate(numbering_schemes):
scheme = item[0]
offset = 1
for _, gn_list in generic_numbers_set[scheme].items():
for col, gn_pair in enumerate(gn_list.items()):
                    try:
                        tm, bw, gpcrdb = re.split(r'\.|x', strip_html_tags(gn_pair[1]))
                    except ValueError:
                        tm, bw, gpcrdb = ('', '', '')
worksheet.write(
1 + 3 * row,
col + offset,
tm
)
worksheet.write(
2 + 3 * row,
col + offset,
bw
)
worksheet.write(
3 + 3*row,
col + offset,
gpcrdb
)
offset += len(gn_list.items())
# Stats
if data == 'features':
offset = 1 + 3 * len(numbering_schemes)
for row, prop in enumerate(data_block):
col_offset = 0
for segment in prop:
for col, freq in enumerate(segment):
# if aln == 'signature':
# cell_format = workbook.add_format(get_format_props(freq[1] if freq[0] != 0 else 5))
# else:
if aln == 'signature':
cell_format = workbook.add_format(get_format_props(freq[1]))
else:
cell_format = workbook.add_format(get_format_props(freq_gs=freq[1]))
worksheet.write(
offset + row,
1 + col + col_offset,
freq[0] if isinstance(freq[0], int) else int(freq[0]),
cell_format
)
col_offset += len(segment)
col_offset = 0
for segment, cons_feat in feat_consensus.items():
for col, chunk in enumerate(cons_feat):
if aln == 'signature':
cell_format = workbook.add_format(get_format_props(feat=chunk[-1].replace('\u03b1', 'a')))
else:
cell_format = workbook.add_format(get_format_props(feat=chunk[0].replace('\u03b1', 'a')))
#Property group
worksheet.write(
offset + len(AMINO_ACID_GROUPS),
1 + col + col_offset,
chunk[0],
cell_format
)
#Length of prop
worksheet.write(
1 + offset + len(AMINO_ACID_GROUPS),
1 + col + col_offset,
chunk[4],
)
if aln == 'signature':
cell_format = workbook.add_format(get_format_props(int(chunk[2]/20)+5))
else:
cell_format = workbook.add_format(get_format_props(int(chunk[2]/10))) #if chunk[2] != 0 else get_format_props(-1))
#Percentages
worksheet.write(
2 + offset + len(AMINO_ACID_GROUPS),
1 + col + col_offset,
chunk[2],
cell_format
)
col_offset += len(cons_feat)
# Alignment
else:
offset = 1 + 3 * len(alignment.numbering_schemes)
for row, data in enumerate(alignment.proteins):
col_offset = 0
for segment, sequence in data.alignment.items():
for col, res in enumerate(sequence):
cell_format = workbook.add_format(get_format_props(res=res[2]))
worksheet.write(
offset + row,
1 + col + col_offset,
res[2],
cell_format
)
col_offset += len(sequence)
# Consensus sequence
row = 1 + 3 * len(alignment.numbering_schemes) + len(alignment.proteins)
col_offset = 0
for segment, sequence in alignment.consensus.items():
for col, data in enumerate(sequence.items()):
res = data[1]
cell_format = workbook.add_format(get_format_props(res=res[0]))
worksheet.write(
row,
1 + col + col_offset,
res[0],
cell_format
)
cell_format = workbook.add_format(get_format_props(res[1]))
worksheet.write(
row + 1,
1 + col + col_offset,
res[2],
cell_format
)
col_offset += len(sequence.items())
    def per_gn_signature_excel(self, workbook, worksheet_name='SignByCol'):
        """Save the signature as one row per generic number, sorted by descending score."""
per_gn_signature = []
for segment in self.common_segments:
for pos, item in enumerate(self.signature[segment]):
gn = list(self.common_gn[self.common_schemes[0][0]][segment].keys())[pos]
if 'x' not in gn:
continue # skip positions without a generic number
prop = AMINO_ACID_GROUP_PROPERTIES[item[5]]['display_name_short']
length = AMINO_ACID_GROUP_PROPERTIES[item[5]]['length']
prop_name = item[1]
score = abs(item[2])
per_gn_signature.append([gn, score, prop, length, prop_name])
worksheet = workbook.add_worksheet(worksheet_name)
worksheet.write_row(0, 0, ['Pos', 'Score', 'Prop/AA', 'Length', 'Property'])
for row, pos in enumerate(sorted(per_gn_signature, key=lambda x: x[1], reverse=True)):
worksheet.write_row(
row + 1,
0,
pos,
)
class SignatureMatch():
    """Score proteins against a sequence signature derived from two protein sets."""
def __init__(self, common_positions, numbering_schemes, segments, difference_matrix, protein_set_pos, protein_set_neg, cutoff=40):
self.cutoff = cutoff
self.norm = 0.0
self.common_gn = common_positions
self.schemes = numbering_schemes
self.segments = segments
self.diff_matrix = difference_matrix
self.signature_matrix_filtered = OrderedDict()
self.signature_consensus = OrderedDict()
self.protein_set = protein_set_pos + protein_set_neg
self.protein_set_pos = protein_set_pos
self.protein_set_neg = protein_set_neg
self.relevant_gn = OrderedDict([(x[0], OrderedDict()) for x in self.schemes])
self.relevant_segments = OrderedDict()
self.scored_proteins = []
self.protein_report = OrderedDict()
self.protein_signatures = OrderedDict()
self.feature_preference = prepare_aa_group_preference()
print(self.schemes)
self.find_relevant_gns()
self.residue_to_feat = dict(
[(x, set()) for x in AMINO_ACIDS.keys()]
)
for fidx, feat in enumerate(AMINO_ACID_GROUPS.items()):
for res in feat[1]:
try:
self.residue_to_feat[res].add(fidx)
except KeyError:
self.residue_to_feat['-'].add(fidx)
self._find_norm()
self.scores_pos, self.signatures_pos, self.scored_proteins_pos = self.score_protein_set(self.protein_set_pos)
self.scores_neg, self.signatures_neg, self.scored_proteins_neg = self.score_protein_set(self.protein_set_neg)
def _assign_preferred_features(self, signature, segment, ref_matrix):
new_signature = []
for pos, argmax in enumerate(signature):
updated = True
new_feat = argmax
while updated:
tmp = self._calculate_best_feature(pos, segment, argmax, ref_matrix)
if tmp == new_feat:
updated = False
else:
new_feat = tmp
new_signature.append(new_feat)
return new_signature
def _calculate_best_feature(self, pos, segment, argmax, ref_matrix):
tmp = self.feature_preference[argmax]
amax = ref_matrix[segment][argmax, pos]
equiv_feat = np.where(np.isin(ref_matrix[segment][:, pos], amax))[0]
for efeat in equiv_feat:
if efeat in tmp:
return efeat
return argmax
def _find_norm(self):
norm = 0.0
for segment in self.relevant_segments:
norm += np.sum(np.amax(np.absolute(self.signature_matrix_filtered[segment]), axis=0))
self.norm = norm
def find_relevant_gns(self):
"""
Find the set of generic residue positions meeting the cutoff.
"""
matrix_consensus = OrderedDict()
for segment in self.segments:
segment_consensus = []
#signature_map = self.diff_matrix[segment].argmax(axis=0)
signature_map = np.absolute(self.diff_matrix[segment]).argmax(axis=0)
# Update mapping to prefer features with fewer amino acids
signature_map = self._assign_preferred_features(signature_map, segment, self.diff_matrix)
for col, pos in enumerate(list(signature_map)):
if abs(self.diff_matrix[segment][pos][col]) >= self.cutoff:
segment_consensus.append(self.diff_matrix[segment][ : , col])
for scheme in self.schemes:
gnum = list(self.common_gn[scheme[0]][segment].items())[col]
try:
self.relevant_gn[scheme[0]][segment][gnum[0]] = gnum[1]
except KeyError:
self.relevant_gn[scheme[0]][segment] = OrderedDict()
self.relevant_gn[scheme[0]][segment][gnum[0]] = gnum[1]
segment_consensus = np.array(segment_consensus).T
if segment_consensus.shape != (0,):
matrix_consensus[segment] = segment_consensus
self.signature_matrix_filtered = matrix_consensus
self.relevant_segments = OrderedDict([
(
x[0],
self.relevant_gn[self.schemes[0][0]][x[0]].keys()
) for x in self.signature_matrix_filtered.items()
])
signature = OrderedDict([(x[0], []) for x in matrix_consensus.items()])
for segment in self.relevant_segments:
# signature_map = self.signature_matrix_filtered[segment].argmax(axis=0)
signature_map = np.absolute(self.signature_matrix_filtered[segment]).argmax(axis=0)
signature_map = self._assign_preferred_features(signature_map, segment, self.signature_matrix_filtered)
tmp = np.array(self.signature_matrix_filtered[segment])
for col, pos in enumerate(list(signature_map)):
signature[segment].append([
# list(AMINO_ACID_GROUPS.keys())[pos],
list(AMINO_ACID_GROUP_PROPERTIES.values())[pos]['display_name_short'],
list(AMINO_ACID_GROUP_NAMES.values())[pos],
tmp[pos][col],
int(tmp[pos][col]/20)+5,
list(AMINO_ACID_GROUP_PROPERTIES.values())[pos]['length'],
])
self.signature_consensus = signature
def score_protein_class(self, pclass_slug='001'):
start = time.time()
protein_scores = {}
protein_signature_match = {}
class_proteins = Protein.objects.filter(
species__common_name='Human',
family__slug__startswith=pclass_slug
).exclude(
id__in=[x.id for x in self.protein_set]
)
class_a_pcf = ProteinConformation.objects.order_by(
'protein__family__slug',
'protein__entry_name'
).filter(
protein__in=class_proteins,
protein__sequence_type__slug='wt'
).exclude(protein__entry_name__endswith='-consensus')
for pcf in class_a_pcf:
p_start = time.time()
score, nscore, signature_match = self.score_protein(pcf)
protein_scores[pcf] = (score, nscore)
protein_signature_match[pcf] = signature_match
p_end = time.time()
print("Time elapsed for {}: ".format(pcf.protein.entry_name), p_end - p_start)
end = time.time()
self.protein_report = OrderedDict(sorted(protein_scores.items(), key=lambda x: x[1][0], reverse=True))
for prot in self.protein_report.items():
self.protein_signatures[prot[0]] = protein_signature_match[prot[0]]
self.scored_proteins = list(self.protein_report.keys())
print("Total time: ", end - start)
def score_protein_set(self, protein_set):
start = time.time()
protein_scores = {}
protein_signature_match = {}
pcfs = ProteinConformation.objects.order_by(
'protein__family__slug',
'protein__entry_name'
).filter(
protein__in=protein_set,
protein__sequence_type__slug='wt'
).exclude(protein__entry_name__endswith='-consensus')
for pcf in pcfs:
p_start = time.time()
score, nscore, signature_match = self.score_protein(pcf)
protein_scores[pcf] = (score, nscore)
protein_signature_match[pcf] = signature_match
p_end = time.time()
print("Time elapsed for {}: ".format(pcf.protein.entry_name), p_end - p_start)
end = time.time()
protein_report = OrderedDict(sorted(protein_scores.items(), key=lambda x: x[1][0], reverse=True))
protein_signatures = OrderedDict()
for prot in protein_report.items():
protein_signatures[prot[0]] = protein_signature_match[prot[0]]
scored_proteins = list(protein_report.keys())
print("Total time: ", end - start)
return (protein_report, protein_signatures, scored_proteins)
def score_protein(self, pcf):
prot_score = 0.0
#norm = 0.0
consensus_match = OrderedDict([(x, []) for x in self.relevant_segments])
relevant_gns_total = []
for segment in self.relevant_segments:
for idx, pos in enumerate(self.relevant_gn[self.schemes[0][0]][segment].keys()):
relevant_gns_total.append(pos)
resi = Residue.objects.filter(
protein_conformation=pcf,
generic_number__label__in=relevant_gns_total
).prefetch_related('generic_number')
resi_dict = {}
for r in resi:
if r.generic_number:
resi_dict[r.generic_number.label] = r
for segment in self.relevant_segments:
tmp = []
# signature_map = self.signature_matrix_filtered[segment].argmax(axis=0)
signature_map = np.absolute(self.signature_matrix_filtered[segment]).argmax(axis=0)
signature_map = self._assign_preferred_features(signature_map, segment, self.signature_matrix_filtered)
#norm += np.sum(np.amax(self.signature_matrix_filtered[segment], axis=0))
for idx, pos in enumerate(self.relevant_gn[self.schemes[0][0]][segment].keys()):
feat = signature_map[idx]
feat_abr = list(AMINO_ACID_GROUPS.keys())[feat]
feat_name = list(AMINO_ACID_GROUP_NAMES.values())[feat]
val = self.signature_matrix_filtered[segment][feat][idx]
                if pos in resi_dict:
                    res = resi_dict[pos]
                    if feat in self.residue_to_feat[res.amino_acid]:
                        # Receptor has the signature feature at this position
                        if val > 0:
                            prot_score += val
                        color = "#808080" if val > 0 else "white"
                        tmp.append([feat_abr, feat_name, val, color, res.amino_acid, pos])
                    else:
                        # David doesn't want the negative values in the score
                        if val < 0:
                            prot_score -= val  # if a receptor does NOT have the negative property, add the score
                        color = "white" if val > 0 else "#808080"
                        tmp.append([feat_abr, feat_name, val, color, res.amino_acid, pos])
                else:
                    if feat_name == 'Gap':
                        color = "#808080" if val > 0 else "white"
                        tmp.append([feat_abr, feat_name, val, color, '-', pos])
                        prot_score += val
                    else:
                        # David doesn't want the negative values in the score
                        tmp.append([feat_abr, feat_name, val, "white", '-', pos])
consensus_match[segment] = tmp
return (prot_score/100, prot_score/self.norm*100, consensus_match)
def signature_score_excel(workbook, scores, protein_signatures, signature_filtered, relevant_gn, relevant_segments, numbering_schemes, scores_positive=None, scores_negative=None, signatures_positive=None, signatures_negative=None):
worksheet = workbook.add_worksheet('scored_proteins')
#wrap = workbook.add_format({'text_wrap': True})
# First column, numbering schemes
for row, scheme in enumerate(numbering_schemes):
worksheet.write(1 + 3 * row, 4, 'Residue number')
worksheet.write(2 + 3 * row, 4, 'Sequence-based ({})'.format(scheme[2]))
worksheet.write(3 + 3*row, 4, 'Structure-based (GPCRdb)')
worksheet.write(2 + 3 * len(numbering_schemes), 0, 'UniProt')
worksheet.write(2 + 3 * len(numbering_schemes), 1, 'Receptor name (IUPHAR)')
worksheet.write(2 + 3 * len(numbering_schemes) ,2, 'Receptor family')
worksheet.write(2 + 3 * len(numbering_schemes) ,3, 'Ligand class')
# Score header
worksheet.write(2 + 3 * len(numbering_schemes), 4, 'Score')
#worksheet.write(1, 3, 'Normalized score')
offset = 0
# Segments
for segment, resi in relevant_segments.items():
worksheet.merge_range(
0,
5 + offset,
0,
5 + len(resi) + offset - 1,
segment
)
offset += len(resi)
# Generic numbers
# for row, item in enumerate(generic_numbers_set.items()):
for row, item in enumerate(numbering_schemes):
scheme = item[0]
offset = 1
for _, gn_list in relevant_gn[scheme].items():
for col, gn_pair in enumerate(gn_list.items()):
                try:
                    tm, bw, gpcrdb = re.split(r'\.|x', strip_html_tags(gn_pair[1]))
                except ValueError:
                    tm, bw, gpcrdb = ('', '', '')
worksheet.write(
1 + 3 * row,
4 + col + offset,
tm
)
worksheet.write(
2 + 3 * row,
4 + col + offset,
bw
)
worksheet.write(
3 + 3*row,
4 + col + offset,
gpcrdb
)
offset += len(gn_list.items())
# Line for sequence signature
worksheet.write(
1 + 3 * len(numbering_schemes),
4,
'CONSENSUS'
)
col_offset = 0
for segment, cons_feat in signature_filtered.items():
for col, chunk in enumerate(cons_feat):
worksheet.write(
1 + 3 * len(numbering_schemes),
5 + col + col_offset,
chunk[0]
)
cell_format = workbook.add_format(get_format_props(int(chunk[2]/20)+5))
worksheet.write(
2 + 3 * len(numbering_schemes),
5 + col + col_offset,
chunk[2],
cell_format
)
col_offset += len(cons_feat)
# Score lines
row_offset = 0
for protein, score in scores.items():
worksheet.write(
3 + 3 * len(numbering_schemes) + row_offset,
0,
protein.protein.entry_name,
)
worksheet.write(
3 + 3 * len(numbering_schemes) + row_offset,
1,
protein.protein.name
)
worksheet.write(
3 + 3 * len(numbering_schemes) + row_offset,
2,
protein.protein.family.parent.name,
)
worksheet.write(
3 + 3 * len(numbering_schemes) + row_offset,
3,
protein.protein.family.parent.parent.name,
)
# worksheet.write(
# 3 + 3 * len(numbering_schemes) + row_offset,
# 2,
# score[0],
# )
worksheet.write(
3 + 3 * len(numbering_schemes) + row_offset,
4,
score[1],
)
col_offset = 0
for segment, data in protein_signatures[protein].items():
for col, res in enumerate(data):
cell_format = workbook.add_format({'bg_color': res[3],})
worksheet.write(
3 + 3 * len(numbering_schemes) + row_offset,
5 + col + col_offset,
res[4],
cell_format
)
col_offset += len(data)
row_offset += 1
static_offset = 3 + 3 * len(numbering_schemes) + len(protein_signatures.items())
#Scores for positive set (if specified)
if scores_positive:
worksheet.write(
static_offset,
0,
'Protein set 1'
)
static_offset += 1
row_offset = 0
for protein, score in scores_positive.items():
worksheet.write(
static_offset + row_offset,
0,
protein.protein.entry_name,
)
worksheet.write(
static_offset + row_offset,
1,
protein.protein.name
)
worksheet.write(
static_offset + row_offset,
2,
protein.protein.family.parent.name,
)
worksheet.write(
static_offset + row_offset,
3,
protein.protein.family.parent.parent.name,
)
# worksheet.write(
# 3 + 3 * len(numbering_schemes) + row_offset,
# 2,
# score[0],
# )
worksheet.write(
static_offset + row_offset,
4,
score[1],
)
col_offset = 0
for segment, data in signatures_positive[protein].items():
for col, res in enumerate(data):
cell_format = workbook.add_format({'bg_color': res[3],})
worksheet.write(
static_offset + row_offset,
5 + col + col_offset,
res[4],
cell_format
)
col_offset += len(data)
row_offset += 1
static_offset += len(scores_positive.items())
#Scores for negative set (if specified)
if scores_negative:
worksheet.write(
static_offset,
0,
'Protein set 2'
)
static_offset += 1
row_offset = 0
for protein, score in scores_negative.items():
worksheet.write(
static_offset + row_offset,
0,
protein.protein.entry_name,
)
worksheet.write(
static_offset + row_offset,
1,
protein.protein.name
)
worksheet.write(
static_offset + row_offset,
2,
protein.protein.family.parent.name,
)
worksheet.write(
static_offset + row_offset,
3,
protein.protein.family.parent.parent.name,
)
# worksheet.write(
# 3 + 3 * len(numbering_schemes) + row_offset,
# 2,
# score[0],
# )
worksheet.write(
static_offset + row_offset,
4,
score[1],
)
col_offset = 0
for segment, data in signatures_negative[protein].items():
for col, res in enumerate(data):
cell_format = workbook.add_format({'bg_color': res[3],})
worksheet.write(
static_offset + row_offset,
5 + col + col_offset,
res[4],
cell_format
)
col_offset += len(data)
row_offset += 1
```
#### File: protwis/structure/signprot_modeling.py
```python
from django.conf import settings
from protein.models import Protein, ProteinConformation, ProteinAnomaly, ProteinState, ProteinSegment
from residue.models import Residue
from residue.functions import dgn, ggn
from structure.models import *
from structure.functions import HSExposureCB, PdbStateIdentifier, update_template_source, compare_and_update_template_source
from common.alignment import AlignedReferenceTemplate, GProteinAlignment
from common.definitions import *
from common.models import WebLink
from signprot.models import SignprotComplex
import structure.structural_superposition as sp
import structure.assign_generic_numbers_gpcr as as_gn
from structure.homology_modeling_functions import GPCRDBParsingPDB
import Bio.PDB as PDB
from collections import OrderedDict
import os
import logging
import pprint
from io import StringIO, BytesIO
import sys
import re
import math
import yaml
import traceback
import subprocess
from copy import deepcopy
gprotein_segments = ProteinSegment.objects.filter(proteinfamily='Alpha')
gprotein_segment_slugs = [i.slug for i in gprotein_segments]
atom_num_dict = {'E':9, 'S':6, 'Y':12, 'G':4, 'A':5, 'V':7, 'M':8, 'L':8, 'I':8, 'T':7, 'F':11, 'H':10, 'K':9,
'D':8, 'C':6, 'R':11, 'P':7, 'Q':9, 'N':8, 'W':14, '-':0}
class SignprotModeling():
def __init__(self, main_structure, signprot, template_source, trimmed_residues, alignment, main_pdb_array):
self.main_structure = main_structure
self.signprot = signprot
self.template_source = template_source
self.trimmed_residues = trimmed_residues
self.a = alignment
self.main_pdb_array = main_pdb_array
self.target_signprot = None
def run(self):
parse = GPCRDBParsingPDB()
self.signprot_complex = SignprotComplex.objects.get(structure=self.main_structure)
structure_signprot= self.signprot_complex.protein
if self.signprot!=False:
self.target_signprot = Protein.objects.get(entry_name=self.signprot)
else:
self.target_signprot = self.signprot_complex.protein
self.signprot_protconf = ProteinConformation.objects.get(protein=self.target_signprot)
sign_a = GProteinAlignment()
sign_a.run_alignment(self.target_signprot, structure_signprot)
io = StringIO(self.main_structure.pdb_data.pdb)
assign_cgn = as_gn.GenericNumbering(pdb_file=io, pdb_code=self.main_structure.pdb_code.index, sequence_parser=True, signprot=structure_signprot)
signprot_pdb_array = assign_cgn.assign_cgn_with_sequence_parser(self.signprot_complex.alpha)
new_array = OrderedDict()
# Initiate complex part of template source
source_resis = Residue.objects.filter(protein_conformation__protein=self.target_signprot)
for res in source_resis:
if res.protein_segment.slug not in self.template_source:
self.template_source[res.protein_segment.slug] = OrderedDict()
if res.protein_segment.category=='loop':
self.template_source[res.protein_segment.slug][str(res.sequence_number)] = [None, None]
else:
self.template_source[res.protein_segment.slug][res.display_generic_number.label] = [self.main_structure, self.main_structure]
# Superimpose missing regions H1 - hfs2
alt_complex_struct = None
segs_for_alt_complex_struct = []
if self.main_structure.pdb_code.index!='3SN6':
segs_for_alt_complex_struct = ['H1', 'h1ha', 'HA', 'hahb', 'HB', 'hbhc', 'HC', 'hchd', 'HD', 'hdhe', 'HE', 'hehf', 'HF', 'hfs2']
alt_complex_struct = Structure.objects.get(pdb_code__index='3SN6')
io = StringIO(alt_complex_struct.pdb_data.pdb)
alt_signprot_complex = SignprotComplex.objects.get(structure__pdb_code__index='3SN6')
alt_assign_cgn = as_gn.GenericNumbering(pdb_file=io, pdb_code='3SN6', sequence_parser=True, signprot=alt_signprot_complex.protein)
alt_signprot_pdb_array = alt_assign_cgn.assign_cgn_with_sequence_parser(alt_signprot_complex.alpha)
before_cgns = ['G.HN.50', 'G.HN.51', 'G.HN.52', 'G.HN.53']
after_cgns = ['G.H5.03', 'G.H5.04', 'G.H5.05', 'G.H5.06']
orig_residues1 = parse.fetch_residues_from_array(signprot_pdb_array['HN'], before_cgns)
orig_residues2 = parse.fetch_residues_from_array(signprot_pdb_array['H5'], after_cgns)
orig_residues = parse.add_two_ordereddict(orig_residues1, orig_residues2)
alt_residues1 = parse.fetch_residues_from_array(alt_signprot_pdb_array['HN'], before_cgns)
alt_residues2 = parse.fetch_residues_from_array(alt_signprot_pdb_array['H5'], after_cgns)
for i,j in orig_residues.items():
print(i, j, j[0].get_parent())
print('ALTERNATIVES')
for i,j in alt_residues1.items():
print(i, j, j[0].get_parent())
for i,j in alt_residues2.items():
print(i, j, j[0].get_parent())
alt_middle = OrderedDict()
for s in segs_for_alt_complex_struct:
alt_middle = parse.add_two_ordereddict(alt_middle, alt_signprot_pdb_array[s])
self.template_source = update_template_source(self.template_source, list(self.template_source[s].keys()), alt_complex_struct, s)
alt_residues = parse.add_two_ordereddict(parse.add_two_ordereddict(alt_residues1, alt_middle), alt_residues2)
del_list = []
for r, t in alt_middle.items():
if t=='x':
del_list.append(r)
for r in del_list:
del alt_residues[r]
superpose = sp.LoopSuperpose(orig_residues, alt_residues)
new_residues = superpose.run()
key_list = list(new_residues.keys())[4:-4]
for key in key_list:
seg = key.split('.')[1]
signprot_pdb_array[seg][key] = new_residues[key]
# alt local loop alignment
alt_sign_a = GProteinAlignment()
alt_sign_a.run_alignment(self.target_signprot, alt_signprot_complex.protein, segments=segs_for_alt_complex_struct)
for alt_seg in segs_for_alt_complex_struct:
sign_a.reference_dict[alt_seg] = alt_sign_a.reference_dict[alt_seg]
sign_a.template_dict[alt_seg] = alt_sign_a.template_dict[alt_seg]
sign_a.alignment_dict[alt_seg] = alt_sign_a.alignment_dict[alt_seg]
# fix h1ha and hahb and hbhc
if self.target_signprot.entry_name!='gnas2_human':
h1ha = Residue.objects.filter(protein_conformation__protein=alt_signprot_complex.protein, protein_segment__slug='h1ha')
h1ha_dict, hahb_dict = OrderedDict(), OrderedDict()
for h in h1ha:
h1ha_dict[h.generic_number.label] = 'x'
signprot_pdb_array['h1ha'] = h1ha_dict
right_order = sorted(list(signprot_pdb_array['hahb'].keys()), key=lambda x: (x))
for r in right_order:
hahb_dict[r] = signprot_pdb_array['hahb'][r]
signprot_pdb_array['hahb'] = hahb_dict
# Let Modeller model buffer regions
self.trimmed_residues.append('s1h1_6')
self.trimmed_residues.append('hfs2_1')
self.trimmed_residues.append('hfs2_2')
self.trimmed_residues.append('hfs2_3')
self.trimmed_residues.append('hfs2_4')
self.trimmed_residues.append('hfs2_5')
self.trimmed_residues.append('hfs2_6')
self.trimmed_residues.append('hfs2_7')
self.trimmed_residues.append('G.S2.01')
self.trimmed_residues.append('G.S2.02')
self.trimmed_residues.append('s4h3_4')
self.trimmed_residues.append('s4h3_5')
# New loop alignments for signprot. If length differs between ref and temp, buffer is created in the middle of the loop
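        # Illustrative example (not from the source): a 4-residue ref loop against
        # a 2-residue template loop pairs r1-t1, r2-'-', r3-'-', r4-t2; the '-'
        # buffer keys are marked with '?' so they later end up in trimmed_residues
        # and are modeled freely by Modeller.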
loops = [i.slug for i in ProteinSegment.objects.filter(proteinfamily='Alpha', category='loop')]
loops_to_model = []
for r_seg, t_seg, a_seg in zip(sign_a.reference_dict, sign_a.template_dict, sign_a.alignment_dict):
if r_seg in loops:
loop_length = len(sign_a.reference_dict[r_seg])
ref_loop = [i for i in list(sign_a.reference_dict[r_seg].values()) if i not in ['x','-']]
ref_keys = [i for i in list(sign_a.reference_dict[r_seg].keys()) if i not in ['x','-']]
ref_loop_residues = Residue.objects.filter(protein_conformation__protein=self.target_signprot, protein_segment__slug=r_seg)
temp_loop = [i for i in list(sign_a.template_dict[t_seg].values()) if i not in ['x','-']]
temp_keys = [i for i in list(sign_a.template_dict[t_seg].keys()) if i not in ['x','-']]
if alt_complex_struct and r_seg in segs_for_alt_complex_struct:
temp_loop_residues = Residue.objects.filter(protein_conformation__protein=alt_signprot_complex.protein, protein_segment__slug=r_seg)
else:
temp_loop_residues = Residue.objects.filter(protein_conformation__protein=structure_signprot, protein_segment__slug=r_seg)
ref_out, temp_out, align_out = OrderedDict(), OrderedDict(), OrderedDict()
# ref is longer
if len(ref_loop)>len(temp_loop):
mid_temp = math.ceil(len(temp_loop)/2)
j = 0
for i in range(0, loop_length):
key = r_seg+'_'+str(i+1)
if i+1<=mid_temp:
temp_out[key] = temp_loop[i]
self.template_source = compare_and_update_template_source(self.template_source, r_seg, signprot_pdb_array, i, ref_loop_residues[i].display_generic_number.label,
ref_loop_residues[i].sequence_number, segs_for_alt_complex_struct, alt_complex_struct, self.main_structure)
elif mid_temp<i+1<=loop_length-mid_temp+1:
if i+1==loop_length-mid_temp+1 and len(temp_loop)%2==0:
temp_out[key] = temp_loop[mid_temp+j]
self.template_source = compare_and_update_template_source(self.template_source, r_seg, signprot_pdb_array, mid_temp+j, ref_loop_residues[i].display_generic_number.label,
ref_loop_residues[i].sequence_number, segs_for_alt_complex_struct, alt_complex_struct, self.main_structure)
j+=1
else:
temp_out[key.replace('_','?')] = '-'
self.template_source = compare_and_update_template_source(self.template_source, r_seg, signprot_pdb_array, mid_temp+j, ref_loop_residues[i].display_generic_number.label,
ref_loop_residues[i].sequence_number, segs_for_alt_complex_struct, alt_complex_struct, self.main_structure)
else:
temp_out[key] = temp_loop[mid_temp+j]
self.template_source = compare_and_update_template_source(self.template_source, r_seg, signprot_pdb_array, mid_temp+j, ref_loop_residues[i].display_generic_number.label,
ref_loop_residues[i].sequence_number, segs_for_alt_complex_struct, alt_complex_struct, self.main_structure)
j+=1
for i, j in enumerate(list(sign_a.reference_dict[r_seg].values())):
key = r_seg+'_'+str(i+1)
try:
temp_out[key]
ref_out[key] = j
                        except KeyError:
ref_out[key.replace('_','?')] = j
i+=1
# temp is longer
elif len(ref_loop)<len(temp_loop):
mid_ref = math.ceil(len(ref_loop)/2)
j = 0
for i in range(0, loop_length):
key = r_seg+'_'+str(i+1)
if i+1<=mid_ref:
ref_out[key] = ref_loop[i]
self.template_source = compare_and_update_template_source(self.template_source, r_seg, signprot_pdb_array, i, temp_loop_residues[i].display_generic_number.label,
ref_loop_residues[i].sequence_number, segs_for_alt_complex_struct, alt_complex_struct, self.main_structure)
elif mid_ref<i+1<=loop_length-mid_ref+1:
if i+1==loop_length-mid_ref+1 and len(ref_loop)%2==0:
ref_out[key] = ref_loop[mid_ref+j]
self.template_source = compare_and_update_template_source(self.template_source, r_seg, signprot_pdb_array, mid_ref+j, temp_loop_residues[i].display_generic_number.label,
ref_loop_residues[mid_ref+j].sequence_number, segs_for_alt_complex_struct, alt_complex_struct, self.main_structure)
j+=1
else:
ref_out[key.replace('_','?')] = '-'
self.template_source = compare_and_update_template_source(self.template_source, r_seg, signprot_pdb_array, mid_ref+j, temp_loop_residues[i].display_generic_number.label,
ref_loop_residues[mid_ref+j].sequence_number, segs_for_alt_complex_struct, alt_complex_struct, self.main_structure)
else:
ref_out[key] = ref_loop[mid_ref+j]
self.template_source = compare_and_update_template_source(self.template_source, r_seg, signprot_pdb_array, mid_ref+j, temp_loop_residues[i].display_generic_number.label,
ref_loop_residues[mid_ref+j].sequence_number, segs_for_alt_complex_struct, alt_complex_struct, self.main_structure)
j+=1
for i, j in enumerate(list(sign_a.template_dict[t_seg].values())):
key = r_seg+'_'+str(i+1)
try:
ref_out[key]
temp_out[key] = j
                        except KeyError:
temp_out[key.replace('_','?')] = j
i+=1
loops_to_model.append(r_seg)
# ref and temp length equal
else:
c = 1
for i, j in zip(list(sign_a.reference_dict[r_seg].values()), list(sign_a.template_dict[t_seg].values())):
ref_out[r_seg+'_'+str(c)] = i
temp_out[r_seg+'_'+str(c)] = j
self.template_source = compare_and_update_template_source(self.template_source, r_seg, signprot_pdb_array, c-1, temp_loop_residues[c-1].display_generic_number.label,
ref_loop_residues[c-1].sequence_number, segs_for_alt_complex_struct, alt_complex_struct, self.main_structure)
c+=1
c = 1
# update alignment dict
for i, j in zip(list(ref_out.values()), list(temp_out.values())):
key = r_seg+'_'+str(c)
if i=='-' or j=='-':
align_out[key.replace('_','?')] = '-'
elif i!=j:
align_out[key] = '.'
elif i==j:
align_out[key] = i
c+=1
# update pdb array
new_pdb_array = OrderedDict()
atoms_list = list(signprot_pdb_array[t_seg].values())
j = 0
for t_c, t in temp_out.items():
jplus1 = False
if t!='-':
for i in range(j, len(atoms_list)):
if atoms_list[j]!='-':
new_pdb_array[t_c] = atoms_list[j]
jplus1 = True
break
if jplus1:
j+=1
else:
new_pdb_array[t_c] = 'x'
# j+=1
# pprint.pprint(new_pdb_array)
# for i,j in new_pdb_array.items():
# try:
# print(i, PDB.Polypeptide.three_to_one(j[0].get_parent().get_resname()))
# except:
# print(i, j)
# update dictionary keys with '?' if no backbone template
ref_out_final, temp_out_final, align_out_final, new_pdb_array_final = OrderedDict(), OrderedDict(), OrderedDict(), OrderedDict()
# self.template_source[r_seg] = OrderedDict()
for i,j in new_pdb_array.items():
if '?' not in i and j=='x':
ref_out_final[i.replace('_','?').replace('.','?')] = ref_out[i]
temp_out_final[i.replace('_','?').replace('.','?')] = temp_out[i]
align_out_final[i.replace('_','?').replace('.','?')] = align_out[i]
new_pdb_array_final[i.replace('_','?').replace('.','?')] = new_pdb_array[i]
else:
ref_out_final[i] = ref_out[i]
temp_out_final[i] = temp_out[i]
align_out_final[i] = align_out[i]
new_pdb_array_final[i] = new_pdb_array[i]
sign_a.reference_dict[r_seg] = ref_out_final
sign_a.template_dict[t_seg] = temp_out_final
sign_a.alignment_dict[a_seg] = align_out_final
signprot_pdb_array[r_seg] = new_pdb_array_final
align_loop = list(sign_a.alignment_dict[a_seg].values())
self.a.reference_dict = deepcopy(self.a.reference_dict)
self.a.template_dict = deepcopy(self.a.template_dict)
self.a.alignment_dict = deepcopy(self.a.alignment_dict)
for seg, values in sign_a.reference_dict.items():
new_array[seg] = OrderedDict()
# self.template_source[seg] = OrderedDict()
final_values = deepcopy(values)
for key, res in values.items():
try:
if signprot_pdb_array[seg][key]=='x':
new_array[seg][key] = 'x'
self.template_source = update_template_source(self.template_source, [key], None, seg)
else:
new_array[seg][key] = signprot_pdb_array[seg][key]
                except KeyError:
if res!='-':
new_array[seg][key] = '-'
self.template_source = update_template_source(self.template_source, [key], None, seg)
self.a.reference_dict[seg] = final_values
for seg, values in sign_a.template_dict.items():
for key, res in values.items():
if new_array[seg][key]=='x':
sign_a.template_dict[seg][key] = 'x'
else:
if new_array[seg][key]=='-':
sign_a.template_dict[seg][key] = '-'
else:
pdb_res = PDB.Polypeptide.three_to_one(new_array[seg][key][0].get_parent().get_resname())
if pdb_res!=sign_a.template_dict[seg][key]:
sign_a.template_dict[seg][key] = pdb_res
self.a.template_dict[seg] = sign_a.template_dict[seg]
for seg, values in sign_a.alignment_dict.items():
for key, res in values.items():
if new_array[seg][key]=='x':
values[key] = 'x'
self.a.alignment_dict[seg] = values
signprot_pdb_array = new_array
for seg, values in signprot_pdb_array.items():
self.main_pdb_array[seg] = values
delete_HN_begin = []
for i in self.a.reference_dict['HN']:
if i=='G.HN.30':
break
delete_HN_begin.append(i)
for d in delete_HN_begin:
del self.a.reference_dict['HN'][d]
            try:
                del self.a.template_dict['HN'][d]
            except KeyError:
                pass
            try:
                del self.a.alignment_dict['HN'][d]
            except KeyError:
                pass
            del self.main_pdb_array['HN'][d]
            try:
                del self.template_source['HN'][d]
            except KeyError:
                pass
# add residues to model to self.trimmed_residues
gprot_segments = [i.slug for i in ProteinSegment.objects.filter(proteinfamily='Alpha')]
for i,j in self.a.reference_dict.items():
if i in gprot_segments:
for k,l in j.items():
if '?' in k or self.main_pdb_array[i][k] in ['-','x']:
self.trimmed_residues.append(k)
if i in loops_to_model:
self.trimmed_residues.append(k)
# custom mods
long_HG_prots = Protein.objects.filter(family__name='Gs')
if structure_signprot in long_HG_prots and self.target_signprot not in long_HG_prots:
self.trimmed_residues.append('G.HG.08')
self.trimmed_residues.append('G.HG.09')
self.trimmed_residues.append('G.HG.12')
self.trimmed_residues.append('G.HG.13')
self.trimmed_residues.append('G.HG.14')
self.trimmed_residues.append('G.HG.16')
self.trimmed_residues.append('G.HG.17')
if structure_signprot!=self.target_signprot or alt_signprot_complex.protein not in [None, self.target_signprot]:
# hbhc
hbhc_keys = list(self.a.reference_dict['hbhc'].keys())
self.trimmed_residues.append(hbhc_keys[2])
self.trimmed_residues.append(hbhc_keys[3])
self.trimmed_residues.append(hbhc_keys[-3])
self.trimmed_residues.append(hbhc_keys[-2])
# H1
self.trimmed_residues.append('G.H1.07')
self.trimmed_residues.append('G.H1.08')
if 'hgh4' in loops_to_model:
self.trimmed_residues.append('G.H4.01')
self.trimmed_residues.append('G.H4.02')
self.trimmed_residues.append('G.H4.03')
# Add mismatching residues to trimmed residues for modeling
for seg, val in self.a.alignment_dict.items():
if seg in gprotein_segment_slugs:
for key, res in val.items():
if res=='.':
self.trimmed_residues.append(key)
# Add residues with missing atom coordinates to trimmed residues for modeling
for seg, val in self.main_pdb_array.items():
if seg in gprotein_segment_slugs:
for key, atoms in val.items():
if atoms not in ['-','x']:
if atom_num_dict[PDB.Polypeptide.three_to_one(atoms[0].get_parent().get_resname())]>len(atoms):
self.trimmed_residues.append(key)
# Add Beta and Gamma chains
p = PDB.PDBParser(QUIET=True).get_structure('structure', StringIO(self.main_structure.pdb_data.pdb))[0]
beta = p[self.signprot_complex.beta_chain]
gamma = p[self.signprot_complex.gamma_chain]
self.a.reference_dict['Beta'] = OrderedDict()
self.a.template_dict['Beta'] = OrderedDict()
self.a.alignment_dict['Beta'] = OrderedDict()
self.main_pdb_array['Beta'] = OrderedDict()
self.template_source['Beta'] = OrderedDict()
self.a.reference_dict['Gamma'] = OrderedDict()
self.a.template_dict['Gamma'] = OrderedDict()
self.a.alignment_dict['Gamma'] = OrderedDict()
self.main_pdb_array['Gamma'] = OrderedDict()
self.template_source['Gamma'] = OrderedDict()
for b_res in beta:
key = str(b_res.get_id()[1])
self.a.reference_dict['Beta'][key] = PDB.Polypeptide.three_to_one(b_res.get_resname())
self.a.template_dict['Beta'][key] = PDB.Polypeptide.three_to_one(b_res.get_resname())
self.a.alignment_dict['Beta'][key] = PDB.Polypeptide.three_to_one(b_res.get_resname())
atoms = [atom for atom in b_res]
self.main_pdb_array['Beta'][key] = atoms
self.template_source['Beta'][key] = [self.main_structure, self.main_structure]
for g_res in gamma:
key = str(g_res.get_id()[1])
self.a.reference_dict['Gamma'][key] = PDB.Polypeptide.three_to_one(g_res.get_resname())
self.a.template_dict['Gamma'][key] = PDB.Polypeptide.three_to_one(g_res.get_resname())
self.a.alignment_dict['Gamma'][key] = PDB.Polypeptide.three_to_one(g_res.get_resname())
atoms = [atom for atom in g_res]
self.main_pdb_array['Gamma'][key] = atoms
self.template_source['Gamma'][key] = [self.main_structure, self.main_structure]
# raise AssertionError
# for i,j,k,l in zip(sign_a.reference_dict, sign_a.template_dict, sign_a.alignment_dict, signprot_pdb_array):
# pprint.pprint(self.template_source[i])
# for v,b,n,m in zip(sign_a.reference_dict[i], sign_a.template_dict[j], sign_a.alignment_dict[k], signprot_pdb_array[l]):
# print(v, b, n, m, sign_a.reference_dict[i][v], sign_a.template_dict[j][b], sign_a.alignment_dict[k][n], signprot_pdb_array[l][m])
```
|
{
"source": "JesperWestell/GMVAE_Experiments",
"score": 2
}
|
#### File: JesperWestell/GMVAE_Experiments/subgraphs.py
```python
import tensorflow as tf
from tensorbayes.layers import gaussian_sample
from tensorbayes.distributions import log_bernoulli_with_logits, log_normal, log_squared_loss
import numpy as np
# vae subgraphs
def qy_graph(x, k=10):
reuse = len(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='qy')) > 0
# -- q(y)
with tf.variable_scope('qy'):
h1 = tf.contrib.layers.fully_connected(x, 2, scope='layer1',
activation_fn=tf.nn.relu,
reuse=reuse)
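        # NOTE: h1 is currently unused; qy_logit below is computed directly
        # from x rather than from the hidden layer.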
qy_logit = tf.contrib.layers.fully_connected(x, k, scope='logit',
activation_fn=tf.nn.relu, reuse=reuse)
qy = tf.nn.softmax(qy_logit, name='prob')
return qy_logit, qy
def qz_graph(x, y, n_z):
reuse = len(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='qz')) > 0
# -- q(z)
with tf.variable_scope('qz'):
xy = tf.concat((x, y), 1, name='xy/concat')
h1 = tf.contrib.layers.fully_connected(xy, 4, scope='layer1',
activation_fn=tf.nn.relu,
reuse=reuse)
zm = tf.contrib.layers.fully_connected(h1, n_z, scope='zm',
activation_fn=None,
reuse=reuse)
zv = tf.contrib.layers.fully_connected(h1, n_z, scope='zv',
activation_fn=tf.nn.softplus,
reuse=reuse)
z = gaussian_sample(zm, zv, 'z')
# Used to feed into z when sampling
z = tf.identity(z,name='z_sample')
return z, zm, zv
def z_graph(zm,zv):
with tf.variable_scope('z'):
z = gaussian_sample(zm, zv, 'z')
# Used to feed into z when sampling
z = tf.identity(z, name='z_sample')
return z
def pz_graph(y, n_z):
reuse = len(
tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='pz')) > 0
# -- p(z)
with tf.variable_scope('pz'):
zm = tf.contrib.layers.fully_connected(y, n_z, scope='zm',
activation_fn=None, reuse=reuse)
zv = tf.contrib.layers.fully_connected(y, n_z, scope='zv',
activation_fn=tf.nn.softplus,
reuse=reuse)
return y, zm, zv
def px_fixed_graph(z):
reuse = len(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='px_fixed')) > 0
# -- p(x)
with tf.variable_scope('px_fixed'):
h = tf.contrib.layers.fully_connected(z, 2, scope='layer1',
activation_fn=tf.nn.relu,
reuse=reuse,
weights_initializer=tf.constant_initializer(
[[1,-2],[1,-2]], verify_shape=True))
px_logit = tf.contrib.layers.fully_connected(h, 2, scope='output',
activation_fn=None,
reuse=reuse,
weights_initializer=tf.constant_initializer(
[[1,-4],[-3,2]],
verify_shape=True))
#px_logit = tf.identity(px_logit,name='x')
return px_logit
def px_graph(z, n_x):
reuse = len(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='px')) > 0
# -- p(x)
with tf.variable_scope('px'):
h = tf.contrib.layers.fully_connected(z, 2, scope='layer1',
activation_fn=tf.nn.relu,
reuse=reuse)
px_logit = tf.contrib.layers.fully_connected(h, n_x, scope='output',
activation_fn=None,
reuse=reuse)
#px_logit = tf.identity(px_logit,name='x')
return px_logit
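# The labeled loss below combines a reconstruction term with a latent
# regularizer, log q(z|x,y) - log p(z|y) evaluated at the sampled z, plus the
# constant -log p(y) term (-log(0.1), i.e. a uniform prior over the k=10
# clusters assumed by the default qy_graph).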
def labeled_loss(x, px_logit, z, zm, zv, zm_prior, zv_prior):
xy_loss = log_squared_loss(x, px_logit)
xy_loss += log_normal(z, zm, zv) - log_normal(z, zm_prior, zv_prior)
return xy_loss - np.log(0.1)
```
#### File: GMVAE_Experiments/tensorbayes/distributions.py
```python
import tensorflow as tf
import numpy as np
def log_bernoulli_with_logits(x, logits, eps=0.0, axis=-1):
if eps > 0.0:
max_val = np.log(1.0 - eps) - np.log(eps)
logits = tf.clip_by_value(logits, -max_val, max_val,
name='clipped_logit')
return -tf.reduce_sum(
tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=x), axis)
def log_squared_loss(x,logits):
return tf.losses.mean_squared_error(x,logits)
def log_normal(x, mu, var, eps=0.0, axis=-1):
if eps > 0.0:
var = tf.add(var, eps, name='clipped_var')
return -0.5 * tf.reduce_sum(
tf.log(2 * np.pi) + tf.log(var) + tf.square(x - mu) / var, axis)
```
|
{
"source": "JesperWestell/Greedy_InfoMax_w_PointNet2",
"score": 3
}
|
#### File: pointnet/arg_parser/general_args.py
```python
def parse_general_args(parser):
parser.add_option(
"--experiment",
type="string",
default="vision",
help="not a real option, just for bookkeeping (stays the same for pointnet)",
)
parser.add_option(
"--dataset",
type="string",
default="modelnet40",
help="Dataset to use for training, default: modelnet40",
)
parser.add_option(
"--num_workers", type="int", default=8, help="Number of dataloader workers"
)
parser.add_option(
"--num_points", type="int", default=1024, help="Number of points in each point cloud, max 2048"
)
parser.add_option(
"--num_unsupervised_training_samples", type="int", default=8000,
help="Number of training examples to be used in unsupervised learning out of the total "
"9843 in the ModelNet40 training data. The rest are used in supervised learning."
)
parser.add_option(
"--num_epochs", type="int", default=300, help="Number of Epochs for Training"
)
parser.add_option("--seed", type="int", default=2, help="Random seed for training")
parser.add_option("--batch_size", type="int", default=12, help="Batchsize")
parser.add_option(
"-i",
"--data_input_dir",
type="string",
default="./datasets",
help="Directory to store bigger datafiles (dataset and models)",
)
parser.add_option(
"-o",
"--data_output_dir",
type="string",
default=".",
help="Directory to store bigger datafiles (dataset and models)",
)
parser.add_option(
"--loss",
type="string",
default="info_nce",
help="Loss function to use for training:"
"info_nce - InfoNCE loss"
"supervised - supervised loss using class labels for training whole network"
"classifier - supervised loss on classifier with pre-trained encoder modules",
)
parser.add_option(
"--validate",
action="store_true",
default=True,
help="Boolean to decide whether to plot validation loss (True) or not (False)",
)
parser.add_option(
"--weight_init",
action="store_true",
default=False,
help="Boolean to decide whether to use special weight initialization (delta orthogonal)",
)
parser.add_option(
"--save_dir",
type="string",
default="",
help="If given, uses this string to create directory to save results in "
"(be careful, this can overwrite previous results); "
"otherwise saves logs according to time-stamp",
)
return parser
```
#### File: pointnet/arg_parser/plotter_args.py
```python
def parse_plotter_args(parser):
parser.add_option(
"--name_of_3dmodel",
type="string",
default="airplane/airplane_0630.ply",
help="name of 3D model to plot using the 'plot_subclouds' script",
)
parser.add_option(
"--save_plot_frames",
action="store_true",
default=False,
help="whether to record the 3D model shown in 'plot_subclouds' script",
)
parser.add_option(
"--plotted_image_folder",
type="string",
default="./gif_images",
help="folder to store the images created if save_plot_frames=True",
)
return parser
```
#### File: pointnet/data/get_dataloader.py
```python
import torch
import torchvision.transforms
import torchvision
import os
import numpy as np
from copy import copy
from GreedyInfoMax.pointnet.data.ModelNet40Dataset import ModelNet40Cls
import GreedyInfoMax.pointnet.data.data_utils as data_utils
def get_dataloader(opt):
if opt.dataset == "modelnet40":
unsupervised_loader, \
unsupervised_dataset, \
train_loader, \
train_dataset, \
test_loader, \
test_dataset = get_modelnet40_dataloaders(
opt
)
else:
raise Exception("Invalid option")
return (
unsupervised_loader,
unsupervised_dataset,
train_loader,
train_dataset,
test_loader,
test_dataset,
)
def get_modelnet40_dataloaders(opt):
    # Create the input directory if it does not exist yet
    os.makedirs(opt.data_input_dir, exist_ok=True)
base_folder = os.path.join(opt.data_input_dir, "modelnet40")
transforms = torchvision.transforms.transforms.Compose(
[
data_utils.PointcloudToTensor(),
data_utils.PointcloudRotate(axis=np.array([1, 0, 0])),
data_utils.PointcloudScale(),
data_utils.PointcloudTranslate(),
data_utils.PointcloudJitter(),
]
)
modelnet40_train_dataset = ModelNet40Cls(base_folder, opt.num_points, train=True, transforms=transforms)
modelnet40_test_dataset = ModelNet40Cls(base_folder, opt.num_points, train=False, transforms=None)
modelnet40_unsupervised_dataset = copy(modelnet40_train_dataset)
# Split between supervised and unsupervised training data
modelnet40_unsupervised_dataset.points = modelnet40_unsupervised_dataset.points[
:opt.num_unsupervised_training_samples]
modelnet40_unsupervised_dataset.labels = modelnet40_unsupervised_dataset.labels[
:opt.num_unsupervised_training_samples]
modelnet40_train_dataset.points = modelnet40_train_dataset.points[
opt.num_unsupervised_training_samples:]
modelnet40_train_dataset.labels = modelnet40_train_dataset.labels[
opt.num_unsupervised_training_samples:]
# default dataset loaders
train_loader = torch.utils.data.DataLoader(
modelnet40_train_dataset,
batch_size=opt.batch_size_multiGPU,
shuffle=True,
num_workers=opt.num_workers
)
unsupervised_loader = torch.utils.data.DataLoader(
modelnet40_unsupervised_dataset,
batch_size=opt.batch_size_multiGPU,
shuffle=True,
num_workers=opt.num_workers,
)
test_loader = torch.utils.data.DataLoader(
modelnet40_test_dataset,
batch_size=opt.batch_size_multiGPU,
shuffle=False,
num_workers=opt.num_workers
)
return unsupervised_loader, modelnet40_unsupervised_dataset, \
train_loader, modelnet40_train_dataset, \
test_loader, modelnet40_test_dataset
```
#### File: pointnet/data/ModelNet40Dataset.py
```python
from __future__ import (
division,
absolute_import,
with_statement,
print_function,
unicode_literals,
)
import torch
import torch.utils.data as data
import numpy as np
import os
import h5py
import subprocess
import shlex
import json
def _get_data_files(list_filename):
with open(list_filename) as f:
return [line.rstrip()[5:] for line in f]
def _load_data_file(name):
f = h5py.File(name, "r")
data = f["data"][:]
label = f["label"][:]
return data, label
def _load_model_names(filename):
with open(filename, 'r') as f:
names = json.load(f)
return names
class ModelNet40Cls(data.Dataset):
def __init__(self, data_dir, num_points=1024, transforms=None, train=True, download=True):
super().__init__()
self.transforms = transforms
self.url = "https://shapenet.cs.stanford.edu/media/modelnet40_ply_hdf5_2048.zip"
if download and not os.path.exists(data_dir):
zipfile = os.path.join(os.path.dirname(data_dir), os.path.basename(self.url))
subprocess.check_call(
shlex.split("curl {} -o {}".format(self.url, zipfile))
)
subprocess.check_call(
shlex.split("unzip {} -d {}".format(zipfile, data_dir))
)
subprocess.check_call(shlex.split("rm {}".format(zipfile)))
unzip_dir = os.path.join(data_dir, os.path.splitext(os.path.basename(self.url))[0])
self.train = train
if self.train:
self.files = _get_data_files(os.path.join(unzip_dir, "train_files.txt"))
else:
self.files = _get_data_files(os.path.join(unzip_dir, "test_files.txt"))
self.names = []
point_list, label_list = [], []
for f in self.files:
points, labels = _load_data_file(os.path.join(data_dir, f))
point_list.append(points)
label_list.append(labels)
# Read model names for referencing when plotting point clouds
f = os.path.splitext(f)[0]
f = f[:-1] + "_{:s}_id2file.json".format(f[-1])
self.names.extend(_load_model_names(os.path.join(data_dir, f)))
self.points = np.concatenate(point_list, 0)
self.labels = np.concatenate(label_list, 0)
self.set_num_points(num_points)
def __getitem__(self, idx):
pt_idxs = np.arange(0, self.num_points)
np.random.shuffle(pt_idxs)
current_points = self.points[idx, pt_idxs].copy()
label = torch.from_numpy(self.labels[idx]).type(torch.LongTensor)
if self.transforms is not None:
current_points = self.transforms(current_points)
return current_points, label
def __len__(self):
return self.points.shape[0]
def set_num_points(self, pts):
self.num_points = min(self.points.shape[1], pts)
def randomize(self):
pass
```
#### File: pointnet/models/ClassificationModel.py
```python
import torch.nn as nn
class ClassificationModel(nn.Sequential):
def __init__(self, in_channels=1024, num_classes=40, hidden_nodes=[512, 256]):
super(ClassificationModel, self).__init__()
self.in_channels = in_channels
self.num_classes = num_classes
if not isinstance(hidden_nodes, list):
hidden_nodes = [hidden_nodes]
l = [in_channels] + hidden_nodes + [num_classes]
for il in range(len(l) - 1):
self.add_module("relu{}".format(il), nn.ReLU(inplace=True))
self.add_module("fc_layer{}".format(il), nn.Linear(l[il], l[il + 1], bias=True))
```
#### File: pointnet/models/model_utils.py
```python
import torch
import torch.nn as nn
from PointNet2.pointnet2.pointnet2_utils import QueryAndGroup
class PointCloudGrouper(nn.Module):
"""
Splits a point cloud into multiple overlapping sub point clouds, to be used in the Greedy InfoMax algorithm
"""
def __init__(self, opt):
super(PointCloudGrouper, self).__init__()
self.cube_size = opt.subcloud_cube_size
self.centers = self._create_sub_cloud_centers(opt)
self.splitter = QueryAndGroup(opt.subcloud_ball_radius, opt.subcloud_num_points)
    def _create_sub_cloud_centers(self, opt):
        # Build a cube_size^3 grid of query centers, centered on the origin and
        # spaced by the ball radius so that neighboring sub-clouds overlap.
centers = torch.Tensor([[x, y, z] for
x in range(opt.subcloud_cube_size)
for y in range(opt.subcloud_cube_size)
for z in range(opt.subcloud_cube_size)]).unsqueeze(0).to(opt.device)
centers -= (opt.subcloud_cube_size - 1) / 2
centers *= opt.subcloud_ball_radius
return centers
def forward(self, xyz):
# (B, N, 3) -> (B, 3, cube_size^3, num_points)
xyz = self.splitter(xyz, self.centers.repeat(xyz.shape[0], 1, 1))
# (B, 3, cube_size^3, num_points) -> (B, cube_size^3, num_points, 3)
xyz = xyz.permute(0, 2, 3, 1)
# B, cube_size^3, num_points, 3) -> (B*cube_size^3, num_points, 3)
xyz = xyz.reshape(xyz.shape[0]*xyz.shape[1], xyz.shape[2], xyz.shape[3])
# If there are no points to gather within the sphere for a group, the function used in QueryAndGroup will
# simply fill the group with copies of the center point. While this works without problems when grouping in
# the PointNet++ algorithm, in our case it will result in creating points that don't exist in the original
# point cloud. To fix this, we need to keep track of these 'failed' groups so that we ignore them in the
# loss function.
        # A group is flagged as 'failed' when its 2nd and 3rd points agree in all
        # three coordinates, i.e. the query ball was padded with copies of its center.
        failed_groups = torch.eq(torch.eq(xyz[:, 1], xyz[:, 2]).sum(dim=1), 3)
return xyz, failed_groups
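# Minimal usage sketch (hypothetical shapes; assumes `opt` provides device,
# subcloud_cube_size, subcloud_ball_radius and subcloud_num_points):
#   grouper = PointCloudGrouper(opt)
#   xyz = torch.rand(4, 1024, 3, device=opt.device)
#   sub_xyz, failed = grouper(xyz)
#   # sub_xyz: (4 * cube_size**3, num_points, 3); failed: (4 * cube_size**3,) bool mask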
```
#### File: pointnet/models/PointNetEncoder.py
```python
import torch
import torch.nn as nn
from GreedyInfoMax.pointnet.models.InfoNCE_Loss import InfoNCE_Loss
class PointNetEncoder(nn.Module):
def __init__(self, opt, SA_modules: list, feature_length: int, encoder_num: int, point_cloud_grouper=None):
super(PointNetEncoder, self).__init__()
self.opt = opt
self.SA_modules = nn.ModuleList(SA_modules)
self.encoder_num = encoder_num
self.calc_loss = opt.loss == 'info_nce'
self.grouper = point_cloud_grouper
if self.calc_loss and self.encoder_num == 0:
assert self.grouper is not None, "Needs to have centers for each sub point cloud defined!"
# Always add loss for parameter loading reasons, but might not use it
self.loss = InfoNCE_Loss(opt,
in_channels=feature_length,
out_channels=feature_length)
self.failed_groups = None
def set_calc_loss(self, calc_loss):
self.calc_loss = calc_loss
def _patchify(self, x):
x, failed_groups = self.grouper(xyz=x)
return x, failed_groups
def forward(self, xyz, features, failed_groups=None):
if self.calc_loss and self.encoder_num == 0:
assert features is None # Should only do this at first layer
xyz, failed_groups = self._patchify(xyz)
for m in self.SA_modules:
xyz, features = m(xyz=xyz, features=features)
if self.calc_loss:
z = features.mean(dim=2) # Average features of all points in each subcloud
z = z.reshape(-1,
self.opt.subcloud_cube_size,
self.opt.subcloud_cube_size,
self.opt.subcloud_cube_size,
z.shape[1])
z = z.permute(0,4,1,2,3) # (B, C, cube_size, cube_size, cube_size)
targets_to_ignore = failed_groups.reshape(-1,
self.opt.subcloud_cube_size,
self.opt.subcloud_cube_size,
self.opt.subcloud_cube_size)
loss = self.loss(z, z, targets_to_ignore=targets_to_ignore)
else:
loss = None
return xyz, features, loss, failed_groups
```
|
{
"source": "jespino/anillo",
"score": 3
}
|
#### File: anillo/middlewares/params.py
```python
import functools
from anillo.utils.common import merge_dicts
from urllib.parse import parse_qs
from cgi import parse_header
def wrap_form_params(func):
"""
A middleware that parses the url-encoded body and attach
the result to the request `form_params` attribute.
This middleware also merges the parsed value with the existing
`params` attribute in same way as `wrap_query_params` is doing.
"""
@functools.wraps(func)
def wrapper(request, *args, **kwargs):
ctype, pdict = parse_header(request.headers.get('Content-Type', ''))
if ctype == "application/x-www-form-urlencoded":
params = {}
for key, value in parse_qs(request.body.decode("utf-8")).items():
if len(value) == 1:
params[key] = value[0]
else:
params[key] = value
request.params = merge_dicts(getattr(request, "params", None), params)
request.form_params = params
return func(request, *args, **kwargs)
return wrapper
def wrap_query_params(func):
"""
    A middleware that parses the url-encoded params from the query string
    and attaches them to the request's `query_params` attribute.
    It also merges the parsed values into the existing `params`
    attribute, in the same way as `wrap_form_params` does.
"""
@functools.wraps(func)
def wrapper(request, *args, **kwargs):
params = {}
for key, value in parse_qs(request.query_string.decode("utf-8")).items():
if len(value) == 1:
params[key] = value[0]
else:
params[key] = value
request.params = merge_dicts(getattr(request, "params", None), params)
request.query_params = params
return func(request, *args, **kwargs)
return wrapper
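# Minimal usage sketch (hypothetical handler; assumes an anillo-style request
# object exposing `headers`, `body` and `query_string`):
#   @wrap_query_params
#   @wrap_form_params
#   def handler(request):
#       return {"status": 200, "body": repr(request.params)}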
```
|
{
"source": "jespino/anillo-cookie-session",
"score": 3
}
|
#### File: anillo-cookie-session/anillo_cookie_session/backends.py
```python
import json
from jwkest.jwk import SYMKey
from jwkest.jwe import JWE
from jwkest.jws import JWS  # used by JWSCookieStorage below
class _SessionKey:
def __init__(self, value):
self.value = value
def set(self, value):
self.value = value
def get(self):
return self.value
class BaseCookieStorage:
def __init__(self, cookie_name="session-id"):
self.cookie_name = cookie_name
def get_session_key(self, request):
return _SessionKey(request.get('cookies', {}).get(self.cookie_name, {}).get('value', None))
def persist_session_key(self, request, response, session_key):
if request.get("cookies", {}).get(self.cookie_name, {}).get('value', None) != session_key.get():
if not hasattr(response, 'cookies'):
response.cookies = {}
response.cookies[self.cookie_name] = {"value": session_key.get()}
def store(self, request, response, session_key, data):
session_key.set(self.dumps(data))
def retrieve(self, request, session_key):
try:
return self.loads(session_key.get())
except Exception:
return {}
class InsecureJsonCookieStorage(BaseCookieStorage):
def dumps(self, data):
return json.dumps(data)
def loads(self, data):
return json.loads(data)
class JWSCookieStorage(BaseCookieStorage):
def __init__(self, secret, cookie_name="session-id", sign_alg="ES256"):
self.cookie_name = cookie_name
self.secret = secret
self.sign_alg = sign_alg
    def dumps(self, data):
        # Sign with a symmetric key built from the shared secret
        sym_key = SYMKey(key=self.secret, alg=self.sign_alg)
        jws = JWS(data, alg=self.sign_alg)
        return jws.sign_compact(keys=[sym_key])
    def loads(self, data):
        sym_key = SYMKey(key=self.secret, alg=self.sign_alg)
        jws = JWS()
        return jws.verify_compact(data, keys=[sym_key])
class JWECookieStorage(BaseCookieStorage):
def __init__(self, secret, cookie_name="session-id", cypher_alg="A128KW", cypher_enc="A256CBC-HS512"):
self.cookie_name = cookie_name
self.secret = secret
self.cypher_alg = cypher_alg
self.cypher_enc = cypher_enc
self.sym_key = SYMKey(key=self.secret, alg=self.cypher_alg)
def dumps(self, data):
jwe = JWE(json.dumps(data), alg=self.cypher_alg, enc=self.cypher_enc)
return jwe.encrypt([self.sym_key])
def loads(self, data):
(plain, success) = JWE().decrypt(data, keys=[self.sym_key])
if success:
return json.loads(plain.decode('utf-8'))
return None
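# Round-trip sketch (hypothetical 16-byte key for the default A128KW algorithm):
#   storage = JWECookieStorage(b"0123456789abcdef")
#   token = storage.dumps({"user_id": 42})
#   assert storage.loads(token) == {"user_id": 42}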
```
|
{
"source": "jespino/chia-blockchain",
"score": 2
}
|
#### File: tests/core/test_cost_calculation.py
```python
import asyncio
import time
import logging
import pytest
from src.consensus.cost_calculator import calculate_cost_of_program, CostResult
from src.full_node.bundle_tools import best_solution_program
from src.full_node.mempool_check_conditions import (
get_name_puzzle_conditions,
get_puzzle_and_solution_for_coin,
)
from src.types.blockchain_format.program import SerializedProgram
from src.util.byte_types import hexstr_to_bytes
from tests.setup_nodes import test_constants, bt
from clvm_tools import binutils
BURN_PUZZLE_HASH = b"0" * 32
log = logging.getLogger(__name__)
@pytest.fixture(scope="module")
def event_loop():
loop = asyncio.get_event_loop()
yield loop
@pytest.fixture(scope="module")
def large_txn_hex():
import pathlib
my_dir = pathlib.Path(__file__).absolute().parent
with open(my_dir / "large-block.hex", "r") as f:
hex_str = f.read()
yield hex_str
class TestCostCalculation:
@pytest.mark.asyncio
async def test_basics(self):
wallet_tool = bt.get_pool_wallet_tool()
ph = wallet_tool.get_new_puzzlehash()
num_blocks = 3
blocks = bt.get_consecutive_blocks(
num_blocks, [], guarantee_transaction_block=True, pool_reward_puzzle_hash=ph, farmer_reward_puzzle_hash=ph
)
coinbase = None
for coin in blocks[2].get_included_reward_coins():
if coin.puzzle_hash == ph:
coinbase = coin
break
assert coinbase is not None
spend_bundle = wallet_tool.generate_signed_transaction(
coinbase.amount,
BURN_PUZZLE_HASH,
coinbase,
)
assert spend_bundle is not None
program = best_solution_program(spend_bundle)
ratio = test_constants.CLVM_COST_RATIO_CONSTANT
result: CostResult = calculate_cost_of_program(program, ratio)
clvm_cost = result.cost
error, npc_list, cost = get_name_puzzle_conditions(program, False)
assert error is None
coin_name = npc_list[0].coin_name
error, puzzle, solution = get_puzzle_and_solution_for_coin(program, coin_name)
assert error is None
# Create condition + agg_sig_condition + length + cpu_cost
assert clvm_cost == 200 * ratio + 20 * ratio + len(bytes(program)) * ratio + cost
@pytest.mark.asyncio
async def test_strict_mode(self):
wallet_tool = bt.get_pool_wallet_tool()
ph = wallet_tool.get_new_puzzlehash()
num_blocks = 3
blocks = bt.get_consecutive_blocks(
num_blocks, [], guarantee_transaction_block=True, pool_reward_puzzle_hash=ph, farmer_reward_puzzle_hash=ph
)
coinbase = None
for coin in blocks[2].get_included_reward_coins():
if coin.puzzle_hash == ph:
coinbase = coin
break
assert coinbase is not None
spend_bundle = wallet_tool.generate_signed_transaction(
coinbase.amount,
BURN_PUZZLE_HASH,
coinbase,
)
assert spend_bundle is not None
program = SerializedProgram.from_bytes(
binutils.assemble(
"(q . ((0x3d2331635a58c0d49912bc1427d7db51afe3f20a7b4bcaffa17ee250dcbcbfaa"
" (((c (q . ((c (q . ((c (i 11 (q . ((c (i (= 5 (point_add 11"
" (pubkey_for_exp (sha256 11 ((c 6 (c 2 (c 23 (q . ())))))))))"
" (q . ((c 23 47))) (q . (x))) 1))) (q . (c (c 4 (c 5 (c ((c 6 (c 2"
" (c 23 (q . ()))))) (q . ())))) ((c 23 47))))) 1))) (c (q . (57 (c"
" (i (l 5) (q . (sha256 (q . 2) ((c 6 (c 2 (c 9 (q . ()))))) ((c 6 (c"
" 2 (c 13 (q . ()))))))) (q . (sha256 (q . 1) 5))) 1))) 1)))) (c"
" (q . 0x88bc9360319e7c54ab42e19e974288a2d7a817976f7633f4b43"
"f36ce72074e59c4ab8ddac362202f3e366f0aebbb6280)"
' 1))) (() (q . ((65 "00000000000000000000000000000000" 0x0cbba106e000))) ())))))'
).as_bin()
)
error, npc_list, cost = get_name_puzzle_conditions(program, True)
assert error is not None
error, npc_list, cost = get_name_puzzle_conditions(program, False)
assert error is None
coin_name = npc_list[0].coin_name
error, puzzle, solution = get_puzzle_and_solution_for_coin(program, coin_name)
assert error is None
@pytest.mark.asyncio
async def test_clvm_strict_mode(self):
program = SerializedProgram.from_bytes(
# this is a valid generator program except the first clvm
# if-condition, that depends on executing an unknown operator
# ("0xfe"). In strict mode, this should fail, but in non-strict
# mode, the unknown operator should be treated as if it returns ().
binutils.assemble(
"(i (a (q . 0xfe) (q . ())) (q . ()) "
"(q . ((0x3d2331635a58c0d49912bc1427d7db51afe3f20a7b4bcaffa17ee250dcbcbfaa"
" (((c (q . ((c (q . ((c (i 11 (q . ((c (i (= 5 (point_add 11"
" (pubkey_for_exp (sha256 11 ((c 6 (c 2 (c 23 (q . ())))))))))"
" (q . ((c 23 47))) (q . (x))) 1))) (q . (c (c 4 (c 5 (c ((c 6 (c 2"
" (c 23 (q . ()))))) (q . ())))) ((c 23 47))))) 1))) (c (q . (57 (c"
" (i (l 5) (q . (sha256 (q . 2) ((c 6 (c 2 (c 9 (q . ()))))) ((c 6 (c"
" 2 (c 13 (q . ()))))))) (q . (sha256 (q . 1) 5))) 1))) 1)))) (c"
" (q . 0x88bc9360319e7c54ab42e19e974288a2d7a817976f7633f4b43"
"f36ce72074e59c4ab8ddac362202f3e366f0aebbb6280)"
' 1))) (() (q . ((51 "00000000000000000000000000000000" 0x0cbba106e000))) ())))))'
")"
).as_bin()
)
error, npc_list, cost = get_name_puzzle_conditions(program, True)
assert error is not None
error, npc_list, cost = get_name_puzzle_conditions(program, False)
assert error is None
@pytest.mark.asyncio
async def test_tx_generator_speed(self, large_txn_hex):
generator = hexstr_to_bytes(large_txn_hex)
program = SerializedProgram.from_bytes(generator)
start_time = time.time()
err, npc, cost = get_name_puzzle_conditions(program, False)
end_time = time.time()
duration = end_time - start_time
assert err is None
assert len(npc) == 687
log.info(f"Time spent: {duration}")
assert duration < 3
@pytest.mark.asyncio
async def test_standard_tx(self):
puzzle = "((c (q . ((c (q . ((c (i 11 (q . ((c (i (= 5 (point_add 11 (pubkey_for_exp (sha256 11 ((c 6 (c 2 (c 23 (q . ()))))))))) (q . ((c 23 47))) (q . (x))) 1))) (q . (c (c 4 (c 5 (c ((c 6 (c 2 (c 23 (q . ()))))) (q . ())))) ((c 23 47))))) 1))) (c (q . (57 (c (i (l 5) (q . (sha256 (q . 2) ((c 6 (c 2 (c 9 (q . ()))))) ((c 6 (c 2 (c 13 (q . ()))))))) (q . (sha256 (q . 1) 5))) 1))) 1)))) (c (q . 0xaf949b78fa6a957602c3593a3d6cb7711e08720415dad831ab18adacaa9b27ec3dda508ee32e24bc811c0abc5781ae21) 1)))" # noqa: E501
solution = "(() (q . ((51 0x699eca24f2b6f4b25b16f7a418d0dc4fc5fce3b9145aecdda184158927738e3e 10) (51 0x847bb2385534070c39a39cc5dfdc7b35e2db472dc0ab10ab4dec157a2178adbf 0x00cbba106df6))) ())" # noqa: E501
time_start = time.time()
total_cost = 0
puzzle_program = SerializedProgram.from_bytes(binutils.assemble(puzzle).as_bin())
solution_program = SerializedProgram.from_bytes(binutils.assemble(solution).as_bin())
for i in range(0, 1000):
cost, result = puzzle_program.run_with_cost(solution_program)
total_cost += cost
time_end = time.time()
duration = time_end - time_start
log.info(f"Time spent: {duration}")
assert duration < 3
```
|
{
"source": "jespino/django-rest-framework-apidoc",
"score": 2
}
|
#### File: django-rest-framework-apidoc/rest_framework_apidoc/apidoc.py
```python
from django.conf import settings
from rest_framework.settings import import_from_string
from .mixins import FileContentMixin, DocStringContentMixin, MarkupProcessMixin, NoProcessMixin, SafeProcessMixin
APIDOC_DEFAULT_DOCUMENTER_CLASSES = getattr(
settings,
'APIDOC_DEFAULT_DOCUMENTER_CLASSES',
['rest_framework_apidoc.apidoc.MDDocStringsDocumenter']
)
def get_view_description(view_cls, html=False, request=None):
documenters = []
if hasattr(view_cls, 'documenter_classes'):
for cls in view_cls.documenter_classes:
documenters.append(cls())
else:
for cls in APIDOC_DEFAULT_DOCUMENTER_CLASSES:
documenter_class = import_from_string(cls, "APIDOC_DEFAULT_DOCUMENTER_CLASS")
documenters.append(documenter_class())
for documenter in documenters:
description = documenter.get_description(view_cls, html, request)
if description:
return description
return ""
class Documenter(object):
def get_description(self, view_cls, html=True, request=None):
if html:
return self.process(self.get_content(view_cls, html, request))
return self.get_content(view_cls, html, request=None)
class RSTFilesDocumenter(Documenter, FileContentMixin, MarkupProcessMixin):
extension = ".rst"
markup = "restructuredtext"
class RSTDocStringsDocumenter(Documenter, DocStringContentMixin, MarkupProcessMixin):
markup = "restructuredtext"
class MDFilesDocumenter(Documenter, FileContentMixin, MarkupProcessMixin):
extension = ".md"
markup = "markdown"
class MDDocStringsDocumenter(Documenter, DocStringContentMixin, MarkupProcessMixin):
markup = "markdown"
class TextileFilesDocumenter(Documenter, FileContentMixin, MarkupProcessMixin):
extension = ".textile"
markup = "textile"
class TextileDocStringsDocumenter(Documenter, DocStringContentMixin, MarkupProcessMixin):
markup = "textile"
class TxtFilesDocumenter(Documenter, FileContentMixin, NoProcessMixin):
extension = ".txt"
class TxtDocStringsDocumenter(Documenter, DocStringContentMixin, NoProcessMixin):
pass
class HtmlFilesDocumenter(Documenter, FileContentMixin, SafeProcessMixin):
extension = ".html"
class HtmlDocStringsDocumenter(Documenter, DocStringContentMixin, SafeProcessMixin):
pass
```
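A hypothetical view (requires a configured Django project) illustrating the lookup order in `get_view_description()`: `documenter_classes` on the view takes precedence over `APIDOC_DEFAULT_DOCUMENTER_CLASSES`, and the first documenter returning a non-empty description wins:
```python
# Hypothetical view: the file-based documenter is tried first, and the
# docstring documenter acts as the fallback.
from rest_framework.views import APIView
from rest_framework_apidoc.apidoc import MDFilesDocumenter, MDDocStringsDocumenter

class BookListView(APIView):
    """Returns the list of **books** (markdown is rendered when html=True)."""
    documenter_classes = [MDFilesDocumenter, MDDocStringsDocumenter]
```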
#### File: django-rest-framework-apidoc/rest_framework_apidoc/monkey.py
```python
def patch_api_view():
from rest_framework import views
if hasattr(views, "_patched"):
return
views._APIView = views.APIView
views._patched = True
class APIView(views.APIView):
def get_view_description(self, html=False):
func = self.settings.VIEW_DESCRIPTION_FUNCTION
return func(self.__class__, html, self.request)
@classmethod
def as_view(cls, **initkwargs):
view = super(views._APIView, cls).as_view(**initkwargs)
view.cls_instance = cls(**initkwargs)
return view
views.APIView = APIView
```
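The patch swaps `rest_framework.views.APIView` globally, so it must run before any view subclass is defined; a minimal sketch of where the call might live:
```python
# Apply the monkey patch at import time, e.g. near the top of the
# project's urls.py, before any APIView subclasses are imported.
from rest_framework_apidoc.monkey import patch_api_view

patch_api_view()
```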
#### File: django-rest-framework-apidoc/tests/tests.py
```python
from django.test import TestCase
from django.test.utils import override_settings
from rest_framework.views import APIView
from rest_framework_apidoc.mixins import FileContentMixin, DocStringContentMixin
class DummyObject(object):
pass
class RequestMock(object):
def __init__(self, url_name):
self.resolver_match = DummyObject()
self.resolver_match.url_name = url_name
class TestView(APIView):
pass
class DocumentedTestView(APIView):
"documented test view"
class DocumentedMultilineTestView(APIView):
"""documented test view
with multiple lines
documentation"""
class FileContentWithExtension(FileContentMixin):
extension = ".extension"
class FileContentWithoutExtension(FileContentMixin):
pass
class FileContentMixinTestCase(TestCase):
@override_settings(APIDOC_DOCUMENTATION_PATH="tests/test_docs")
def test_with_extension(self):
requestMock = RequestMock("test_with_extension")
content = FileContentWithExtension().get_content(TestView, False, requestMock)
self.assertEqual(content, "test_with_extension\n")
content = FileContentWithExtension().get_content(TestView, True, requestMock)
self.assertEqual(content, "test_with_extension\n")
@override_settings(APIDOC_DOCUMENTATION_PATH="tests/test_docs")
def test_without_extension(self):
requestMock = RequestMock("test_without_extension")
content = FileContentWithoutExtension().get_content(TestView, False, requestMock)
self.assertEqual(content, "test_without_extension\n")
content = FileContentWithoutExtension().get_content(TestView, True, requestMock)
self.assertEqual(content, "test_without_extension\n")
@override_settings(APIDOC_DOCUMENTATION_PATH="tests/test_docs")
def test_not_existing_file(self):
requestMock = RequestMock("test_not_existing_file")
content = FileContentWithoutExtension().get_content(TestView, True, requestMock)
self.assertEqual(content, "")
content = FileContentWithoutExtension().get_content(TestView, False, requestMock)
self.assertEqual(content, "")
class DocStringContentMixinTestCase(TestCase):
def test_with_docstring(self):
content = DocStringContentMixin().get_content(DocumentedTestView, True)
self.assertEqual(content, "documented test view")
def test_with_multiline_docstring(self):
content = DocStringContentMixin().get_content(DocumentedMultilineTestView, True)
self.assertEqual(content, "documented test view\nwith multiple lines\ndocumentation")
def test_without_docstring(self):
content = DocStringContentMixin().get_content(TestView, True)
self.assertEqual(content, "")
```
|
{
"source": "jespino/hospitales-covid19",
"score": 2
}
|
#### File: fuckcovid/hospitals/views.py
```python
from django.db.models import Sum
from django.shortcuts import get_object_or_404
from django.urls import reverse
from django.views.generic import TemplateView, ListView, DetailView, CreateView, UpdateView
from django.contrib.auth.mixins import LoginRequiredMixin
from .models import Region, Hospital, Resource, Need
from .serializers import NeedSerializer
from rest_framework import generics
class Home(TemplateView):
template_name = 'hospitals/home.html'
class RegionList(ListView):
queryset = Region.objects.all().order_by('name')
context_object_name = 'regions'
template_name = 'hospitals/region_list.html'
class RegionDetail(DetailView):
template_name = 'hospitals/region_detail.html'
context_object_name = 'region'
model = Region
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['hospitals'] = self.object.hospital_set.order_by('name')
return context
class HospitalDetail(DetailView):
template_name = 'hospitals/hospital_detail.html'
context_object_name = 'hospital'
model = Hospital
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['needs'] = self.object.need_set.order_by('resource__name')
return context
class HospitalUpdate(LoginRequiredMixin, UpdateView):
model = Hospital
fields = ['name', 'city', 'phone', 'address', 'comment']
class HospitalAddNeed(LoginRequiredMixin, CreateView):
model = Need
fields = ['resource', 'amount_per_day', 'comment']
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['hospital'] = get_object_or_404(Hospital, pk=self.kwargs['pk'])
context['prev_url'] = self.request.path
return context
def form_valid(self, form):
form.instance.hospital = get_object_or_404(Hospital, pk=self.kwargs['pk'])
form.instance.editor = self.request.user
return super().form_valid(form)
def get_success_url(self):
return reverse('hospitals:hospital-detail', args=[self.kwargs['pk']])
class NeedUpdateView(LoginRequiredMixin, UpdateView):
model = Need
fields = ['amount_per_day', 'comment']
def get_success_url(self):
return reverse('hospitals:hospital-detail', args=[str(self.object.hospital.pk)])
class ResourceCreateView(LoginRequiredMixin, CreateView):
model = Resource
fields = ['name', ]
def get_success_url(self):
return self.request.GET.get('next')
class ResourceDetail(DetailView):
template_name = 'hospitals/resource_detail.html'
context_object_name = 'resource'
model = Resource
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['total_per_day'] = self.object.need_set.aggregate(total_per_day=Sum('amount_per_day'))['total_per_day']
context['production_per_day'] = self.object.production_set.aggregate(total_per_day=Sum('amount_per_day'))['total_per_day']
return context
class ResourceList(ListView):
template_name = 'hospitals/resource_list.html'
context_object_name = 'resources'
model = Resource
class NeedList(generics.ListCreateAPIView):
queryset = Need.objects.all()
serializer_class = NeedSerializer
class NeedDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Need.objects.all()
serializer_class = NeedSerializer
```
#### File: fuckcovid/makers/views.py
```python
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse
from django.shortcuts import render, get_object_or_404
from django.urls import reverse
from django.views.generic import ListView, DetailView, CreateView, UpdateView
from fuckcovid.hospitals.models import Region
from fuckcovid.makers.models import Maker, Production
class RegionList(ListView):
queryset = Region.objects.all().order_by('name')
context_object_name = 'regions'
template_name = 'makers/region_list.html'
class RegionDetail(DetailView):
template_name = 'makers/region_detail.html'
context_object_name = 'region'
model = Region
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['makers'] = self.object.maker_set.order_by('name')
return context
class MakerDetail(DetailView):
template_name = 'makers/maker_detail.html'
context_object_name = 'maker'
model = Maker
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['productions'] = self.object.production_set.order_by('resource__name')
return context
class MakerCreate(LoginRequiredMixin, CreateView):
model = Maker
fields = ['name', 'city', 'phone', 'address', 'region', 'comment']
def form_valid(self, form):
form.instance.editor = self.request.user
return super().form_valid(form)
class MakerUpdate(LoginRequiredMixin, UpdateView):
model = Maker
fields = ['name', 'city', 'phone', 'address', 'region', 'comment']
    def get_object(self, queryset=None):
        # Avoid shadowing the builtin `object`.
        obj = super().get_object(queryset)
        if obj.editor != self.request.user:
            raise PermissionDenied()
        return obj
class ProductionCreate(LoginRequiredMixin, CreateView):
model = Production
fields = ['resource', 'amount_per_day', 'comment', 'donation']
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
maker = get_object_or_404(Maker, pk=self.kwargs['pk'])
if maker.editor != self.request.user:
raise PermissionDenied()
context['maker'] = maker
context['prev_url'] = self.request.path
return context
def form_valid(self, form):
form.instance.maker = get_object_or_404(Maker, pk=self.kwargs['pk'])
return super().form_valid(form)
def get_success_url(self):
return reverse('makers:maker-detail', args=[self.kwargs['pk']])
class ProductionUpdate(LoginRequiredMixin, UpdateView):
model = Production
fields = ['resource', 'amount_per_day', 'comment', 'donation']
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
maker = self.object.maker
if maker.editor != self.request.user:
raise PermissionDenied()
context['maker'] = maker
return context
def get_success_url(self):
return reverse('makers:maker-detail', args=[self.object.maker.id])
```
|
{
"source": "jespino/matteractions",
"score": 2
}
|
#### File: matteractions/actions/abuse.py
```python
import requests
def report_abuse(webhook_url, text, domain, team_name, post_id):
resp = requests.post(webhook_url, json={
"text": "Abuse reported in this message: {}/{}/pl/{}\n\nWith text:\n > {}".format(domain, team_name, post_id, text)
})
```
#### File: matteractions/actions/deepmoji.py
```python
import json
import requests
from flask import Flask, request, jsonify
from flask_wtf import CSRFProtect
app = Flask(__name__)
EMOJIS_MAP = {
0: "joy",
1: "unamused",
2: "weary",
3: "sob",
4: "heart_eyes",
5: "pensive",
6: "ok_hand",
7: "blush",
8: "heart",
9: "smirk",
10: "grin",
11: "notes",
12: "flushed",
13: "100",
14: "sleeping",
15: "relieved",
16: "relaxed",
17: "raised_hands",
18: "two_hearts",
19: "expressionless",
20: "sweat_smile",
21: "pray",
22: "confused",
23: "kissing_heart",
24: "hearts",
25: "neutral_face",
26: "information_desk_person",
27: "disappointed",
28: "see_no_evil",
29: "tired_face",
30: "v",
31: "sunglasses",
32: "rage",
33: "thumbsup",
34: "cry",
35: "sleepy",
36: "stuck_out_tongue_winking_eye",
37: "triumph",
38: "raised_hand",
39: "mask",
40: "clap",
41: "eyes",
42: "gun",
43: "persevere",
44: "imp",
45: "sweat",
46: "broken_heart",
47: "blue_heart",
48: "headphones",
49: "speak_no_evil",
50: "wink",
51: "skull",
52: "confounded",
53: "smile",
54: "stuck_out_tongue_winking_eye",
55: "angry",
56: "no_good",
57: "muscle",
58: "punch",
59: "purple_heart",
60: "sparkling_heart",
61: "blue_heart",
62: "grimacing",
63: "sparkles"
}
def suggest_emoji(text):
response = requests.get("https://deepmoji.mit.edu/api/", {"q": text})
data = response.json()
counter = 0
maxIdx = None
for score in data["scores"]:
if maxIdx is None:
maxIdx = [counter, score]
elif maxIdx[1] < score:
maxIdx = [counter, score]
counter += 1
if maxIdx is None:
return None
return EMOJIS_MAP[maxIdx[0]]
```
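For reference, the hand-rolled argmax above is equivalent to this compact sketch (it reuses `requests` and `EMOJIS_MAP` from the module and assumes the same `{"scores": [...]}` response shape):
```python
# Compact argmax over the returned scores; ties resolve to the first
# maximum, matching the loop in suggest_emoji().
def suggest_emoji_compact(text):
    scores = requests.get("https://deepmoji.mit.edu/api/", {"q": text}).json()["scores"]
    if not scores:
        return None
    return EMOJIS_MAP[max(range(len(scores)), key=scores.__getitem__)]
```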
#### File: matteractions/actions/jira_send.py
```python
import requests
from requests.auth import HTTPBasicAuth
def send_to_jira(api_url, api_user, api_token, project_key, text, type, summary):
description = text
if summary == "" or summary is None:
summary = text[:150]
if type == "" or type is None:
type = "Bug"
data = {
"fields": {
"project": {"key": project_key},
"summary": summary,
"description": description,
"issuetype": {"name": type}
}
}
resp = requests.post("{}/rest/api/2/issue/".format(api_url),
json=data, auth=HTTPBasicAuth(api_user, api_token))
return "{}/browse/{}".format(api_url, resp.json()['key'])
```
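A hypothetical invocation (all values are placeholders); the helper returns the browse URL of the created issue:
```python
# Hypothetical call with placeholder credentials and project key.
url = send_to_jira(
    "https://example.atlassian.net", "user@example.com", "api-token",
    "PROJ", "Login button broken on mobile", "Bug", "Login button broken")
print(url)  # e.g. https://example.atlassian.net/browse/PROJ-123
```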
#### File: matteractions/actions/translate.py
```python
from google.cloud import translate as trans
def translate(text, target):
translate_client = trans.Client()
translation = translate_client.translate(
text,
target_language=target)
return translation['translatedText']
```
|
{
"source": "jespino/sampledata",
"score": 3
}
|
#### File: sampledata/sampledata/helper.py
```python
import random
from .mixins import NumberMixin, TextMixin, TimeMixin, LocalizedMixin, ImageMixin, OtherMixin
class SampleData(NumberMixin, TextMixin, TimeMixin, LocalizedMixin, ImageMixin, OtherMixin):
def __init__(self, seed=None):
if seed is not None:
random.seed(seed)
```
#### File: sampledata/mixins/localized_mixin.py
```python
import random
from sampledata.l10n.names import Name, Surname, FullName
from sampledata.l10n.cities import City
from sampledata.l10n.occupations import Occupation
from sampledata.l10n.skills import Skill
from ..exceptions import ParameterError
class LocalizedMixin(object):
def state_code(self, locale):
"""Random province code."""
if locale == "es":
return random.choice(
['01', '02', '03', '04', '05', '06', '07', '08', '09', '10',
'11', '12', '13', '14', '15', '16', '17', '18', '19', '20',
'21', '22', '23', '24', '25', '26', '27', '28', '29', '30',
'31', '32', '33', '34', '35', '36', '37', '38', '39', '40',
'41', '42', '43', '44', '45', '46', '47', '48', '49', '50',
'51', '52', 'AD', ]
)
elif locale == "us":
return random.choice(
['AL', 'AK', 'AZ', 'AR', 'CA', 'CO', 'CT', 'DE', 'FL', 'GA',
'HI', 'ID', 'IL', 'IN', 'IA', 'KS', 'KY', 'LA', 'ME', 'MD',
'MA', 'MI', 'MN', 'MS', 'MO', 'MT', 'NE', 'NV', 'NH', 'NJ',
'NM', 'NY', 'NC', 'ND', 'OH', 'OK', 'OR', 'PA', 'RI', 'SC',
'SD', 'TN', 'TX', 'UT', 'VT', 'VA', 'WA', 'WV', 'WI', 'WY',
'AS', 'DC', 'FM', 'GU', 'MH', 'MP', 'PW', 'PR', 'VI', ]
)
else:
raise ParameterError("Not valid locale")
def name(self, locale=None, number=None, as_list=False):
return Name().generate(self, locale, number, as_list)
def surname(self, locale=None, number=None, as_list=False):
return Surname().generate(self, locale, number, as_list)
def fullname(self, locale=None, as_list=False):
return FullName().generate(self, locale, as_list)
def phone(self, locale=None, country_code=False):
phone = ''
if locale == "es":
if country_code is True:
phone += "+34 "
phone += random.choice(['6', '9'])
phone += str(self.int(10000000, 99999999))
return phone
else:
# Only works with implemented locales
raise ParameterError("Not valid locale")
def zip_code(self, locale=None):
zip_code = ''
if locale == "es":
zip_code = "%05d" % self.int(1000, 52999)
return zip_code
else:
# Only works with implemented locales
raise ParameterError("Not valid locale")
def id_card(self, locale=None):
id_card = ''
if locale == "es":
id_card = "%05d" % self.int(1000, 52999)
id_card = self.number_string(8)
id_card_letters = "TRWAGMYFPDXBNJZSQVHLCKET"
id_card += id_card_letters[int(id_card) % 23]
return id_card
else:
# Only works with implemented locales
raise ParameterError("Not valid locale")
def city(self, locale=None):
return City().generate(self, locale)
def occupation(self, locale=None):
return Occupation().generate(self, locale)
def skill(self, locale=None, subtype=None):
return Skill().generate(self, locale, subtype)
def skills(self, locale=None, subtype=None, total=None):
return Skill().generate_skills(self, locale, subtype, total)
```
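The Spanish DNI check letter produced by `id_card()` above is just the 8-digit number modulo 23 used as an index into a fixed alphabet; a standalone check:
```python
# Standalone check of the DNI letter rule used in id_card():
# 12345678 % 23 == 14, and index 14 of the alphabet is 'Z'.
letters = "TRWAGMYFPDXBNJZSQVHLCKET"
assert letters[12345678 % 23] == "Z"
```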
#### File: sampledata/mixins/other_mixin.py
```python
import random
import os
from ..exceptions import ParameterError, NotChoicesFound
class OtherMixin(object):
def boolean(self):
return random.randrange(0, 2) == 0
def nullboolean(self):
return random.choice([None, True, False])
def choice(self, choices):
if not isinstance(choices, list) and not isinstance(choices, tuple):
raise ParameterError('choices must be a list or a tuple')
if choices == []:
            raise ParameterError('choices can\'t be an empty list')
return random.choice(choices)
def ipv4(self):
return "{0}.{1}.{2}.{3}".format(
self.int(0, 255),
self.int(0, 255),
self.int(0, 255),
self.int(0, 255),
)
def ipv6(self):
return "{0}:{1}:{2}:{3}:{4}:{5}:{6}:{7}".format(
self.hex_chars(1, 4),
self.hex_chars(1, 4),
self.hex_chars(1, 4),
self.hex_chars(1, 4),
self.hex_chars(1, 4),
self.hex_chars(1, 4),
self.hex_chars(1, 4),
self.hex_chars(1, 4)
)
def mac_address(self):
return "{0}:{1}:{2}:{3}:{4}:{5}".format(
self.hex_chars(2, 2),
self.hex_chars(2, 2),
self.hex_chars(2, 2),
self.hex_chars(2, 2),
self.hex_chars(2, 2),
self.hex_chars(2, 2),
)
def hex_chars(self, min_chars=1, max_chars=5):
if min_chars > max_chars:
raise ParameterError('min_chars greater than max_chars')
result = ""
chars = random.randint(min_chars, max_chars)
for x in range(chars):
result += self.choice(['0', '1', '2', '3', '4', '5', '6', '7',
'8', '9', 'a', 'b', 'c', 'd', 'e', 'f'])
return result
def path(self, absolute=None, extension='', min_levels=1, max_levels=5):
if min_levels > max_levels:
raise ParameterError('min_levels greater than max_levels')
if absolute is None:
absolute = self.boolean()
if absolute:
result = "/"
else:
result = ""
levels = random.randint(min_levels, max_levels)
for x in range(levels):
result += self.word()
            if x != levels - 1:
result += "/"
result += extension
return result
def path_from_directory(self, directory_path, valid_extensions=['.jpg', '.bmp', '.png']):
if not os.path.exists(directory_path):
raise ParameterError('directory_path must be a valid path')
list_of_images = os.listdir(directory_path)
list_of_images = list(filter(lambda x: os.path.splitext(x)[1] in valid_extensions, list_of_images))
if len(list_of_images) == 0:
raise NotChoicesFound('Not valid images found in directory_path for valid_extensions')
random_path = os.path.join(directory_path, random.choice(list_of_images))
return random_path
```
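A small usage sketch of these helpers through the `SampleData` facade from helper.py (concrete values depend on the seed):
```python
# Usage sketch; output values depend on the seed.
from sampledata.helper import SampleData

sd = SampleData(seed=1234)
print(sd.ipv4())           # e.g. "203.87.11.240"
print(sd.mac_address())    # e.g. "ab:00:3f:12:9c:7e"
print(sd.hex_chars(4, 4))  # exactly four hex digits
```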
|
{
"source": "jespiron/writeups",
"score": 3
}
|
#### File: cryptography/James Brahm Returns/crack_jbr.py
```python
from pwn import *
import hashlib
def pad(message):
if len(message) % 16 == 0:
message = message + chr(16)*16
elif len(message) % 16 != 0:
message = message + chr(16 - len(message)%16)*(16 - len(message)%16)
return message
conn=remote("2018shell2.picoctf.com", 14263)
conn.recvuntil("Send & verify (S)")
def do_encrypt(prefix, suffix):
''' encrypt a report
Keyword arguments:
prefix -- situation report part of the message body.
service inserts this string before the CTF flag
suffix -- service inserts this string after the CTF flag
Return -- cipher message
'''
conn.send("e\n")
conn.recvuntil("Please enter your situation report: ")
conn.send(prefix + "\n")
conn.recvuntil("Anything else? ")
conn.send(suffix + "\n")
conn.recvuntil("encrypted: ")
# strip the trailing newline
cipher = conn.recvline()[:-1]
# 16-bytes block == 32 hex string (2 hex per byte)
assert 0 == len(cipher) % 32, "Fatal error - cipher message must be multiple of 32"
assert len(cipher) >= 96, "Fatal error - cipher message must be at least 96 characters long"
conn.recvuntil("Send & verify (S)")
return cipher
def do_verify(cipher):
""" Verify if a cipher can be successfully decrypted
Keyword arguments:
cipher -- the cipher message
    Return -- True if the message is successfully decrypted and False otherwise
"""
# 16-bytes block == 32 hex string (2 hex per byte)
assert 0 == (len(cipher) % 32), "Fatal error length of cipher message must be multiple of 32"
# cipher message is 96 for zero-length plaintext
# 1. one 16-byte IV block
# 2. one 20-byte MAC block
# 3. one 12-byte padding block
assert len(cipher) >= 96, "Fatal error - cipher message must be at least 96 characters long"
conn.send("s\n")
conn.recvuntil("Please input the encrypted message: ")
conn.send(cipher + "\n")
line = conn.recvline()
conn.recvuntil("Send & verify (S)")
return line.startswith('Successful decryption.')
def get_EBoP_metrics():
""" determine the length of shortest message with an EBoP and the associated suffix length
Note: an EBoP (Entire Block of Padding) is defined as a padding block containing only padding data.
Return -- two tuple with the fist value being the length of shortest message with EBoP
and the 2nd value being the required padding length (for both prefix and suffix)
"""
# cipher message length with zero-length prefix and suffix
cipher_len = 0
suffix_len = 0
while True:
new_cipher_len = len(do_encrypt("", 'a' * suffix_len)) / 2
assert new_cipher_len >= 48, "Fatal error"
assert 0 == new_cipher_len % 16, "Fatal error"
if new_cipher_len > cipher_len:
# the new length must be exactly 16-byte longer (or 32-hex digit longer)
assert(new_cipher_len == cipher_len + 16), "fatal error - block_size incorrect"
return new_cipher_len, suffix_len
cipher_len = new_cipher_len
suffix_len = suffix_len + 1
def get_CTF_metrics(EBoP_len, EBoP_padding):
""" determine the location of CTF
Return - the smallest where we can place CTF
"""
# find the earliest possible (where the prefix is empty) CTF location
prefix = ''
ctf = ''
suffix = 'A' * EBoP_padding
message = """Agent,
Greetings. My situation report is as follows:
{0}
My agent identifying code is: {1}.
Down with the Soviets,
006
""".format(prefix, ctf)
h = hashlib.sha1()
message = message+suffix
h.update(message)
message = message+h.digest()
assert 20 == len(h.digest()), "fatal error"
substr_to_find = "code is: "
return message.find(substr_to_find) + len(substr_to_find)
def decode_char(offset, EBoP_padding):
'''
decode a character at the specified offset using the given EBoP_padding
Arguments:
offset -- the offset of the character to be decoded
EBoP_padding -- the total amount of padding in order to ensure the cipher
message contains EBoP (Entire Block of Padding)
'''
# determine the required prefix padding in order to place the character at the end of the block
prefix_len = 15 - (offset % 16)
# the rest of padding will be absorbed by the suffix padding
suffix_len = EBoP_padding - prefix_len
block_start = offset + prefix_len + 1
assert (0 == (block_start % 16)), "fatal error"
# the block id containing our target character
# add 1 to accommodate IV
bid = block_start / 16 + 1
attempts = 0
prefix = 'A' * prefix_len
suffix = 'B' * suffix_len
print("decoding offset: %d prefix length %d suffix length %d" %(offset, prefix_len, suffix_len))
# implements the algorithm published by google
# https://www.openssl.org/~bodo/ssl-poodle.pdf
while True:
try:
message = do_encrypt(prefix, suffix)
if 0 == (attempts % 16):
print("offset %d attempts %d" % (offset, attempts))
assert len(message) == EBoP_len * 2, "fatal error"
# substitute last block with block bid
message = message[:-32] + message[(bid - 1)*32:bid * 32]
attempts = attempts + 1
if do_verify(message):
print("offset=%d attempts=%d done" % (offset, attempts))
# the plain text is C[bid - 1] ^ C[N - 1] ^ 16
# we use 16 instead of 15 because of padding differences
char = int(message[bid * 32 - 34:bid * 32 - 32], 16) ^ int(message[-34:-32], 16) ^ 16
print("offset=%d attempts=%d char=0x%02x" % (offset, attempts, char))
return p8(char)
except EOFError:
global conn
conn.close()
conn=remote("2018shell2.picoctf.com", 14263)
conn.recvuntil("Send & verify (S)")
# Min_EBoP_len, Min_EBoP_padding = get_EBoP_metrics()
Min_EBoP_len, Min_EBoP_padding = 208, 14
print("Min_EBop_Len %d Min_EBoP_padding %d" % (Min_EBoP_len, Min_EBoP_padding))
# increase the padding by 32 bytes (enough to decode CTF)
EBoP_len = Min_EBoP_len + 32
EBoP_padding = Min_EBoP_padding + 32
CTF_start = get_CTF_metrics(EBoP_len, EBoP_padding)
print("CTF start %d" % (CTF_start))
print("full block size %d padding %d" % (Min_EBoP_len, EBoP_padding))
start_offset = len("picoCTF") + CTF_start
cur_offset = start_offset
ch = ''
ctf = ''
# loop until we get }
while ch != '}':
ch = decode_char(cur_offset, EBoP_padding)
    # '{' can appear only as the first decoded character
    assert cur_offset != start_offset or ch == '{', "first character must be {"
    assert ch != '{' or cur_offset == start_offset, "first character must be {"
cur_offset = cur_offset + 1
ctf = ctf + ch
print(ctf)
```
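The byte-recovery identity used at the end of `decode_char()` can be checked in isolation; a standalone sketch with two fake ciphertext blocks in the same hex layout:
```python
# Standalone sketch of the recovery step: XOR the last byte of the block
# preceding the copied block with the last byte of the block preceding
# the final block, then XOR with the padding value 16.
msg = "00" * 15 + "3c" + "00" * 15 + "91"  # two fake 16-byte blocks, hex-encoded
prev_copied = int(msg[30:32], 16)           # 0x3c
prev_last = int(msg[62:64], 16)             # 0x91
plain_byte = prev_copied ^ prev_last ^ 16
print(hex(plain_byte))
```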
|
{
"source": "jes-p/pyasdf",
"score": 3
}
|
#### File: pyasdf/pyasdf/inventory_utils.py
```python
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import collections
import copy
from lxml import etree
from obspy import UTCDateTime
def merge_inventories(inv_a, inv_b, network_id, station_id):
"""
Takes two inventories, merges the contents of both and isolates the
contents of a certain network and station id.
Returns the processed inventory object. The original one will not be
changed.
:param inv_a: Inventory A. Contents of that inventory will be prioritized.
:type inv_a: :class:`~obspy.core.inventory.inventory.Inventory`
:param inv_b: Inventory B.
:type inv_b: :class:`~obspy.core.inventory.inventory.Inventory`
:param network_id: The network id.
:type network_id: str
:param station_id: The station id.
:type station_id: str
"""
inv = copy.deepcopy(inv_a)
inv.networks.extend(copy.deepcopy(inv_b.networks))
return isolate_and_merge_station(inv, network_id=network_id,
station_id=station_id)
def isolate_and_merge_station(inv, network_id, station_id):
"""
Takes an inventory object, isolates the given station and merged them.
Merging is sometimes necessary as many files have the same station
multiple times.
Returns the processed inventory object. The original one will not be
changed.
:param inv: The inventory.
:type inv: :class:`~obspy.core.inventory.inventory.Inventory`
:param network_id: The network id.
:type network_id: str
:param station_id: The station id.
:type station_id: str
"""
inv = copy.deepcopy(inv.select(network=network_id, station=station_id,
keep_empty=True))
# Merge networks if necessary.
if len(inv.networks) != 1:
network = inv.networks[0]
for other_network in inv.networks[1:]:
# Merge the stations.
network.stations.extend(other_network.stations)
# Update the times if necessary.
if other_network.start_date is not None:
if network.start_date is None or \
network.start_date > other_network.start_date:
network.start_date = other_network.start_date
# None is the "biggest" end_date.
if network.end_date is not None and other_network.end_date is \
not None:
if other_network.end_date > network.end_date:
network.end_date = other_network.end_date
elif other_network.end_date is None:
network.end_date = None
# Update comments.
network.comments = list(
set(network.comments).union(set(other_network.comments)))
# Update the number of stations.
if other_network.total_number_of_stations:
if network.total_number_of_stations or \
network.total_number_of_stations < \
other_network.total_number_of_stations:
network.total_number_of_stations = \
other_network.total_number_of_stations
# Update the other elements
network.alternate_code = (network.alternate_code or
other_network.alternate_code) or None
network.description = (network.description or
other_network.description) or None
network.historical_code = (network.historical_code or
other_network.historical_code) or None
network.restricted_status = network.restricted_status or \
other_network.restricted_status
inv.networks = [network]
# Merge stations if necessary.
if len(inv.networks[0].stations) != 1:
station = inv.networks[0].stations[0]
for other_station in inv.networks[0].stations[1:]:
# Merge the channels.
station.channels.extend(other_station.channels)
# Update the times if necessary.
if other_station.start_date is not None:
if station.start_date is None or \
station.start_date > other_station.start_date:
station.start_date = other_station.start_date
# None is the "biggest" end_date.
if station.end_date is not None and other_station.end_date is \
not None:
if other_station.end_date > station.end_date:
station.end_date = other_station.end_date
elif other_station.end_date is None:
station.end_date = None
# Update comments.
station.comments = list(
set(station.comments).union(set(other_station.comments)))
# Update the number of channels.
if other_station.total_number_of_channels:
                if not station.total_number_of_channels or \
station.total_number_of_channels < \
other_station.total_number_of_channels:
station.total_number_of_channels = \
other_station.total_number_of_channels
# Update the other elements
station.alternate_code = (station.alternate_code or
other_station.alternate_code) or None
station.description = (station.description or
other_station.description) or None
station.historical_code = (station.historical_code or
other_station.historical_code) or None
station.restricted_status = station.restricted_status or \
other_station.restricted_status
inv.networks[0].stations = [station]
# Last but not least, remove duplicate channels. This is done on the
# location and channel id, and the times, nothing else.
unique_channels = []
available_channel_hashes = []
for channel in inv[0][0]:
c_hash = hash((str(channel.start_date), str(channel.end_date),
channel.code, channel.location_code))
if c_hash in available_channel_hashes:
continue
else:
unique_channels.append(channel)
available_channel_hashes.append(c_hash)
inv[0][0].channels = unique_channels
# Update the selected number of stations and channels.
inv[0].selected_number_of_stations = 1
inv[0][0].selected_number_of_channels = len(inv[0][0].channels)
return inv
def get_coordinates(data, level="station"):
"""
Very quick way to get coordinates from a StationXML file.
Can extract coordinates at the station and at the channel level.
"""
ns = "http://www.fdsn.org/xml/station/1"
network_tag = "{%s}Network" % ns
station_tag = "{%s}Station" % ns
channel_tag = "{%s}Channel" % ns
latitude_tag = "{%s}Latitude" % ns
longitude_tag = "{%s}Longitude" % ns
elevation_tag = "{%s}Elevation" % ns
depth_tag = "{%s}Depth" % ns
# Return station coordinates.
if level == "station":
coordinates = {}
# Just triggering on network and station tags and getting the
# station elements' children does (for some reason) not work as the
        # children will not be complete if there are a lot of them. Maybe
# this is some kind of shortcoming or bug of etree.iterparse()?
tags = (network_tag, station_tag, latitude_tag, longitude_tag,
elevation_tag)
context = etree.iterparse(data, events=("start", ), tag=tags)
# Small state machine.
current_network = None
current_station = None
current_coordinates = {}
for _, elem in context:
if elem.tag == network_tag:
current_network = elem.get('code')
current_station = None
current_coordinates = {}
elif elem.tag == station_tag:
current_station = elem.get('code')
current_coordinates = {}
elif elem.getparent().tag == station_tag:
if elem.tag == latitude_tag:
current_coordinates["latitude"] = float(elem.text)
if elem.tag == longitude_tag:
current_coordinates["longitude"] = float(elem.text)
if elem.tag == elevation_tag:
current_coordinates["elevation_in_m"] = float(elem.text)
if len(current_coordinates) == 3:
coordinates["%s.%s" % (current_network,
current_station)] = \
current_coordinates
current_coordinates = {}
return coordinates
# Return channel coordinates.
elif level == "channel":
coordinates = collections.defaultdict(list)
# Small state machine.
net_state, sta_state = (None, None)
tags = (network_tag, station_tag, channel_tag)
context = etree.iterparse(data, events=("start", ), tag=tags)
for _, elem in context:
if elem.tag == channel_tag:
# Get basics.
channel = elem.get('code')
location = elem.get('locationCode').strip()
starttime = UTCDateTime(elem.get('startDate'))
endtime = elem.get('endDate')
if endtime:
endtime = UTCDateTime(endtime)
tag = "%s.%s.%s.%s" % (net_state, sta_state, location, channel)
channel_coordinates = {"starttime": starttime,
"endtime": endtime}
coordinates[tag].append(channel_coordinates)
for child in elem.getchildren():
if child.tag == latitude_tag:
channel_coordinates["latitude"] = float(child.text)
elif child.tag == longitude_tag:
channel_coordinates["longitude"] = float(child.text)
elif child.tag == elevation_tag:
channel_coordinates["elevation_in_m"] = float(
child.text)
elif child.tag == depth_tag:
channel_coordinates["local_depth_in_m"] = float(
child.text)
elif elem.tag == station_tag:
sta_state = elem.get('code')
elif elem.tag == network_tag:
net_state = elem.get('code')
return dict(coordinates)
else:
raise ValueError("Level must be either 'station' or 'channel'.")
```
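A usage sketch, assuming ObsPy's bundled example inventory contains the GR.FUR station; any inventory with a shared network/station pair works the same way:
```python
# Merge two inventories and isolate one station; the result keeps exactly
# one network and one station, with duplicate channels removed.
import obspy
from pyasdf.inventory_utils import merge_inventories

inv_a = obspy.read_inventory()  # ObsPy's example inventory
inv_b = obspy.read_inventory()
merged = merge_inventories(inv_a, inv_b, network_id="GR", station_id="FUR")
print(merged)
```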
#### File: pyasdf/pyasdf/watermark.py
```python
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from multiprocessing import cpu_count
from pkg_resources import get_distribution
import platform
from socket import gethostname
from time import strftime
import h5py
try:
from mpi4py import MPI
except ImportError:
MPI = None
from .utils import is_multiprocessing_problematic
# Dependencies.
modules = ["numpy", "scipy", "obspy", "lxml", "h5py", "prov", "dill"]
if MPI:
modules.append("mpi4py")
def get_watermark():
"""
Return information about the current system relevant for pyasdf.
"""
vendor = MPI.get_vendor() if MPI else None
c = h5py.get_config()
if not hasattr(c, "mpi") or not c.mpi:
is_parallel = False
else:
is_parallel = True
watermark = {
"python_implementation": platform.python_implementation(),
"python_version": platform.python_version(),
"python_compiler": platform.python_compiler(),
"platform_system": platform.system(),
"platform_release": platform.release(),
"platform_version": platform.version(),
"platform_machine": platform.machine(),
"platform_processor": platform.processor(),
"platform_processor_count": cpu_count(),
"platform_architecture": platform.architecture()[0],
"platform_hostname": gethostname(),
"date": strftime('%d/%m/%Y'),
"time": strftime('%H:%M:%S'),
"timezone": strftime('%Z'),
"hdf5_version": h5py.version.hdf5_version,
"parallel_h5py": is_parallel,
"mpi_vendor": vendor[0] if vendor else None,
"mpi_vendor_version": ".".join(map(str, vendor[1]))
if vendor else None,
"problematic_multiprocessing": is_multiprocessing_problematic()
}
watermark["module_versions"] = {
module: get_distribution(module).version for module in modules}
if MPI is None:
watermark["module_versions"]["mpi4py"] = None
return watermark
```
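Since the watermark is a plain dict, it can be logged or stored alongside results; a quick usage sketch:
```python
# Print a few of the keys assembled by get_watermark().
from pyasdf.watermark import get_watermark

wm = get_watermark()
print(wm["python_version"], wm["hdf5_version"], wm["parallel_h5py"])
```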
#### File: jes-p/pyasdf/setup.py
```python
import inspect
import os
from setuptools import setup, find_packages
DOCSTRING = __doc__.strip().split("\n")
def get_package_data():
"""
Returns a list of all files needed for the installation relative to the
'pyasdf' subfolder.
"""
filenames = []
# The lasif root dir.
root_dir = os.path.join(os.path.dirname(os.path.abspath(
inspect.getfile(inspect.currentframe()))), "pyasdf")
# Recursively include all files in these folders:
folders = [os.path.join(root_dir, "tests", "data")]
for folder in folders:
for directory, _, files in os.walk(folder):
for filename in files:
# Exclude hidden files.
if filename.startswith("."):
continue
filenames.append(os.path.relpath(
os.path.join(directory, filename),
root_dir))
return filenames
setup_config = dict(
name="pyasdf",
version="0.3.x",
description=DOCSTRING[0],
long_description="\n".join(DOCSTRING),
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/SeismicData/pyasdf",
packages=find_packages(),
license="BSD",
platforms="OS Independent",
install_requires=["numpy", "obspy>=1.0.0", "h5py", "colorama", "pytest",
"flake8", "prov", "dill"],
extras_require={"mpi": ["mpi4py"]},
package_data={
"pyasdf": get_package_data()},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Physics'],
)
if __name__ == "__main__":
setup(**setup_config)
```
|
{
"source": "Jesr2104/Chromosome-browser",
"score": 2
}
|
#### File: Chromosome-browser/cgi-bin/front_module.py
```python
"""
Program: front_module
File: front_module.py
Version: V1.0
Date: 10.05.18
Function: creates and receives data from html pages.
Copyright: (c) <NAME>, Birkbeck Bioinformatics student, 2018
Author: <NAME>
Address: 2018 Bioinformatics Course 1st year, University of Birkbeck, London
--------------------------------------------------------------------------
Revision History:
=================
V1.0 10.05.18 Original By: ORS (<NAME>)
v2.0 13.05.18 By: ORS
Changelog
v2.0 - Changed preset to pre_set throughout program
"""
#************************************************************************
# Import libraries
#import collections
import cgitb, cgi, codecs, unittest, tempfile
cgitb.enable()
from middle_output_preliminar import middle_to_db
from ors_modules.data_processing_ors import gene_protein_accession_processing, location_processing, duplicate_processing, determine_type_of_middle_output
# for use in testing, REMOVE BEFORE HAND IN
from ors_modules.dummy_data_ors import type012_dummy_data, type3_dummy_data, type4_dummy_data, type5_dummy_data
#************************************************************************
"""
#*************************************************************************
def print_page(file, results):
    inserts the html output into a string of the html file for presenting
    Input: file --- an html file
results --- a string
Return: None --- Prints a webpage
10.05.18 Original By: ORS
"""
def print_page(file, results):
print("Content-Type: text/html\n")
f=codecs.open(file, 'r')
page = f.read()
# outputs results which are embedded within the selected html
str_results = str(results)
print(page.format(output = str_results))
"""
#*************************************************************************
def print_template(results):
selects which html template to embed the output data into
Input: results --- a string
Return: None --- Prints a webpage
10.05.18 Original By: ORS
"""
def print_template(results):
# html page that acts as a wrapper around results
file ="template.html"
print_page(file,results)
"""
#*************************************************************************
def data_processing(data):
processes the data from web page forms into one form of output
Input: data --- a dictionary containing cgi data
Return: contents --- a dictionary containing processed data
10.05.18 Original By: ORS
"""
""" The purpose of data_processing is to take the dictionary from
cgifeildstorage to dict and to check that both the keys and values are present.
It then checks the name of the dictionary and creates a dictionary with type,
pre_set, name and enzyme.
it also turns the key:valuse 'gene':'gene1' into 'type':1,'name':'gene1
which is neccisary to fit our api"""
def data_processing(data): # directs the output from forms in homepage.html
# assigns variables to values type,pre_set,name, enzyme from form
# form.getvalue("name") stores a dictionary containing the type and name
if "name" in data:
query_name = data["name"]
if 'type' in data:
query_type = data["type"]
query_type = int(query_type)
# changes the data type from a string "int" to an int
# elif "type" in data:
# query_type = data["type"]
# if type(query_type) == str:
# query_type = int(query_type)
# changes the pre_set form types from their names to the number based api 0,1,2,3
elif "gene" in data:
query_name = data["gene"]
query_type = 0
elif "protein" in data:
query_name = data["protein"]
query_type = 1
elif "acession" in data:
query_name = data["acession"]
query_type = 2
elif "location" in data:
query_name = data["location"]
query_type = 3
    # This creates empty values if none are found
else:
query_name =""
query_type =""
if "pre_set" in data:
query_pre_set =data["pre_set"]
else:
query_pre_set = False
if "enzyme" in data:
query_enzyme =data["enzyme"]
else:
query_enzyme = ""
# final format for the front to middle API
contents = {"type":query_type,"pre_set": query_pre_set,"name" : query_name,"enzyme": query_enzyme}
######################################################################
#### Change return(query_example) to return( contents)
####################################################################
return contents
"""
#*************************************************************************
def cgi_field_storage_to_dict(fieldStorage):
    converts a cgi.FieldStorage object into a plain dictionary
    Input: fieldStorage --- a cgi.FieldStorage instance
    Return: params --- a dictionary of form keys and values
10.05.18 Original By: ORS
"""
""" The purpose of this function is to check the output from the middle layer
and check if there is a type = 4. A type 4 indicating that the name or protein
has multiple entires in the database. This will determine where a normal results
screen or a 'select_duplicate' is created """
def cgi_field_storage_to_dict( fieldStorage ):
#Get a plain dictionary, rather than the '.value' system used by the cgi module
params = {}
for key in fieldStorage.keys():
params[ key ] = fieldStorage[ key ].value
return params
"""
#*************************************************************************
def create_codon_table(codon_lists):
    takes the nested lists that make up the codon usage data and builds an HTML table
    Input: codon_lists --- nested dictionaries/lists containing processed data
    Return: inner_table --- a string to be inserted into an HTML template to generate webpage
10.05.18 Original By: ORS
"""
def create_codon_table(codon_lists):
dna_list = codon_lists
inner_table = ""
inner_table += "<table>"
inner_table += "<tr>"
# inputs the headers for each column
inner_table += "<th>Codon</th><th>%</th><th>Ratio</th><th>Amino acid</th>"
inner_table += "<th>Codon</th><th>%</th><th>Ratio</th><th>Amino acid</th>"
inner_table += "<th>Codon</th><th>%</th><th>Ratio</th><th>Amino acid</th>"
inner_table += "<th>Codon</th><th>%</th><th>Ratio</th><th>Amino acid</th>"
inner_table+="</tr>"
for i in dna_list:
inner_table = inner_table + "<tr>"
for key in dna_list[i]:
the_data = dna_list[i][key]
inner_table = inner_table + "<td>" + key +"</td>"
for j in the_data:
inner_table = inner_table + "<td>" + str(j) +"</td>"
inner_table = inner_table + "</tr>"
inner_table += "</table>"
return inner_table
"""
#***************************************************************
def print_formating(processed_middle_data):
    processes the input to be presented on an html page in an ordered manner
    Input: data --- nested dictionaries/lists containing processed data
    Return: final_str --- a string to be inserted into an HTML template to generate webpage
10.05.18 Original By: ORS
As the data set I got back from the middle layer is not properly formatted, I can't print it to the web page.
This is an example of the correctly formatted data as dummy data
"""
def print_formating(processed_middle_data):
data = processed_middle_data
final_str = ""
# if it is a type 5 which is just a dict
if "type" in data:
if data["type"] == 5:
final_str = final_str + "This return type means that no entry was found in our database. This may be because of an error in the genbank file, please try again <br>"
for x in data:
final_str = final_str + x + " : " +str(data[x]) +"<br>"
return final_str
    # checks for a nested dict key 'additional_data', indicating type 4 data
    elif any('additional_data' in d for d in data):
        final_str += "This return type means that duplicate entries were found, please select one <br> <br><br>"
for dicts in data:
temp_dict = str(dicts["additional_data"])
final_str += temp_dict + "<br><br>"
return final_str
    elif 'associated_genes' in data:
        final_str += "This return type is for a chromosomal location <br> Here are associated genes, proteins, protein IDs, primary accession numbers and chromosomal locations <br><br>"
        final_str += data["searched_type"] + " " + data["searched_name"] + "<br>"
        final_str += "associated genes :" + "<br>" + str(data["associated_genes"]) + "<br><br>"
        final_str += "associated proteins :" + "<br>" + str(data["associated_protein"]) + "<br><br>"
        final_str += "associated primary accession numbers :" + "<br>" + str(data["associated_accession"]) + "<br><br>"
        final_str += "associated chromosomal location :" + "<br>" + str(data["associated_location"]) + "<br><br>"
return final_str
    # This checks if a nested key exists which is found only in type012 data
elif any( 'searched_name' in d for d in data):
final_str += "This return type is for a gene, protein or accesion number <br> Here is the relevent data <br><br>"
temp_dict = data[0]
for x in temp_dict:
final_str += x + " : " + temp_dict[x] + "<br><br>"
final_str += " This is the table for chromosomal codon usage<br>"
temp_dict = create_codon_table(data[1])
final_str += temp_dict + "<br><br>"
final_str += " This is the table for cds codon usage<br>"
temp_dict = create_codon_table(data[2])
final_str += temp_dict
return final_str
else:
return ("Nothing found")
# takes middle layer output and prints to html template
def front_to_middle():
# collects the form data from webpage
form_data = cgi_field_storage_to_dict(cgi.FieldStorage())
# organises form data in a uniform manner
processed_web_data = data_processing(form_data)
# invokes middle layer and passes form data
middle_query = middle_to_db(processed_web_data)
# determines types and processes data
processed_middle_data = determine_type_of_middle_output(middle_query)
# modifies data for printing to html page
formatted_data = print_formating(processed_middle_data)
    # True output
print_template(formatted_data)
front_to_middle()
```
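A behavioural sketch (comments only, with placeholder input values) of the form-key-to-type mapping that `data_processing()` applies before calling the middle layer:
```python
# Mapping applied by data_processing(); the 'acession' spelling matches
# the form key used by the web page, so it is kept as-is.
#   {"gene": "gene1"}        -> {"type": 0, "name": "gene1", ...}
#   {"protein": "p53"}       -> {"type": 1, "name": "p53", ...}
#   {"acession": "AB001090"} -> {"type": 2, "name": "AB001090", ...}
#   {"location": "16q24"}    -> {"type": 3, "name": "16q24", ...}
```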
#### File: cgi-bin/ors_modules/data_processing_ors.py
```python
import collections
#************************************************************************
"""
note: The reason I imported collections was because I wanted to use collections.OrderedDict(), due to the fact it gave me
greater flexibility for how I would print the output on the webpage. This is due to the control it gives me when
generating html tags using a for loop.
When I refer to an ordered dictionary I am referring to the collections.OrderedDict() function
"""
"""
--------------------------------------------------------------------------------
Codon usage dummy data
--------------------------------------------------------------------------------
I must also include the codon ratios across the chromosome and for the coding sequence
(codon, frequency_100, ratio_aa, amino_acid, chr_name)
VALUES ('aug', 1.89, 1.0, 'met', 'chromosome 16')
I have the dictionary output_dict. Within the output dict I have a key chromosome_codon_usage with its value
being a dictionary containing all the codons and their usage for that chromosome.
Within the output dict is another dictionary, cds_codon_usage.
Each of those dictionaries has sub-dictionaries with each codon as key and a list as value.
example
output_dict = {'chromosome_codon_usage': {'aug': [1.89, 1.0, 'met']}}
"""
# They will follow the format 'codon': [frequency_100, ratio_aa, amino_acid]
chromosome_codon_usage = {
'aug':[ 1.89, 1.0, 'met'],'cag':[3.91, 0.85, 'gln'],'ccg':[ 1.29, 0.18, 'pro'],'aga': [0.8, 0.12, 'arg'],
'acu': [0.78, 0.16, 'thr'],'cuc': [2.47, 0.23, 'leu'],'guu': [0.66, 0.11, 'val'],'cug': [5.64, 0.53, 'leu'],
'ugc': [1.63, 0.67, 'cys'],'ucc': [2.01, 0.25, 'ser'],'gug': [3.41, 0.55, 'val'],'cua': [0.45, 0.04, 'leu'],
'aca': [1.02, 0.21, 'thr'],'ucu': [1.05, 0.13, 'ser'],'gca': [1.38, 0.17, 'ala'],'gaa': [1.65, 0.26, 'glu'],
'gau': [1.34, 0.3, 'asp'],'uug': [0.92, 0.09, 'leu'],'gac': [3.15, 0.7, 'asp'],'ccu': [1.51, 0.21, 'pro'],
'gga': [1.24, 0.17, 'gly'],'uuu': [1.07, 0.31, 'phe'],'aaa': [1.33, 0.3, 'lys'],'uuc': [2.34, 0.69, 'phe'],
'cau': [0.74, 0.26, 'his'],'auc': [2.14, 0.66, 'ile'],'aau': [0.88, 0.33, 'asn'],'cca': [1.5, 0.21, 'pro'],
'gcu': [1.56, 0.19, 'ala'],'auu': [0.8, 0.25, 'ile'],'gag': [4.8, 0.74, 'glu'],'uca': [0.84, 0.11, 'ser'],
'aac': [1.82, 0.67, 'asn'],'acc': [2.22, 0.45, 'thr'],'agu': [0.8, 0.1, 'ser'],'ugu': [0.8, 0.33, 'cys'],
'aag': [3.15, 0.7, 'lys'],'cgc': [1.67, 0.25, 'arg'],'uau': [0.65, 0.29, 'tyr'],'guc': [1.76, 0.28, 'val'],
'ucg':[ 0.71, 0.09, 'ser'],'agc':[ 2.52, 0.32, 'ser'],'uac':[ 1.62, 0.71, 'tyr'],'ggc':[ 3.19, 0.43, 'gly'],
'uua':[ 0.28, 0.03, 'leu'],'aua':[ 0.3, 0.09, 'ile'],'cgg':[ 1.73, 0.26, 'arg'],'ccc':[ 2.88, 0.4, 'pro'],
'gcg':[ 1.41, 0.17, 'ala'],'ggg':[ 2.11, 0.28, 'gly'],'caa':[ 0.69, 0.15, 'gln'],'agg':[ 1.47, 0.22, 'arg'],
'gua':[ 0.37, 0.06, 'val'],'cga':[ 0.55, 0.08, 'arg'],'cac':[ 2.13, 0.74, 'his'],'gcc':[ 3.9, 0.47, 'ala'],
'cgu':[ 0.49, 0.07, 'arg'],'acg':[ 0.86, 0.18, 'thr'],'ggu':[ 0.89, 0.12, 'gly'],'ugg':[ 1.54, 1.0, 'trp'],
'cuu':[ 0.85, 0.08, 'leu'],'uga':[ 0.25, 0.55, 'ter'],'uaa':[ 0.11, 0.25, 'ter'],'uag':[ 0.09, 0.2, 'ter'],
}
# This is the same as chromosome_codon_usage but with modified frequency and aa_ratio
cds_codon_usage = {
'aug':[ 1.83, 1.0, 'met'],'cag':[3.91, 0.35, 'gln'],'ccg':[ 1.49, 0.28, 'pro'],'aga': [0.5, 0.22, 'arg'],
'acu': [0.68, 0.36, 'thr'],'cuc': [2.77, 0.43, 'leu'],'guu': [1.66, 0.51, 'val'],'cug': [1.64, 0.73, 'leu'],
'ugc': [1.63, 0.87, 'cys'],'ucc': [2.01, 0.95, 'ser'],'gug': [4.41, 0.15, 'val'],'cua': [0.45, 0.24, 'leu'],
'aca': [2.02, 0.31, 'thr'],'ucu': [1.05, 0.43, 'ser'],'gca': [0.38, 0.67, 'ala'],'gaa': [2.65, 0.26, 'glu'],
'gau': [0.34, 0.3, 'asp'],'uug': [1.92, 0.49, 'leu'],'gac': [3.15, 0.5, 'asp'],'ccu': [2.51, 0.61, 'pro'],
'gga': [0.24, 0.77, 'gly'],'uuu': [3.07, 0.81, 'phe'],'aaa': [1.33, 0.2, 'lys'],'uuc': [2.34, 0.49, 'phe'],
'cau': [2.74, 0.56, 'his'],'auc': [2.14, 0.66, 'ile'],'aau': [2.88, 0.73, 'asn'],'cca': [0.5, 0.22, 'pro'],
'gcu': [1.56, 0.39, 'ala'],'auu': [0.8, 0.45, 'ile'],'gag': [4.8, 0.14, 'glu'],'uca': [0.84, 0.31, 'ser'],
'aac': [0.82, 0.57, 'asn'],'acc': [0.22, 0.25, 'thr'],'agu': [0.8, 0.2, 'ser'],'ugu': [0.8, 0.43, 'cys'],
'aag': [0.15, 0.4, 'lys'],'cgc': [0.67, 0.15, 'arg'],'uau': [0.65, 0.39, 'tyr'],'guc': [1.76, 0.38, 'val'],
'ucg':[ 0.71, 0.19, 'ser'],'agc':[ 1.52, 0.22, 'ser'],'uac':[ 1.62, 0.21, 'tyr'],'ggc':[ 2.19, 0.13, 'gly'],
'uua':[ 0.28, 0.33, 'leu'],'aua':[ 1.3, 0.19, 'ile'],'cgg':[ 1.73, 0.26, 'arg'],'ccc':[ 2.88, 0.1, 'pro'],
'gcg':[ 2.41, 0.27, 'ala'],'ggg':[ 1.11, 0.38, 'gly'],'caa':[ 1.69, 0.15, 'gln'],'agg':[ 2.47, 0.32, 'arg'],
'gua':[ 0.37, 0.16, 'val'],'cga':[ 1.55, 0.38, 'arg'],'cac':[ 2.13, 0.14, 'his'],'gcc':[ 2.9, 0.47, 'ala'],
'cgu':[ 0.49, 0.17, 'arg'],'acg':[ 1.86, 0.28, 'thr'],'ggu':[ 1.89, 0.32, 'gly'],'ugg':[ 2.54, 1.4, 'trp'],
'cuu':[ 1.85, 0.18, 'leu'],'uga':[ 1.25, 0.25, 'ter'],'uaa':[ 1.11, 0.15, 'ter'],'uag':[ 1.09, 0.3, 'ter'],
}
"""
this will turn the dictionary of codons into 16 sub-dicts, each holding the 4 codons of one row of the codon table
dnaList = {}
dnaList[0] = {"CCU":["CCP","CCQ","CCT"],"ABC":["ABD","ABE","ABF"],"EFG":["657","AasdE","AasdBF"] }
dnaList[1] = {"CCU":["CCP","CCQ","CCT"],"ABC":["ABD","ABE","ABF"] }
dnaList[2] = {"CCU":["CCP","CCQ","CCT"],"ABC":["ABD","ABE","ABF"]
"""
"""
--------------------------------------------------------------------------------
Type 0,1,2 Middle Layer Dummy data example
--------------------------------------------------------------------------------
{'output':
{'type': 2, 'pre_set': True, 'name': 'AB001090', 'enzyme': 'ecori'},
'originaldata': {'locus_sequence': 'aggagcagagcaggcaatttcaccaccaaattatgtatg',
'chr_location': '16q24',
'locus_name': 'AB001103',
'chr_name': 'chromosome 16',
'gene_name': '',
'product_name': 'H-cadherin',
'product_id': 'BAA32411.1',
'seq_location': '1669..1713,6321..6463,6818..7051,7358..7576,7950..7957',
'whole_seq': 'atgcagccgagaactctgcaacgcggcgggggccctgcgcttcagcctgccctcagtcctgctcctcagcctcttcagcttagcttgtctgtga',
'translation': 'MQPRTPLVLCVLLSQVLDLLRFSLPSVLLLSLFSLACL'},
'cds_translation_alignment': {'whole_seq': 'atgcagctacG<SPAN STYLE=BACKGROUND-COLOR:#0081D5>ATACACACCCTCNNNNTTCAG<SPAN STYLE=BACKGROUND-COLOR:#0081D5>GTCTGTGA</SPAN>GAACTCCTGTCAAAAGAC'}
"""
"""
this is an example of the dummy data I would expect to receive from the middle layer.
"""
type012_dummy_data = {'output' :{'type': 2, 'pre_set': True, 'name': 'AB001090', 'enzyme': 'ecori'},
'originaldata':{'locus_sequence': 'aggagcagagcaggcaatttcaccaccaaattatgtatg',
'chr_location': '16q24',
'locus_name': 'AB001103',
'chr_name': 'chromosome 16',
'gene_name': '',
'product_name': 'H-cadherin',
'product_id': 'BAA32411.1',
'seq_location': '1669..1713,6321..6463,6818..7051,7358..7576,7950..7957',
'whole_seq': 'atgcagccgagaactctgcaacgcggcgggggccctgcgcttcagcctgccctcagtcctgctcctcagcctcttcagcttagcttgtctgtga',
'translation': 'MQPRTPLVLCVLLSQVLDLLRFSLPSVLLLSLFSLACL'},
'cds_translation_alignment' :{'whole_seq': 'atgcagctacG<SPAN STYLE=BACKGROUND-COLOR:#0081D5>ATACACACCCTCNNNNTTCAG<SPAN STYLE=BACKGROUND-COLOR:#0081D5>GTCTGTGA</SPAN>GAACTCCTGTCAAAAGAC'},
'chromosome_codon_usage': chromosome_codon_usage,
'cds_codon_usage' : cds_codon_usage}
"""
----------------------------------------------------------------------------------------------------------------
Type 3 chromosomal location dummy data
---------------------------------------------------------------------------------------------------------------
This is the format that data from the middle layer should return to the front layer from a location query.
It returns 1 dictionary of the original input for identifying purposes. It then returns 4 dicts, each
containing a list of the associated genes, proteins, accession numbers and locations for that location query.
"""
type3_dummy_data = {
'output' :{'type': 3, 'pre_set': True, 'name': 'AB001090', 'enzyme': 'ecori'},
'gene' :['gene1','gene2','gene3','gene4'],
'protein' :['protein1','protein2','protein3','protein4'],
'accession' :['accession1','accession2','accession3','accession4'],
'location'  :['location1','location2','location3','location4'],
}
"""
--------------------------------------------------------------------------------
TYPE 4 duplicate dummy data
--------------------------------------------------------------------------------
The type being changed to type 4 (as opposed to type 0 = gene, 1 = protein, 2 = accession, 3 = location) indicates
that there have been multiple returns for that "name" and type. The returned data also includes the
product_name, gene_name, product_id and accession_num as stored in the middle layer.
front_layer_output = {'type': 2, 'pre_set': True, 'name': 'AB001090', 'enzyme': 'ecori'}
The additional data is to help the user identify which version they want to query and
gives them the option to select which version they would like. Once a version is selected I return to the middle layer
a single dictionary of the selected version, consisting of the normal output + the product_id as a unique identifier.
front_layer_duplicate_output = {'type':4, 'pre_set':True, 'name':'AB001090', 'enzyme':'ecori', 'product_id':'BAA32411.1'}
"""
type4_dummy_data = [
{'type':4, 'pre_set':True, 'name':'AB001090', 'enzyme':'ecori', 'product_id':'BAA32411.1','product_name':'-14 gene' , 'gene_name':'nthl1/nth1','accession_num':'ab014460'},
{'type':4, 'pre_set':True, 'name':'AB001090', 'enzyme':'ecori', 'product_id':'cab06556.1','product_name':'-14 gene protein ', 'gene_name':'','accession_num':'z84722'},
{'type':4, 'pre_set':True, 'name':'AB001090', 'enzyme':'ecori', 'product_id':'abd95907.1','product_name':'11 beta-hydroxysteroid dehydrogenase 2', 'gene_name': 'c16orf35','accession_num':'dq431198'}
]
"""
-------------------------------------------------------------------------------
TYPE 5 Not found dummy data
-------------------------------------------------------------------------------
Not-found dummy data is returned when the request is a user input that does not have an entry
in our database. It is returned to the middle layer
as a dictionary in the same format as the middle layer output but with the "type" value changed to 5.
This is then presented to the user with a message saying that no entry was found.
"""
type5_dummy_data = {'type': 5, 'pre_set': True, 'name': 'AB001090', 'enzyme': 'ecori'}
"""
----------------------------------------------------------------------------------------------
Programs
----------------------------------------------------------------------------------------------
"""
"""
--------------------------------------------------------------------------------------
codon_organiser
---------------------------------------------------------------------------------------
input: Dictionary
a dict with the codons as keys and [frequency_100, ratio_aa, amino_acid] as values
output: Dictionary
a dict with 16 sub-dicts, each sub-dict representing a row of a codon table
"""
def codon_organiser(codons):
    d = codons
    dna_list = {}
    # Each row holds the 4 codons sharing a first and third base; the columns vary the second base
    dna_list[0] = {'uuu': d['uuu'], 'ucu': d['ucu'], 'uau': d['uau'], 'ugu': d['ugu']}
    dna_list[1] = {'uuc': d['uuc'], 'ucc': d['ucc'], 'uac': d['uac'], 'ugc': d['ugc']}
    dna_list[2] = {'uua': d['uua'], 'uca': d['uca'], 'uaa': d['uaa'], 'uga': d['uga']}
    dna_list[3] = {'uug': d['uug'], 'ucg': d['ucg'], 'uag': d['uag'], 'ugg': d['ugg']}
    dna_list[4] = {'cuu': d['cuu'], 'ccu': d['ccu'], 'cau': d['cau'], 'cgu': d['cgu']}
    dna_list[5] = {'cuc': d['cuc'], 'ccc': d['ccc'], 'cac': d['cac'], 'cgc': d['cgc']}
    dna_list[6] = {'cua': d['cua'], 'cca': d['cca'], 'caa': d['caa'], 'cga': d['cga']}
    dna_list[7] = {'cug': d['cug'], 'ccg': d['ccg'], 'cag': d['cag'], 'cgg': d['cgg']}
    dna_list[8] = {'auu': d['auu'], 'acu': d['acu'], 'aau': d['aau'], 'agu': d['agu']}
    dna_list[9] = {'auc': d['auc'], 'acc': d['acc'], 'aac': d['aac'], 'agc': d['agc']}
    dna_list[10] = {'aua': d['aua'], 'aca': d['aca'], 'aaa': d['aaa'], 'aga': d['aga']}
    dna_list[11] = {'aug': d['aug'], 'acg': d['acg'], 'aag': d['aag'], 'agg': d['agg']}
    dna_list[12] = {'guu': d['guu'], 'gcu': d['gcu'], 'gau': d['gau'], 'ggu': d['ggu']}
    dna_list[13] = {'guc': d['guc'], 'gcc': d['gcc'], 'gac': d['gac'], 'ggc': d['ggc']}
    dna_list[14] = {'gua': d['gua'], 'gca': d['gca'], 'gaa': d['gaa'], 'gga': d['gga']}
    dna_list[15] = {'gug': d['gug'], 'gcg': d['gcg'], 'gag': d['gag'], 'ggg': d['ggg']}
    return dna_list
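# A minimal usage sketch (my own illustration): organising the dummy chromosome
# data above and inspecting one row of the resulting codon table.
def _demo_codon_rows():
    table = codon_organiser(chromosome_codon_usage)
    # e.g. table[0] -> {'uuu': [1.07, 0.31, 'phe'], 'ucu': [1.05, 0.13, 'ser'], ...}
    return table[0]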
"""
----------------------------------------------------------------------------------------------
program for type 0,1,2 processing (gene, protein and accession)
----------------------------------------------------------------------------------------------
The purpose of this program is to take the output from a gene, protein or accession query (types 0, 1, 2 respectively)
and order it into an output list that I can pass to the websites as an array.
input : a dictionary
containing 4 sub dictionaries
input = {
output:{output values},
originaldata :{ retrieved data from the Database},
chromosome_codon_usage : {codons as keys and value attributes},
cds_codon_usage : {codons as keys and value attributes}
}
output: a list
a list of 3 items. 1st an ordered dict containing the product information (name, type, seq etc).
2nd a dict containing 16 sub dicts, each holding 4 codons.
(this represents the structure I will use for making a table.)
3rd the same 16-row structure for the cds codon usage.
output = [
general data ordered dict,
organised chromosome codon usage,
organised cds codon usage
]
variables
middle_data = output from the middle layer
output_dict = collections.OrderedDict()
"""
def gene_protein_accession_processing(middle_data):
list_of_dicts = []
output_dict = collections.OrderedDict()
output = middle_data
output_dict['searched_name']=output['output']['name']
if output['output']['type'] == 0:
output_dict['searched_type'] = "gene"
elif output['output']['type'] == 1:
output_dict['searched_type'] = "protein"
elif output['output']['type'] == 2:
output_dict['searched_type'] = "acession"
else:
output_dict['searched_type']= "not found"
output_dict['chromosome'] =output['originaldata']['chr_name']
output_dict['chromosomal location'] =output['originaldata']['chr_location']
output_dict['locus name'] =output['originaldata']['locus_name']
# not sure if we need the whole sequence
#output_dict['locus sequence'] =output['originaldata']['locus_sequence']
output_dict['gene name'] =output['originaldata']['gene_name']
output_dict['cds'] =output['originaldata']['whole_seq']
output_dict['product name'] =output['originaldata']['product_name']
output_dict['translation'] =output['originaldata']['translation']
output_dict['whole sequence'] =output['cds_translation_alignment']['whole_seq']
    # organise the codon usage dicts that were passed in, rather than the module-level dummy data
    organised_chromosome_codon = codon_organiser(output['chromosome_codon_usage'])
    organised_cds_codon = codon_organiser(output['cds_codon_usage'])
list_of_dicts += [output_dict]
list_of_dicts += [organised_chromosome_codon]
list_of_dicts += [organised_cds_codon]
return list_of_dicts
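# A minimal usage sketch (my own illustration): processing the type 0/1/2 dummy
# data defined above and unpacking the three items of the result.
def _demo_gene_protein_accession():
    general, chromosome_rows, cds_rows = gene_protein_accession_processing(type012_dummy_data)
    return general['searched_name'], general['searched_type']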
"""
------------------------------------------------------------------------------------------------
Program for Type 3 processing (location)
-----------------------------------------------------------------------------------------------
input: a dictionary
output: a dictionary
an ordered dictionary with 6 keys: searched_name, searched_type, associated_genes, associated_protein,
associated_accession, associated_location.
variables: middle_data
output_dict
"""
def location_processing(middle_data):
output_dict = collections.OrderedDict()
output = middle_data
output_dict["searched_name"]= output["output"]["name"]
if output['output']['type'] == 3:
output_dict['searched_type'] = "location"
else:
return TypeError("This is in locaiton_processing but is not type 3")
output_dict["associated_genes"] = output["gene"]
output_dict["associated_protein"] = output["protein"]
# output_dict["associated_protein_id"] = output['protein_id']
output_dict["associated_accession"] = output["accession"]
output_dict["associated_location"] = output["location"]
return(output_dict)
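# A minimal usage sketch (my own illustration): processing the type 3 dummy data.
def _demo_location():
    processed = location_processing(type3_dummy_data)
    return processed['associated_genes']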
"""
------------------------------------------------------------------------------------------------
Program for Type 4 processing (duplicate)
-----------------------------------------------------------------------------------------------
input: dictionary/list
This contains the query sent to the middle layer and the additional information brought back. It
can take the form of
i) type 0,1,2,3 a dictionary
ii) type 4 a list
iii) type 5 a dictionary
output: list
a list in which each entry is a dict with two keys: 'query', holding the original query plus the
product_id as a unique identifier, and 'additional_data', holding the extra identifying fields
"""
"""
The additional data is to help the user identify which version they want to query and
gives them the option to select which version they would like. Once a version is selected I return to the middle layer
a single dictionary of the selected version, consisting of the normal output + the product_id as a unique identifier.
front_layer_duplicate_output = {'type':4, 'pre_set':True, 'name':'AB001090', 'enzyme':'ecori', 'product_id':'BAA32411.1'}
example of input
"""
type4_output = [
{'type':4, 'pre_set':True, 'name':'AB001090', 'enzyme':'ecori', 'product_id':'BAA32411.1','product_name':'-14 gene' , 'gene_name':'nthl1/nth1','accession_num':'ab014460'},
{'type':4, 'pre_set':True, 'name':'AB001090', 'enzyme':'ecori', 'product_id':'cab06556.1','product_name':'-14 gene protein ', 'gene_name':'','accession_num':'z84722'},
{'type':4, 'pre_set':True, 'name':'AB001090', 'enzyme':'ecori', 'product_id':'abd95907.1','product_name':'11 beta-hydroxysteroid dehydrogenase 2', 'gene_name': 'c16orf35','accession_num':'dq431198'}
]
"""
example of output
type4_processed_data = [
{'query': {'type':4, 'pre_set':True, 'name':'AB001090', 'enzyme':'ecori'}, "additional_data" : {'product_id':'BAA32411.1','product_name':'-14 gene' , 'gene_name':'nthl1/nth1','accession_num':'ab014460'}},
{'query': {'type':4, 'pre_set':True, 'name':'AB001090', 'enzyme':'ecori'}, "additional_data" : {'product_id':'C34511.1','product_name':'-14 gene' , 'gene_name':'nthl1/nth1','accession_num':'ab014460'}},
{'query': {'type':4, 'pre_set':True, 'name':'AB001090', 'enzyme':'ecori'}, "additional_data" : {'product_id':'A523162411.1','product_name':'-14 gene' , 'gene_name':'nthl1/nth1','accession_num':'ab014460'}}
]
variables: duplicate_data
"""
def duplicate_processing(duplicate_data):
output_list = []
output = duplicate_data
for d in output:
temp_dict = {}
temp_dict["query"] = {'name': d['name'], 'type' : d['type'],'pre_set' : d['pre_set'],'enzyme':d['enzyme'],"product_id": d['product_id']}
temp_dict["additional_data"] = {
            'product_id' : d['product_id'], 'product_name' : d['product_name'],
'gene_name' : d['gene_name'], 'accession_num' : d['accession_num']
}
output_list.append(temp_dict)
return output_list
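# A minimal usage sketch (my own illustration): splitting the type 4 dummy data
# into query / additional_data pairs.
def _demo_duplicates():
    return [(e['query']['product_id'], e['additional_data']['product_name'])
            for e in duplicate_processing(type4_dummy_data)]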
"""
------------------------------------------------------------------------------------------------
Program for Type 5 processing (No entry found)
-----------------------------------------------------------------------------------------------
Input : a dict of the original query with the type changed to 5
type5_output = {'type': 5, 'pre_set': True, 'name': 'AB001090', 'enzyme': 'ecori'}
output: {'type': 5, 'pre_set': True, 'name': 'AB001090', 'enzyme': 'ecori'}
The data is passed through unmodified so there is actually no program required
"""
"""
------------------------------------------------------------------------------------------------
Program for determining Type of middle layer output
-----------------------------------------------------------------------------------------------
INPUT: The output from the middle layer. This can take the form of a nested dict or a list of dicts.
OUTPUT: Dictionary
an ordered dict (or list) with the correctly formatted data for that call.
This determines what type the output data is [0,1,2,3,4,5] and which processing operations to apply to it.
variables : middle_data
"""
def determine_type_of_middle_output(middle_layer_return):
middle_data = middle_layer_return
if "output" in middle_data:
        # 'output' in middle_data indicates a type 0, 1, 2 (dict of dicts) or type 3 return from the middle layer
        if middle_data["output"]["type"] in (0, 1, 2):
            processed_data = gene_protein_accession_processing(middle_data)
            return processed_data
# this indicates a type 3 list of dicts for a location answer
elif middle_data["output"]["type"] ==3:
processed_data = location_processing(middle_data)
return(processed_data)
    # If the middle layer returned a list (type 4) there is no top-level 'type' key,
    # so indexing middle_data["type"] directly would raise a TypeError.
    elif 'type' in middle_data:
        # a type 5 (not found) dict is passed through unmodified
        return middle_data
elif any( 'type' in d for d in middle_data):
temp_val = ([d['type'] for d in middle_data if 'type' in d])
if 4 in temp_val :
processed_data = duplicate_processing(middle_data)
return(processed_data)
else:
return TypeError("middle_layer_output_processing could not determine data catagory ")
#print(determine_type_of_middle_output(type3_dummy_data))
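# A minimal smoke test (my own illustration) routing the dummy data defined above
# through the dispatcher.
if __name__ == '__main__':
    print(determine_type_of_middle_output(type012_dummy_data))
    print(determine_type_of_middle_output(type3_dummy_data))
    print(determine_type_of_middle_output(type4_dummy_data))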
"""
This is a dummy example of the output from the middle layer to the front layer: it contains the initial output with the query,
the unmodified db dictionary, and the modified extra information, sent as a dictionary of dictionaries.
"""
```
|
{
"source": "jess010/pandas",
"score": 3
}
|
#### File: tests/categorical/test_missing.py
```python
import numpy as np
import pandas.util.testing as tm
from pandas import (Categorical, Index, isna)
from pandas.compat import lrange
from pandas.core.dtypes.dtypes import CategoricalDtype
class TestCategoricalMissing(object):
def test_na_flags_int_categories(self):
# #1457
categories = lrange(10)
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
tm.assert_numpy_array_equal(isna(cat), labels == -1)
def test_nan_handling(self):
# Nans are represented as -1 in codes
c = Categorical(["a", "b", np.nan, "a"])
tm.assert_index_equal(c.categories, Index(["a", "b"]))
tm.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0],
dtype=np.int8))
c[1] = np.nan
tm.assert_index_equal(c.categories, Index(["a", "b"]))
tm.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0],
dtype=np.int8))
# Adding nan to categories should make assigned nan point to the
# category!
c = Categorical(["a", "b", np.nan, "a"])
tm.assert_index_equal(c.categories, Index(["a", "b"]))
tm.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0],
dtype=np.int8))
def test_set_dtype_nans(self):
c = Categorical(['a', 'b', np.nan])
result = c._set_dtype(CategoricalDtype(['a', 'c']))
tm.assert_numpy_array_equal(result.codes, np.array([0, -1, -1],
dtype='int8'))
def test_set_item_nan(self):
cat = Categorical([1, 2, 3])
cat[1] = np.nan
exp = Categorical([1, np.nan, 3], categories=[1, 2, 3])
tm.assert_categorical_equal(cat, exp)
```
|
{
"source": "Jess1006/MystanCodeProjects",
"score": 4
}
|
#### File: MystanCodeProjects/breakout_game/breakout.py
```python
from campy.gui.events.timer import pause
from breakoutgraphics import BreakoutGraphics
FRAME_RATE = 1000 / 120 # 120 frames per second
NUM_LIVES = 3 # Number of attempts
def main():
graphics = BreakoutGraphics()
lives = NUM_LIVES
# getter: get the initial ball velocity
dx = graphics.get_ball_x_velocity()
dy = graphics.get_ball_y_velocity()
# animation while loop here
while True:
# pause
pause(FRAME_RATE)
        # check 1: the ball fell below the window, so the player loses a life
if graphics.ball_is_out_of_window():
lives -= 1
if lives > 0:
graphics.restart()
else:
graphics.window.add(graphics.lose)
break
        # check 2: all bricks have been cleared, so the player wins
if graphics.bricks_cleared():
graphics.window.remove(graphics.ball)
graphics.window.add(graphics.win)
break
        # check 3 & update: once the game has started, move the ball and handle collisions
if graphics.is_game_start:
graphics.ball.move(dx, dy)
graphics.collision_or_not()
if graphics.collide:
dy *= -1
else:
if graphics.ball.x < 0 or graphics.ball.x + graphics.ball.width >= graphics.window.width:
dx = -dx
if graphics.ball.y < 0:
dy = -dy
if __name__ == '__main__':
main()
```
|
{
"source": "Jess3Jane/pyforms",
"score": 2
}
|
#### File: pyforms/terminal/BaseWidget.py
```python
from pyforms.terminal.Controls.ControlFile import ControlFile
from pyforms.terminal.Controls.ControlSlider import ControlSlider
from pyforms.terminal.Controls.ControlText import ControlText
from pyforms.terminal.Controls.ControlCombo import ControlCombo
from pyforms.terminal.Controls.ControlCheckBox import ControlCheckBox
from pyforms.terminal.Controls.ControlBase import ControlBase
from pyforms.terminal.Controls.ControlDir import ControlDir
from pyforms.terminal.Controls.ControlNumber import ControlNumber
from datetime import datetime, timedelta
import argparse, uuid, os, shutil, time, sys, subprocess
import simplejson as json
try:
import requests
except:
print("No requests lib")
class BaseWidget(object):
def __init__(self, *args, **kwargs):
self._parser = argparse.ArgumentParser()
self._controlsPrefix = ''
self._title = kwargs.get('title', args[0] if len(args)>0 else '')
self.stop = False
self._conf = kwargs.get('load', None)
############################################################################
############ Module functions #############################################
############################################################################
def init_form(self, parse=True):
result = {}
for fieldname, var in self.controls.items():
name = var._name
if isinstance(var, (
ControlFile, ControlSlider, ControlText,
ControlCombo,ControlCheckBox, ControlDir, ControlNumber
)
):
self._parser.add_argument("--%s" % name, help=var.label)
if parse:
self._parser.add_argument('terminal_mode', type=str, default='terminal_mode', help='Flag to run pyforms in terminal mode')
self._parser.add_argument(
"--exec{0}".format(self._controlsPrefix),
default='',
help='Function from the application that should be executed. Use | to separate a list of functions.')
self._parser.add_argument(
"--load{0}".format(self._controlsPrefix),
default=None,
help='Load a json file containing the pyforms form configuration.')
self._args = self._parser.parse_args()
self.__parse_terminal_parameters()
self.__execute_events()
def load_form(self, data, path=None):
allparams = self.controls
if hasattr(self, 'load_order'):
for name in self.load_order:
param = allparams[name]
if name in data:
param.load_form(data[name])
else:
for name, param in allparams.items():
if name in data:
param.load_form(data[name])
def __parse_terminal_parameters(self):
for fieldname, var in self.controls.items():
name = var._name
if self._args.__dict__.get(name, None):
if isinstance(var, ControlFile):
value = self._args.__dict__[name]
if value!=None and (value.startswith('http://') or value.startswith('https://')):
local_filename = value.split('/')[-1]
outputFileName = os.path.join('input', local_filename)
self.__downloadFile(value, outputFileName)
var.value = outputFileName
else:
var.value = value
if isinstance(var, ControlDir):
value = self._args.__dict__[name]
var.value = value
elif isinstance(var, (ControlText, ControlCombo)):
var.value = self._args.__dict__[name]
elif isinstance(var, ControlCheckBox):
var.value = self._args.__dict__[name]=='True'
elif isinstance(var, (ControlSlider, ControlNumber) ):
var.value = int(self._args.__dict__[name])
if self._args.load:
print('\n--------- LOADING CONFIG ------------------')
with open(self._args.load) as infile:
data = json.load(infile)
self.load_form(data, os.path.dirname(self._args.load))
print('--------- END LOADING CONFIG --------------\n')
elif self._conf is not None:
print('\n--------- LOADING DEFAULT CONFIG ------------------')
self.load_form(self._conf, '.')
print('--------- END LOADING DEFAULT CONFIG --------------\n')
def __execute_events(self):
for function in self._args.__dict__.get("exec{0}".format(self._controlsPrefix), []).split('|'):
if len(function)>0:
getattr(self, function)()
res = {}
for controlName, control in self.controls.items():
res[controlName] = {'value': control.value }
with open('out-parameters.txt', 'w') as outfile:
outfile.write( str(res) )
    def __downloadFile(self, url, outFilepath):
        chunksize = 512*1024
        r = requests.get(url, stream=True)
        # iter_content yields bytes, so open the output file in binary mode
        with open(outFilepath, 'wb') as f:
            for chunk in r.iter_content(chunk_size=chunksize):
                if chunk:
                    f.write(chunk)
                    f.flush()
def execute(self): pass
def start_progress(self, total = 100):
self._total_processing_count = total
self._processing_initial_time = time.time()
self._processing_count = 1
def update_progress(self):
div = int(self._total_processing_count/400)
if div==0: div = 1
if (self._processing_count % div )==0:
self._processing_last_time = time.time()
total_passed_time = self._processing_last_time - self._processing_initial_time
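            # Linear extrapolation: assume the average time per item so far holds for the remaining items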
remaining_time = ( (self._total_processing_count * total_passed_time) / self._processing_count ) - total_passed_time
if remaining_time<0: remaining_time = 0
time_remaining = datetime(1,1,1) + timedelta(seconds=remaining_time )
time_elapsed = datetime(1,1,1) + timedelta(seconds=(total_passed_time) )
values = (
time_elapsed.day-1, time_elapsed.hour, time_elapsed.minute, time_elapsed.second,
time_remaining.day-1, time_remaining.hour, time_remaining.minute, time_remaining.second,
(float(self._processing_count)/float(self._total_processing_count))*100.0, self._processing_count, self._total_processing_count,
)
print("Elapsed: %d:%d:%d:%d; Remaining: %d:%d:%d:%d; Processed %0.2f %% (%d/%d); | \r" % values)
sys.stdout.flush()
self._processing_count += 1
def end_progress(self):
self._processing_count = self._total_processing_count
self.update_progress()
def __savePID(self, pid):
try:
with open('pending_PID.txt', 'w') as f:
f.write(str(pid))
f.write('\n')
except (IOError) as e:
raise e
def executeCommand(self, cmd, cwd=None, env=None):
if cwd!=None:
currentdirectory = os.getcwd()
os.chdir(cwd)
print(" ".join(cmd))
proc = subprocess.Popen(cmd)
if cwd!=None: os.chdir(currentdirectory)
self.__savePID(proc.pid)
proc.wait()
#(output, error) = proc.communicate()
#if error: print 'error: ', error
#print 'output: ', output
return ''#output
def exec_terminal_cmd(self, args, **kwargs):
print('TERMINAL <<',' '.join(args) )
sys.stdout.flush()
proc = subprocess.Popen(args, **kwargs)
self.__savePID(proc.pid)
proc.wait()
sys.stdout.flush()
@property
def controls(self):
"""
Return all the form controls from the the module
"""
result = {}
for name, var in vars(self).items():
if isinstance(var, ControlBase):
var._name = self._controlsPrefix+"-"+name if len(self._controlsPrefix)>0 else name
result[name] = var
return result
```
#### File: terminal/Controls/ControlBase.py
```python
import os, pickle,uuid
class ControlBase(object):
_value = None
_label = None
_controlHTML = ""
def __init__(self, *args, **kwargs):
self._id = uuid.uuid4()
self._value = kwargs.get('default', None)
self._parent = 1
self._label = kwargs.get('label', args[0] if len(args)>0 else '')
def init_form(self): pass
def load_form(self, data, path=None):
oldvalue = self.value
self.value = data.get('value', None)
if oldvalue!=self.value: self.changed_event()
def changed_event(self):
"""
Function called when ever the Control value is changed
"""
return True
def show(self):pass
def hide(self):pass
def open_popup_menu(self, position): pass
def add_popup_submenu_option(self, label, options): pass
def add_popup_menu_option(self, label, functionAction = None): pass
def __repr__(self): return self.value
############################################################################
############ Properties ####################################################
############################################################################
@property
def enabled(self): return True
@enabled.setter
def enabled(self, value): pass
############################################################################
@property
def value(self): return self._value
@value.setter
def value(self, value):
oldvalue = self._value
self._value = value
if oldvalue!=value: self.changed_event()
############################################################################
@property
def label(self): return self._label
@label.setter
def label(self, value): self._label = value
############################################################################
@property
def form(self): return None
############################################################################
@property
def parent(self): return self._parent
@parent.setter
def parent(self, value): self._parent = value
```
#### File: terminal/Controls/ControlCheckBoxList.py
```python
from pyforms.terminal.Controls.ControlBase import ControlBase
class ControlCheckBoxList(ControlBase):
def __init__(self, *args, **kwargs):
if 'default' not in kwargs: kwargs['default'] = []
super(ControlCheckBoxList, self).__init__(*args, **kwargs)
def clear(self):
self._value = []
def __add__(self, val):
self._value.append(val)
return self
def __sub__(self, other):
self._value.remove(other)
return self
def load_form(self, data, path=None):
results = data['selected']
for row in range(self.count):
item = self._value[row][0]
if item != None and str(item) in results:
self._value[row] = [item, True]
else:
self._value[row] = [item, False]
self.changed_event()
@property
def count(self):
return len(self._value)
@property
def selected_row_index(self):
return -1
@property
def value(self):
results = []
for item, checked in self._value:
if checked:
results.append(item)
return results
@value.setter
def value(self, value):
self.clear()
for row in value: self += row
self.changed_event()
@property
def items(self):
for item, checked in self._value:
yield (item, checked)
```
#### File: terminal/Controls/ControlEmptyWidget.py
```python
from pyforms.terminal.Controls.ControlBase import ControlBase
class ControlEmptyWidget(ControlBase):
def load_form(self, data, path=None):
if 'value' in data and self.value is not None and self.value != '':
self.value.load_form(data['value'], path)
```
|
{
"source": "jessamynsmith/django-enumfields",
"score": 2
}
|
#### File: django-enumfields/tests/test_django_admin.py
```python
import uuid
try:
from django.contrib.auth import get_user_model
except ImportError: # `get_user_model` only exists from Django 1.5 on.
from django.contrib.auth.models import User
get_user_model = lambda: User
from django.core.urlresolvers import reverse
from django.test import Client
import pytest
from enumfields import EnumIntegerField
from .models import MyModel
@pytest.fixture
def client():
return Client()
SUPERUSER_USERNAME = "superuser"
SUPERUSER_PASS = "<PASSWORD>"
@pytest.fixture
def superuser():
return get_user_model().objects.create_superuser(username=SUPERUSER_USERNAME, password=<PASSWORD>,
email="<EMAIL>")
@pytest.fixture
def superuser_client(client, superuser):
client.login(username=SUPERUSER_USERNAME, password=<PASSWORD>)
return client
@pytest.mark.django_db
@pytest.mark.urls('tests.urls')
def test_model_admin(superuser_client):
url = reverse("admin:tests_mymodel_add")
secret_uuid = str(uuid.uuid4())
post_data = {
'color': MyModel.Color.RED.value,
'taste': MyModel.Taste.UMAMI.value,
'taste_int': MyModel.Taste.SWEET.value,
'random_code': secret_uuid
}
response = superuser_client.post(url, follow=True, data=post_data)
response.render()
text = response.content
assert b"This field is required" not in text
assert b"Select a valid choice" not in text
try:
inst = MyModel.objects.get(random_code=secret_uuid)
except MyModel.DoesNotExist:
assert False, "Object wasn't created in the database"
assert inst.color == MyModel.Color.RED, "Redness not assured"
assert inst.taste == MyModel.Taste.UMAMI, "Umami not there"
assert inst.taste_int == MyModel.Taste.SWEET, "Not sweet enough"
def test_django_admin_lookup_value_for_integer_enum_field():
field = EnumIntegerField(MyModel.Taste)
assert field.get_prep_value(str(MyModel.Taste.BITTER)) == 3, "get_prep_value should be able to convert from strings"
```
|
{
"source": "jessamynsmith/django-getting-started-source",
"score": 3
}
|
#### File: open_weather_map/tests/test_wrapper.py
```python
from django.test import TestCase
from mock import patch
import requests
from libs.open_weather_map.wrapper import OpenWeatherMap
class OpenWeatherMapTestCase(TestCase):
def setUp(self):
# Create a reference to the class being tested
self.open_weather_map = OpenWeatherMap()
# Patch out requests.get to avoid making a call to the actual API
@patch("requests.get")
def test_failure(self, mock_get):
# Set up a response with a failure error code to test error handling
response = requests.Response()
response.status_code = requests.codes.bad_request
# Set the mock to return the failure response
mock_get.return_value = response
forecast = self.open_weather_map.get_forecast()
# Ensure that if an error occurs, we get back an empty object and no exceptions are thrown
self.assertEqual({}, forecast)
# Patch out requests.get to avoid making a call to the actual API
@patch("requests.get")
def test_success(self, mock_get):
# Set up a response with a successful error code and some data to test the success case
response = requests.Response()
response.status_code = requests.codes.ok
response._content = b'{"city": {"name": "Sydney"}}'
# Set the mock to return the success response
mock_get.return_value = response
forecast = self.open_weather_map.get_forecast()
# Ensure that the data we set in the response body has been parsed correctly
self.assertEqual("Sydney", forecast['city']['name'])
```
|
{
"source": "jessamynsmith/django-opendebates",
"score": 2
}
|
#### File: django-opendebates/opendebates/celery.py
```python
from __future__ import absolute_import
import os
from celery import Celery
# set the default Django settings module for the 'celery' program.
if os.path.exists('opendebates/local_settings.py'):
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "opendebates.local_settings")
else:
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "opendebates.settings")
from django.conf import settings # noqa
app = Celery('opendebates')
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request))
```
#### File: opendebates_emails/tests/test_models.py
```python
from django.test import TestCase
from mock import patch
from opendebates.tests.factories import SubmissionFactory
from opendebates_emails.models import send_email
from opendebates_emails.tests.factories import EmailTemplateFactory
@patch('opendebates_emails.models.send_email_task')
class EmailTemplateTestCase(TestCase):
def setUp(self):
self.idea = SubmissionFactory()
self.ctx = {'idea': self.idea}
self.type = 'template_name'
EmailTemplateFactory(type=self.type)
def test_sends_in_background(self, mock_task):
send_email(self.type, self.ctx)
mock_task.delay.assert_called_with(self.type, self.idea.pk)
def test_ctx_no_idea(self, mock_task):
send_email(self.type, {})
self.assertFalse(mock_task.called)
def test_ctx_not_submission(self, mock_task):
send_email(self.type, {'idea': "String not Submission object"})
self.assertFalse(mock_task.called)
def test_no_such_template(self, mock_task):
send_email("no_such", self.ctx)
self.assertFalse(mock_task.called)
```
#### File: management/commands/load_zipcode_database.py
```python
from django.core.management.base import BaseCommand
from opendebates.models import ZipCode
import csv
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('csv_location', nargs='+')
def handle(self, *args, **options):
fp = open(options['csv_location'][0])
lines = csv.reader(fp)
for line in lines:
z = ZipCode(zip=line[0], city=line[1], state=line[2])
z.save()
```
#### File: management/commands/update_trending_scores.py
```python
from django.core.management.base import BaseCommand
from opendebates.tasks import update_trending_scores
class Command(BaseCommand):
def handle(self, *args, **options):
update_trending_scores()
```
#### File: opendebates/migrations/0017_enable_unaccent.py
```python
from __future__ import unicode_literals
from django.core.management import call_command
from django.db import migrations, models
def update_search_index(apps, schema):
print("Updating search field...")
call_command("update_search_field", "opendebates")
print("Updating search field...done")
def no_op(apps, schema):
pass
class Migration(migrations.Migration):
dependencies = [
('opendebates', '0016_auto_20160212_1940'),
]
operations = [
migrations.RunSQL(
"""
CREATE EXTENSION IF NOT EXISTS unaccent;
ALTER FUNCTION unaccent(text) IMMUTABLE;
-- The next line doesn't work:
-- CREATE INDEX opendebates_submission_search_idx ON opendebates_submission USING gin(to_tsvector('english', search_index));
""",
"""
DROP EXTENSION IF EXISTS unaccent;
DROP INDEX IF EXISTS opendebates_submission_search_idx;
"""
),
migrations.RunPython(
update_search_index,
no_op
)
]
```
#### File: django-opendebates/opendebates/resolvers.py
```python
from importlib import import_module
from django.conf.urls import url, include
class PrefixedUrlconf(object):
def __init__(self, prefix):
self.prefix = prefix
@property
def urlpatterns(self):
url_module = import_module('opendebates.urls')
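        # Re-emit each pattern unchanged unless it is an include of
        # 'opendebates.prefixed_urls', in which case wrap it with the debate prefix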
return [
pattern
if (
not hasattr(pattern, 'urlconf_name') or
getattr(pattern.urlconf_name, '__name__', None) != 'opendebates.prefixed_urls'
) else
url(r'^{}/'.format(self.prefix), include('opendebates.prefixed_urls'))
for pattern in url_module.urlpatterns
]
```
#### File: opendebates/tests/test_flatpage_metadata_override.py
```python
from django.contrib.flatpages.models import FlatPage
from django.contrib.sites.models import Site
from django.test import TestCase
from django.utils.html import escape
from opendebates.models import FlatPageMetadataOverride
from opendebates import site_defaults
from .factories import SiteFactory, DebateFactory
class FlatPageTest(TestCase):
def setUp(self):
self.site = SiteFactory()
self.debate = DebateFactory(site=self.site)
self.page1_content = 'About the site'
self.page1 = FlatPage(url='/{}/about/'.format(self.debate.prefix),
title='About',
content=self.page1_content)
self.page1.save()
self.page1.sites.add(self.site)
self.page2_content = '[An embedded video]'
self.page2 = FlatPage(url='/{}/watch/'.format(self.debate.prefix),
title='Watch Now!',
content=self.page2_content)
self.page2.save()
self.page2.sites.add(self.site)
FlatPageMetadataOverride(page=self.page2).save()
def tearDown(self):
Site.objects.clear_cache()
def test_metadata_not_overridden(self):
rsp = self.client.get(self.page1.url)
self.assertContains(rsp, self.page1_content)
self.assertContains(rsp, escape(site_defaults.FACEBOOK_SITE_TITLE))
self.assertContains(rsp, escape(site_defaults.FACEBOOK_SITE_DESCRIPTION))
self.assertContains(rsp, escape(site_defaults.FACEBOOK_IMAGE))
def test_default_metadata_overrides(self):
rsp = self.client.get(self.page2.url)
self.assertContains(rsp, self.page2_content)
self.assertNotContains(rsp, escape(site_defaults.FACEBOOK_SITE_TITLE))
self.assertNotContains(rsp, escape(site_defaults.FACEBOOK_SITE_DESCRIPTION))
self.assertNotContains(rsp, escape(site_defaults.FACEBOOK_IMAGE))
self.assertNotContains(rsp, escape(site_defaults.TWITTER_IMAGE))
self.assertContains(rsp, escape(site_defaults.FLATPAGE_FACEBOOK_TITLE))
self.assertContains(rsp, escape(site_defaults.FLATPAGE_FACEBOOK_DESCRIPTION))
self.assertContains(rsp, escape(site_defaults.FLATPAGE_FACEBOOK_IMAGE))
self.assertContains(rsp, escape(site_defaults.FLATPAGE_TWITTER_IMAGE))
def test_custom_metadata_overrides(self):
FlatPageMetadataOverride(
page=self.page1,
facebook_title='Foo! Foo! Foo!',
twitter_description='lorem ipsum dolor sit amet').save()
rsp = self.client.get(self.page1.url)
self.assertContains(rsp, escape('Foo! Foo! Foo!'))
self.assertContains(rsp, escape('lorem ipsum dolor sit amet'))
self.assertContains(rsp, escape(site_defaults.FLATPAGE_TWITTER_IMAGE))
```
#### File: opendebates/tests/test_models.py
```python
from django.contrib.sites.models import Site
from django.test import TestCase
import urlparse
from .factories import UserFactory, VoterFactory, SubmissionFactory, SiteFactory, DebateFactory
class SubmissionReallyAbsoluteUrlTest(TestCase):
def setUp(self):
self.site = SiteFactory()
self.debate = DebateFactory(site=self.site)
self.submission = SubmissionFactory()
self.site.domain = 'example.net'
self.site.save()
self.id = self.submission.id
def tearDown(self):
Site.objects.clear_cache()
def test_default(self):
self.assertEqual(
'https://testserver/%s/questions/%s/vote/' % (
self.debate.prefix, self.id),
self.submission.really_absolute_url())
def test_source(self):
url = self.submission.really_absolute_url("fb")
# When a source is included, nothing before querystring is affected
self.assertEqual(self.submission.really_absolute_url(),
url.split('?')[0])
# The query string will include a ?source parameter prefixed with share-
# that includes both the platform we are sharing on, and the question ID
parsed = urlparse.urlparse(url)
self.assertEqual(parsed.query, 'source=share-fb-%s' % self.id)
parsed = urlparse.urlparse(self.submission.really_absolute_url('foo'))
self.assertEqual(parsed.query, 'source=share-foo-%s' % self.id)
class VoterUserDisplayNameTest(TestCase):
def setUp(self):
self.site = SiteFactory()
self.debate = DebateFactory(site=self.site)
def tearDown(self):
Site.objects.clear_cache()
def test_anonymous(self):
voter = VoterFactory(user=None)
self.assertEqual('Somebody', str(voter.user_display_name()))
def test_user_blank(self):
user = UserFactory(first_name='', last_name='')
voter = VoterFactory(user=user)
self.assertEqual('Somebody', str(voter.user_display_name()))
def test_user_no_last_name(self):
user = UserFactory(first_name='George', last_name='')
voter = VoterFactory(user=user)
self.assertEqual('George', str(voter.user_display_name()))
def test_user_both_names(self):
user = UserFactory(first_name='George', last_name='Washington')
voter = VoterFactory(user=user)
self.assertEqual('<NAME>.', str(voter.user_display_name()))
def test_user_with_state(self):
user = UserFactory(first_name='George', last_name='Washington')
voter = VoterFactory(user=user, state='VA')
self.assertEqual('<NAME>. from VA', str(voter.user_display_name()))
def test_user_with_explicit_display_name(self):
user = UserFactory(first_name='George', last_name='Washington')
voter = VoterFactory(user=user, display_name='Prez1')
self.assertEqual('Prez1', str(voter.user_display_name()))
def test_voter_with_explicit_display_name_with_state(self):
user = UserFactory(first_name='George', last_name='Washington')
voter = VoterFactory(user=user, display_name='Prez1', state='VA')
self.assertEqual('Prez1 from VA', str(voter.user_display_name()))
```
#### File: opendebates/tests/test_sharing.py
```python
from functools import partial
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.utils.html import escape
from opendebates.tests.factories import SubmissionFactory, SiteFactory, DebateFactory
# Force the reverse() used here in the tests to always use the full
# urlconf, despite whatever machinations have taken place due to the
# DebateMiddleware.
old_reverse = reverse
reverse = partial(old_reverse, urlconf='opendebates.urls')
class FacebookTest(TestCase):
def setUp(self):
self.site = SiteFactory()
self.debate = DebateFactory(site=self.site)
def tearDown(self):
Site.objects.clear_cache()
def test_facebook_site(self):
rsp = self.client.get(reverse('list_ideas', kwargs={'prefix': self.debate.prefix}))
self.assertContains(
rsp,
'<meta property="og:url" content="http://%s/%s"/>' % (
self.debate.site.domain, self.debate.prefix)
)
self.assertContains(
rsp,
'<meta property="og:type" content="%s"/>' % 'website'
)
self.assertContains(
rsp,
'<meta property="og:title" content="%s"/>'
% escape(self.debate.facebook_site_title)
)
self.assertContains(
rsp,
'<meta property="og:description" content="%s"/>'
% escape(self.debate.facebook_site_description)
)
self.assertContains(
rsp,
'<meta property="og:image" content="%s"/>' % self.debate.facebook_image
)
def test_facebook_question(self):
question = SubmissionFactory(idea="Bogus & Broken")
rsp = self.client.get(question.get_absolute_url())
self.assertContains(
rsp,
'<meta property="og:url" content="%s"/>' % question.really_absolute_url()
)
self.assertContains(
rsp,
'<meta property="og:type" content="%s"/>' % 'website'
)
self.assertContains(
rsp,
'<meta property="og:title" content="%s"/>'
% escape(self.debate.facebook_question_title)
)
self.assertContains(
rsp,
'<meta property="og:description" content="%s"/>'
% escape(self.debate.facebook_question_description
.format(idea=question.idea))
)
self.assertContains(
rsp,
'<meta property="og:image" content="%s"/>' % self.debate.facebook_image
)
def test_facebook_title(self):
question = SubmissionFactory(idea="Bogus & Broken")
self.assertEqual(
self.debate.facebook_question_title.format(idea=question.idea),
question.facebook_title()
)
def test_facebook_description(self):
question = SubmissionFactory(idea="Bogus & Broken")
self.assertEqual(
self.debate.facebook_question_description.format(idea=question.idea),
question.facebook_description()
)
class TwitterTest(TestCase):
def setUp(self):
self.site = SiteFactory()
self.debate = DebateFactory(site=self.site)
def tearDown(self):
Site.objects.clear_cache()
def test_twitter_site_card(self):
rsp = self.client.get(reverse('list_ideas', kwargs={'prefix': self.debate.prefix}))
self.assertContains(rsp, '<meta name="twitter:card" content="summary_large_image">')
self.assertContains(rsp,
'<meta name="twitter:title" content="%s">'
% escape(self.debate.twitter_site_title))
self.assertContains(rsp,
'<meta name="twitter:description" content="%s">'
% escape(self.debate.twitter_site_description))
self.assertContains(
rsp,
'<meta name="twitter:image" content="%s">' % self.debate.twitter_image
)
def test_twitter_question_card(self):
question = SubmissionFactory(idea="Bogus & Broken")
rsp = self.client.get(question.get_absolute_url())
self.assertContains(rsp, '<meta name="twitter:card" content="summary_large_image">')
self.assertContains(rsp,
'<meta name="twitter:title" content="%s">'
% escape(question.twitter_title()))
self.assertContains(rsp,
'<meta name="twitter:description" content="%s">'
% escape(question.twitter_description()))
self.assertContains(
rsp,
'<meta name="twitter:image" content="%s">' % self.debate.twitter_image
)
def test_twitter_title(self):
question = SubmissionFactory(idea="Bogus & Broken")
self.assertEqual(
self.debate.twitter_question_title.format(idea=question.idea),
question.twitter_title()
)
def test_twitter_description(self):
question = SubmissionFactory(idea="Bogus & Broken")
self.assertEqual(
self.debate.twitter_question_description.format(idea=question.idea),
question.twitter_description()
)
```
|
{
"source": "jessamynsmith/eggtimer-server",
"score": 2
}
|
#### File: eggtimer-server/periods/serializers.py
```python
import django_filters
from rest_framework import serializers
from periods import models as period_models
class NullableEnumField(serializers.ChoiceField):
"""
Field that handles empty entries for EnumFields
"""
def __init__(self, enum, **kwargs):
super(NullableEnumField, self).__init__(enum.choices(), allow_blank=True, required=False)
def to_internal_value(self, data):
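        # Treat an empty string submitted on the form as "no value" rather than an invalid choice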
if data == '' and self.allow_blank:
return None
return super(NullableEnumField, self).to_internal_value(data)
class FlowEventSerializer(serializers.ModelSerializer):
clots = NullableEnumField(period_models.ClotSize)
cramps = NullableEnumField(period_models.CrampLevel)
class Meta:
model = period_models.FlowEvent
exclude = ('user',)
class FlowEventFilter(django_filters.FilterSet):
min_timestamp = django_filters.DateTimeFilter(name="timestamp", lookup_type='gte')
max_timestamp = django_filters.DateTimeFilter(name="timestamp", lookup_type='lte')
class Meta:
model = period_models.FlowEvent
fields = ('min_timestamp', 'max_timestamp')
class StatisticsSerializer(serializers.ModelSerializer):
class Meta:
model = period_models.Statistics
fields = ('average_cycle_length', 'predicted_events', 'first_date', 'first_day')
```
#### File: periods/tests/test_helpers.py
```python
from django.test import TestCase
from periods import helpers
class TestGetFullDomain(TestCase):
def test_http(self):
result = helpers.get_full_domain()
self.assertEqual('http://example.com', result)
def test_https(self):
with self.settings(SECURE_SSL_REDIRECT=True):
result = helpers.get_full_domain()
self.assertEqual('https://example.com', result)
```
|
{
"source": "jessamynsmith/fontbakery",
"score": 2
}
|
#### File: bakery_cli/pipe/pyfontaine.py
```python
import codecs
import os.path as op
from fontaine.cmap import Library
from fontaine.builder import Builder, Director
from bakery_cli.utils import UpstreamDirectory
def targettask(pyfontaine, pipedata, task):
try:
library = Library(collections=['subsets'])
director = Director(_library=library)
sourcedir = op.join(pyfontaine.builddir)
directory = UpstreamDirectory(sourcedir)
fonts = []
for font in directory.ALL_FONTS:
if font.startswith('sources'):
continue
fonts.append(op.join(pyfontaine.builddir, font))
_ = ('fontaine --collections subsets --text %s'
' > fontaine.txt\n') % ' '.join(fonts)
pyfontaine.bakery.logging_cmd(_)
fontaine_log = op.join(pyfontaine.builddir, 'fontaine.txt')
fp = codecs.open(fontaine_log, 'w', 'utf-8')
result = Builder.text_(director.construct_tree(fonts))
fp.write(result.output)
pyfontaine.bakery.logging_raw('end of pyfontaine process\n')
except Exception as ex:
pyfontaine.bakery.logging_raw('pyfontaine error: {}'.format(ex))
pyfontaine.bakery.logging_raw('pyfontaine process has been failed\n')
class PyFontaine(object):
def __init__(self, bakery):
self.project_root = bakery.project_root
self.builddir = bakery.build_dir
self.bakery = bakery
def execute(self, pipedata):
task = self.bakery.logging_task('pyfontaine')
if self.bakery.forcerun:
return
targettask(self, pipedata, task)
```
#### File: fontbakery/tests/test_metadata.py
```python
import types
import unittest
from bakery_lint.metadata import Metadata, FamilyMetadata, FontMetadata
class MetadataTestCase(unittest.TestCase):
def test_family_metadata_is_loaded(self):
""" Check if Metadata can read family metadata correctly """
fm = Metadata.get_family_metadata('{"name": "Family Name"}')
self.assertEqual(type(fm), FamilyMetadata)
self.assertEqual(fm.name, "Family Name")
self.assertEqual(fm.designer, "")
self.assertEqual(fm.license, "")
self.assertEqual(fm.visibility, "Sandbox")
self.assertEqual(fm.category, "")
self.assertEqual(fm.size, 0)
self.assertEqual(fm.date_added, "")
self.assertEqual(fm.subsets, [])
def test_font_metadata_is_loaded(self):
""" Check if font metadata can be read from family metadata """
fm = Metadata.get_family_metadata(
'{"name": "Family Name", "fonts": [{"name": "FontName"}]}')
fonts_metadata = fm.fonts
self.assertEqual(type(fonts_metadata), types.GeneratorType)
fm = fonts_metadata.next()
self.assertEqual(type(fm), FontMetadata)
self.assertEqual(fm.name, "FontName")
self.assertEqual(fm.post_script_name, "")
self.assertEqual(fm.full_name, "")
self.assertEqual(fm.style, "normal")
self.assertEqual(fm.weight, 400)
self.assertEqual(fm.filename, "")
self.assertEqual(fm.copyright, "")
```
#### File: fontbakery/tools/fontbakery-build.py
```python
from __future__ import print_function
import argparse
import logging
import os
import os.path as op
import sys
import yaml
try:
import git
from git import Repo
GITPYTHON_INSTALLED = True
except ImportError:
GITPYTHON_INSTALLED = False
from bakery_cli.bakery import Bakery, BAKERY_CONFIGURATION_DEFAULTS
from bakery_cli.logger import logger
from bakery_cli.utils import UpstreamDirectory, ttfautohint_installed
def create_bakery_config(bakery_yml_file, data):
if not op.exists(op.dirname(bakery_yml_file)):
os.makedirs(op.dirname(bakery_yml_file))
data = {k: data[k] for k in data if data[k]}
    with open(bakery_yml_file, 'w') as f:
        f.write(yaml.safe_dump(data))
def run_bakery(path, verbose=False):
# fontbakery-build supports passing arguments of directory or
# concrete bakery.y[a]ml files. In case of passing directory
# it looks at existing bakery.yml or bakery.yaml and runs on
# first matched filepath
# There can also be cases when directory does not contain any
# bakery.y[a]ml or passed bakery.yml file does not exist. Then
# fontbakery-build loads default configuration.
bakery_yml_file = None
sourcedir = path
if os.path.isdir(path):
for filename in ['bakery.yml', 'bakery.yaml']:
if os.path.exists(os.path.join(path, filename)):
bakery_yml_file = os.path.join(path, filename)
break
else:
bakery_yml_file = path
sourcedir = os.path.dirname(path)
try:
if bakery_yml_file:
config = yaml.safe_load(open(bakery_yml_file, 'r'))
else:
raise IOError
except IOError:
bakery_yml_file = os.path.join(sourcedir, 'bakery.yml')
config = yaml.safe_load(open(BAKERY_CONFIGURATION_DEFAULTS))
try:
builddir = 'build'
if GITPYTHON_INSTALLED:
try:
repo = Repo(sourcedir)
builddir = repo.git.rev_parse('HEAD', short=True)
except git.exc.InvalidGitRepositoryError:
pass
builddir = os.environ.get('TRAVIS_COMMIT', builddir)
if 'process_files' not in config:
directory = UpstreamDirectory(sourcedir)
# normalize process_files path
config['process_files'] = directory.get_fonts()
create_bakery_config(bakery_yml_file, config)
b = Bakery('', sourcedir, 'builds', builddir)
b.addLoggingToFile()
b.load_config(bakery_yml_file)
b.run()
if not ttfautohint_installed():
msg = ('Command line tool `ttfautohint` is required. Install it with'
' `apt-get install ttfautohint` or `brew install ttfautohint`')
logger.error(msg)
except:
if verbose or config.get('verbose'):
raise
sys.exit(1)
if __name__ == '__main__':
description = ('Builds projects specified by bakery.yml file(s).'
' Output is in project/builds/commit/')
parser = argparse.ArgumentParser(description=description)
parser.add_argument('path', metavar='DIRECTORY OR BAKERY.YML', nargs='+',
help="Directory or path to bakery.y[a]ml to run"
" bakery build process on")
parser.add_argument('--verbose', default=False, action='store_true')
args = parser.parse_args()
if args.verbose:
logger.setLevel(logging.INFO)
for p in args.path:
run_bakery(os.path.abspath(p), verbose=args.verbose)
```
|
{
"source": "jessamynsmith/quotations",
"score": 2
}
|
#### File: quotations/libs/query_set.py
```python
def get_random(query_set):
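    # order_by('?') asks the database to shuffle the rows; simple, but it can be slow on large tables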
return query_set.order_by('?')
```
|
{
"source": "jessamynsmith/talkbackbot",
"score": 2
}
|
#### File: talkbackbot/talkback/bot.py
```python
import logging
from twisted.words.protocols import irc
from twisted.internet import protocol
from file_quotation_selector import FileQuotationSelector
from url_quotation_selector import UrlQuotationSelector
class TalkBackBot(irc.IRCClient):
def connectionMade(self):
irc.IRCClient.connectionMade(self)
logging.info("connectionMade")
def connectionLost(self, reason):
irc.IRCClient.connectionLost(self, reason)
logging.info("connectionLost")
# callbacks for events
def signedOn(self):
"""Called when bot has successfully signed on to server."""
logging.info("Signed on")
self.join(self.factory.channel)
def joined(self, channel):
"""This will get called when the bot joins the channel."""
logging.info("[%s has joined %s]"
% (self.nickname, self.factory.channel))
def privmsg(self, user, channel, msg):
"""This will get called when the bot receives a message."""
trigger_found = None
send_to = channel
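        # A private message arrives with the "channel" set to the bot's own
        # nickname, so reply directly to the sender instead of the channel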
if self.factory.settings.NICKNAME.startswith(channel) or \
channel.startswith(self.factory.settings.NICKNAME):
trigger_found = True
send_to = user.split('!')[0]
else:
for trigger in self.factory.settings.TRIGGERS:
if msg.lower().find(trigger) >= 0:
trigger_found = trigger
break
if trigger_found:
quote = self.factory.quotation.select()
logging.info("got quote:\n\t%s" % quote)
self.msg(send_to, quote)
logging.info("sent message to '%s'" % send_to)
class TalkBackBotFactory(protocol.ClientFactory):
def __init__(self, settings):
self.settings = settings
self.channel = self.settings.CHANNEL
if hasattr(settings, 'QUOTES_FILE') and settings.QUOTES_FILE:
self.quotation = FileQuotationSelector(self.settings)
elif hasattr(settings, 'QUOTES_URL'):
self.quotation = UrlQuotationSelector(self.settings)
else:
raise AttributeError('Must specify either QUOTES_URL or QUOTES_FILE in settings')
def buildProtocol(self, addr):
bot = TalkBackBot()
bot.factory = self
bot.nickname = self.settings.NICKNAME
bot.realname = self.settings.REALNAME
return bot
def clientConnectionLost(self, connector, reason):
logging.info("connection lost, reconnecting")
connector.connect()
def clientConnectionFailed(self, connector, reason):
logging.info("connection failed: %s" % reason)
connector.connect()
```
#### File: talkbackbot/talkback/file_quotation_selector.py
```python
from random import choice
class FileQuotationSelector(object):
def __init__(self, settings):
with open(settings.QUOTES_FILE) as quotes_file:
self.quotes = quotes_file.readlines()
def select(self):
return choice(self.quotes).strip()
```
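A minimal usage sketch for the file-based selector, mirroring how the tests in this repo build a throwaway settings module (the quotes file path is an illustrative assumption):
```python
# Sketch only - the settings module and quotes file path are assumptions.
import types

from talkback.file_quotation_selector import FileQuotationSelector

settings = types.ModuleType('example_settings')
settings.QUOTES_FILE = 'quotes.txt'  # plain-text file, one quotation per line

selector = FileQuotationSelector(settings)
print(selector.select())  # one randomly chosen line, stripped of whitespace
```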
#### File: talkbackbot/tests/test_url_quotation_selector.py
```python
from mock import MagicMock, patch
import types
import unittest
from talkback.url_quotation_selector import UrlQuotationSelector
class TestUrlQuotationSelector(unittest.TestCase):
def setUp(self):
settings = types.ModuleType('test_url_settings')
settings.QUOTES_URL = "https://example.com/api/v2/quotations/?limit=1"
self.selector = UrlQuotationSelector(settings)
@patch('requests.get')
def test_select_failure(self, mock_get):
mock_get.return_value = MagicMock(status_code=500)
quote = self.selector.select()
mock_get.assert_called_once_with('https://example.com/api/v2/quotations/?limit=1')
self.assertEqual(None, quote)
@patch('requests.get')
def test_select_response_not_json(self, mock_get):
mock_json = MagicMock(side_effect=ValueError())
mock_get.return_value = MagicMock(status_code=200, json=mock_json)
quote = self.selector.select()
mock_get.assert_called_once_with('https://example.com/api/v2/quotations/?limit=1')
self.assertEqual(None, quote)
@patch('requests.get')
def test_select_invalid_response_format(self, mock_get):
mock_json = MagicMock(return_value={'text': 'Hi!', 'author': 'An'})
mock_get.return_value = MagicMock(status_code=200, json=mock_json)
quote = self.selector.select()
mock_get.assert_called_once_with('https://example.com/api/v2/quotations/?limit=1')
self.assertEqual(None, quote)
@patch('requests.get')
def test_select_success(self, mock_get):
mock_json = MagicMock(return_value={'results': [{'text': 'Hi!', 'author': 'An'}]})
mock_get.return_value = MagicMock(status_code=200, json=mock_json)
quote = self.selector.select()
mock_get.assert_called_once_with('https://example.com/api/v2/quotations/?limit=1')
self.assertEqual('Hi! ~ An', quote)
self.assertEqual(str, type(quote))
```
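These tests pin down the selector's contract: GET the configured URL once, return `None` on a non-200 status, unparseable JSON, or a response missing the expected `results` list, and otherwise format the first result as `text ~ author`. The module itself is not included in this dump; a minimal sketch consistent with the tests might look like:
```python
import requests

class UrlQuotationSelector(object):
    def __init__(self, settings):
        self.url = settings.QUOTES_URL

    def select(self):
        response = requests.get(self.url)
        if response.status_code != 200:
            return None
        try:
            result = response.json()['results'][0]
            return '%s ~ %s' % (result['text'], result['author'])
        except (ValueError, KeyError, IndexError):
            # body was not JSON, or it lacked the expected results structure
            return None
```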
|
{
"source": "jessanc/stonkBot",
"score": 3
}
|
#### File: stonkBot/Depreciated Functions, etc/old-algo13fxns.py
```python
import json, os, re, threading, time
import datetime as dt
import requests
from pandas import read_html
from matplotlib import pyplot as plt
#deprecated functions from algo13.py that are no longer used
#note: these rely on module-level globals (stockDir, apiKeys, someSettings, gainers) and an alpaca helper module imported elsewhere as `a`
#get list of common penny stocks under $price and sorted by gainers (up) or losers (down) - source: stockunder1.org
def getPennies(price=1,updown="up"):
url = 'https://stocksunder1.org/nasdaq-penny-stocks/'
# url = 'https://stocksunder1.org/nasdaq-stocks-under-1/' #alt url that can be used
while True:
try:
html = requests.post(url, params={"price":price,"volume":0,"updown":updown}).content
break
except Exception:
print("No connection, or other error encountered. Trying again...")
time.sleep(3)
continue
tableList = read_html(html)
# print(tableList)
try:
symList = tableList[5][0:]['Symbol']
except Exception:
        symList = tableList[5][1:][0] #this keeps changing (possibly intentionally - possibly due to switching between windows and linux?)
symList = [re.sub(r'\W+','',e.replace(' predictions','')) for e in symList] #strip "predictions" and any non alphanumerics
# print(tableList[5][0:]['Symbol'])
return symList
#not really OBE, but not really used
#gets a list of volatile stocks using criteria outlined here: https://stocksunder1.org/how-to-trade-penny-stocks/
def getVolatile(lbound=0.8, ubound=5,minPercChange=30, minVol=8000000):
url = 'https://www.marketwatch.com/tools/stockresearch/screener/results.asp'
params = {"submit":"Screen",
"Symbol":"true",
"ChangePct":"true",
"CompanyName":"false",
"Volume":"true",
"Price":"true",
"Change":"false",
"SortyBy":"Symbol",
"SortDirection":"Ascending",
"ResultsPerPage":"OneHundred",
"TradesShareEnable":"true",
"TradesShareMin":str(lbound),
"TradesShareMax":str(ubound),
"PriceDirEnable":"true",
"PriceDir":"Up",
"PriceDirPct":str(minPercChange),
"TradeVolEnable":"true",
"TradeVolMin":str(minVol),
"TradeVolMax":"",
"Exchange":"NASDAQ",
"IndustryEnable":"false",
"MoreInfo":"false"}
while True:
try:
html = requests.post(url, params=params).content
break
except Exception:
print("No connection, or other error encountered. Trying again...")
time.sleep(3)
continue
tableList = read_html(html)
symList = tableList[0].transpose().to_dict() #transpose for organization, dictionary to have all wanted data
return symList
#function to sim stocks that have already peaked and return the good ones - OBE
def simPast(symList):
'''
the idea is to look at what happens in the following days after a big jump and trade accordingly
'''
global apiKeys
global someSettings
#generate data files for each stock
print("Getting stock data...")
winners = {}
for i,symb in enumerate(symList):
print("("+str(i+1)+"/"+str(len(symList))+") "+symb)
if(not os.path.isfile(stockDir+symb+".txt")):
url = apiKeys["ALPHAVANTAGEURL"]
params= { # NOTE: the information is also available as CSV which would be more efficient
'apikey' : apiKeys["ALPHAVANTAGEKEY"],
'function' : 'TIME_SERIES_DAILY', #daily resolution (open, high, low, close, volume)
'symbol' : symb, #ticker symbol
'outputsize' : 'full' #up to 20yrs of data
}
while True:
try:
response = requests.request('GET', url, params=params).text #send request and store response
stonkData = json.loads(response) #read in as json data
dateData = stonkData[list(stonkData.keys())[1]] #time series (daily) - index 0=meta data, 1=stock data
break
except Exception:
print("No connection, or other error encountered. Trying again...")
time.sleep(3)
continue
            if(i<len(symList)-1): #only delay if it's not the last one
time.sleep(12.5) #max requests of 5 per minute for free alphavantage account, delay to stay under that limit
out = open(stockDir+symb+'.txt','w') #write to file for later usage
out.write(response)
out.close()
#gather info about single stock
stonkFile = open(stockDir+symb+'.txt','r') #open the file containing stonk data
stonkData = json.loads(stonkFile.read()) #read in as json data
stonkFile.close()
dateData = stonkData[list(stonkData.keys())[1]] #time series (daily) - index 0=meta data, 1=stock data
period = min(someSettings['periodLength'],len(dateData)-1) #how long for period
dates = [e for e in dateData]
        lows = [max(float(dateData[e]['3. low']),0.0000001) for e in dateData] #must not be 0 due to being a divisor
        highs = [float(dateData[e]['2. high']) for e in dateData]
        opens = [max(float(dateData[e]['1. open']),0.0000001) for e in dateData] #must not be 0 due to being a divisor
closes = [float(dateData[e]['4. close']) for e in dateData]
volumes = [float(dateData[e]['5. volume']) for e in dateData]
volatility = [(highs[i]-lows[i])/(lows[i]) for i in range(len(lows))] #this isn't the real volatility measurement, but it's good enough for me - vol = 1 means price doubled, 0 = no change
        delDayRatio = [(closes[i]-opens[i])/(closes[i+1]) for i in range(len(closes)-1)] #change over the day, normalized to the previous day's close (data runs newest to oldest)
#start sim here
startDate = someSettings['periodLength']-1 #here we're looking for the most recent big jump - init at least 1 period length ago
'''
the following conditions should be true when asking if the date should be skipped (in order as they appear in the while statement):
make sure we're in range
        arbitrary volatility of the day - higher = more volatility in a given day (volImpulse is minimum volatility to have)
        look only for positive daily changes
        the difference between today's (startDate-1) change and yesterday's must be sufficiently large (and negative) to constitute underdamped oscillation - at least 1/2 of original
'''
while startDate<len(volatility)-2 and\
(volatility[startDate]<someSettings['volImpulse'] or\
(delDayRatio[startDate]<.25 or\
(delDayRatio[startDate-1]-delDayRatio[startDate])>-.75\
)\
):
startDate += 1
# start data analysis here
if(startDate<len(volatility)-2 and startDate<90 and closes[startDate-1]>closes[startDate]): #only show info if the jump happened in the past year/few months (ignore if it reaches the end)
for i in range(startDate,startDate-someSettings['periodLength'],-1):
print(dates[i]+" - "+str(round(volatility[i],2))+" - "+str(opens[i])+" - "+str(round(delDayRatio[i]-delDayRatio[i+1],2)))
#symbols that show up in the graph/meet the conditions
winners[symb] = {"volatility":volatility[startDate],
"startDelDayRatio":delDayRatio[startDate]-delDayRatio[startDate+1],
"nextDelDayRatio":delDayRatio[startDate-1]-delDayRatio[startDate],
"diff":(delDayRatio[startDate]-delDayRatio[startDate+1])-(delDayRatio[startDate-1]-delDayRatio[startDate])}
# plt.figure(1)
# plt.subplot(211)
# plt.plot([delDayRatio[i]-delDayRatio[i+1] for i in range(startDate,startDate-someSettings['periodLength'],-1)], label=symb)
# plt.title("today-yesterday delDayRatio ((close-open)/open)")
# plt.legend(loc='right')
#
# plt.subplot(212)
# plt.plot([volatility[i] for i in range(startDate,startDate-someSettings['periodLength'],-1)], label=symb)
# plt.title("volatility ((high-low)/low)")
# plt.legend(loc='right')
plt.figure(2)
# plt.plot([delDayRatio[i]-delDayRatio[i+1] for i in range(startDate,startDate-someSettings['periodLength'],-1)], label=symb)
plt.plot([closes[i]/closes[startDate] for i in range(startDate+80, startDate-someSettings['periodLength'],-1)], label=symb)
# plt.title("today-yesterday delDayRatio ((close-open)/close-1)")
plt.legend(loc='right')
# print('\n\n')
sortedSyms = sorted(list(winners.keys()), key=lambda k: float(winners[k]['diff']))[::-1]
# print(sortedSyms)
plt.show()
return sortedSyms
# return if a stock should be put on a watchlist - OBE
# https://stocksunder1.org/how-to-trade-penny-stocks/
def presentList(symList):
global apiKeys
global someSettings
validBuys = {}
#TODO: check date, market last open date, etc - how many trading days since initial bump
for i,symb in enumerate(symList):
print("("+str(i+1)+"/"+str(len(symList))+") "+symb)
if(not os.path.isfile(stockDir+symb+".txt")):
url = apiKeys["ALPHAVANTAGEURL"]
params= { # NOTE: the information is also available as CSV which would be more efficient
'apikey' : apiKeys["ALPHAVANTAGEKEY"],
'function' : 'TIME_SERIES_DAILY', #daily resolution (open, high, low, close, volume)
'symbol' : symb, #ticker symbol
'outputsize' : 'full' #up to 20yrs of data
}
while True:
try:
response = requests.request('GET', url, params=params).text #send request and store response
break
except Exception:
print("No connection, or other error encountered. Trying again...")
time.sleep(3)
continue
if(len(symList)>=5):
time.sleep(11) #max requests of 5 per minute for free alphavantage account, delay to stay under that limit
out = open(stockDir+symb+'.txt','w') #write to file for later usage
out.write(response)
out.close()
#calc price % diff over past 20 days (current price/price of day n) - current must be >= 80% for any
#calc volume % diff over average past some days (~60 days?) - must be sufficiently higher (~300% higher?)
#TODO: clean up the indexing in here - this looks gross and I think it can be improved
dateData = json.loads(open(stockDir+symb+".txt","r").read()) #dictionary of all data returned from AV
dateData = dateData[list(dateData)[1]] #dict without the metadata - just the date data
volAvgDays = min(60,len(list(dateData))) #arbitrary number to avg volumes over
checkPriceDays = 20 #check if the price jumped substantially over the last __ days
checkPriceAmt = 1.7 #check if the price jumped by this amount in the above days (% - i.e 1.5 = 150%)
        volGain = 3 #check if the volume increased by this amount (i.e. 3 = 300% or 3x)
avgVol = sum([int(dateData[list(dateData)[i]]['5. volume']) for i in range(volAvgDays)])/volAvgDays #avg of volumes over a few days
lastVol = int(dateData[list(dateData)[0]]['5. volume']) #the latest volume
lastPrice = float(dateData[list(dateData)[0]]['2. high']) #the latest highest price
validBuys[symb] = "Do Not Watch"
if(lastVol/avgVol>volGain): #much larger than normal volume
dayPrice = lastPrice
i = 1
while(i<=checkPriceDays and lastPrice/dayPrice<checkPriceAmt): #
dayPrice = float(dateData[list(dateData)[i]]['2. high'])
# print(str(i)+" - "+str(lastPrice/dayPrice))
i += 1
if(lastPrice/dayPrice>=checkPriceAmt):
validBuys[symb] = "Watch"
#save ?
# f = open(stockDir+symb+"--"+str(dt.date.today())+".txt","w")
# f.write(
return validBuys #return a dict of whether a stock is a valid purchase or not
#basically do what presentList is doing, but like, better... - OBE
def getGainers_old(symList):
global apiKeys
global someSettings
validBuys = {}
#TODO: check date, market last open date, etc - how many trading days since initial bump
for i,symb in enumerate(symList):
print("("+str(i+1)+"/"+str(len(symList))+") "+symb)
if(os.path.isfile(stockDir+symb+".txt")): #if a file exists
dateData = json.loads(open(stockDir+symb+".txt","r").read()) #read it
if((dt.date.today()-dt.datetime.fromtimestamp(os.stat(stockDir+symb+".txt").st_mtime).date()).days>0): #if the last time it was pulled was more than __ days ago
os.remove(stockDir+symb+".txt") #delete it
if(not os.path.isfile(stockDir+symb+".txt")): #if the file doesn't exist
url = apiKeys["ALPHAVANTAGEURL"]
params= { # NOTE: the information is also available as CSV which would be more efficient
'apikey' : apiKeys["ALPHAVANTAGEKEY"],
'function' : 'TIME_SERIES_DAILY', #daily resolution (open, high, low, close, volume)
'symbol' : symb, #ticker symbol
'outputsize' : 'compact' #compact=last 100 days, full=up to 20 years
}
while True:
try:
response = requests.request('GET', url, params=params).text #send request and store response
dateData = json.loads(response) #dictionary of all data returned from AV
dateData = dateData[list(dateData)[1]] #dict without the metadata - just the date data
break
except Exception:
print("No connection, or other error encountered. Trying again...")
time.sleep(3)
continue
out = open(stockDir+symb+'.txt','w') #write to file for later usage
out.write(response)
out.close()
if(len(symList)>=5 and i<len(symList)-1):
time.sleep(12.5) #max requests of 5 per minute for free alphavantage account, delay to stay under that limit
#calc price % diff over past 20 days (current price/price of day n) - current must be >= 80% for any
#calc volume % diff over average past some days (~60 days?) - must be sufficiently higher (~300% higher?)
#TODO: clean up the indexing in here - this looks gross and I think it can be improved
dateData = json.loads(open(stockDir+symb+".txt","r").read()) #dictionary of all data returned from AV
dateData = dateData[list(dateData)[1]] #dict without the metadata - just the date data
days2wait4fall = 3 #wait for stock price to fall for this many days
startDate = days2wait4fall+1 #add 1 to account for the jump day itself
days2look = 25 #look back this far for a jump
firstJumpAmt = 1.3 #stock first must jump by this amount (1.3=130% over 1 day)
sellUp = 1.25 #% to sell up at
sellDn = 0.5 #% to sell dn at
while(float(dateData[list(dateData)[startDate]]['4. close'])/float(dateData[list(dateData)[startDate+1]]['4. close'])<firstJumpAmt and startDate<min(days2look,len(dateData)-2)):
startDate += 1
#we know the date of the initial jump (startDate)
if(float(dateData[list(dateData)[startDate]]['4. close'])/float(dateData[list(dateData)[startDate+1]]['4. close'])>=firstJumpAmt):
#make sure that the jump happened in the time frame rather than too long ago
volAvgDays = min(60,len(list(dateData))) #arbitrary number to avg volumes over
checkPriceDays = 20 #check if the price jumped substantially over the last __ days
checkPriceAmt = 1.7 #check if the price jumped by this amount in the above days (% - i.e 1.5 = 150%)
            volGain = 3 #check if the volume increased by this amount (i.e. 3 = 300% or 3x, 0.5 = 50% or 0.5x)
volLoss = .5 #check if the volume decreases by this amount
priceDrop = .4 #price should drop this far when the volume drops
avgVol = sum([int(dateData[list(dateData)[i]]['5. volume']) for i in range(startDate,min(startDate+volAvgDays,len(dateData)))])/volAvgDays #avg of volumes over a few days
lastVol = int(dateData[list(dateData)[startDate]]['5. volume']) #the latest volume
lastPrice = float(dateData[list(dateData)[startDate]]['2. high']) #the latest highest price
if(lastVol/avgVol>volGain): #much larger than normal volume
#volume had to have gained
#if the next day's price has fallen significantly and the volume has also fallen
if(float(dateData[list(dateData)[startDate-days2wait4fall]]['2. high'])/lastPrice-1<priceDrop and int(dateData[list(dateData)[startDate-days2wait4fall]]['5. volume'])<=lastVol*volLoss):
#the jump happened, the volume gained, the next day's price and volumes have fallen
dayPrice = lastPrice
                    i = 1 #increment through days looking for a jump - start with 1 day before startDate
                    # check within the last few days, check the price has risen compared to the past several days, and we're within the valid timeframe
while(i<=checkPriceDays and lastPrice/dayPrice<checkPriceAmt and startDate+i<len(dateData)):
dayPrice = float(dateData[list(dateData)[startDate+i]]['2. high'])
i += 1
if(lastPrice/dayPrice>=checkPriceAmt):
#the price jumped compared to both the previous day and to the past few days, the volume gained, and the price and the volume both fell
#check to see if we missed the next jump (where we want to strike)
missedJump = False
for e in range(0,startDate):
diff = float(dateData[list(dateData)[e]]['4. close'])/float(dateData[list(dateData)[e+1]]['4. close'])
if(diff>=sellUp):
missedJump = True
if(not missedJump):
validBuys[symb] = list(dateData)[startDate] #return the stock and the date it initially jumped
return validBuys #return a dict of valid stocks and the date of their latest jump
#whether to buy a stock or not
def check2buy(latestTrades, minPortVal, reducedCash, reducedBuy, lowCash, lowBuy, minCash):
'''
original buy/sell logic:
- if cash<some amt (reduced cash mode)
- buy max of 10 unique from list
- else (standard mode) buy as many as we can off the list generated by the sim
- if cash<10 (low cash mode)
- buy max of 5 unique from list
- if portVal<5 (bottom out mode)
- error, sell all and stop trading
- stop loss at ~60%
- limit gain at ~25%
'''
'''
reducedCash = 100 #enter reduced cash mode if portfolio reaches under this amount
reducedBuy = 10 #buy this many unique stocks if in reduced cash mode
lowCash = 10 #enter low cash mode if portfolio reaches under this amount
lowBuy = 5 #buy this many unique stocks if in low cash mode
minCash = 1 #buy until this amt is left in buying power/cash balance
'''
global gainers
acct = a.getAcct()
gainers = [e for e in gainers if e not in [t.getName() for t in threading.enumerate()]] #remove stocks currently trying to be sold
portVal = float(acct['portfolio_value'])
buyPow = float(acct['buying_power'])
'''
add something like this:
if(buyPow >= maxPortVal):
buyPow = buyPow - cash2Hold
where maxPortVal is ~20k and cash2Hold is ~1k
'''
if(buyPow>reducedCash): #in normal operating mode
print("Normal Operation Mode. Available Buying Power: $"+str(buyPow))
#div cash over all gainers
for e in gainers:
if(a.isAlpacaTradable(e)):
curPrice = a.getPrice(e)
if(curPrice>0 and reducedBuy>0): #don't bother buying if the stock is invalid (no div0)
shares2buy = int((buyPow/reducedBuy)/curPrice)
try:
                        lastTradeDate = a.o.dt.datetime.strptime(latestTrades[e][0],'%Y-%m-%d').date()
                        lastTradeType = latestTrades[e][1]
except Exception:
lastTradeDate = a.o.dt.date.today()-a.o.dt.timedelta(1)
lastTradeType = "NA"
#check to make sure that we're not buying/selling on the same day
if(shares2buy>0 and (lastTradeDate<a.o.dt.date.today() or lastTradeType=="NA" or lastTradeType=="buy")):
print(a.createOrder("buy",shares2buy,e,"market","day"))
latestTrades[e] = [str(a.o.dt.date.today()), "buy"]
f = open("../stockStuff/latestTrades.json","w")
f.write(a.o.json.dumps(latestTrades, indent=2))
f.close()
else:
if(buyPow>lowCash): #in reduced cash mode
print("Reduced Cash Mode. Available Buying Power: $"+str(buyPow))
#div cash over $reducedBuy stocks
for i in range(min(reducedBuy,len(gainers))):
if(a.isAlpacaTradable(gainers[i])): #just skip it if it can't be traded
curPrice = a.getPrice(gainers[i])
if(curPrice>0 and reducedBuy>0): #don't bother buying if the stock is invalid
shares2buy = int((buyPow/reducedBuy)/curPrice)
try:
lastTradeDate = a.o.dt.datetime.strptime(latestTrades[gainers[i]][0],'%Y-%m-%d').date()
lastTradeType = latestTrades[gainers[i]][1]
except Exception:
lastTradeDate = a.o.dt.date.today()-a.o.dt.timedelta(1)
lastTradeType = "NA"
if(shares2buy>0 and (lastTradeDate<a.o.dt.date.today() or lastTradeType=="NA" or lastTradeType=="buy")):
print(a.createOrder("buy",shares2buy,gainers[i],"market","day"))
latestTrades[gainers[i]] = [str(a.o.dt.date.today()), "buy"]
f = open("../stockStuff/latestTrades.json","w")
f.write(a.o.json.dumps(latestTrades, indent=2))
f.close()
else:
if(buyPow>minCash): #in low cash mode
print("Low Cash Mode. Available Buying Power: $"+str(buyPow))
#div cash over $lowBuy cheapest stocks in list
for i in range(min(lowBuy,len(gainers))):
if(a.isAlpacaTradable(gainers[i])): #just skip it if it can't be traded
curPrice = a.getPrice(gainers[i])
if(curPrice>0): #don't bother buying if the stock is invalid
                            shares2buy = int((buyPow/lowBuy)/curPrice) #split buying power across the lowBuy cheapest picks
try:
lastTradeDate = a.o.dt.datetime.strptime(latestTrades[gainers[i]][0],'%Y-%m-%d').date()
lastTradeType = latestTrades[gainers[i]][1]
except Exception:
lastTradeDate = a.o.dt.date.today()-a.o.dt.timedelta(1)
lastTradeType = "NA"
if(shares2buy>0 and (lastTradeDate<a.o.dt.date.today() or lastTradeType=="NA" or lastTradeType=="buy")):
print(a.createOrder("buy",shares2buy,gainers[i],"market","day"))
latestTrades[gainers[i]] = [str(a.o.dt.date.today()), "buy"]
f = open("../stockStuff/latestTrades.json","w")
f.write(a.o.json.dumps(latestTrades, indent=2))
f.close()
else:
print("Buying power is less than minCash - Holding")
```
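The three cash modes in `check2buy` differ mainly in how many unique symbols the buying power is split across. A condensed sketch of that mode selection, using the source's threshold names (the helper function itself is hypothetical):
```python
def pick_buy_count(buy_pow, num_gainers, reduced_cash=100, reduced_buy=10,
                   low_cash=10, low_buy=5, min_cash=1):
    """Hypothetical helper: how many unique symbols to spread buying power over."""
    if buy_pow > reduced_cash:      # normal mode: spread over the whole gainers list
        return num_gainers
    if buy_pow > low_cash:          # reduced cash mode: cap at reduced_buy symbols
        return min(reduced_buy, num_gainers)
    if buy_pow > min_cash:          # low cash mode: cap at low_buy symbols
        return min(low_buy, num_gainers)
    return 0                        # below min_cash: hold instead of buying
```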
#### File: stonkBot/Depreciated Functions, etc/old-alpacafxns.py
```python
import json, random, time
import requests
from pandas import read_html
#note: POSURL, HEADERS, createOrder, getAcct, and getPrice are module-level names defined elsewhere in the original file
def getPennies(price=1,updown="up"):
url = 'https://stocksunder1.org/nasdaq-penny-stocks/'
while True:
try:
html = requests.post(url, params={"price":price,"volume":0,"updown":updown}).content
break
except Exception:
print("No connection, or other error encountered. Trying again...")
time.sleep(3)
continue
tableList = read_html(html)
try:
symList = tableList[5][1:][0]
symList = [e.replace(' predictions','') for e in symList]
return symList
except Exception:
return ["Error"]
#get list of volatile penny stocks under $price and sorted by gainers (up) or losers (down)
def getVolatilePennies(price=1,updown="up"):
url = 'https://stocksunder1.org/most-volatile-stocks/'
while True:
try:
html = requests.post(url, params={"price":price,"volume":0,"updown":updown}).content
break
except Exception:
print("No connection, or other error encountered. Trying again...")
time.sleep(3)
continue
tableList = read_html(html)
symList = tableList[2][0][5:]
symList = [e.replace(' predictions','') for e in symList]
return symList
#buy numToBuy different stocks from the symlist, sharesOfEach of each stock (e.g. buy 25 stocks from this list, and 10 of each)
def buyRandom(numToBuy, symList, sharesOfEach):
for i in range(numToBuy):
symbol = symList[random.randint(0,len(symList)-1)]
print(symbol)
print(createOrder("buy",str(sharesOfEach),symbol,"market","day"))
print("Done Buying.")
#buy the topmost symbols from the list
def buyFromTop(numToBuy, symList, sharesOfEach):
for i in range(numToBuy):
symbol = symList[min(i,len(symList)-1)]
print(symbol)
print(createOrder("buy",str(sharesOfEach),symbol,"market","day"))
print("Done Buying.")
#return the number of shares held of a given stock
def getShares(symb):
while True:
try:
s = json.loads(requests.get(POSURL+"/"+symb.upper(), headers=HEADERS).content)
break
except Exception:
print("No connection, or other error encountered. Trying again...")
time.sleep(3)
continue
try:
return float(s['qty'])
except Exception:
return 0
#return the average price per share of a held stock
def getBuyPrice(symb):
shareHeaders = HEADERS
shareHeaders["symbol"] = symb
while True:
try:
s = json.loads(requests.get(POSURL, headers=shareHeaders).content)
break
except Exception:
print("No connection, or other error encountered. Trying again...")
time.sleep(3)
continue
return float(s[0]["avg_entry_price"]) if len(s) and s[0]["symbol"].lower()==symb.lower() else 0
#return the % change of the portfolio given we know the starting amount
def getPortfolioChange(startVal):
return round((float(getAcct()['portfolio_value'])-startVal)/startVal*100,2)
#return the (integer) number of shares of a given stock able to be bought with the available cash
def sharesPurchasable(symb):
price = getPrice(symb)
if(price):
return int(float(getAcct()['buying_power'])/price)
else:
return 0
```
#### File: Sim/algo13sim2/algo13sim2.py
```python
import requests, csv, os, time, re, math, json
import datetime as dt
from pandas import read_html
global stockDir
stockDir = "./stockData/"
#get list of stocks from stocksUnder1 and marketWatch lists
def getList():
symbList = []
'''
url = 'https://www.marketwatch.com/tools/stockresearch/screener/results.asp'
#many of the options listed are optional and can be removed from the get request
params = {
"TradesShareEnable" : "True",
"TradesShareMin" : "0.8",
"TradesShareMax" : "5",
"PriceDirEnable" : "False",
"PriceDir" : "Up",
"LastYearEnable" : "False",
"TradeVolEnable" : "true",
"TradeVolMin" : "300000",
"TradeVolMax" : "",
"BlockEnable" : "False",
"PERatioEnable" : "False",
"MktCapEnable" : "False",
"MovAvgEnable" : "False",
"MktIdxEnable" : "False",
"Exchange" : "NASDAQ",
"IndustryEnable" : "False",
"Symbol" : "True",
"CompanyName" : "False",
"Price" : "False",
"Change" : "False",
"ChangePct" : "False",
"Volume" : "False",
"LastTradeTime" : "False",
"FiftyTwoWeekHigh" : "False",
"FiftyTwoWeekLow" : "False",
"PERatio" : "False",
"MarketCap" : "False",
"MoreInfo" : "False",
"SortyBy" : "Symbol",
"SortDirection" : "Ascending",
"ResultsPerPage" : "OneHundred"
}
params['PagingIndex'] = 0 #this will change to show us where in the list we should be - increment by 100 (see ResultsPerPage key)
while True:
try:
r = requests.get(url, params=params).text
totalStocks = int(r.split("matches")[0].split("floatleft results")[1].split("of ")[1]) #get the total number of stocks in the list - important because they're spread over multiple pages
break
except Exception:
print("No connection or other error encountered. Trying again...")
time.sleep(3)
continue
print("Getting MarketWatch data...")
for i in range(0,totalStocks,100): #loop through the pages (100 because ResultsPerPage is OneHundred)
print(f"page {int(i/100)+1} of {math.ceil(totalStocks/100)}")
params['PagingIndex'] = i
while True:
try:
r = requests.get(url, params=params).text
break
except Exception:
print("No connection or other error encountered. Trying again...")
time.sleep(3)
continue
symbList += read_html(r)[0]['Symbol'].values.tolist()
'''
#now that we have the marketWatch list, let's get the stocksunder1 list - essentially the getPennies() fxn from other files
url = 'https://stocksunder1.org/nasdaq-penny-stocks/'
print("Getting stocksunder1 data...")
while True:
try:
html = requests.post(url, params={"price":5,"volume":0,"updown":"up"}).content
break
except Exception:
print("No connection, or other error encountered. Trying again...")
time.sleep(3)
continue
tableList = read_html(html)
try:
symList = tableList[5][0:]['Symbol']
except Exception:
        symList = tableList[5][1:][0] #this keeps changing (possibly intentionally - possibly due to switching between windows and linux?)
symList = [re.sub(r'\W+','',e.replace(' predictions','')) for e in symList] #strip "predictions" and any non alphanumerics
symbList = list(set(symbList+symList)) #combine and remove duplicates
print("Done getting stock lists")
return symbList
#get the history of a stock from the nasdaq api (date format is yyyy-mm-dd)
#returns as 2d array order of Date, Close/Last, Volume, Open, High, Low sorted by dates newest to oldest
def getHistory(symb, startDate, endDate):
#write to file after checking that the file doesn't already exist (we don't want to abuse the api)
if(not os.path.isfile(stockDir+symb+".csv")): #TODO: check if the date was modified recently
url = f'https://www.nasdaq.com/api/v1/historical/{symb}/stocks/{startDate}/{endDate}/'
while True:
try:
r = requests.get(url, headers={"user-agent":"-"}).text #send request and store response - cannot have empty user-agent
break
except Exception:
print("No connection, or other error encountered. Trying again...")
time.sleep(3)
continue
out = open(stockDir+symb+'.csv','w') #write to file for later usage
out.write(r)
out.close()
#read csv and convert to array
with open(stockDir+symb+".csv") as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
out = [[ee.replace('$','').replace('N/A','0') for ee in e] for e in csv_reader][1::] #trim first line to get rid of headers, also replace $'s and N/A volumes to calculable values
return out
#checks whether something is a good buy or not (if not, return why - no initial jump or second jump already missed).
#if it is a good buy, return initial jump date
#same criteria as in getGainers() of other algo13sim
def goodBuy(symb):
#calc price % diff over past 20 days (current price/price of day n) - current must be >= 80% for any
#calc volume % diff over average past some days (~60 days?) - must be sufficiently higher (~300% higher?)
days2wait4fall = 3 #wait for stock price to fall for this many days
startDate = days2wait4fall+1 #add 1 to account for the jump day itself
days2look = 25 #look back this far for a jump
firstJumpAmt = 1.3 #stock first must jump by this amount (1.3=130% over 1 day)
sellUp = 1.25 #% to sell up at
sellDn = 0.5 #% to sell dn at
#make sure that the jump happened in the time frame rather than too long ago
volAvgDays = 60 #arbitrary number to avg volumes over
checkPriceDays = 20 #check if the price jumped substantially over the last __ days
checkPriceAmt = 1.7 #check if the price jumped by this amount in the above days (% - i.e 1.5 = 150%)
    volGain = 3 #check if the volume increased by this amount (i.e. 3 = 300% or 3x, 0.5 = 50% or 0.5x)
volLoss = .5 #check if the volume decreases by this amount
priceDrop = .4 #price should drop this far when the volume drops
dateData = getHistory(symb, str(dt.date.today()-dt.timedelta(days=(volAvgDays+days2look))), str(dt.date.today()))
validBuy = "NA" #set to the jump date if it's valid
if(startDate>=len(dateData)): #if a stock returns nothing or very few data pts
return validBuy
while(float(dateData[startDate][1])/float(dateData[startDate+1][1])<firstJumpAmt and startDate<min(days2look,len(dateData)-2)):
startDate += 1
#we know the date of the initial jump (startDate)
if(float(dateData[startDate][1])/float(dateData[startDate+1][1])>=firstJumpAmt):
avgVol = sum([int(dateData[i][2]) for i in range(startDate,min(startDate+volAvgDays,len(dateData)))])/volAvgDays #avg of volumes over a few days
lastVol = int(dateData[startDate][2]) #the latest volume
lastPrice = float(dateData[startDate][4]) #the latest highest price
if(lastVol/avgVol>volGain): #much larger than normal volume
#volume had to have gained
#if the next day's price has fallen significantly and the volume has also fallen
if(float(dateData[startDate-days2wait4fall][4])/lastPrice-1<priceDrop and int(dateData[startDate-days2wait4fall][2])<=lastVol*volLoss):
#the jump happened, the volume gained, the next day's price and volumes have fallen
dayPrice = lastPrice
                i = 1 #increment through days looking for a jump - start with 1 day before startDate
                # check within the last few days, check the price has risen compared to the past several days, and we're within the valid timeframe
while(i<=checkPriceDays and lastPrice/dayPrice<checkPriceAmt and startDate+i<len(dateData)):
dayPrice = float(dateData[startDate+i][4])
i += 1
if(lastPrice/dayPrice>=checkPriceAmt):
#the price jumped compared to both the previous day and to the past few days, the volume gained, and the price and the volume both fell
#check to see if we missed the next jump (where we want to strike)
missedJump = False
for e in range(0,startDate):
diff = float(dateData[e][1])/float(dateData[e+1][1])
if(diff>=sellUp):
missedJump = True
if(not missedJump):
validBuy = dateData[startDate][0] #return the stock and the date it initially jumped
return validBuy #return a dict of valid stocks and the date of their latest jump
def getGainers():
symList = getList()
gainers = {}
for i,e in enumerate(symList):
b = goodBuy(e)
if(b!="NA"):
print(f"({i+1}/{len(symList)}) {e}",end='')
gainers[e] = [b,(dt.datetime.strptime(b,"%m/%d/%Y")+dt.timedelta(days=(7*5))).strftime("%m/%d/%Y")]
print(" - "+gainers[e][0]+" - "+gainers[e][1])
# else:
# print('')
# print(json.dumps(gainers,indent=2))
return gainers
```
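A usage sketch for this simulator module: `getGainers()` scrapes the candidate list itself, screens each symbol with `goodBuy`, and returns a dict keyed by symbol with the jump date and a date five weeks later (the module/import name below is an assumption):
```python
# Sketch only - assumes the module is importable as algo13sim2.
import json

import algo13sim2

gainers = algo13sim2.getGainers()      # {symbol: [jump date, jump date + 5 weeks]}
print(json.dumps(gainers, indent=2))
```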
#### File: Depreciated Functions, etc/Sim/pennyStonks/pennyStonks.py
```python
#determine what the best penny stocks to buy are based on the algorithm
#TODO: place stock data files into separate folder to keep main directory clean (and not accidentally remove the keys file)
import requests, json, re, os.path, time, operator
from pandas import read_html
from datetime import datetime as dt
import statistics as stat
keyFile = open("apikeys.key","r")
apiKeys = json.loads(keyFile.read())
keyFile.close()
#get list of common penny stocks under $price and sorted by gainers (up) or losers (down)
def getPennies(price=1,updown="up"):
url = 'https://stocksunder1.org/'
html = requests.post(url, params={"price":price,"volume":0,"updown":updown}).content
tableList = read_html(html)
# print(tableList[5][1:][0])
# symList = tableList[5][0:]['Symbol']
symList = tableList[5][1:][0] #this keeps changing (possibly intentionally)
symList = [e.replace(' predictions','') for e in symList]
# print(tableList[5][0:]['Symbol'])
return symList
#function to "buy" shares of stock
def buy(shares, price, bPow, equity):
bPow = bPow - shares*price
equity = equity + shares*price
return [shares, bPow, equity, price]
#function to "sell" shares of stock
def sell(shares, price, bPow, equity):
bPow = bPow + shares*price
equity = equity - shares*price
shares = 0
return [shares, bPow, equity, price]
def stonkData(symb):
a = 270 #how many days ago?
b = 10 #time frame duration in days
s = 1 #step (must be int, 1=highest resolution, increase for more speed)
wins = [] #% by which the portfolio has increased/surpassed target %
winTime = [] # number of days it took to win
    loses = [] #% by which the portfolio has lost its value (e.g. if we start w/ $100 and we stop at 60% but its value is $30, then the loss is 70%)
mids = [] #if the portfolio doesn't reach the goal or fail out, then it goes here
# print("\nSimulating "+symb+" data from "+str(a)+" days ago for a duration of "+str(b)+" days, checking every "+str(s)+" day(s)...")
# if file exists, use that data, else make the file
print(symb)
if(not os.path.isfile(symb+".txt")):
# url = 'https://www.alphavantage.co/query'
url = apiKeys["ALPHAVANTAGEURL"]
params= { # NOTE: the information is also available as CSV which would be more efficient
'apikey' : apiKeys["ALPHAVANTAGEKEY"],
'function' : 'TIME_SERIES_DAILY', #daily resolution (open, high, low, close, volume)
'symbol' : symb, #ticker symbol
            'outputsize' : 'full' #up to 20yrs of data
}
response = requests.request('GET', url, params=params).text #send request and store response
time.sleep(19) #max requests of 5 per minute for free alphavantage account, delay to stay under that limit
out = open(symb+'.txt','w') #write to file for later usage
out.write(response)
out.close()
stonkFile = open(symb+'.txt','r') #open the file containing stonk data
stonkData = json.loads(stonkFile.read()) #read in as json data
stonkFile.close()
dateData = stonkData[list(stonkData.keys())[1]] #time series (daily) - index 0=meta data, 1=stock data
length = min(b,len(dateData)-1) #how long for term
a = min(a, len(dateData)-1) #minimize the inputs as needed
b = min(b, len(dateData)-1)
s = min(s, len(dateData)-1)
for c in range(a,b-s,-s): #loop through the days - every day/window will be different, so we need to account for each one and get an average
startDate = min(c,len(dateData)) #how many days ago? must be >= duration and < length
startDate = max(min(startDate,len(dateData)-1),length)
portfolioGain = 20 # target % to increase in timeframe
portfolioLoss = 25 #acceptable loss % during timeframe
sellUp = 5 #sell if stock rises this % (helps reach the goal easier by breaking into easier chunks)
sellDn = 16 #sell if stock falls this % (depends on your risk tolerance, probably should be higher than sellUp?)
#TODO: add days2wait functionality
# days2wait = 1 #minimum number of days to wait until selling after buying
buyPow = [0]*length #running info of the account buying power
equ = [0]*length #running info of the account equity
portfolio = [0]*length #running info of the portfolio value
sharesHeld = 0 #sharesHeld at a given day
buyPrice = 0 #price last bought at
sellPrice = 0 #price last sold at
buyPow[0] = 100 #starting buying power in $
salePrices = [0]*length #this variable is not needed - it is only used as a reference (and graphing) but is a running value of sellPrice
#init stock data from sheet
opens = [0]*length
highs = [0]*length
lows = [0]*length
closes = [0]*length
volumes = [0]*length
#assign stock data
for i in range(length):
oneDate = dateData[list(dateData.keys())[startDate-i]] #date to get to
opens[i] = float(oneDate[list(oneDate.keys())[0]]) #stock info to grab
highs[i] = float(oneDate[list(oneDate.keys())[1]])
lows[i] = float(oneDate[list(oneDate.keys())[2]])
closes[i] = float(oneDate[list(oneDate.keys())[3]])
volumes[i] = float(oneDate[list(oneDate.keys())[4]])
buyTime = closes #buy at this point of the day
sellTime = opens #sell at this point of the day
for i in range(length): #for every day in the timeframe we're looking at
equ[i] = sellTime[i]*sharesHeld #update the equity
portfolio[i] = buyPow[i]+equ[i] #update the portfolio value
#if the portfolio value has reached the target gain
if(portfolio[i]>=(1+portfolioGain/100)*portfolio[0]):
[sharesHeld, buyPow[i], equ[i], sellPrice] = sell(sharesHeld,sellTime[i],buyPow[i],equ[i]) #sell everything
#update the running values
for j in range(i,length):
portfolio[j] = portfolio[i]
buyPow[j] = buyPow[i]
equ[j] = equ[i]
winTime.append(i)
wins.append((portfolio[i]-portfolio[0])/portfolio[0]*100)
break #stop trading for the timeframe
#else if we've failed out
elif(portfolio[i]<=(1-portfolioLoss/100)*portfolio[0] and sharesHeld>0):
[sharesHeld, buyPow[i], equ[i], sellPrice] = sell(sharesHeld,sellTime[i],buyPow[i],equ[i]) #sell all
if(i==length-1): #if we've reached the end of the timeframe
loses.append((portfolio[i]-portfolio[0])/portfolio[0]*100)
#else if we haven't reached the target, but we haven't failed out either by the end of the timeframe
elif(portfolio[i]>(1-portfolioLoss/100)*portfolio[0] and portfolio[i]<(1+portfolioGain/100)*portfolio[0] and i==length-1):
mids.append((portfolio[i]-portfolio[0])/portfolio[0]*100)
#this is an all-or-nothing system, if we don't have shares and it's okay to buy (i.e. we've rebounded from a fail out)
if(sharesHeld==0 and buyTime[i]>=sellPrice):
[sharesHeld, buyPow[i], equ[i], buyPrice] = buy(int(buyPow[i]/buyTime[i]) if buyTime[i] else 0, buyTime[i], buyPow[i], equ[i]) #buy as many as we can afford
#else if we have shares and it's okay to sell (either up or down)
elif(sharesHeld>0 and (sellTime[i]>=(1+sellUp/100)*buyPrice or sellTime[i]<=(1-sellDn/100)*buyPrice)):
[sharesHeld, buyPow[i], equ[i], null] = sell(sharesHeld,sellTime[i], buyPow[i], equ[i]) #sell all shares
#else we didn't buy or sell - so we hold
else:
equ[i] = sellTime[i]*sharesHeld
for j in range(i,length):
portfolio[j] = portfolio[i]
buyPow[j] = buyPow[i]
equ[j] = equ[i]
salePrices[i] = sellPrice #update the running sales list
#print the output data
avgWinTime = round(stat.mean(winTime) if len(winTime) else 0,2)
totalWin = round(len(wins)/len(wins+loses+mids)*100,2)
totalMids = round(len(mids)/len(wins+loses+mids)*100,2)
totalLose = round(len(loses)/len(wins+loses+mids)*100,2)
avgWin = round(stat.mean(wins) if len(wins) else 0,2)
avgMids = round(stat.mean(mids) if len(mids) else 0,2)
avgLose = round(stat.mean(loses) if len(loses) else 0,2)
    weightedAvgWin = round(len(wins)/len(wins+loses+mids)*sum(wins)/float(len(wins)) if float(len(wins)) else 0,2)
weightedAvgMids = round(len(mids)/len(wins+loses+mids)*sum(mids)/float(len(mids)) if float(len(mids)) else 0,2)
weightedAvgLose = round(len(loses)/len(wins+loses+mids)*sum(loses)/float(len(loses)) if float(len(loses)) else 0,2)
greaterThan0 = len([i for i in wins+mids+loses if i>0])
winMidQ = sorted(wins)
winMidQ = winMidQ[int(len(winMidQ)/4):int(3*len(winMidQ)/4)] or [0]
winQ1 = round(winMidQ[0],2)
winQ3 = round(winMidQ[len(winMidQ)-1],2)
midMidQ = sorted(mids)
midMidQ = midMidQ[int(len(midMidQ)/4):int(3*len(midMidQ)/4)] or [0]
midQ1 = round(midMidQ[0],2)
midQ3 = round(midMidQ[len(midMidQ)-1],2)
loseMidQ = sorted(loses)
loseMidQ = loseMidQ[int(len(loseMidQ)/4):int(3*len(loseMidQ)/4)] or [0]
loseQ1 = round(loseMidQ[0],2)
loseQ3 = round(loseMidQ[len(loseMidQ)-1],2)
allMidQ = sorted(wins+mids+loses)
allMidQ = allMidQ[int(len(allMidQ)/4):int(3*len(allMidQ)/4)]
midQavg = round(stat.mean(allMidQ) if len(allMidQ) else 0,2)
outs = {}
outs["win"] = str(len(wins)) # total won
outs["mids"] = str(len(mids)) #total in the middle
outs["lose"] = str(len(loses)) #total lost
outs["winPerc"] = str(totalWin) # % of total simulated won
outs["midPerc"] = str(totalMids) # % of total simulated in the middle
outs["losePerc"] = str(totalLose) # % of total simulated lost
outs["avgWinTime"] = str(avgWinTime) #average days to win
outs["avgWinAmt"] = str(avgWin) #avg % change of portfolio of the wins
outs["avgMidsAmt"] = str(avgMids) #avg % change of portfolio of the middles
outs["avgLoseAmt"] = str(avgLose) #avg % change of portfolio of the loses
outs["weightedAvgWin"] = str(weightedAvgWin) #weighted average portfolio change of the wins
outs["weightedAvgMids"] = str(weightedAvgMids) #weighted average portfolio change of the mids
outs["weightedAvgLose"] = str(weightedAvgLose) #weighted average portfolio change of the Loses
outs["totalWeightedAvg"] = str(round(weightedAvgWin+weightedAvgLose+weightedAvgMids,2)) #sum of the weighted averages
outs["totalAvg"] = str(round(stat.mean(wins+mids+loses) if len(wins+mids+loses) else 0,2))
outs["midQavg"] = str(midQavg)
#these are useful, but can easily mislead because the data probably aren't normally distributed
# outs["winStdDev"] = round(stat.stdev(wins) if len(wins)>1 else 0,2)
# outs["midsStdDev"] = round(stat.stdev(mids) if len(mids)>1 else 0,2)
# outs["loseStdDev"] = round(stat.stdev(loses) if len(loses)>1 else 0,2)
outs["winQ1"] = str(winQ1)
outs["winQ3"] = str(winQ3)
outs["winMidQ"] = str(round(winQ3-winQ1,2))
outs["midQ1"] = str(midQ1)
outs["midQ3"] = str(midQ3)
outs["midMidQ"] = str(round(midQ3-midQ1,2))
outs["loseQ1"] = str(loseQ1)
outs["loseQ3"] = str(loseQ3)
outs["loseMidQ"] = str(round(loseQ3-loseQ1,2))
outs["greaterThan0"] = greaterThan0
return outs
stonks = {}
if(not os.path.isfile("allStonks.txt")):
out = open('allStonks.txt','w') #write to file for later usage
for i,s in enumerate(getPennies()):
stonks[s] = stonkData(s)
out.write(json.dumps(stonks))
out.close()
else:
stonkFile = open('allStonks.txt','r') #open the file containing stonk data
stonks = json.loads(stonkFile.read()) #read in as json data
stonkFile.close()
byWeightedAvg = sorted(list(stonks.keys()), key=lambda k: float(stonks[k]["totalWeightedAvg"]),reverse=True)
byMidQavg = sorted(list(stonks.keys()), key=lambda k: float(stonks[k]["midQavg"]),reverse=True)
byWin = sorted(list(stonks.keys()), key=lambda k: float(stonks[k]["win"]),reverse=True)
byWinMid = sorted(list(stonks.keys()), key=lambda k: float(stonks[k]["winMidQ"]))
byWinTime = sorted(list(stonks.keys()), key=lambda k: float(stonks[k]["avgWinTime"]))
# print("\nWeighted Avg\tWins\t\tWin Mid Q\tWin Time")
# for i in range(len(stonks)):
# print(byWeightedAvg[i]+"\t\t"+byWin[i]+"\t\t"+byWinMid[i]+"\t\t"+byWinTime[i])
# print("\n")
weightedAvgWeight = .8
midQavgWeight = 1
winWeight = .7
winMidWeight = .5
winTimeWeight = .2
scores = {}
for e in stonks:
scores[e] = int(midQavgWeight*byMidQavg.index(e)+weightedAvgWeight*byWeightedAvg.index(e)+winWeight*byWin.index(e)+winMidWeight*byWinMid.index(e)+winTimeWeight*byWinTime.index(e))
scoreList = sorted(list(scores.keys()), key=lambda k: float(scores[k]))
print("\nThe lower the score, the better the stock")
print("\nSymb\tScore\tWin %\twQ1\twQ3\t\tMid %\tmQ1\tmQ3\t\tLose %\tlQ1\tlQ3\t\tTotal Avg Rtn\tMid Q Avg Rtn\tAvg Win Time\tn")
for e in scoreList:
print(e+"\t"+str(scores[e])+"\t"+stonks[e]["winPerc"]+"\t"+stonks[e]["winQ1"]+"\t"+stonks[e]["winQ3"]+"\t\t"+stonks[e]["midPerc"]+"\t"+stonks[e]["midQ1"]+"\t"+stonks[e]["midQ3"]+"\t\t"+stonks[e]["losePerc"]+"\t"+stonks[e]["loseQ1"]+"\t"+stonks[e]["loseQ3"]+"\t\t"+stonks[e]["totalWeightedAvg"]+"\t\t"+stonks[e]["midQavg"]+"\t\t"+stonks[e]["avgWinTime"]+"\t\t"+str(int(stonks[e]["win"])+int(stonks[e]["mids"])+int(stonks[e]["lose"])))
```
#### File: Depreciated Functions, etc/Sim/stonk2.py
```python
import requests, os.path, json
from matplotlib import pyplot as plt
keyFile = open("apikeys.key","r")
apiKeys = json.loads(keyFile.read())
keyFile.close()
def buy(shares, price, bPow, equity):
bPow = bPow - shares*price
equity = equity + shares*price
return [shares, bPow, equity, price]
def sell(shares, price, bPow, equity):
bPow = bPow + shares*price
equity = equity - shares*price
shares = 0
return [shares, bPow, equity, price]
symb = 'RTTR'
# if file exists, use that data, else make the file
if(not os.path.isfile(symb+".txt")):
url = 'https://www.alphavantage.co/query'
params= {
'apikey' : apiKeys["ALPHAVANTAGEKEY"],
'function' : 'TIME_SERIES_DAILY', #daily resolution (open, high, low, close, volume)
'symbol' : symb, #ticker symbol
        'outputsize' : 'full' #up to 20yrs of data
}
response = requests.request('GET', url, params=params).text
out = open(symb+'.txt','w')
out.write(response)
out.close()
stonkFile = open(symb+'.txt','r')
stonkData = json.loads(stonkFile.read())
stonkFile.close()
dateData = stonkData[list(stonkData.keys())[1]] #time series (daily) - index 0=meta data
# oneDate = dateData[list(dateData.keys())[date]] #date to get to
# dateInfo = oneDate[list(oneDate.keys())[info]] #stock info to grab
startDate = 15 #how many days ago? must be >= duration and < length
duration = 14 #how long for term
length = min(duration,len(dateData)-1)
startDate = max(min(startDate,len(dateData)-1),duration)
portfolioGain = 20 #% to increase in timeframe
portfolioLoss = 50 #acceptable loss % during timeframe
sellUp = 9 #sell if stock rises this %
sellDn = 19 #sell if stock falls this %
# days2wait = 1 #minimum number of days to wait until selling after buying
buyPow = [0]*length
equ = [0]*length
portfolio = [0]*length
sharesHeld = 0
buyPrice = 0 #price last bought at
sellPrice = 0 #price last sold at
buyPow[0] = 100 #starting buying power
opens = [0]*length
highs = [0]*length
lows = [0]*length
closes = [0]*length
volumes = [0]*length
for i in range(length):
oneDate = dateData[list(dateData.keys())[startDate-i]] #date to get to
opens[i] = float(oneDate[list(oneDate.keys())[0]]) #stock info to grab
highs[i] = float(oneDate[list(oneDate.keys())[1]])
lows[i] = float(oneDate[list(oneDate.keys())[2]])
closes[i] = float(oneDate[list(oneDate.keys())[3]])
volumes[i] = float(oneDate[list(oneDate.keys())[4]])
buyTime = opens #buy at this point of the day
sellTime = opens #sell at this point of the day
salePrices = [0]*length
for i in range(length):
equ[i] = sellTime[i]*sharesHeld
# input(str(i)+" buyPow/buyPrice/sharesHeld: "+str(buyPow[i])+" - "+str(buyTime[i])+" - "+str(sharesHeld)+" - New day")
portfolio[i] = buyPow[i]+equ[i]
# input("portfolio/equity: "+str(portfolio[i])+" - "+str(equ[i]))
# input(str(portfolio[i])+" - "+str((1+portfolioGain/100)*portfolio[0]))
if(portfolio[i]>=(1+portfolioGain/100)*portfolio[0]):
[sharesHeld, buyPow[i], equ[i], sellPrice] = sell(sharesHeld,sellTime[i],buyPow[i],equ[i])
for j in range(i,length):
portfolio[j] = portfolio[i]
buyPow[j] = buyPow[i]
equ[j] = equ[i]
print(str(i)+" - "+str(portfolioGain)+"%")
break
elif(portfolio[i]<=(1-portfolioLoss/100)*portfolio[0] and sharesHeld>0):
# input(buyPow[i]+sharesHeld*sellTime[i])
[sharesHeld, buyPow[i], equ[i], sellPrice] = sell(sharesHeld,sellTime[i],buyPow[i],equ[i])
print(str(i)+" - "+str(sellPrice))
if(sharesHeld==0 and buyTime[i]>=sellPrice):
# print(str(i)+" - buy")
[sharesHeld, buyPow[i], equ[i], buyPrice] = buy(int(buyPow[i]/buyTime[i]), buyTime[i], buyPow[i], equ[i])
# input(str(i)+" buyPow/buyPrice/sharesHeld: "+str(buyPow[i])+" - "+str(buyTime[i])+" - "+str(sharesHeld))
elif(sharesHeld>0 and (sellTime[i]>=(1+sellUp/100)*buyPrice or sellTime[i]<=(1-sellDn/100)*buyPrice)):
# print(str(i)+" - sell "+str(sharesHeld)+" at "+str(sellTime[i]))
[sharesHeld, buyPow[i], equ[i], null] = sell(sharesHeld,sellTime[i], buyPow[i], equ[i])
else:
# print(str(i)+" - hold")
equ[i] = sellTime[i]*sharesHeld
for j in range(i,length):
portfolio[j] = portfolio[i]
buyPow[j] = buyPow[i]
equ[j] = equ[i]
salePrices[i] = sellPrice
# print(str(sellTime[i])+" - "+str(salePrices[i]))
# print(buyPow[i])
plt.figure(0)
plt.subplot(311)
plt.title('buyTime/sellTime')
plt.plot(buyTime, "-x")
plt.plot(sellTime, "-.")
plt.plot(salePrices, "-.")
plt.legend(['buyTime','sellTime','salePrice'])
plt.subplot(312)
plt.title('portfolio')
plt.plot(portfolio)
plt.subplot(313)
plt.title('equ/buyPow')
plt.plot(equ)
plt.plot(buyPow)
plt.legend(['equity','buying power'])
# plt.figure(1)
plt.show()
```
#### File: jessanc/stonkBot/otherfxns.py
```python
import json,requests,os,time,re,csv,math
import datetime as dt
from bs4 import BeautifulSoup as bs
apiKeys = {}
stockDir = ''
def init(keyFilePath, stockDataDir):
global apiKeys, stockDir
keyFile = open(keyFilePath,"r")
apiKeys = json.loads(keyFile.read())
keyFile.close()
stockDir = stockDataDir
def isTradable(symb):
isTradable = False
while True:
try:
r = requests.request("GET","https://api.nasdaq.com/api/quote/{}/info?assetclass=stocks".format(symb), headers={"user-agent":"-"}).content
break
except Exception:
print("No connection, or other error encountered, trying again...")
time.sleep(3)
continue
try:
isTradable = bool(json.loads(r)['data']['isNasdaqListed'])
except Exception:
print(symb+" - Error in isTradable")
return isTradable
#get list of stocks from stocksUnder1 and marketWatch lists
def getList():
symbList = list()
url = 'https://www.marketwatch.com/tools/stockresearch/screener/results.asp'
#many of the options listed are optional and can be removed from the get request
params = {
"TradesShareEnable" : "True",
"TradesShareMin" : "0.8",
"TradesShareMax" : "5",
"PriceDirEnable" : "False",
"PriceDir" : "Up",
"LastYearEnable" : "False",
"TradeVolEnable" : "true",
"TradeVolMin" : "300000",
"TradeVolMax" : "",
"BlockEnable" : "False",
"PERatioEnable" : "False",
"MktCapEnable" : "False",
"MovAvgEnable" : "False",
"MktIdxEnable" : "False",
"Exchange" : "NASDAQ",
"IndustryEnable" : "False",
"Symbol" : "True",
"CompanyName" : "False",
"Price" : "False",
"Change" : "False",
"ChangePct" : "False",
"Volume" : "False",
"LastTradeTime" : "False",
"FiftyTwoWeekHigh" : "False",
"FiftyTwoWeekLow" : "False",
"PERatio" : "False",
"MarketCap" : "False",
"MoreInfo" : "False",
"SortyBy" : "Symbol",
"SortDirection" : "Ascending",
"ResultsPerPage" : "OneHundred"
}
params['PagingIndex'] = 0 #this will change to show us where in the list we should be - increment by 100 (see ResultsPerPage key)
while True:
try:
r = requests.get(url, params=params).text
totalStocks = int(r.split("matches")[0].split("floatleft results")[1].split("of ")[1]) #get the total number of stocks in the list - important because they're spread over multiple pages
break
except Exception:
print("No connection or other error encountered. Trying again...")
time.sleep(3)
continue
print("Getting MarketWatch data...")
for i in range(0,totalStocks,100): #loop through the pages (100 because ResultsPerPage is OneHundred)
print(f"page {int(i/100)+1} of {math.ceil(totalStocks/100)}")
params['PagingIndex'] = i
while True:
try:
r = requests.get(url, params=params).text
break
except Exception:
print("No connection or other error encountered. Trying again...")
time.sleep(3)
continue
table = bs(r,'html.parser').find_all('table')[0]
for e in table.find_all('tr')[1::]:
symbList.append(e.find_all('td')[0].get_text())
#now that we have the marketWatch list, let's get the stocksunder1 list - essentially the getPennies() fxn from other files
url = 'https://stocksunder1.org/nasdaq-penny-stocks/'
print("Getting stocksunder1 data...")
while True:
try:
html = requests.post(url, params={"price":5,"volume":0,"updown":"up"}).content
break
except Exception:
print("No connection, or other error encountered. Trying again...")
time.sleep(3)
continue
table = bs(html,'html.parser').find_all('table')[6] #6th table in the webpage - this may change depending on the webpage
for e in table.find_all('tr')[1::]: #skip the first element that's the header
#print(re.sub(r'\W+','',e.find_all('td')[0].get_text().replace(' predictions','')))
symbList.append(re.sub(r'\W+','',e.find_all('td')[0].get_text().replace(' predictions','')))
print("Removing Duplicates...")
symbList = list(dict.fromkeys(symbList)) #combine and remove duplicates
print("Done getting stock lists")
return symbList
#get the history of a stock from the nasdaq api (date format is yyyy-mm-dd)
#returns as 2d array order of Date, Close/Last, Volume, Open, High, Low sorted by dates newest to oldest
def getHistory(symb, startDate, endDate):
#try checking the modified date of the file, if it throws an error, just set it to yesterday
try:
modDate = dt.datetime.strptime(time.strftime("%Y-%m-%d",time.localtime(os.stat(stockDir+symb+'.csv').st_mtime)),"%Y-%m-%d").date() #if ANYONE knows of a better way to get the mod date into a date format, for the love of god please let me know
except Exception:
modDate = dt.date.today()-dt.timedelta(1)
#write to file after checking that the file doesn't already exist (we don't want to abuse the api) or that it was edited more than a day ago
if(not os.path.isfile(stockDir+symb+".csv") or modDate<dt.date.today()):
url = f'https://www.nasdaq.com/api/v1/historical/{symb}/stocks/{startDate}/{endDate}/'
while True:
try:
r = requests.get(url, headers={"user-agent":"-"}).text #send request and store response - cannot have empty user-agent
break
except Exception:
print("No connection, or other error encountered. Trying again...")
time.sleep(3)
continue
out = open(stockDir+symb+'.csv','w') #write to file for later usage
out.write(r)
out.close()
#read csv and convert to array
#TODO: see if we can not have to save it to a file if possible due to high read/writes - can also eliminate csv library
with open(stockDir+symb+".csv") as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
out = [[ee.replace('$','').replace('N/A','0') for ee in e] for e in csv_reader][1::] #trim first line to get rid of headers, also replace $'s and N/A volumes to calculable values
return out
#checks whether something is a good buy or not (if not, return why - no initial jump or second jump already missed).
#if it is a good buy, return initial jump date
#this is where the magic really happens
#TODO: check if currently held stock already peaked (i.e. we missed it while holding it) - if it did then lower expectations and try to sell at a profit still(this should only happen is there's a network error or during testing stuff)
def goodBuy(symb,days2look=25): #days2look=how far back to look for a jump
validBuy = "NA" #set to the jump date if it's valid
if isTradable(symb):
#calc price % diff over past 20 days (current price/price of day n) - current must be >= 80% for any
#calc volume % diff over average past some days (~60 days?) - must be sufficiently higher (~300% higher?)
days2wait4fall = 3 #wait for stock price to fall for this many days
startDate = days2wait4fall+1 #add 1 to account for the jump day itself
firstJumpAmt = 1.3 #stock first must jump by this amount (1.3=130% over 1 day)
sellUp = 1.25 #% to sell up at
sellDn = 0.5 #% to sell dn at
#make sure that the jump happened in the frame rather than too long ago
volAvgDays = 60 #arbitrary number to avg volumes over
checkPriceDays = 30 #check if the price jumped substantially over the last __ trade days
checkPriceAmt = 1.7 #check if the price jumped by this amount in the above days (% - i.e 1.5 = 150%)
volGain = 3 #check if the volume increased by this amount during the jump (i.e. 3 = 300% or 3x, 0.5 = 50% or 0.5x)
volLoss = .5 #check if the volume decreases by this amount during the price drop
priceDrop = .4 #price should drop this far when the volume drops
dateData = getHistory(symb, str(dt.date.today()-dt.timedelta(days=(volAvgDays+days2look))), str(dt.date.today()))
if(startDate>=len(dateData)): #if a stock returns nothing or very few data pts
return validBuy
while(float(dateData[startDate][1])/float(dateData[startDate+1][1])<firstJumpAmt and startDate<min(days2look,len(dateData)-2)):
startDate += 1
#we know the date of the initial jump (startDate)
if(float(dateData[startDate][1])/float(dateData[startDate+1][1])>=firstJumpAmt):
avgVol = sum([int(dateData[i][2]) for i in range(startDate,min(startDate+volAvgDays,len(dateData)))])/volAvgDays #avg of volumes over a few days
lastVol = int(dateData[startDate][2]) #the latest volume
lastPrice = float(dateData[startDate][4]) #the latest highest price
if(lastVol/avgVol>volGain): #much larger than normal volume
#volume had to have gained
#if the next day's price has fallen significantly and the volume has also fallen
if(float(dateData[startDate-days2wait4fall][4])/lastPrice-1<priceDrop and int(dateData[startDate-days2wait4fall][2])<=lastVol*volLoss):
#the jump happened, the volume gained, the next day's price and volumes have fallen
dayPrice = lastPrice
i = 1 #increment through days looking for a jump - start with 1 day before startDate
# check within the last few days that the price has risen compared to the past several days, and that we're still within the valid timeframe
while(i<=checkPriceDays and lastPrice/dayPrice<checkPriceAmt and startDate+i<len(dateData)):
dayPrice = float(dateData[startDate+i][4])
i += 1
if(lastPrice/dayPrice>=checkPriceAmt): #TODO: read through this logic some more to determine where exactly to put sellDn
#the price jumped compared to both the previous day and to the past few days, the volume gained, and the price and the volume both fell
#check to see if we missed the next jump (where we want to strike)
missedJump = False
for e in range(0,startDate):
diff = float(dateData[e][1])/float(dateData[e+1][1])
if(diff>=sellUp):
missedJump = True
if(not missedJump):
validBuy = dateData[startDate][0] #return the date the stock initially jumped
return validBuy #return a dict of valid stocks and the date of their latest jump
#the new version of the getGainers function - uses the new functions getList, getHistory, and goodBuy
def getGainers(symblist):
gainers = {}
for i,e in enumerate(symblist):
b = goodBuy(e)
if(b!="NA"):
gainers[e] = [b, (dt.datetime.strptime(b,"%m/%d/%Y")+dt.timedelta(days=(7*5))).strftime("%m/%d/%Y")]
print(f"({i+1}/{len(symblist)}) {e} - {b} - {gainers[e][1]}")
return gainers
```
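A minimal sketch of how these helpers compose (the ticker symbols are hypothetical; `stockDir` and earlier helpers such as `isTradable` are assumed to be defined as above):

```python
if __name__ == "__main__":
    watchlist = ["ABC", "XYZ"]  # hypothetical tickers; getList() builds the real list
    gainers = getGainers(watchlist)
    for symb, (jumpDate, sellBy) in gainers.items():
        print(f"{symb}: jumped {jumpDate}, target sell-by {sellBy}")
```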
|
{
"source": "jessa/PySDD",
"score": 2
}
|
#### File: jessa/PySDD/setup.py
```python
from setuptools import setup
from setuptools.extension import Extension
from setuptools.command.build_ext import build_ext as BuildExtCommand
from setuptools import Distribution
from distutils.errors import CCompilerError, DistutilsExecError, DistutilsPlatformError
import platform
import os
import re
from pathlib import Path
try:
from Cython.Build import cythonize
except ImportError:
cythonize = None
try:
import cysignals
except ImportError as exc:
print(f"cysignals not found\n{exc}")
cysignals = None
class MyDistribution(Distribution):
global_options = Distribution.global_options + [
('debug', None, 'Compile with debug options on (PySDD option)'),
('usecysignals', None, 'Compile with CySignals (PySDD option)')
]
def __init__(self, attrs=None):
self.debug = 0
self.usecysignals = 0
super().__init__(attrs)
# build_type = "debug"
build_type = "optimized"
here = Path(".") # setup script requires relative paths
with (here / "pysdd" / "__init__.py").open('r') as fd:
wrapper_version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
if not wrapper_version:
raise RuntimeError('Cannot find version information')
sdd_version = "2.0"
libwrapper_path = here / "pysdd" / "lib"
sdd_path = libwrapper_path / f"sdd-{sdd_version}"
lib_path = sdd_path / "lib"
inc_path = sdd_path / "include"
src_path = sdd_path / "src"
csrc_path = here / "pysdd" / "src"
# c_files_paths = src_path.glob("**/*.c")
c_files_paths = (src_path / "fnf").glob("*.c")
sdd_extra_inc_path = libwrapper_path / "sdd_extra" / "include"
# weight optimization wrapper
wo_path = libwrapper_path / "weight_optimization"
wo_inc_path = wo_path / "include"
wo_src_path = wo_path / "src"
wo_c_files_paths = wo_src_path.glob("*.c")
c_dirs_paths = set(p.parent for p in src_path.glob("**/*.c")) | {wo_src_path}
all_c_file_paths = [str(p) for p in c_files_paths] + [str(p) for p in wo_c_files_paths]
# print("Found c files: ", ", ".join([str(p) for p in all_c_file_paths]))
os.environ["LDFLAGS"] = f"-L{lib_path}"
os.environ["CPPFLAGS"] = f"-I{inc_path} " + f"-I{wo_inc_path} " + f"-I{sdd_extra_inc_path} " + f"-I{csrc_path} " + \
" ".join(f"-I{p}" for p in c_dirs_paths)
compile_time_env = {'HAVE_CYSIGNALS': False}
# if cysignals is not None:
# compile_time_env['HAVE_CYSIGNALS'] = True
c_args = {
'unix': ['-O3', '-march=native'],
'msvc': ['/Ox', '/fp:fast', '/favor:INTEL64', '/Og'],
'mingw32': ['-O3', '-march=native']
}
c_args_debug = {
'unix': ["-march=native", "-O0", '-g'],
'msvc': ["-Zi", "/Od"],
'mingw32': ["-march=native", "-O0", '-g']
}
l_args = {
'unix': [],
'msvc': [],
'mingw32': []
}
l_args_debug = {
'unix': ['-g'],
'msvc': ["-debug"],
'mingw32': ['-g']
}
class MyBuildExtCommand(BuildExtCommand):
def build_extensions(self):
global lib_path
c = self.compiler.compiler_type
print("Compiler type: {}".format(c))
compiler_name = self.compiler.compiler[0]
print("Compiler name: {}".format(compiler_name))
print("--debug: {}".format(self.distribution.debug))
print("--usecysignals: {}".format(self.distribution.usecysignals))
# Compiler and linker options
if self.distribution.debug:
self.force = True # force full rebuild in debugging mode
cur_c_args = c_args_debug
cur_l_args = l_args_debug
else:
cur_c_args = c_args
cur_l_args = l_args
if "gcc" in compiler_name:
cur_c_args["unix"].append("-std=c99")
if c in cur_c_args:
args = cur_c_args[c]
for e in self.extensions: # type: Extension
e.extra_compile_args = args
else:
print("Unknown compiler type: {}".format(c))
if c in cur_l_args:
args = cur_l_args[c]
for e in self.extensions: # type: Extension
e.extra_link_args = args
if self.distribution.usecysignals:
if cysignals is not None:
if self.cython_compile_time_env is None:
self.cython_compile_time_env = {'HAVE_CYSIGNALS': True}
else:
self.cython_compile_time_env['HAVE_CYSIGNALS'] = True
else:
print("Warning: import cysignals failed")
# Extra objects
if "Darwin" in platform.system():
cur_lib_path = lib_path / "Darwin"
if build_type == "debug":
cur_lib_path = cur_lib_path / "debug"
libsdd_path = cur_lib_path / "libsdd.a"
elif "Linux" in platform.system():
cur_lib_path = lib_path / "Linux"
libsdd_path = cur_lib_path / "libsdd.a"
elif "Windows" in platform.system():
cur_lib_path = lib_path / "Windows"
libsdd_path = cur_lib_path / "libsdd.dll"
else:
libsdd_path = lib_path / "libsdd.a"
for e in self.extensions: # type: Extension
e.extra_objects = [str(libsdd_path)]
BuildExtCommand.build_extensions(self)
if cythonize is not None:
ext_modules = cythonize([
Extension(
"pysdd.sdd", [str(here / "pysdd" / "sdd.pyx")] + all_c_file_paths
# extra_objects=[str(libsdd_path)],
# extra_compile_args=extra_compile_args,
# extra_link_args=extra_link_args
# include_dirs=[numpy.get_include()]
)],
compiler_directives={'embedsignature': True},
# gdb_debug=gdb_debug,
compile_time_env=compile_time_env)
else:
ext_modules = []
print('**********************************************')
print('Cython not yet available, skipping compilation')
print('**********************************************')
# install_requires = ['numpy', 'cython']
install_requires = ['cython>=0.29.6']
setup_requires = ['setuptools>=18.0', 'cython>=0.29.6']
tests_require = ['pytest']
with (here / 'README.rst').open('r', encoding='utf-8') as f:
long_description = f.read()
setup_kwargs = {}
def set_setup_kwargs(**kwargs):
global setup_kwargs
setup_kwargs = kwargs
set_setup_kwargs(
name='PySDD',
version=wrapper_version,
description='Sentential Decision Diagrams',
long_description=long_description,
author='<NAME>, <NAME>',
author_email='<EMAIL>',
url='https://github.com/wannesm/PySDD',
project_urls={
'PySDD documentation': 'http://pysdd.readthedocs.io/en/latest/',
'PySDD source': 'https://github.com/wannesm/PySDD'
},
packages=["pysdd"],
install_requires=install_requires,
setup_requires=setup_requires,
tests_require=tests_require,
extras_require={
'all': ['cysignals', 'numpy']
},
include_package_data=True,
package_data={
'': ['*.pyx', '*.pxd', '*.h', '*.c', '*.so', '*.a', '*.dll', '*lib'],
},
distclass=MyDistribution,
cmdclass={
'build_ext': MyBuildExtCommand
},
entry_points={
'console_scripts': [
'pysdd = pysdd.cli:main'
]},
python_requires='>=3.6',
license='Apache 2.0',
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering :: Artificial Intelligence'
],
keywords='sdd, knowledge compilation',
ext_modules=ext_modules,
zip_safe=False
)
try:
setup(**setup_kwargs)
except (CCompilerError, DistutilsExecError, DistutilsPlatformError, IOError, SystemExit) as exc:
print("********************************************")
print("ERROR: The C extension could not be compiled")
print("********************************************")
print(exc)
raise exc
```
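The `--debug`/`--usecysignals` switches above work by extending `Distribution.global_options`; a minimal sketch of that pattern, with a hypothetical flag name:

```python
from setuptools import Distribution

class FlagDistribution(Distribution):
    # distutils stores each long option as an attribute (dashes become underscores)
    global_options = Distribution.global_options + [
        ('myflag', None, 'Example project-specific switch (hypothetical)'),
    ]

    def __init__(self, attrs=None):
        self.myflag = 0  # default; `python setup.py --myflag ...` sets it to 1
        super().__init__(attrs)
```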
#### File: PySDD/tests/test_nnf.py
```python
from pysdd.util import nnf_file_wmc, sdd_file_wmc, psdd_file_wmc
from pysdd.sdd import Fnf, Vtree, SddManager
from pysdd.cli import read_weights
import sys
import os
import math
import logging
from pathlib import Path
logger = logging.getLogger("pysdd")
directory = None
counter = 0
here = Path(__file__).parent
def test_nnf1():
weights = {
+3: 0.5, +2: 0.5, +1: 1,
-3: 0.5, -2: 0.5, -1: 1
}
wmc = nnf_file_wmc(here / "rsrc" / "test.cnf.nnf", weights)
assert wmc == 1.0
def test_nnf2():
weights = {
+3: 0.5, +2: 0.5, +1: 1,
-3: 0.5, -2: 0.5, -1: 0
}
wmc = nnf_file_wmc(here / "rsrc" / "test.cnf.nnf", weights)
assert wmc == 0.75
def test_dnf1():
dnf_filename = str(here / "rsrc" / "test.cnf.nnf")
fnf = Fnf.from_dnf_file(bytes(dnf_filename, encoding='utf8'))
# weights = read_weights(dnf_filename)
vtree = Vtree(var_count=fnf.var_count)
manager = SddManager.from_vtree(vtree)
node = manager.fnf_to_sdd(fnf)
print(node)
def test_sdd1():
weights = {
+3: 0.5, +2: 0.5, +1: 1,
-3: 0.5, -2: 0.5, -1: 1
}
wmc = sdd_file_wmc(here / "rsrc" / "test.sdd", weights)
print("WMC", wmc)
assert wmc == 1.0, f"{wmc} != 1.0"
def test_sdd2():
weights = {
+3: 0.5, +2: 0.5, +1: 1,
-3: 0.5, -2: 0.5, -1: 0
}
wmc = sdd_file_wmc(here / "rsrc" / "test.sdd", weights)
print("WMC", wmc)
assert wmc == 0.75, f"{wmc} != 0.75"
def test_psdd1():
wmc = psdd_file_wmc(here / "rsrc" / "test.psdd", None)
wmc = math.exp(wmc)
print("WMC", wmc)
if __name__ == "__main__":
logger.setLevel(logging.DEBUG)
sh = logging.StreamHandler(sys.stdout)
logger.addHandler(sh)
directory = Path(os.environ.get('TESTDIR', Path(__file__).parent))
print(f"Saving files to {directory}")
# test_nnf1()
# test_sdd2()
# test_psdd1()
test_dnf1()
```
|
{
"source": "JessaTehCrow/lynie",
"score": 4
}
|
#### File: lynie/tests/function.py
```python
def some_func(arg1:int=5):
return arg1 ** 5
print(some_func(5))
other_func = lambda arg1=5: arg1 ** 5
print(other_func(5))
```
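Both calls print 3125 (5**5); the `def` and `lambda` forms are equivalent here.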
|
{
"source": "jessaustin/monthdelta",
"score": 3
}
|
#### File: monthdelta/test/test_monthdelta.py
```python
import pickle
import unittest
import types
from operator import lt, le, gt, ge, eq, ne
try:
reversed
sorted
except NameError:
def reversed(seq):
return seq[::-1]
def sorted(it):
it = list(it)
it.sort()
return it
try:
from test import support
except ImportError:
from test import test_support as support
try:
from itertools import combinations, permutations
except ImportError:
def permutations(iterable, r=None):
pool = tuple(iterable)
n = len(pool)
if r is None:
r = n
indices = range(n)
cycles = range(n, n-r, -1)
yield tuple([pool[i] for i in indices[:r]])
while n:
for i in reversed(range(r)):
cycles[i] -= 1
if cycles[i] == 0:
indices[i:] = indices[i+1:] + indices[i:i+1]
cycles[i] = n - i
else:
j = cycles[i]
indices[i], indices[-j] = indices[-j], indices[i]
yield tuple([pool[i] for i in indices[:r]])
break
else:
return
def combinations(iterable, r):
pool = tuple(iterable)
n = len(pool)
for indices in permutations(range(n), r):
if sorted(indices) == list(indices):
yield tuple([pool[i] for i in indices])
from datetime import MINYEAR, MAXYEAR
from datetime import timedelta
from datetime import tzinfo
from datetime import time
from datetime import date, datetime
from monthdelta import MonthDelta, monthmod
pickle_choices = [(pickle, pickle, proto) for proto in range(3)]
assert len(pickle_choices) == 3
# An arbitrary collection of objects of non-datetime types, for testing
# mixed-type comparisons.
OTHERSTUFF = (10, 10, 34.5, "abc", {}, [], ())
class TestMonthDelta(unittest.TestCase):
expectations = (
(date(2006,12,31), MonthDelta(6), date(2007,6,30),date(2006,6,30)),
(date(2007,1,1), MonthDelta(6), date(2007,7,1), date(2006,7,1)),
(date(2007,1,2), MonthDelta(6), date(2007,7,2), date(2006,7,2)),
(date(2006,12,31), MonthDelta(12),date(2007,12,31),date(2005,12,31)),
(date(2007,1,1), MonthDelta(12), date(2008,1,1),date(2006,1,1)),
(date(2007,1,2), MonthDelta(12), date(2008,1,2),date(2006,1,2)),
(date(2006,12,31), MonthDelta(60),date(2011,12,31),date(2001,12,31)),
(date(2007,1,1), MonthDelta(60), date(2012,1,1),date(2002,1,1)),
(date(2007,1,2), MonthDelta(60), date(2012,1,2),date(2002,1,2)),
(date(2006,12,31), MonthDelta(600),date(2056,12,31),date(1956,12,31)),
(date(2007,1,1), MonthDelta(600), date(2057,1,1),date(1957,1,1)),
(date(2007,1,2), MonthDelta(600), date(2057,1,2),date(1957,1,2)),
(date(2007,2,27), MonthDelta(1), date(2007, 3, 27),date(2007,1, 27)),
(date(2007,2,28), MonthDelta(1), date(2007, 3, 28),date(2007,1, 28)),
(date(2007,3,1), MonthDelta(1), date(2007, 4, 1), date(2007, 2, 1)),
(date(2007,3,30), MonthDelta(1), date(2007, 4, 30),date(2007,2, 28)),
(date(2007,3,31), MonthDelta(1), date(2007, 4, 30),date(2007,2, 28)),
(date(2007,4,1), MonthDelta(1), date(2007, 5, 1), date(2007, 3, 1)),
(date(2008,2,27), MonthDelta(1), date(2008, 3, 27),date(2008,1, 27)),
(date(2008,2,28), MonthDelta(1), date(2008, 3, 28),date(2008,1, 28)),
(date(2008,2,29), MonthDelta(1), date(2008, 3, 29),date(2008,1, 29)),
(date(2008,3,1), MonthDelta(1), date(2008, 4, 1), date(2008, 2, 1)),
(date(2008,3,30), MonthDelta(1), date(2008, 4, 30),date(2008,2, 29)),
(date(2008,3,31), MonthDelta(1), date(2008, 4, 30),date(2008,2, 29)),
(date(2008,4,1), MonthDelta(1), date(2008, 5, 1), date(2008, 3, 1)),
(date(2100,2,27), MonthDelta(1), date(2100, 3, 27),date(2100,1, 27)),
(date(2100,2,28), MonthDelta(1), date(2100, 3, 28),date(2100,1, 28)),
(date(2100,3,1), MonthDelta(1), date(2100, 4, 1), date(2100, 2, 1)),
(date(2100,3,30), MonthDelta(1), date(2100, 4, 30),date(2100,2, 28)),
(date(2100,3,31), MonthDelta(1), date(2100, 4, 30),date(2100,2, 28)),
(date(2100,4,1), MonthDelta(1), date(2100, 5, 1), date(2100, 3, 1)),
(date(2000,2,27), MonthDelta(1), date(2000, 3, 27),date(2000,1, 27)),
(date(2000,2,28), MonthDelta(1), date(2000, 3, 28),date(2000,1, 28)),
(date(2000,2,29), MonthDelta(1), date(2000, 3, 29),date(2000,1, 29)),
(date(2000,3,1), MonthDelta(1), date(2000, 4, 1), date(2000, 2, 1)),
(date(2000,3,30), MonthDelta(1), date(2000, 4, 30),date(2000,2, 29)),
(date(2000,3,31), MonthDelta(1), date(2000, 4, 30),date(2000,2, 29)),
(date(2000,4,1), MonthDelta(1), date(2000, 5, 1), date(2000, 3, 1)))
def test_calc(self):
for dt, md, sub, prev in self.expectations:
self.assertEqual(dt + md, sub)
self.assertEqual(dt - md, prev)
def test_math(self):
for x, y in permutations(range(26),2):
self.assertEqual(MonthDelta(x) + MonthDelta(y), MonthDelta(x +y))
self.assertEqual(MonthDelta(x) - MonthDelta(y), MonthDelta(x -y))
self.assertEqual(MonthDelta(x) * y, MonthDelta(x * y))
for x, y in combinations(range(26),2):
self.assertEqual(MonthDelta(x) // y, MonthDelta(x // y))
self.assertEqual(MonthDelta(x) // MonthDelta(y), x // y)
def test_comp(self):
for x, y in combinations(range(26),2):
self.assertTrue(MonthDelta(x) < MonthDelta(y))
self.assertTrue(MonthDelta(x) <= MonthDelta(y))
self.assertTrue(MonthDelta(x) != MonthDelta(y))
self.assertTrue(MonthDelta(y) > MonthDelta(x))
self.assertTrue(MonthDelta(y) >= MonthDelta(x))
for x in range(26):
self.assertTrue(MonthDelta(x) <= MonthDelta(x))
self.assertTrue(MonthDelta(x) == MonthDelta(x))
self.assertTrue(MonthDelta(x) >= MonthDelta(x))
def test_bool(self):
self.assertTrue(MonthDelta())
self.assertTrue(MonthDelta(-1))
self.assertTrue(not MonthDelta(0))
def test_subclass(self):
class M(MonthDelta):
def from_md(md):
return M(md.months)
from_md = staticmethod(from_md)
def as_years(self):
return round(self.months / 12)
m1 = M()
self.assertTrue(type(m1) is M or type(m1) is types.InstanceType)
self.assertEqual(m1.as_years(), 0)
m2 = M(-24)
self.assertTrue(type(m2) is M or type(m2) is types.InstanceType)
self.assertEqual(m2.as_years(), -2)
m3 = m1 + m2
self.assertTrue(type(m3) is MonthDelta or
type(m3) is types.InstanceType)
m4 = M.from_md(m3)
self.assertTrue(type(m4) is M or type(m4) is types.InstanceType)
self.assertEqual(m3.months, m4.months)
self.assertEqual(str(m3), str(m4))
self.assertEqual(m4.as_years(), -2)
def test_str(self):
self.assertEqual(str(MonthDelta()), '1 month')
self.assertEqual(str(MonthDelta(-1)), '-1 month')
self.assertEqual(str(MonthDelta(3)), '3 months')
self.assertEqual(str(MonthDelta(-17)), '-17 months')
def test_pickling(self):
orig = MonthDelta(42)
green = pickle.dumps(orig)
derived = pickle.loads(green)
self.assertEqual(orig, derived)
def test_disallowed(self):
a = MonthDelta(42)
for i in 1, 1.0:
self.assertRaises(TypeError, lambda: a+i)
self.assertRaises(TypeError, lambda: a-i)
self.assertRaises(TypeError, lambda: i+a)
self.assertRaises(TypeError, lambda: i-a)
self.assertRaises(TypeError, lambda: a/i)
self.assertRaises(TypeError, lambda: i/a)
self.assertRaises(TypeError, lambda: a/a)
self.assertRaises(ZeroDivisionError, lambda: a // 0)
def inplace_fail():
b = MonthDelta(12)
b //= MonthDelta(3)
self.assertRaises(TypeError, inplace_fail)
x = 2.3
self.assertRaises(TypeError, lambda: a*x)
self.assertRaises(TypeError, lambda: x*a)
self.assertRaises(TypeError, lambda: a // x)
self.assertRaises(TypeError, lambda: x // a)
class TestMonthMod(unittest.TestCase):
md_zero, td_zero = MonthDelta(0), timedelta(0)
expectations = (
(date(2007,1,1), date(2007,1,1), md_zero, td_zero),
(date(2007,2,28), date(2007,2,28), md_zero, td_zero),
(date(2007,3,1), date(2007,3,1), md_zero, td_zero),
(date(2008,2,28), date(2008,2,28), md_zero, td_zero),
(date(2008,2,29), date(2008,2,29), md_zero, td_zero),
(date(2008,3,1), date(2008,3,1), md_zero, td_zero),
(date(2007,1,1), date(2007,2,27), MonthDelta(1), timedelta(26)),
(date(2007,1,1), date(2007,2,28), MonthDelta(1), timedelta(27)),
(date(2007,1,1), date(2007,3,1), MonthDelta(2), timedelta(0)),
(date(2007,1,1), date(2007,3,30), MonthDelta(2), timedelta(29)),
(date(2007,1,1), date(2007,3,31), MonthDelta(2), timedelta(30)),
(date(2007,1,1), date(2007,4,1), MonthDelta(3), timedelta(0)),
(date(2008,1,1), date(2008,2,27), MonthDelta(1), timedelta(26)),
(date(2008,1,1), date(2008,2,28), MonthDelta(1), timedelta(27)),
(date(2008,1,1), date(2008,2,29), MonthDelta(1), timedelta(28)),
(date(2008,1,1), date(2008,3,1), MonthDelta(2), timedelta(0)),
(date(2008,1,1), date(2008,3,30), MonthDelta(2), timedelta(29)),
(date(2008,1,1), date(2008,3,31), MonthDelta(2), timedelta(30)),
(date(2008,1,1), date(2008,4,1), MonthDelta(3), timedelta(0)),
(date(2006,1,1), date(2007,2,27), MonthDelta(13), timedelta(26)),
(date(2006,1,1), date(2007,2,28), MonthDelta(13), timedelta(27)),
(date(2006,1,1), date(2007,3,1), MonthDelta(14), timedelta(0)),
(date(2006,1,1), date(2007,3,30), MonthDelta(14), timedelta(29)),
(date(2006,1,1), date(2007,3,31), MonthDelta(14), timedelta(30)),
(date(2006,1,1), date(2007,4,1), MonthDelta(15), timedelta(0)),
(date(2006,1,1), date(2008,2,27), MonthDelta(25), timedelta(26)),
(date(2006,1,1), date(2008,2,28), MonthDelta(25), timedelta(27)),
(date(2006,1,1), date(2008,2,29), MonthDelta(25), timedelta(28)),
(date(2006,1,1), date(2008,3,1), MonthDelta(26), timedelta(0)),
(date(2006,1,1), date(2008,3,30), MonthDelta(26), timedelta(29)),
(date(2006,1,1), date(2008,3,31), MonthDelta(26), timedelta(30)),
(date(2006,1,1), date(2008,4,1), MonthDelta(27), timedelta(0)),
(date(2007,2,27), date(2007,1,1), MonthDelta(-2), timedelta(5)),
(date(2007,2,28), date(2007,1,1), MonthDelta(-2), timedelta(4)),
(date(2007,3,1), date(2007,1,1), MonthDelta(-2), timedelta(0)),
(date(2007,3,30), date(2007,1,1), MonthDelta(-3), timedelta(2)),
(date(2007,3,31), date(2007,1,1), MonthDelta(-3), timedelta(1)),
(date(2007,4,1), date(2007,1,1), MonthDelta(-3), timedelta(0)),
(date(2008,2,27), date(2008,1,1), MonthDelta(-2), timedelta(5)),
(date(2008,2,28), date(2008,1,1), MonthDelta(-2), timedelta(4)),
(date(2008,2,29), date(2008,1,1), MonthDelta(-2), timedelta(3)),
(date(2008,3,1), date(2008,1,1), MonthDelta(-2), timedelta(0)),
(date(2008,3,30), date(2008,1,1), MonthDelta(-3), timedelta(2)),
(date(2008,3,31), date(2008,1,1), MonthDelta(-3), timedelta(1)),
(date(2008,4,1), date(2008,1,1), MonthDelta(-3), timedelta(0)),
(date(2007,2,27), date(2006,1,1), MonthDelta(-14), timedelta(5)),
(date(2007,2,28), date(2006,1,1), MonthDelta(-14), timedelta(4)),
(date(2007,3,1), date(2006,1,1), MonthDelta(-14), timedelta(0)),
(date(2007,3,30), date(2006,1,1), MonthDelta(-15), timedelta(2)),
(date(2007,3,31), date(2006,1,1), MonthDelta(-15), timedelta(1)),
(date(2007,4,1), date(2006,1,1), MonthDelta(-15), timedelta(0)),
(date(2008,2,27), date(2006,1,1), MonthDelta(-26), timedelta(5)),
(date(2008,2,28), date(2006,1,1), MonthDelta(-26), timedelta(4)),
(date(2008,2,29), date(2006,1,1), MonthDelta(-26), timedelta(3)),
(date(2008,3,1), date(2006,1,1), MonthDelta(-26), timedelta(0)),
(date(2008,3,30), date(2006,1,1), MonthDelta(-27), timedelta(2)),
(date(2008,3,31), date(2006,1,1), MonthDelta(-27), timedelta(1)),
(date(2008,4,1), date(2006,1,1), MonthDelta(-27), timedelta(0)),
(date.min, date.max-timedelta(365), MonthDelta(119975), timedelta(30)))
def test_calc(self):
for start, end, md, td in self.expectations:
self.assertEqual(monthmod(start, end), (md, td))
self.assertTrue((start > end and md < self.md_zero) or
(start <= end and md >= self.md_zero))
self.assertTrue(td >= self.td_zero)
self.assertTrue(td < end.replace(end.year+end.month//12,
end.month%12+1, 1) -
end.replace(day=1))
def test_invariant(self):
for start, end, md, td in self.expectations:
self.assertEqual(sum(monthmod(start, start + td), start),
start + td)
self.assertEqual(sum(monthmod(end, end + td), end),
end + td)
def test_error_handling(self):
self.assertRaises(TypeError, monthmod, date.min)
self.assertRaises(TypeError, monthmod, 123, 'abc')
self.assertRaises(TypeError, monthmod, end=date.max)
self.assertRaises(TypeError, monthmod, date.min, datetime.max)
self.assertRaises(TypeError, monthmod, datetime.min, date.max)
# perhaps it would be better not to overflow for this, but we rely on
# the addition defined by the type of the arguments
self.assertRaises(OverflowError, monthmod, date.min+timedelta(1),
date.min)
def test_main():
support.run_unittest(TestMonthDelta, TestMonthMod)
if __name__ == "__main__":
test_main()
```
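A couple of representative calls, consistent with the expectation tables above (end-of-month days are clamped, and `monthmod` returns a (MonthDelta, timedelta) remainder pair):

```python
from datetime import date
from monthdelta import MonthDelta, monthmod

print(date(2007, 3, 31) + MonthDelta(1))   # 2007-04-30: day clamped to month end
md, td = monthmod(date(2007, 1, 1), date(2007, 3, 15))
print(md, td)                              # 2 months, 14 days
```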
|
{
"source": "JessBinder/idg-cfde",
"score": 4
}
|
#### File: idg-cfde/data_dictionary/csv_to_md.py
```python
import csv
def csv_to_md(csv_file_path,type_del):
# creating a file with .md extension for the output file
output_file = csv_file_path.replace(".csv", ".md")
# I used encoding UTF-8 as we won't have to worry about errors while decoding contents of a csv file
csv_dict = csv.DictReader(open(csv_file_path, encoding="UTF-8"),delimiter=type_del)
# storing the content of csv file in a list_of_rows. Each row is a dict.
list_of_rows = [dict_row for dict_row in csv_dict]
# For Headers of the csv file.
headers = list(list_of_rows[0].keys())
# The below code block makes md_string as per the required format of a markdown file.
md_string = " | "
for header in headers:
md_string += header+" |"
md_string += "\n |"
for i in range(len(headers)):
md_string += "--- | "
md_string += "\n"
for row in list_of_rows:
md_string += " | "
for header in headers:
md_string += row[header]+" | "
md_string += "\n"
# writing md_string to the output_file
file = open(output_file, "w", encoding="UTF-8")
file.write(md_string)
file.close()
print("The markdown file has been created!!!")
if __name__=="__main__":
csv_file_path = input("Input path of the csv file:")
#takes input of the type of delimiter in the CSV file
type_del=input("Enter the delimiter (e.g. ',' '|' ';'); enter 'space' for a space or 'tab' for a tab: ")
if type_del=="space":
type_del=" "
if type_del=="tab":
type_del="\t"
csv_to_md(csv_file_path,type_del)
```
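For example, given a comma-delimited `fields.csv` (hypothetical name), this writes `fields.md` alongside it:

```python
csv_to_md("fields.csv", ",")
```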
#### File: idg-cfde/python/convert_to_c2m2.py
```python
import os,sys
import datetime
import ipdb,traceback
import json,logging,yaml,re
from pandas.io.sql import read_sql_query
import urllib.request
from c2m2_frictionless import create_datapackage, validate_datapackage, validate_id_namespace_name_uniqueness
from c2m2_frictionless.C2M2_Level_0 import table_schema_specs_for_level_0_c2m2_encoding_of_dcc_metadata as c2m2_level_0
#from c2m2_frictionless.C2M2_Level_1 import table_schema_specs_for_level_1_c2m2_encoding_of_dcc_metadata as c2m2_level_1
from dataclasses import dataclass, asdict
#import codecs
#import shutil
#import csv
#import shelve
#from functools import lru_cache #LRU = least recently used
logging.basicConfig(level=logging.DEBUG)
#cache = shelve.open('.cache', 'c')
def ReadParamFile(fparam):
params={};
with open(fparam, 'r') as fh:
for param in yaml.load_all(fh, Loader=yaml.BaseLoader):
for k,v in param.items():
params[k] = v
return params
def tcrd_Connect(host=None, port=None, user=None, passwd=<PASSWORD>, dbname=None,
paramfile=os.environ['HOME']+"/.tcrd.yaml"):
import mysql.connector as mysql
params = ReadParamFile(paramfile)
if host: params['DBHOST'] = host
if port: params['DBPORT'] = port
if user: params['DBUSR'] = user
if passwd: params['DBPW'] = passwd
if dbname: params['DBNAME'] = dbname
try:
dbcon = mysql.connect(host=params['DBHOST'], port=params['DBPORT'], user=params['DBUSR'], passwd=params['DBPW'], db=params['DBNAME'])
return dbcon
except Exception as e:
logging.error('{}'.format(e))
def tcrd_fetchdata_iter():
sql='''
SELECT
target.id tcrdTargetId,
target.name tcrdTargetName,
target.fam tcrdTargetFamily,
target.tdl TDL,
target.ttype tcrdTargetType,
target.idg idgList,
protein.id tcrdProteinId,
protein.sym tcrdGeneSymbol,
protein.family tcrdProteinFamily,
protein.geneid ncbiGeneId,
protein.uniprot uniprotId,
protein.up_version uniprotVersion,
protein.chr,
protein.description tcrdProteinDescription,
protein.dtoid dtoId,
protein.dtoclass dtoClass,
protein.stringid ensemblProteinId
FROM
target
JOIN
t2tc ON t2tc.target_id = target.id
JOIN
protein ON protein.id = t2tc.protein_id
'''
dbcon = tcrd_Connect()
df = read_sql_query(sql, dbcon)
total = df.shape[0]
logging.info("Targets: {}".format(total))
NMAX=10;
for i in range(total):
#if i>NMAX: break
target = df.iloc[i].to_dict()
yield target
def convert_tcrd_to_c2m2():
''' Construct a set of c2m2 objects corresponding to datasets available from TCRD.
'''
ns = c2m2_level_0.id_namespace(
id='http://www.druggablegenome.net/',
abbreviation='IDG',
name='Illuminating the Druggable Genome',
description='The goal of IDG is to improve our understanding of understudied proteins from the three most commonly drug-targeted protein families: G-protein coupled receptors, ion channels, and protein kinases.',
)
yield ns
#
files = {}
# Process one file per target record
for datum in tcrd_fetchdata_iter():
try:
# register target file if not done so already
if datum['tcrdTargetName'] not in files:
f = c2m2_level_0.file(
id_namespace=ns.id,
id=datum['tcrdTargetId'],
filename=datum['tcrdTargetName'],
size_in_bytes=len(json.dumps(datum)),
#persistent_id=?,
#sha256=?,
#md5=?,
)
files[datum['tcrdTargetName']] = f
yield f
else:
f = files[datum['tcrdTargetName']]
except Exception as e:
logging.error('{}'.format(e))
if __name__ == '__main__':
if len(sys.argv) >= 2:
outdir = sys.argv[1]
else:
logging.error("OUTDIR must be specified.")
sys.exit()
pkg = create_datapackage('C2M2_Level_0', convert_tcrd_to_c2m2(), outdir)
#build_term_tables(outdir) #NA for Level_0?
validate_datapackage(pkg)
validate_id_namespace_name_uniqueness(pkg)
#cache.close()
```
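`tcrd_Connect` pulls its connection settings from `~/.tcrd.yaml` via `ReadParamFile`; a sketch of the expected keys (all values hypothetical), plus the command-line form used by the `__main__` block:

```python
# ~/.tcrd.yaml - flat key/value YAML consumed by tcrd_Connect (values hypothetical):
#   DBHOST: tcrd.example.org
#   DBPORT: 3306
#   DBUSR: tcrd_reader
#   DBPW: secret
#   DBNAME: tcrd
#
# Run with the output directory as the only argument:
#   python convert_to_c2m2.py /path/to/outdir
```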
#### File: idg-cfde/python/export_rss_files.py
```python
import requests
import json
RSS_API_BASE_URL = 'https://rss.ccs.miami.edu/rss-api/'
OUTDIR = '../RSS_JSON'
def run():
target_data = get_target_data()
assert target_data, "Error getting target data: FATAL"
ct = 0
pr_ct = 0
fo_ct = 0
for td in target_data:
ct += 1
if not td['pharosReady']: # Pharos-ready filter
continue
pr_ct += 1
resource_data = get_resource_data(td['id'])
if not resource_data:
print("Error getting resource data for {}: Skipping".format(td['id']))
continue
# Write resource metadata to a file
rid = td['id'].rsplit('/')[-1]
ofn = "{}/{}.json".format(OUTDIR, rid)
with open(ofn, 'w') as ofh:
json.dump(resource_data['data'][0], ofh)
fo_ct += 1
print("Processed {} RSS resources".format(ct))
print(" Got {} Pharos-ready resources".format(pr_ct))
print(" Wrote {} JSON files".format(fo_ct))
def get_target_data():
url = RSS_API_BASE_URL + 'target'
jsondata = None
attempts = 0
resp = requests.get(url, verify=False)
if resp.status_code == 200:
return resp.json()
else:
return False
def get_resource_data(idval):
url = RSS_API_BASE_URL + 'target/id?id=%s'%idval
jsondata = None
attempts = 0
resp = requests.get(url, verify=False)
if resp.status_code == 200:
return resp.json()
else:
return False
if __name__ == '__main__':
run()
```
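Both RSS scripts follow the same two-call pattern: one request for the full target list, then one request per resource id. Run directly, this script writes one JSON file per Pharos-ready resource into ../RSS_JSON:

```python
# Endpoints exercised above:
#   GET https://rss.ccs.miami.edu/rss-api/target          -> list of target records
#   GET https://rss.ccs.miami.edu/rss-api/target/id?id=X  -> metadata for one resource
#
#   python export_rss_files.py
```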
#### File: idg-cfde/python/rss-test.py
```python
import requests
import json
RSS_API_BASE_URL = 'https://rss.ccs.miami.edu/rss-api/'
OUTDIR = '../RSS_TEST'
DEBUG = False
def run():
target_data = get_target_data()
assert target_data, "Error getting target data: FATAL"
ct = 0
pr_ct = 0
fo_ct = 0
rtypes = {}
for td in target_data:
ct += 1
if not td['pharosReady']:
continue
pr_ct += 1
if td['resourceType'] in rtypes:
# for testing, only process one resource of each type
continue
if DEBUG:
print("[DEBUG] Target data item {}: {}".format(ct, td))
resource_data = get_resource_data(td['id'])
if not resource_data:
print("Error getting resource data for {}: Skipping".format(td['id']))
continue
if DEBUG:
print("[DEBUG] Resource_data:")
for key,val in resource_data['data'][0].items():
if DEBUG:
print("[DEBUG] {}: {}".format(key, val))
# Write resource metadata to a file. Files are named <Resource_Type_Test>.json
ofn = "{}/{}_Test.json".format(OUTDIR, td['resourceType'].replace(' ', '_'))
print("Writing JSON file {} for resource {}...".format(ofn, td['id']))
with open(ofn, 'w') as ofh:
json.dump(resource_data['data'][0], ofh)
fo_ct += 1
rtypes[td['resourceType']] = True
print("Processed {} RSS resources".format(ct))
print(" Got {} Pharos-ready resources".format(pr_ct))
print(" Wrote {} output files".format(fo_ct))
def get_target_data():
url = RSS_API_BASE_URL + 'target'
jsondata = None
attempts = 0
resp = requests.get(url, verify=False)
if resp.status_code == 200:
return resp.json()
else:
return False
def get_resource_data(idval):
url = RSS_API_BASE_URL + 'target/id?id=%s'%idval
jsondata = None
attempts = 0
resp = requests.get(url, verify=False)
if resp.status_code == 200:
return resp.json()
else:
return False
if __name__ == '__main__':
run()
```
|
{
"source": "JessBinder/ProteinGraphML",
"score": 3
}
|
#### File: JessBinder/ProteinGraphML/MakeVis.py
```python
import matplotlib.pyplot as plt
import pandas as pd
from collections import Counter
import logging
from ProteinGraphML.Analysis.featureLabel import convertLabels
from ProteinGraphML.Analysis import Visualize
from ProteinGraphML.DataAdapter import OlegDB,selectAsDF,TCRD
from ProteinGraphML.GraphTools import ProteinDiseaseAssociationGraph
import pickle
import argparse
## we construct a base map of protein to disease just by creating the ProteinDiseaseAs
''''
def featureVisualize(features,AUC,TITLE,count=20):
plt.rcParams.update({'font.size': 15,'lines.linewidth': 1000}) # axes.labelweight': 'bold'})
FILETITLE = TITLE
TITLE = TITLE + "-AUC: "+str(AUC)
df = pd.DataFrame(features.most_common(count), columns=['feature', 'gain'])
plt.figure()
df['gain'] = (df['gain']/sum(df['gain'][:count]))
#df['feature'] = df['feature'].map(processFeature)
r = df.head(count).plot( kind='barh',title=TITLE, x='feature', y='gain',color='tomato', legend=False, figsize=(10, 12))
r.set_xlabel('Importance')
r.set_ylabel('Features')
r.invert_yaxis()
r.figure.savefig(FILETITLE+'.png',bbox_inches='tight')
'''
def load_obj(name):
with open(name, 'rb') as f:
return pickle.load(f)
#Get the name of the disease
DEFAULT_GRAPH = "ProteinDisease_GRAPH.pkl"
DBS=['olegdb', 'tcrd']
parser = argparse.ArgumentParser(description='Run ML Procedure')
parser.add_argument('--disease', metavar='disease', required=True, type=str, nargs='?', help='disease name')
#parser.add_argument('--dir', default=dataDir, help='input dir')
parser.add_argument('--db', choices=DBS, default="olegdb", help='{0}'.format(str(DBS)))
parser.add_argument('--featurefile', required=True, help='full path to the pickle feature file')
parser.add_argument('--num', metavar='featureCount', required=True, type=int, nargs='?',help='Number of top features')
parser.add_argument('--kgfile', default=DEFAULT_GRAPH, help='input pickled KG')
logging.info('Generate HTML files for visualization...!!!')
argData = vars(parser.parse_args())
fileName = argData['featurefile']
numOfFeatures = argData['num']
diseaseName = argData['disease']
logging.info('Running visualization using file...{0}'.format(fileName))
#filePath = argData['dir'] + fileName #IMPORTANT: update this if folder name changes
tmpPath = fileName.split('/')[:-1]
filePath = '/'.join(i for i in tmpPath)
#fetch the saved important features
importance = load_obj(fileName)
#importance = Counter({'R-HSA-927802': 0.31735258141642814, 'hsa04740': 0.2208299216149202, 'hsa05100': 0.1847905733996812, 'hsa04930': 0.10625980494746863, 'hsa04514': 0.047493659101048136, 'hsa04114': 0.03542724660274679, 'hsa04810': 0.03365848585388666, 'hsa04144': 0.030556051003490892})
#access the database to get the description of important features
#dbAdapter = OlegDB()
dbAdapter = TCRD() if argData['db'] == "tcrd" else OlegDB()
#labelMap = convertLabels(importance.keys(),dbAdapter,selectAsDF,type='plot')
if True:
currentGraph = ProteinDiseaseAssociationGraph.load(argData['kgfile'])
# for the graph, we need the original importance
for imp in importance.most_common(numOfFeatures):
print(imp)
Visualize(imp, currentGraph.graph, diseaseName, filePath, dbAdapter=dbAdapter) #g,currentGraph.graph,Disease)
#break
#newSet = {}
#for key in importance.keys():
# newSet[labelMap[key]] = importance[key]
#print('STARTING FEAT VIS')
#AUC = 0.9
#print(newSet,labelMap)
#featureVisualize(Counter(newSet),AUC,"AAA")
#Visualize
#convertLabels([343,30001],dbAdapter,selectAsDF,type="han")
```
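A sketch of how this script is invoked (the paths and disease identifier are hypothetical):

```python
# python MakeVis.py --disease MP_0000180 \
#     --featurefile results/MP_0000180_features.pkl \
#     --num 10 --db tcrd --kgfile ProteinDisease_GRAPH.pkl
```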
|
{
"source": "JessBinder/rafm",
"score": 2
}
|
#### File: src/rafm/__main__.py
```python
from typing import Optional
import typer
from . import VERSION
from .common import APP
from .common import STATE
from .plddt import plddt_stats
# global constants
unused_commands = (plddt_stats,)
click_object = typer.main.get_command(APP) # noqa: F841
def version_callback(value: bool) -> None:
"""Print version info."""
if value:
typer.echo(f"{APP.info.name} version {VERSION}")
raise typer.Exit()
VERSION_OPTION = typer.Option(
None,
"--version",
callback=version_callback,
help="Print version string.",
)
@APP.callback()
def set_global_state(
verbose: bool = False,
quiet: bool = False,
version: Optional[bool] = VERSION_OPTION,
) -> None:
"""Set global-state variables."""
if verbose:
STATE["verbose"] = True
STATE["log_level"] = "DEBUG"
elif quiet:
STATE["log_level"] = "ERROR"
unused_state_str = f"{version}" # noqa: F841
def main() -> None:
"""Run the app."""
APP()
```
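How the callback wiring above behaves at the command line (the program name comes from APP.info.name, assumed to be `rafm` here):

```python
# rafm --version        -> version_callback prints "rafm version <VERSION>" and exits
# rafm --verbose <cmd>  -> set_global_state() sets STATE["log_level"] = "DEBUG" first
# rafm --quiet <cmd>    -> set_global_state() sets STATE["log_level"] = "ERROR" first
```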
|
{
"source": "jesscall/EEG2BIDS",
"score": 2
}
|
#### File: EEG2BIDS/python/eeg2bids.py
```python
import os
os.environ['EVENTLET_NO_GREENDNS'] = 'yes'
import eventlet
from eventlet import tpool
import socketio
from python.libs import iEEG
from python.libs.iEEG import ReadError, WriteError, metadata as metadata_fields
from python.libs.Modifier import Modifier
from python.libs import BIDS
from python.libs.loris_api import LorisAPI
import csv
import datetime
import json
# LORIS credentials of user
lorisCredentials = {
'lorisURL': '',
'lorisUsername': '',
'lorisPassword': '',
}
# Create socket listener.
sio = socketio.Server(async_mode='eventlet', cors_allowed_origins=[])
app = socketio.WSGIApp(sio)
# Create Loris API handler.
loris_api = LorisAPI()
@sio.event
def connect(sid, environ):
print('connect: ', sid)
if environ['REMOTE_ADDR'] != '127.0.0.1':
return False # extra precaution.
def tarfile_bids_thread(bids_directory):
iEEG.TarFile(bids_directory)
response = {
'compression_time': 'example_5mins'
}
return eventlet.tpool.Proxy(response)
@sio.event
def tarfile_bids(sid, bids_directory):
response = eventlet.tpool.execute(tarfile_bids_thread, bids_directory)
send = {
'compression_time': response['compression_time']
}
sio.emit('response', send)
@sio.event
def get_participant_data(sid, data):
# TODO: helper to do data validation
if 'candID' not in data or not data['candID']:
return
candidate = loris_api.get_candidate(data['candID'])
sio.emit('participant_data', candidate)
@sio.event
def set_loris_credentials(sid, data):
global lorisCredentials
lorisCredentials = data
if 'lorisURL' not in lorisCredentials:
print('error with credentials:', data)
return
if lorisCredentials['lorisURL'].endswith('/'):
lorisCredentials['lorisURL'] = lorisCredentials['lorisURL'][:-1]
loris_api.url = lorisCredentials['lorisURL'] + '/api/v0.0.4-dev/'
loris_api.username = lorisCredentials['lorisUsername']
loris_api.password = <PASSWORD>['<PASSWORD>']
resp = loris_api.login()
if resp.get('error'):
sio.emit('loris_login_response', {'error': resp.get('error')})
else:
sio.emit('loris_login_response', {
'success': 200,
'lorisUsername': loris_api.username
})
sio.emit('loris_sites', loris_api.get_sites())
sio.emit('loris_projects', loris_api.get_projects())
@sio.event
def get_loris_sites(sid):
sio.emit('loris_sites', loris_api.get_sites())
@sio.event
def get_loris_projects(sid):
sio.emit('loris_projects', loris_api.get_projects())
@sio.event
def get_loris_subprojects(sid, project):
sio.emit('loris_subprojects', loris_api.get_subprojects(project))
@sio.event
def get_loris_visits(sid, subproject):
sio.emit('loris_visits', loris_api.get_visits(subproject))
@sio.event
def create_visit(sid, data):
loris_api.create_visit(data['candID'], data['visit'], data['site'], data['project'], data['subproject'])
loris_api.start_next_stage(data['candID'], data['visit'], data['site'], data['subproject'], data['project'], data['date'])
@sio.event
def create_candidate_and_visit(sid, data):
new_candidate = loris_api.create_candidate(
data['project'],
data['dob'],
data['sex'],
data['site'],
)
if new_candidate['CandID']:
print('create_visit')
loris_api.create_visit(new_candidate['CandID'], data['visit'], data['site'], data['project'],
data['subproject'])
loris_api.start_next_stage(new_candidate['CandID'], data['visit'], data['site'], data['subproject'],
data['project'], data['date'])
print('new_candidate_created')
sio.emit('new_candidate_created', new_candidate)
@sio.event
def get_edf_data(sid, data):
# data = { files: 'EDF files (array of {path, name})' }
print('get_edf_data:', data)
if 'files' not in data or not data['files']:
msg = 'No EDF file selected.'
print(msg)
response = {'error': msg}
sio.emit('edf_data', response)
return
headers = []
try:
for file in data['files']:
anonymize = iEEG.Anonymize(file['path'])
metadata = anonymize.get_header()
year = '20' + str(metadata[0]['year']) if metadata[0]['year'] < 85 else '19' + str(metadata[0]['year'])
date = datetime.datetime(int(year), metadata[0]['month'], metadata[0]['day'], metadata[0]['hour'],
metadata[0]['minute'], metadata[0]['second'])
headers.append({
'file': file,
'metadata': metadata,
'date': str(date)
})
for i in range(1, len(headers)):
if set(headers[i - 1]['metadata'][1]['ch_names']) != set(headers[i]['metadata'][1]['ch_names']):
msg = 'The files selected contain more than one recording.'
print(msg)
response = {
'error': msg,
}
sio.emit('edf_data', response)
return
# sort the recording per date
headers = sorted(headers, key=lambda k: k['date'])
# return the first split metadata and date
response = {
'files': [header['file'] for header in headers],
'subjectID': headers[0]['metadata'][0]['subject_id'],
'recordingID': headers[0]['metadata'][0]['recording_id'],
'date': headers[0]['date']
}
except ReadError as e:
print(e)
response = {
'error': 'Cannot read file - ' + str(e)
}
except Exception as e:
print(e)
response = {
'error': 'Failed to retrieve EDF header information',
}
sio.emit('edf_data', response)
@sio.event
def get_bids_metadata(sid, data):
# data = { file_path: 'path to metadata file' }
print('data:', data)
if 'file_path' not in data or not data['file_path']:
msg = 'No metadata file selected.'
print(msg)
response = {'error': msg}
elif 'modality' not in data or data['modality'] not in ['ieeg', 'eeg']:
msg = 'No valid modality found.'
print(msg)
response = {'error': msg}
else:
try:
with open(data['file_path']) as fd:
try:
metadata = json.load(fd)
empty_values = [k for k in metadata if isinstance(metadata[k], str) and metadata[k].strip() == '']
diff = list(set(metadata.keys()) - set(metadata_fields[data['modality']]) - set(empty_values))
ignored_keys = empty_values + diff
response = {
'metadata': metadata,
'ignored_keys': ignored_keys,
}
except ValueError as e:
print(e)
metadata = {}
response = {
'error': 'Metadata file format is not valid.',
}
except IOError:
msg = "Could not read the metadata file."
print(msg)
response = {
'error': msg,
}
sio.emit('bids_metadata', response)
def edf_to_bids_thread(data):
print('data is ')
print(data)
error_messages = []
if 'edfData' not in data or 'files' not in data['edfData'] or not data['edfData']['files']:
error_messages.append('No .edf file(s) to convert.')
if 'bids_directory' not in data or not data['bids_directory']:
error_messages.append('The BIDS output folder is missing.')
if not data['session']:
error_messages.append('The LORIS Visit Label is missing.')
if not error_messages:
time = iEEG.Time()
data['output_time'] = 'output-' + time.latest_output
try:
iEEG.Converter(data) # EDF to BIDS format.
# store subject_id for Modifier
data['subject_id'] = iEEG.Converter.m_info['subject_id']
Modifier(data) # Modifies data of BIDS format
response = {
'output_time': data['output_time']
}
return eventlet.tpool.Proxy(response)
except ReadError as e:
error_messages.append('Cannot read file - ' + str(e))
except WriteError as e:
error_messages.append('Cannot write file - ' + str(e))
else:
response = {
'error': error_messages
}
return eventlet.tpool.Proxy(response)
@sio.event
def edf_to_bids(sid, data):
# data = { file_paths: [], bids_directory: '', read_only: false,
# event_files: '', line_freq: '', site_id: '', project_id: '',
# sub_project_id: '', session: '', subject_id: ''}
print('edf_to_bids: ', data)
response = eventlet.tpool.execute(edf_to_bids_thread, data)
print(response)
print('Response received!')
sio.emit('bids', response.copy())
@sio.event
def validate_bids(sid, bids_directory):
print('validate_bids: ', bids_directory)
error_messages = []
if not bids_directory:
error_messages.append('The BIDS output directory is missing.')
if not error_messages:
BIDS.Validate(bids_directory)
response = {
'file_paths': BIDS.Validate.file_paths,
'result': BIDS.Validate.result
}
else:
response = {
'error': error_messages
}
sio.emit('response', response)
@sio.event
def disconnect(sid):
print('disconnect: ', sid)
if __name__ == '__main__':
eventlet.wsgi.server(
eventlet.listen(('127.0.0.1', 7301)),
app,
log_output=True
)
```
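A hypothetical client-side sketch using the python-socketio client, exercising the credential event above; the URL and credentials are placeholders:

```python
import socketio

client = socketio.Client()

@client.on('loris_login_response')
def on_login(data):
    print('login response:', data)

client.connect('http://127.0.0.1:7301')  # the server only accepts local connections
client.emit('set_loris_credentials', {
    'lorisURL': 'https://loris.example.org',  # placeholder
    'lorisUsername': 'someuser',              # placeholder
    'lorisPassword': 'somepass',              # placeholder
})
client.sleep(2)  # give the server time to emit responses
client.disconnect()
```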
|
{
"source": "jesscsam/bigmac",
"score": 3
}
|
#### File: bigmac/code/cleandata.py
```python
import pandas as pd
import json
import random
import numpy as np
import warnings; warnings.filterwarnings('ignore')
def clean_data(all_data):
"""
Preprocesses the adult forum user data
"""
all_data = all_data.iloc[: 28831, :]
# drop unnecessary columns
data = all_data.drop(['Location', 'Verification', 'Looking_for', 'Points_Rank', 'Member_since', 'Friends_ID_list'], axis=1)
# specify column datatypes
data['Number_of_advertisments_posted'] = data['Number_of_advertisments_posted'].astype(int)
data['Gender'] = data['Gender'].astype(str)
data['Risk'] = data['Risk'].astype(str)
data['Sexual_orientation'] = data['Sexual_orientation'].astype(str)
data['Sexual_polarity'] = data['Sexual_polarity'].astype(str)
data['Number_of_Comments_in_public_forum'] = data['Number_of_Comments_in_public_forum'].str.replace(' ', '').astype(int)
data['Number_of_offline_meetings_attended'] = data['Number_of_offline_meetings_attended'].astype(int)
data['Profile_pictures'] = data['Profile_pictures'].astype(int)
# fix age data
data['Age'] = data['Age'].apply(lambda x: x.replace(',', '.'))
data['Age'] = data['Age'].replace('???', np.nan)
data['Age'] = data['Age'].astype(float)
data['Age'].fillna(data['Age'].mean(), inplace=True)
# calculate time spent chatting in minutes
data['Time_spent_chating_H:M'] = data['Time_spent_chating_H:M'].str.replace(' ', '')
data['Time_spent_chating_H:M'] = data['Time_spent_chating_H:M'].apply(get_n_minutes)
# rename columns
data = data.rename({'Number_of_Comments_in_public_forum': 'Num_com'}, axis=1)
data = data.rename({'Number_of_advertisments_posted': 'Num_adv'}, axis=1)
data = data.rename({'Number_of_offline_meetings_attended': 'Num_meet'}, axis=1)
data = data.rename({'Time_spent_chating_H:M': 'Chat_time'}, axis=1)
# add age categories
data = add_age_groups(data)
# fake the risk data
data = create_fake_risk(data)
data = data.drop('Risk', axis=1)
data = data.rename({'Risk2':'Risk'},axis=1)
return data
def data_for_sunburst(data):
"""
Creates dataframe to be used to write JSON for sunburst diagram
"""
grouped_df = data.groupby(['Gender', 'Sexual_orientation', 'Sexual_polarity'])
sunburst_df = pd.DataFrame(grouped_df.size().reset_index(name = "Group_Count"))
sunburst_df = sunburst_df.drop(sunburst_df.index[24:])
filename = 'sunjson'
to_sunburst_json(sunburst_df, filename)
def to_sunburst_json(df, filename):
"""
Convert dataframe into nested JSON as in flare files used for D3.js
"""
flare = dict()
d = {"name":"flare", "children": []}
for index, row in df.iterrows():
parent = row[0]
child = row[1]
child2 = row[2]
size = row[3]
# Make a list of keys
key_list = []
for item in d['children']:
key_list.append(item['name'])
#if 'parent' is NOT a key in flare.JSON, append it
if not parent in key_list:
d['children'].append({"name": parent, "children":[{"name": child, "children": [{"name": child2, "size": size}]}]})
else:
check = False
for item in d['children']:
for item2 in item['children']:
if item2['name'] == child and item['name'] == parent:
item2['children'].append({"name": child2, "size": size})
check = True
if item['name'] == parent:
if check == False:
item['children'].append({"name": child, "children":[]})
flare = d
# export the final result to a json file
with open(filename +'.json', 'w') as outfile:
json.dump(flare, outfile, indent=4)
def data_for_piechart(data):
"""
Creates dataframe to be used to write JSON for pie chart
"""
# Group the necessary data
grouped_df = data.groupby(['Gender', 'Sexual_orientation', 'Sexual_polarity', 'Age_group', 'Risk'])
piechart_df = pd.DataFrame(grouped_df.size().reset_index(name = "Group_Count"))
piechart_df = piechart_df.drop(piechart_df.index[456:])
piedict = {}
# for each entry in the grouped dataframe, add it to a dictionary
for index, row in piechart_df.iterrows():
gender = row[0]
sex_or = row[1]
sex_pol = row[2]
age_group = row[3]
risk = row[4]
count = row[5]
if gender not in piedict:
piedict[gender] = {}
if sex_or not in piedict[gender]:
piedict[gender][sex_or] = {}
if sex_pol not in piedict[gender][sex_or]:
piedict[gender][sex_or][sex_pol] = {}
piedict[gender][sex_or][sex_pol]['All'] = []
if age_group not in piedict[gender][sex_or][sex_pol]:
piedict[gender][sex_or][sex_pol][age_group] = []
if risk not in piedict[gender][sex_or][sex_pol][age_group]:
piedict[gender][sex_or][sex_pol][age_group].append({'Risk': risk, 'Count': count})
# Get 'all' option for user characteristics categories
allage_chars_df = pd.DataFrame(data.groupby(['Gender', 'Sexual_orientation', 'Sexual_polarity', 'Risk']).size().reset_index(name = "Count"))
for index, row in allage_chars_df.iterrows():
gender = row[0]
sex_or = row[1]
sex_pol = row[2]
risk = row[3]
count = row[4]
piedict[gender][sex_or][sex_pol]['All'].append({'Risk': risk, 'Count': count})
# Group risks without taking into account age groups
grouped_for_all = pd.DataFrame(data.groupby(['Age_group', 'Risk']).size().reset_index(name = "Count"))
alldict = {}
# For each entry, add it to a dictionary for all age groups
for index, row in grouped_for_all.iterrows():
gender = row[0]
risk = row[1]
count = row[2]
if gender not in alldict:
alldict[gender] = []
if risk not in alldict[gender]:
alldict[gender].append({'Risk': risk, 'Count': count})
# Add data for combined age groups to main dictionary
piedict['All'] = alldict
really_all = []
grouped_really_all = pd.DataFrame(data.groupby('Risk').size().reset_index(name = "Count"))
for index, row in grouped_really_all.iterrows():
risk = row[0]
count = row[1]
if risk not in really_all:
really_all.append({'Risk': risk, 'Count': count})
piedict['ReallyAll'] = really_all
with open('piejson.json', 'w') as outfile:
json.dump(piedict, outfile)
def data_for_barchart(data):
"""
Creates dataframe to be used to write JSON for bar chart
"""
# drop irrelevant columns
data = data.drop(['Sexual_orientation', 'Sexual_polarity', 'User_ID', 'Risk', 'Age'], axis=1)
# group data needed for bar chart
#barchart_df = data.groupby('Age_group').mean()
grouped_df = data.groupby(['Age_group', 'Gender'])
barchart_df = pd.DataFrame(grouped_df.mean().reset_index())
bardict = {}
male = {'Comments': [], 'Chattime': [], 'Ads': [], 'Meets': [], 'Pics': []}
female = {'Comments': [], 'Chattime': [], 'Ads': [], 'Meets': [], 'Pics': []}
all = {'Comments': [], 'Chattime': [], 'Ads': [], 'Meets': [], 'Pics': []}
for index, row in barchart_df.iterrows():
if row.Gender == 'male':
male['Comments'].append({'Group': row.Age_group, 'mean': row.Num_com})
male['Chattime'].append({'Group': row.Age_group, 'mean': row.Chat_time})
male['Ads'].append({'Group': row.Age_group, 'mean': row.Num_adv})
male['Meets'].append({'Group': row.Age_group, 'mean': row.Num_meet})
male['Pics'].append({'Group': row.Age_group, 'mean': row.Profile_pictures})
elif row.Gender == 'female':
female['Comments'].append({'Group': row.Age_group, 'mean': row.Num_com})
female['Chattime'].append({'Group': row.Age_group, 'mean': row.Chat_time})
female['Ads'].append({'Group': row.Age_group, 'mean': row.Num_adv})
female['Meets'].append({'Group': row.Age_group, 'mean': row.Num_meet})
female['Pics'].append({'Group': row.Age_group, 'mean': row.Profile_pictures})
grouped_for_all = pd.DataFrame(data.groupby('Age_group').mean().reset_index())
for index, row in grouped_for_all.iterrows():
all['Comments'].append({'Group': row.Age_group, 'mean': row.Num_com})
all['Chattime'].append({'Group': row.Age_group, 'mean': row.Chat_time})
all['Ads'].append({'Group': row.Age_group, 'mean': row.Num_adv})
all['Meets'].append({'Group': row.Age_group, 'mean': row.Num_meet})
all['Pics'].append({'Group': row.Age_group, 'mean': row.Profile_pictures})
bardict['male'] = male
bardict['female'] = female
bardict['all'] = all
with open('barjson.json', 'w') as outfile:
json.dump(bardict, outfile)
def add_age_groups(data):
"""
Adds age group column to the dataframe
"""
agegroups = []
for row in data.iterrows():
if row[1]['Age'] >= 18 and row[1]['Age'] < 25:
agegroups.append('18-25')
elif row[1]['Age'] >= 25 and row[1]['Age'] < 30:
agegroups.append('25-30')
elif row[1]['Age'] >= 30 and row[1]['Age'] < 40:
agegroups.append('30-40')
elif row[1]['Age'] >= 40 and row[1]['Age'] < 50:
agegroups.append('40-50')
elif row[1]['Age'] >= 50:
agegroups.append('50+')
else:
agegroups.append(None)
data['Age_group'] = agegroups
return data
def create_fake_risk(data):
"""
Since the raw data had almost only 'unknown_risk' values and I wanted to
have my visualisations look cool, I decided to do a Diederik Stapeltje and
make up the data myself. ¯\_(ツ)_/¯
"""
risks = []
# for a specific amount of times, add the different risk values
for i in range(int(len(data) / 7 * 0.5)):
risks.append('High_risk')
for i in range(int(len(data) / 7 * 2)):
risks.append('Low_risk')
for i in range(int(len(data) / 7 * 3.5)):
risks.append('No_risk')
for i in range(int(len(data) / 7 * 1)):
risks.append('unknown_risk')
# add two more to get to the exact len(data)
risks.append('No_risk')
risks.append('Low_risk')
# shuffle the list so it's not in order
random.shuffle(risks)
# add fake risk data to DataFrame
data['Risk2'] = risks
return data
def get_n_minutes(row):
"""
Cleans chat time data from H:M to just minutes
"""
time_components = row.split(':')
if len(time_components) == 2:
return int(time_components[0]) * 60 + int(time_components[1])
elif len(time_components) == 3:
return int(time_components[0]) * 1440 + int(time_components[1]) * 60 + int(time_components[2])
if __name__ == "__main__":
# Load data into dataframe
all_data = pd.read_csv("online_sex_work.csv")
# Clean data
data = clean_data(all_data)
# Create data for visualisations
sundata = data_for_sunburst(data)
piedata = data_for_piechart(data)
bardata = data_for_barchart(data)
```
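Worked examples for `get_n_minutes`: with two fields the string is H:M, and with three fields the first component is multiplied by 1440, i.e. it is treated as days:

```python
print(get_n_minutes("2:30"))     # 150  = 2*60 + 30
print(get_n_minutes("1:02:30"))  # 1590 = 1*1440 + 2*60 + 30
```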
|
{
"source": "Jess-Doit/jess-doit-archive-engine",
"score": 3
}
|
#### File: jdae/src/configmanager.py
```python
import configparser
import importlib.resources as import_resources
class ConfigManager(object):
"""
Simplifies reading settings from user controlled config files
"""
# TODO: Sanatize all user input values. Must verify that things wont be
# broken or manipulated in unintended ways
# Config file names
GEN_CONFIG = "gen_config.ini"
URL_CONFIG = "url_list.ini"
# Resource base paths
AUDIO_RESOURCE = "jdae.sounds"
CONFIG_RESOURCE = "jdae.config"
# General config sections
GC_SETTINGS = "SETTINGS"
def __init__(self):
"""
ConfigManager constructor
"""
# Get paths to ini config files
self.gen_config_path = self._get_config_path(self.GEN_CONFIG)
self.url_config_path = self._get_config_path(self.URL_CONFIG)
# Create config parser and parse general config file
self.parser = configparser.ConfigParser()
self.parser.read(self.gen_config_path)
def _get_config_path(self, filename):
"""
Get path to config file in package
"""
return self._get_path(self.CONFIG_RESOURCE, filename)
def _get_audio_path(self, filename):
"""
Get path of audio file in package
"""
return self._get_path(self.AUDIO_RESOURCE, filename)
def _get_path(self, resource, filename):
"""
Get path of resource
"""
try:
with import_resources.path(resource, filename) as p:
config_path = p.as_posix()
return config_path
except:
return ""
def get_url_list(self):
"""
Returns all urls from url_list.ini
"""
# Read in all lines from config file
with open(self.url_config_path) as f:
url_list = [line.rstrip() for line in f]
# Remove first line "[URL LIST]"
# TODO: find way to use parser/check all lines for valid url first
if len(url_list) > 0:
url_list = url_list[1:]
return url_list
def get_boot_audio(self):
"""
Returns full path to audio file named in general config
"""
# Get file name from config
audio_filename = self.parser[self.GC_SETTINGS]["boot_audio"]
# Resolve path and return
audio_path = self._get_audio_path(audio_filename)
return audio_path
def get_skip_intro(self):
"""
Returns skip intro bool value
"""
val = self.parser[self.GC_SETTINGS]["skip_intro"]
# Convert string to bool value
if val in ["True", "true"]:
return True
return False
def get_output_dir(self):
"""
Returns base directory for archive output
"""
return self.parser[self.GC_SETTINGS]["output_dir"]
def get_archive_freq(self):
"""
Returns the number of seconds to wait between archive runs
"""
runtime = int(float(self.parser[self.GC_SETTINGS]["archive_frequency"]) * 3600)
return runtime
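# Minimal usage sketch (assumes the packaged gen_config.ini/url_list.ini resources exist):
#   cm = ConfigManager()
#   urls = cm.get_url_list()
#   wait_seconds = cm.get_archive_freq()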
```
|
{
"source": "jessdtate/UncertainSCI",
"score": 3
}
|
#### File: UncertainSCI/UncertainSCI/model_examples.py
```python
import numpy as np
from scipy import sparse
from scipy.sparse import linalg as splinalg
import scipy.optimize
def taylor_frequency(p):
"""
Returns ( \\sum_{j=1}^d p_j^j )
"""
return np.sum(p**(1 + np.arange(p.size)))
def sine_modulation(left=-1, right=1, N=100):
"""
For a d-dimensional parameter p, defines the model,
f(x,p) = sin [ pi * ( \\sum_{j=1}^d p_j^j ) * x ],
where x is N equispaced points on the interval [left, right].
Returns a function pointer with the syntax p ----> f(p).
"""
x = np.linspace(left, right, N)
return lambda p: np.sin(np.pi * x * taylor_frequency(p))
def mercer_eigenvalues_exponential_kernel(N, a, b):
"""
For a 1D exponential covariance kernel,
K(s,t) = exp(-|t-s| / a), s, t \\in [-b,b],
computes the first N eigenvalues of the associated Mercer integral
operator.
Precisely, computes the first N/2 positive solutions to both of the following
transcendental equations for w and v:
1 - a v tan(v b) = 0
a w + tan(w b) = 0
The eigenvalues are subsequently defined through these solutions.
Returns (1) the N eigenvalues lamb, (2) the first ceil(N/2) solutions for
v, (3) the first floor(N/2) solutions for w.
"""
assert N > 0 and a > 0 and b > 0
M = int(np.ceil(N/2))
w = np.zeros(M)
v = np.zeros(M)
# First equation transformed:
# vt = v b
#
# -(b/a) / vt + tan(vt) = 0
def f(x):
return -(b/a)/x + np.tan(x)
for n in range(M):
# Compute bracketing interval
# root somewhere in right-hand part of [2*n-1, 2*n+1]*pi/2 interval
RH_value = -1
k = 4
while RH_value < 0:
k += 1
right = (2*n+1)*np.pi/2 - 1/k
RH_value = f(right)
# Root can't be on LHS of interval
if n == 0:
left = 1/k
while f(left) > 0:
k += 1
left = 1/k
else:
left = n*np.pi
v[n] = scipy.optimize.brentq(f, left, right)
v /= b
# Second equation transformed:
# wt = w b
#
# (a/b) wt + tan(wt) = 0
def f(x):
return (a/b)*x + np.tan(x)
for n in range(M):
# Compute bracketing interval
# root somewhere in [2*n+1, 2*n+3]*pi/2
LH_value = 1
k = 4
while LH_value > 0:
k += 1
left = (2*n+1)*np.pi/2 + 1/k
LH_value = f(left)
# Root can't be on RHS of interval
right = (n+1)*np.pi
w[n] = scipy.optimize.brentq(f, left, right)
w /= b
if (N % 2) == 1: # Don't need last root for w
w = w[:-1]
lamb = np.zeros(N)
oddinds = [i for i in range(N) if (i % 2) == 0] # Well, odd for 1-based indexing
lamb[oddinds] = 2*a/(1+(a*v)**2)
eveninds = [i for i in range(N) if (i % 2) == 1] # even for 1-based indexing
lamb[eveninds] = 2*a/(1+(a*w)**2)
return lamb, v, w
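# Illustrative call (hypothetical parameters, not from the original file):
#   lamb, v, w = mercer_eigenvalues_exponential_kernel(N=6, a=1.0, b=1.0)
# returns 6 positive eigenvalues in lamb and 3 transcendental roots in each of v and w.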
def KLE_exponential_covariance_1d(N, a, b, mn):
"""
Returns a pointer to a function the evaluates an N-term Karhunen-Loeve
Expansion of a stochastic process with exponential covariance function on a
bounded interval [-b,b]. Let the GP have the covariance function,
C(s,t) = exp(-|t-s| / a),
and mean function given by mn. Then the N-term KLE of the process is given
by
K_N(x,P) = mn(x) + \\sum_{n=1}^N P_n sqrt(\\lambda_n) \\phi_n(x),
where (lambda_n, phi_n) are the leading eigenpairs of the associated Mercer
kernel. The eigenvalues are computed in
mercer_eigenvalues_exponential_kernel. The (P_n) are iid standard normal
Gaussian random variables.
Returns a function lamb(x,P) that takes in a 1D np.ndarray x and a 1D
np.ndarray vector P and returns the KLE realization on x for that value of
P.
"""
lamb, v, w = mercer_eigenvalues_exponential_kernel(N, a, b)
efuns = N*[None]
for i in range(N):
if (i % 2) == 0:
i2 = int(i/2)
efuns[i] = (lambda i2: lambda x: np.cos(v[i2]*x) / np.sqrt(b + np.sin(2*v[i2]*b)/(2*v[i2])))(i2)
else:
i2 = int((i-1)/2)
efuns[i] = (lambda i2: lambda x: np.sin(w[i2]*x) / np.sqrt(b - np.sin(2*w[i2]*b)/(2*w[i2])))(i2)
def KLE(x, p):
return mn(x) + np.array([np.sqrt(lamb[i])*efuns[i](x) for i in range(N)]).T @ p
return KLE
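# Usage sketch (assumed inputs; np.zeros_like serves as a zero mean function):
#   KLE = KLE_exponential_covariance_1d(4, a=1.0, b=1.0, mn=np.zeros_like)
#   field = KLE(np.linspace(-1, 1, 50), np.random.randn(4))  # one realization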
def laplace_ode_diffusion(x, p):
""" Parameterized diffusion coefficient for 1D ODE
For a d-dimensional parameter p, the diffusion coefficient a(x,p) has the form
a(x,p) = pi^2/5 + sum_{j=1}^d p_j * sin(j*pi*(x+1)/2) / j^2,
which is positive for all x if all values of p lie between [-1,1].
"""
a_val = np.ones(x.shape)*np.pi**2/5
for q in range(p.size):
a_val += p[q] * np.sin((q+1)*np.pi*(x+1)/2)/(q+1)**2
return a_val
def laplace_grid_x(left, right, N):
"""
Computes one-dimensional equispaced grid with N points on the interval
(left, right).
"""
return np.linspace(left, right, N)
def laplace_ode(left=-1., right=1., N=100, f=None, diffusion=laplace_ode_diffusion):
"""
Computes the solution to the ODE:
-d/dx [ a(x,p) d/dx u(x,p) ] = f(x),
with homogeneous Dirichlet boundary conditions at x = left, x = right.
For a d-dimensional parameter p, a(x,p) is the function defined in laplace_ode_diffusion.
Uses an equispaced finite-difference discretization of the ODE.
"""
assert N > 2
if f is None:
def f(x):
return np.pi**2 * np.cos(np.pi*x)
x = laplace_grid_x(left, right, N)
h = x[1] - x[0]
fx = f(x)
# Set homogeneous Dirichlet conditions
fx[0], fx[-1] = 0., 0.
# i+1/2 points
xh = x[:-1] + h/2.
def create_system(p):
nonlocal x, xh, N
a = diffusion(xh, p)
number_nonzeros = 1 + 1 + (N-2)*3
rows = np.zeros(number_nonzeros, dtype=int)
cols = np.zeros(number_nonzeros, dtype=int)
vals = np.zeros(number_nonzeros, dtype=float)
# Set the homogeneous Dirichlet conditions
rows[0], cols[0], vals[0] = 0, 0, 1.
rows[1], cols[1], vals[1] = N-1, N-1, 1.
ind = 2
for q in range(1, N-1):
# Column q-1
rows[ind], cols[ind], vals[ind] = q, q-1, -a[q-1]
ind += 1
# Column q
rows[ind], cols[ind], vals[ind] = q, q, a[q-1] + a[q]
ind += 1
# Column q+1
rows[ind], cols[ind], vals[ind] = q, q+1, -a[q]
ind += 1
A = sparse.csc_matrix((vals, (rows, cols)), shape=(N, N))
return A
def solve_system(p):
nonlocal fx, h
return splinalg.spsolve(create_system(p), fx*(h**2))
return lambda p: solve_system(p)
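# Usage sketch with the default diffusion coefficient (illustrative values):
#   solver = laplace_ode(N=50)
#   u = solver(np.random.rand(3)*2 - 1)  # solution on the 50-point grid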
def laplace_grid_xy(left, right, N1, down, up, N2):
"""
Computes two-dimensional tensorial equispaced grid corresponding to the
tensorization of N1 equispaced points on the interval (left, right) and N2
equispaced points on the interval (down, up).
"""
x = np.linspace(left, right, N1)
y = np.linspace(down, up, N2)
X, Y = np.meshgrid(x, y)
return X.flatten(order='C'), Y.flatten(order='C')
def laplace_pde_diffusion(x, p):
""" Parameterized diffusion coefficient for 2D PDE
For a d-dimensional parameter p, the diffusion coefficient a(x,p) has the form
a(x,p) = pi^2/5 + sum_{j=1}^d p_j * sin(j*pi*(x+1)/2) / j^2,
which is positive for all x if all values of p lie between [-1,1].
"""
a_val = np.ones(x.shape)*np.pi**2/5
for q in range(p.size):
a_val += p[q] * np.sin((q+1)*np.pi*(x+1)/2)/(q+1)**2
return a_val
def genz_oscillatory(w=0., c=None):
"""
Returns a pointer to the "oscillatory" Genz test function defined as
f(p) = \\cos{ 2\\pi w + \\sum_{i=1}^dim c_i p_i }
where p \\in R^d. The default value for w is 0, and that for c is a
d-dimensional vector of ones.
"""
def cos_eval(p):
nonlocal c
if c is None:
c = np.ones(p.size)
return np.cos(2*np.pi*w + np.dot(c, p))
return lambda p: cos_eval(p)
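# Example: with the default c (a vector of ones) and w = 0.25,
#   f = genz_oscillatory(w=0.25)
#   f(np.zeros(3))  # cos(2*pi*0.25 + 0) ~ 0.0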
if __name__ == "__main__":
from matplotlib import pyplot as plt
import scipy as sp
dim = 5
a = 3
b = 1
def mn(x):
return np.zeros(x.shape)
KLE = KLE_exponential_covariance_1d(dim, a, b, mn)
def diffusion(x, p):
return np.exp(KLE(x, p))
left = -1.
right = 1.
N = 1000
model = laplace_ode(left=left, right=right, N=N, diffusion=diffusion)
x = laplace_grid_x(left, right, N)
K = 4
p = K*[None]
u = K*[None]
a = K*[None]
for k in range(K):
p[k] = np.random.rand(dim)*2 - 1
# a[k] = laplace_ode_diffusion(x, p[k])
a[k] = diffusion(x, p[k])
u[k] = model(p[k])
for k in range(K):
plt.subplot(2, K, k+1)
plt.plot(x, a[k], 'r')
plt.title('Diffusion coefficient')
plt.ylim([0, 3.0])
plt.subplot(2, K, k+1+K)
plt.plot(x, u[k])
plt.title('Solution u')
plt.ylim([-5, 5])
M = 1000
U = np.zeros([u[0].size, M])
for m in range(M):
        U[:, m] = model(np.random.rand(dim)*2 - 1)  # fill column m to match U's (u[0].size, M) shape
_, svs, _ = np.linalg.svd(U)
_, r, _ = sp.linalg.qr(U, pivoting=True)
plt.figure()
plt.semilogy(svs[:100], 'r')
plt.semilogy(np.abs(np.diag(r)[:100]), 'b')
plt.legend(["Singular values", "Orthogonalization residuals"])
plt.show()
```
|
{
"source": "jesse1029/caffe-jess",
"score": 3
}
|
#### File: caffe-jess/python/classification.py
```python
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# Make sure that caffe is on the python path:
caffe_root = '../' # this file is expected to be in {caffe_root}/examples
import sys
sys.path.insert(0, caffe_root + 'python')
import caffe
plt.rcParams['figure.figsize'] = (10, 10)
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
import os
if not os.path.isfile(caffe_root + 'models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel'):
print("Downloading pre-trained CaffeNet model...")
#!../scripts/download_model_binary.py ../models/bvlc_reference_caffenet
def vis_square(data, padsize=1, padval=0):
data -= data.min()
data /= data.max()
# force the number of filters to be square
n = int(np.ceil(np.sqrt(data.shape[0])))
padding = ((0, n ** 2 - data.shape[0]), (0, padsize), (0, padsize)) + ((0, 0),) * (data.ndim - 3)
data = np.pad(data, padding, mode='constant', constant_values=(padval, padval))
# tile the filters into an image
data = data.reshape((n, n) + data.shape[1:]).transpose((0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))
data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:])
plt.imshow(data)
    plt.show()
def main():
caffe.set_mode_cpu()
net = caffe.Net(caffe_root + 'models/bvlc_googlenet/deploy.prototxt',
caffe_root + 'models/bvlc_googlenet/bvlc_googlenet.caffemodel',
caffe.TEST)
# input preprocessing: 'data' is the name of the input blob == net.inputs[0]
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
transformer.set_transpose('data', (2,0,1))
transformer.set_mean('data', np.load(caffe_root + 'python/caffe/imagenet/ilsvrc_2012_mean.npy').mean(1).mean(1)) # mean pixel
transformer.set_raw_scale('data', 255) # the reference model operates on images in [0,255] range instead of [0,1]
transformer.set_channel_swap('data', (2,1,0)) # the reference model has channels in BGR order instead of RGB
# set net to batch size of 50
#net.blobs['data'].reshape(50,3,227,227)
# net.blobs['data'].data[...] = transformer.preprocess('data', caffe.io.load_image(caffe_root + 'examples/images/cat.jpg'))
# out = net.forward()
# print("Predicted class is #{}.".format(out['prob'].argmax()))
#plt.imshow(transformer.deprocess('data', net.blobs['data'].data[0]))
# load labels
imagenet_labels_filename = caffe_root + 'data/ilsvrc12/synset_words.txt'
try:
labels = np.loadtxt(imagenet_labels_filename, str, delimiter='\t')
    except IOError:
#!../data/ilsvrc12/get_ilsvrc_aux.sh
labels = np.loadtxt(imagenet_labels_filename, str, delimiter='\t')
    # CPU mode
    net.forward() # call once for allocation (also populates the 'prob' blob)
    #%timeit net.forward()
    # sort top k predictions from softmax output (must run after net.forward())
    top_k = net.blobs['prob'].data[0].flatten().argsort()[-1:-6:-1]
    print(labels[top_k])
# GPU mode
caffe.set_device(0)
caffe.set_mode_gpu()
net.forward() # call once for allocation
#%timeit net.forward()
#feat = net.blobs['pool5'].data[0]
#vis_square(feat, padval=1)
if __name__ == '__main__':
main()
```
|
{
"source": "jesse1029/CCAT-CT-Detection",
"score": 3
}
|
#### File: simclr/modules/logistic_regression.py
```python
import torch.nn as nn
class LogisticRegression(nn.Module):
def __init__(self, n_features, n_classes):
super(LogisticRegression, self).__init__()
self.model = nn.Sequential(
nn.Linear(n_features, n_features//2),
nn.Dropout(0.5),
nn.LeakyReLU(),
nn.Linear(n_features//2, n_features//4),
nn.Dropout(0.5),
nn.LeakyReLU(),
nn.Linear(n_features//4, n_classes))
def forward(self, x):
return self.model(x)
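# Note: despite the name, this head is a small MLP (two hidden layers with
# dropout), not a plain logistic regression. Usage sketch (hypothetical sizes):
#   model = LogisticRegression(n_features=512, n_classes=2)
#   logits = model(torch.randn(8, 512))  # -> shape (8, 2); requires `import torch`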
```
#### File: CCAT-CT-Detection/utils/yaml_config_hook.py
```python
import os
import yaml
def yaml_config_hook(config_file):
"""
    Custom YAML config loader, which can include other yaml files (I like using
    config files instead of argparse)
"""
# load yaml files in the nested 'defaults' section, which include defaults for experiments
with open(config_file, encoding="utf-8") as f:
cfg = yaml.safe_load(f)
for d in cfg.get("defaults", []):
config_dir, cf = d.popitem()
cf = os.path.join(os.path.dirname(config_file), config_dir, cf + ".yaml")
with open(cf) as f:
            loaded = yaml.safe_load(f)
            cfg.update(loaded)
if "defaults" in cfg.keys():
del cfg["defaults"]
return cfg
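# Usage sketch (hypothetical config path):
#   cfg = yaml_config_hook("./config/config.yaml")
#   for k, v in cfg.items():
#       print(k, v)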
```
|
{
"source": "Jesse201147/v2et.cn",
"score": 2
}
|
#### File: v2et.cn/nav/views.py
```python
from django.shortcuts import render,redirect
from django.http import HttpResponse
def nav_home(request):
return render(request,'nav/nav.html',{})
def search(request):
if request.method == 'POST':
data=request.POST
kw=data.get('kw')
if 'baidu' in data:
url = f"https://www.baidu.com/s?wd={kw}"
else:
url = f"https://www.google.com/search?q={kw}"
return redirect(url)
else:
        return HttpResponse('Only POST requests are accepted')
```
#### File: userprofile/utils/email_func.py
```python
from django.conf import settings
import os
import uuid
import hashlib
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
import subprocess
def get_random_str():
uuid_val = uuid.uuid4()
uuid_str = str(uuid_val).encode("utf-8")
md5 = hashlib.md5()
md5.update(uuid_str)
return md5.hexdigest()
def send_register_email(address):
random_str=get_random_str()
link = settings.DOMAIN+'/userprofile/activate/'+random_str
    title = '[v2et.cn] Activate your email account'
    msg=f"""
    Hello!
    <br>
    <br>
    Click the link below to activate your v2et.cn account:
    <br>
    <br>
    {link}
    <br>
    <br>
    To keep your account safe, please click the link within 24 hours. You can also copy the link into your browser's address bar.
    <br>
    <br>
    This email was sent automatically; please do not reply to it.
    """
subprocess.Popen(['python',os.path.join(os.getcwd(),'userprofile','utils','email_func.py'),
title,msg,address])
return random_str
def sendmail(subject, msg, toaddrs, fromaddr, smtpaddr, password):
'''
    @subject: email subject
    @msg: email body
    @toaddrs: recipient email address(es)
    @fromaddr: sender email address
    @smtpaddr: SMTP server address, as listed by your mail provider (e.g. smtp.163.com for 163 Mail)
    @password: <PASSWORD>
'''
if isinstance(toaddrs,str):
toaddrs=[toaddrs,]
mail_msg = MIMEMultipart()
mail_msg['Subject'] = subject
mail_msg['From'] = fromaddr
mail_msg['To'] = ','.join(toaddrs)
mail_msg.attach(MIMEText(msg, 'html', 'utf-8'))
try:
        print('Preparing to send email')
        s = smtplib.SMTP_SSL(host=smtpaddr)
        s.connect(smtpaddr, 465) # connect to the SMTP server
        s.login(fromaddr, password) # log in to the mailbox
        print('Logged in, sending')
        s.sendmail(fromaddr, toaddrs, mail_msg.as_string()) # send the email
        s.quit()
        print('Send complete')
except Exception as e:
print(e)
if __name__ == '__main__':
import sys
fromaddr = "<EMAIL>"
smtpaddr = "smtp.qq.com"
toaddrs = ["<EMAIL>",]
subject = "最新消息"
password = "<PASSWORD>"
msg = "测试"
print(sys.argv)
sendmail(sys.argv[1], sys.argv[2], sys.argv[3], fromaddr, smtpaddr, password)
```
#### File: v2et.cn/v2et/views.py
```python
from django.shortcuts import redirect
def home_page(request):
return redirect('nav:nav_home')
```
|
{
"source": "Jesse3692/async_network",
"score": 3
}
|
#### File: async_network/s2/three.py
```python
import time
import asyncio
import functools
def three():
start = time.time()
# @asyncio.coroutine
async def corowork():
print('[corowork]Start coroutine')
time.sleep(0.1)
print('[corowork]This is a coroutine')
def callback(name, task):
print('[callback] Hello {}'.format(name))
print('[callback] coroutine state: {}'.format(task._state))
loop = asyncio.get_event_loop()
coroutine = corowork()
task = loop.create_task(coroutine)
task.add_done_callback(functools.partial(callback, 'Jesse'))
loop.run_until_complete(task)
end = time.time()
    print('Elapsed time: {:.4f}'.format(end - start))
if __name__ == '__main__':
three()
```
#### File: async_network/s3/async_except_cancel.py
```python
import asyncio
import time
async def work(id, t):
print('Working...')
await asyncio.sleep(t)
# time.sleep(t)
print('Work {} done'.format(id))
def main():
loop = asyncio.get_event_loop()
coroutines = [work(i, i) for i in range(1, 4)]
try:
loop.run_until_complete(asyncio.gather(*coroutines))
except KeyboardInterrupt:
loop.stop()
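        # Sketch of explicit cancellation (not in the original code); on older
        # asyncio versions the pending tasks could be cancelled first, e.g.:
        #   for task in asyncio.Task.all_tasks(loop):
        #       task.cancel()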
finally:
loop.close()
if __name__ == "__main__":
main()
```
|
{
"source": "Jesse3692/flask_note",
"score": 3
}
|
#### File: simple/1.0.2/__config.py
```python
import errno
import json
import os
import types
from werkzeug.utils import import_string
from __compat import string_types, iteritems
class Config(dict):
"""Works exactly like a dict but provides ways to fill it from files
or special dictionaries. There are two common patterns to populate the
config.
Either you can fill the config from a config file::
app.config.from_pyfile('yourconfig.cfg')
Or alternatively you can define the configuration options in the
module that calls :meth:`from_object` or provide an import path to
a module that should be loaded. It is also possible to tell it to
use the same module and with that provide the configuration values
just before the call::
DEBUG = True
SECRET_KEY = 'development key'
app.config.from_object(__name__)
In both cases (loading from any Python file or loading from modules),
only uppercase keys are added to the config. This makes it possible to use
lowercase values in the config file for temporary values that are not added
to the config or to define the config keys in the same file that implements
the application.
Probably the most interesting way to load configurations is from an
environment variable pointing to a file::
app.config.from_envvar('YOURAPPLICATION_SETTINGS')
In this case before launching the application you have to set this
environment variable to the file you want to use. On Linux and OS X
use the export statement::
export YOURAPPLICATION_SETTINGS='/path/to/config/file'
On windows use `set` instead.
:param root_path: path to which files are read relative from. When the
config object is created by the application, this is
the application's :attr:`~flask.Flask.root_path`.
:param defaults: an optional dictionary of default values
"""
def __init__(self, root_path, defaults=None):
dict.__init__(self, defaults or {})
self.root_path = root_path
def from_envvar(self, variable_name, silent=False):
"""Loads a configuration from an environment variable pointing to
a configuration file. This is basically just a shortcut with nicer
error messages for this line of code::
app.config.from_pyfile(os.environ['YOURAPPLICATION_SETTINGS'])
:param variable_name: name of the environment variable
:param silent: set to ``True`` if you want silent failure for missing
files.
:return: bool. ``True`` if able to load config, ``False`` otherwise.
"""
rv = os.environ.get(variable_name) # pylint: disable=invalid-name
if not rv:
if silent:
return False
raise RuntimeError('The environment variable %r is not set '
'and as such configuration could not be '
'loaded. Set this variable and make it '
'point to a configuration file' %
variable_name)
return self.from_pyfile(rv, silent=silent)
def from_pyfile(self, filename, silent=False):
"""Updates the values in the config from a Python file. This function
behaves as if the file was imported as module with the
:meth:`from_object` function.
:param filename: the filename of the config. This can either be an
absolute filename or a filename relative to the
root path.
:param silent: set to ``True`` if you want silent failure for missing
files.
.. versionadded:: 0.7
`silent` parameter.
"""
filename = os.path.join(self.root_path, filename)
d = types.ModuleType('config') # pylint: disable=invalid-name
d.__file__ = filename
try:
with open(filename, mode='rb') as config_file:
# pylint: disable=exec-used
exec(compile(config_file.read(), filename, 'exec'), d.__dict__)
except IOError as io_error:
if silent and io_error.errno in (
errno.ENOENT, errno.EISDIR, errno.ENOTDIR
):
return False
io_error.strerror = 'Unable to load configuration file (%s)' % io_error.strerror
raise
self.from_object(d)
return True
def from_object(self, obj):
"""Updates the values from the given object. An object can be of one
of the following two types:
- a string: in this case the object with that name will be imported
- an actual object reference: that object is used directly
Objects are usually either modules or classes. :meth:`from_object`
loads only the uppercase attributes of the module/class. A ``dict``
object will not work with :meth:`from_object` because the keys of a
``dict`` are not attributes of the ``dict`` class.
Example of module-based configuration::
app.config.from_object('yourapplication.default_config')
from yourapplication import default_config
app.config.from_object(default_config)
You should not use this function to load the actual configuration but
rather configuration defaults. The actual config should be loaded
with :meth:`from_pyfile` and ideally from a location not within the
package because the package might be installed system wide.
See :ref:`config-dev-prod` for an example of class-based configuration
using :meth:`from_object`.
:param obj: an import name or object
"""
if isinstance(obj, string_types):
obj = import_string(obj)
for key in dir(obj):
if key.isupper():
self[key] = getattr(obj, key)
def from_json(self, filename, silent=False):
"""Updates the values in the config from a JSON file. This function
behaves as if the JSON object was a dictionary and passed to the
:meth:`from_mapping` function.
:param filename: the filename of the JSON file. This can either be an
absolute filename or a filename relative to the
root path.
:param silent: set to ``True`` if you want silent failure for missing
files.
.. versionadded:: 0.11
"""
filename = os.path.join(self.root_path, filename)
try:
with open(filename) as json_file:
obj = json.loads(json_file.read())
except IOError as io_error:
if silent and io_error.errno in (errno.ENOENT, errno.EISDIR):
return False
io_error.strerror = 'Unable to load configuration file (%s)' % io_error.strerror
raise
return self.from_mapping(obj)
def from_mapping(self, *mapping, **kwargs):
"""Updates the config like :meth:`update` ignoring items with non-upper
keys.
.. versionadded:: 0.11
"""
mappings = []
if len(mapping) == 1:
if hasattr(mapping[0], 'items'):
mappings.append(mapping[0].items())
else:
mappings.append(mapping[0])
elif len(mapping) > 1:
raise TypeError(
'expected at most 1 positional argument, got %d' % len(mapping)
)
mappings.append(kwargs.items())
for mapping in mappings:
for (key, value) in mapping:
if key.isupper():
self[key] = value
return True
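    # Example: only uppercase keys are kept, so
    #   cfg.from_mapping({'DEBUG': True, 'ignored': 1})
    # stores DEBUG and silently drops 'ignored'.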
def get_namespace(self, namespace, lowercase=True, trim_namespace=True):
"""Returns a dictionary containing a subset of configuration options
that match the specified namespace/prefix. Example usage::
app.config['IMAGE_STORE_TYPE'] = 'fs'
app.config['IMAGE_STORE_PATH'] = '/var/app/images'
app.config['IMAGE_STORE_BASE_URL'] = 'http://img.website.com'
image_store_config = app.config.get_namespace('IMAGE_STORE_')
The resulting dictionary `image_store_config` would look like::
{
'type': 'fs',
'path': '/var/app/images',
'base_url': 'http://img.website.com'
}
This is often useful when configuration options map directly to
keyword arguments in functions or class constructors.
:param namespace: a configuration namespace
:param lowercase: a flag indicating if the keys of the resulting
dictionary should be lowercase
:param trim_namespace: a flag indicating if the keys of the resulting
dictionary should not include the namespace
.. versionadded:: 0.11
"""
rv = {} # pylint: disable=invalid-name
for k, v in iteritems(self): # pylint: disable=invalid-name
if not k.startswith(namespace):
continue
if trim_namespace:
key = k[len(namespace):]
else:
key = k
if lowercase:
key = key.lower()
rv[key] = v
return rv
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, dict.__repr__(self))
class ConfigAttribute(object):
"""Makes an attribute forward to the config"""
def __init__(self, name, get_converter=None):
self.__name__ = name
self.get_converter = get_converter
def __get__(self, obj, type=None): # pylint: disable=redefined-builtin
if obj is None:
return self
rv = obj.config[self.__name__] # pylint: disable=invalid-name
if self.get_converter is not None:
rv = self.get_converter(rv) # pylint: disable=invalid-name
return rv
def __set__(self, obj, value):
obj.config[self.__name__] = value
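# Typical use (as in the Flask class further below):
#   secret_key = ConfigAttribute('SECRET_KEY')
# makes app.secret_key read and write app.config['SECRET_KEY'].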
```
#### File: simple/1.0.2/__flask.py
```python
import os
import sys
from datetime import timedelta
from functools import update_wrapper
from itertools import chain
from threading import Lock
from werkzeug.datastructures import Headers, ImmutableDict
from werkzeug.exceptions import (BadRequest, BadRequestKeyError, HTTPException,
MethodNotAllowed, default_exceptions, InternalServerError)
from werkzeug.routing import Map, RequestRedirect, Rule
import __cli as cli
import __json as _json  # aliased: a bare '__json' reference inside the Flask class body would be name-mangled to '_Flask__json'
from __compat import integer_types, reraise, string_types, text_type
from __config import Config, ConfigAttribute
from __ctx import AppContext, RequestContext, _AppCtxGlobals
from __globals import _request_ctx_stack, g, request, session
from __helpers import (_endpoint_from_view_func, _PackageBoundObject,
find_package, get_debug_flag, get_env,
get_flashed_messages, get_load_dotenv,
locked_cached_property, url_for)
from __logging import create_logger
from __sessions import SecureCookieSessionInterface
from __signals import (appcontext_tearing_down, request_finished,
request_started, request_tearing_down, got_request_exception)
from __templating import (DispatchingJinjaLoader, Environment,
_default_template_ctx_processor)
from __wrappers import Request, Response
# a singleton sentinel value for parameter defaults
_sentinel = object()
def setupmethod(f): # pylint: disable=invalid-name
"""Wraps a method so that it performs a check in debug mode if the
first request was already handled.
"""
def wrapper_func(self, *args, **kwargs):
if self.debug and self._got_first_request: # pylint: disable=protected-access
raise AssertionError('A setup function was called after the '
'first request was handled. This usually indicates a bug '
'in the application where a module was not imported '
'and decorators or other functionality was called too late.\n'
'To fix this make sure to import all your view modules, '
'database models and everything related at a central place '
'before the application starts serving requests.')
return f(self, *args, **kwargs)
return update_wrapper(wrapper_func, f)
class Flask(_PackageBoundObject):
"""Flask"""
response_class = Response
session_interface = SecureCookieSessionInterface()
request_class = Request
app_ctx_globals_class = _AppCtxGlobals
secret_key = ConfigAttribute('SECRET_KEY')
testing = ConfigAttribute('TESTING')
def __init__(
self,
import_name,
static_url_path=None,
static_folder='static',
static_host=None,
host_matching=False,
subdomain_matching=False,
template_folder='templates',
instance_path=None,
instance_relative_config=False,
root_path=None
):
_PackageBoundObject.__init__(
self,
import_name,
template_folder=template_folder,
root_path=root_path
)
if static_url_path is not None:
self.static_url_path = static_url_path
if static_folder is not None:
self.static_folder = static_folder
if instance_path is None:
instance_path = self.auto_find_instance_path()
elif not os.path.isabs(instance_path):
raise ValueError(
'If an instance path is provided it must be absolute.'
' A relative path was given instead.'
)
self.instance_path = instance_path
self.config = self.make_config(instance_relative_config)
self.view_functions = {}
self.error_handler_spec = {}
self.url_build_error_handlers = []
self.before_request_funcs = {}
self.before_first_request_funcs = []
self.after_request_funcs = {}
self.teardown_request_funcs = {}
self.teardown_appcontext_funcs = []
self.url_value_preprocessors = {}
self.url_default_functions = {}
self.template_context_processors = {
None: [_default_template_ctx_processor]
}
self.shell_context_processors = []
self.blueprints = {}
self._blueprint_order = []
self.extensions = {}
self.url_map = Map()
self.url_map.host_matching = host_matching
self.subdomain_matching = subdomain_matching
self._got_first_request = False
self._before_request_lock = Lock()
if self.has_static_folder:
assert bool(
static_host) == host_matching, 'Invalid static_host/host_matching combination'
self.add_url_rule(
self.static_url_path + '/<path:filename>',
endpoint='static',
host=static_host,
view_func=self.send_static_file
)
self.cli = cli.AppGroup(self.name)
default_config = ImmutableDict({
'ENV': None,
'DEBUG': None,
'TESTING': False,
'PROPAGATE_EXCEPTIONS': None,
'PRESERVE_CONTEXT_ON_EXCEPTION': None,
'SECRET_KEY': None,
'PERMANENT_SESSION_LIFETIME': timedelta(days=31),
'USE_X_SENDFILE': False,
'SERVER_NAME': None,
'APPLICATION_ROOT': '/',
'SESSION_COOKIE_NAME': 'session',
'SESSION_COOKIE_DOMAIN': None,
'SESSION_COOKIE_PATH': None,
'SESSION_COOKIE_HTTPONLY': True,
'SESSION_COOKIE_SECURE': False,
'SESSION_COOKIE_SAMESITE': None,
'SESSION_REFRESH_EACH_REQUEST': True,
'MAX_CONTENT_LENGTH': None,
'SEND_FILE_MAX_AGE_DEFAULT': timedelta(hours=12),
'TRAP_BAD_REQUEST_ERRORS': None,
'TRAP_HTTP_EXCEPTIONS': False,
'EXPLAIN_TEMPLATE_LOADING': False,
'PREFERRED_URL_SCHEME': 'http',
'JSON_AS_ASCII': True,
'JSON_SORT_KEYS': True,
'JSONIFY_PRETTYPRINT_REGULAR': False,
'JSONIFY_MIMETYPE': 'application/json',
'TEMPLATES_AUTO_RELOAD': None,
'MAX_COOKIE_SIZE': 4093,
})
url_rule_class = Rule
env = ConfigAttribute('ENV')
jinja_environment = Environment
config_class = Config
jinja_options = ImmutableDict(
extensions=['jinja2.ext.autoescape', 'jinja2.ext.with_']
)
def _get_templates_auto_reload(self):
"""Reload templates when they are changed.
"""
rv = self.config['TEMPLATES_AUTO_RELOAD'] # pylint: disable=invalid-name
return rv if rv is not None else self.debug
def _set_templates_auto_reload(self, value):
self.config['TEMPLATES_AUTO_RELOAD'] = value
templates_auto_reload = property(
_get_templates_auto_reload, _set_templates_auto_reload
)
del _get_templates_auto_reload, _set_templates_auto_reload
def _get_debug(self):
return self.config['DEBUG']
def _set_debug(self, value):
self.config['DEBUG'] = value
self.jinja_env.auto_reload = self.templates_auto_reload
debug = property(_get_debug, _set_debug)
del _get_debug, _set_debug
def auto_find_instance_path(self):
"""Tries to locate the instance path if it was not provided to the
constructor of the application class. It will basically calculate
the path to a folder named ``instance`` next to your main file or
the package.
"""
prefix, package_path = find_package(self.import_name)
if prefix is None:
return os.path.join(package_path, 'instance')
return os.path.join(prefix, 'var', self.name + '-instance')
def make_config(self, instance_relative=False):
"""Used to create the config attribute by the Flask constructor.
The `instance_relative` parameter is passed in from the constructor
of Flask (there named `instance_relative_config`) and indicates if
the config should be relative to the instance path or the root path
of the application.
"""
root_path = self.root_path
if instance_relative:
root_path = self.instance_path
defaults = dict(self.default_config)
defaults['ENV'] = get_env()
defaults['DEBUG'] = get_debug_flag()
return self.config_class(root_path, defaults)
@setupmethod
def add_url_rule(self, rule, endpoint=None, view_func=None,
provide_automatic_options=None, **options):
"""Connects a URL rule. Works exactly like the :meth:`route`
decorator. If a view_func is provided it will be registered with the
endpoint.
"""
if endpoint is None:
endpoint = _endpoint_from_view_func(view_func)
options['endpoint'] = endpoint
methods = options.pop('methods', None)
if methods is None:
methods = getattr(view_func, 'methods', None) or ('GET',)
if isinstance(methods, string_types):
raise TypeError('Allowed methods have to be iterables of strings, '
'for example: @app.route(..., methods=["POST"])')
methods = set(item.upper() for item in methods)
# Methods that should always be added
required_methods = set(getattr(view_func, 'required_methods', ()))
# starting with Flask 0.8 the view_func object can disable and
# force-enable the automatic options handling.
if provide_automatic_options is None:
provide_automatic_options = getattr(view_func,
'provide_automatic_options', None)
if provide_automatic_options is None:
if 'OPTIONS' not in methods:
provide_automatic_options = True
required_methods.add('OPTIONS')
else:
provide_automatic_options = False
# Add the required methods now.
methods |= required_methods
rule = self.url_rule_class(rule, methods=methods, **options)
rule.provide_automatic_options = provide_automatic_options
self.url_map.add(rule)
if view_func is not None:
old_func = self.view_functions.get(endpoint)
if old_func is not None and old_func != view_func:
raise AssertionError('View function mapping is overwriting an '
'existing endpoint function: %s' % endpoint)
self.view_functions[endpoint] = view_func
@locked_cached_property
def name(self):
"""The name of the application. This is usually the import name
with the difference that it's guessed from the run file if the
import name is main. This name is used as a display name when
Flask needs the name of the application. It can be set and overridden
to change the value.
"""
if self.import_name == '__main__':
# pylint: disable=invalid-name
fn = getattr(sys.modules['__main__'], '__file__', None)
if fn is None:
return '__main__'
return os.path.splitext(os.path.basename(fn))[0]
return self.import_name
def route(self, rule, **options):
"""A decorator that is used to register a view function for a
given URL rule. This does the same thing as :meth:`add_url_rule`
but is intended for decorator usage::
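            @app.route('/')
            def index():
                return 'Hello World'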
"""
def decorator(f): # pylint: disable=invalid-name
endpoint = options.pop('endpoint', None)
self.add_url_rule(rule, endpoint, f, **options)
return f
return decorator
# (__name__, self, host, port, debug, load_dotenv, options) ->
# < __flask.Flask object at 0x7f8b5d941048 > None 8080 None True {}
def run(self, host=None, port=None, debug=None,
load_dotenv=True, **options):
"""Runs the application on a local development server.
Do not use ``run()`` in a production setting. It is not intended to
meet security and performance requirements for a production server.
Instead, see :ref:`deployment` for WSGI server recommendations.
If the :attr:`debug` flag is set the server will automatically reload
for code changes and show a debugger in case an exception happened.
If you want to run the application in debug mode, but disable the
code execution on the interactive debugger, you can pass
``use_evalex=False`` as parameter. This will keep the debugger's
traceback screen active, but disable code execution.
It is not recommended to use this function for development with
automatic reloading as this is badly supported. Instead you should
be using the :command:`flask` command line script's ``run`` support.
.. admonition:: Keep in Mind
Flask will suppress any server error with a generic error page
unless it is in debug mode. As such to enable just the
interactive debugger without the code reloading, you have to
invoke :meth:`run` with ``debug=True`` and ``use_reloader=False``.
Setting ``use_debugger`` to ``True`` without being in debug mode
won't catch any exceptions because there won't be any to
catch.
:param host: the hostname to listen on. Set this to ``'0.0.0.0'`` to
have the server available externally as well. Defaults to
``'127.0.0.1'`` or the host in the ``SERVER_NAME`` config variable
if present.
:param port: the port of the webserver. Defaults to ``5000`` or the
port defined in the ``SERVER_NAME`` config variable if present.
:param debug: if given, enable or disable debug mode. See
:attr:`debug`.
:param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv`
files to set environment variables. Will also change the working
directory to the directory containing the first file found.
:param options: the options to be forwarded to the underlying Werkzeug
server. See :func:`werkzeug.serving.run_simple` for more
information.
"""
# Change this into a no-op if the server is invoked from the
# command line. Have a look at cli.py for more information.
# os.environ.get('FLASK_RUN_FROM_CLI') -> None
if os.environ.get('FLASK_RUN_FROM_CLI') == 'true':
from __debughelpers import \
explain_ignored_app_run # pylint: disable=import-outside-toplevel,relative-beyond-top-level
explain_ignored_app_run()
return
if get_load_dotenv(load_dotenv): # FLASK_ENV and FLASK_DEBUG is None here
cli.load_dotenv()
# if set, let env vars override previous values
if 'FLASK_ENV' in os.environ:
self.env = get_env() # pylint: disable=attribute-defined-outside-init
self.debug = get_debug_flag()
elif 'FLASK_DEBUG' in os.environ:
self.debug = get_debug_flag()
# debug passed to method overrides all other sources
if debug is not None:
self.debug = bool(debug)
_host = '127.0.0.1'
_port = 5000
server_name = self.config.get('SERVER_NAME')
sn_host, sn_port = None, None
if server_name:
sn_host, _, sn_port = server_name.partition(':')
host = host or sn_host or _host
port = int(port or sn_port or _port)
options.setdefault('use_reloader', self.debug)
options.setdefault('use_debugger', self.debug)
options.setdefault('threaded', True)
cli.show_server_banner(self.env, self.debug, self.name, False)
from werkzeug.serving import \
run_simple # pylint: disable=import-outside-toplevel
try:
run_simple(host, port, self, **options)
finally:
# reset the first request information if the development server
# reset normally. This makes it possible to restart the server
# without reloader and that stuff from an interactive shell.
self._got_first_request = False
@locked_cached_property
def jinja_env(self):
"""The Jinja2 environment used to load templates."""
return self.create_jinja_environment()
def create_jinja_environment(self):
"""Creates the Jinja2 environment based on :attr:`jinja_options`
and :meth:`select_jinja_autoescape`. Since 0.7 this also adds
the Jinja2 globals and filters after initialization. Override
this function to customize the behavior.
"""
options = dict(
self.jinja_options) # __flask {'extensions': ['jinja2.ext.autoescape', 'jinja2.ext.with_']} # pylint: disable=line-too-long
if 'autoescape' not in options:
# __flask {
# 'extensions': ['jinja2.ext.autoescape', 'jinja2.ext.with_'],
# 'autoescape': <bound method Flask.select_jinja_autoescape of <__flask.Flask object at 0x7f678c254048>> # pylint: disable=line-too-long
# }
options['autoescape'] = self.select_jinja_autoescape
if 'auto_reload' not in options:
# __flask {
# 'extensions': ['jinja2.ext.autoescape', 'jinja2.ext.with_'],
# 'autoescape': <bound method Flask.select_jinja_autoescape of <__flask.Flask object at 0x7ff85ac0f080>>, # pylint: disable=line-too-long
# 'auto_reload': True
# }
options['auto_reload'] = self.templates_auto_reload
# __flask <__templating.Environment object at 0x7f08a39b7320>
# pylint: disable=invalid-name
rv = self.jinja_environment(self, **options)
# __flask rv.globals {
# 'range': <class 'range'>,
# 'dict': <class 'dict'>,
# 'lipsum': <function generate_lorem_ipsum at 0x7f425f9bb400>,
# 'cycler': <class 'jinja2.utils.Cycler'>,
# 'joiner': <class 'jinja2.utils.Joiner'>,
# 'namespace': <class 'jinja2.utils.Namespace'>
# }
rv.globals.update(
url_for=url_for,
get_flashed_messages=get_flashed_messages,
config=self.config,
# request, session and g are normally added with the
# context processor for efficiency reasons but for imported
# templates we also want the proxies in there.
request=request,
session=session,
g=g
)
# __flask {
# 'range': <class 'range'>,
# 'dict': <class 'dict'>,
# 'lipsum': <function generate_lorem_ipsum at 0x7f6f75211400>,
# 'cycler': <class 'jinja2.utils.Cycler'>,
# 'joiner': <class 'jinja2.utils.Joiner'>,
# 'namespace': <class 'jinja2.utils.Namespace'>,
# 'url_for': <function url_for at 0x7f6f75006048>,
# 'get_flashed_messages': <function get_flashed_messages at 0x7f6f75003f28>,
# 'config': <Config {
# 'ENV': 'production',
# 'DEBUG': True,
# 'TESTING': False,
# 'PROPAGATE_EXCEPTIONS': None,
# 'PRESERVE_CONTEXT_ON_EXCEPTION': None,
# 'SECRET_KEY': None,
# 'PERMANENT_SESSION_LIFETIME': datetime.timedelta(31),
# 'USE_X_SENDFILE': False,
# 'SERVER_NAME': None,
# 'APPLICATION_ROOT': '/',
# 'SESSION_COOKIE_NAME': 'session',
# 'SESSION_COOKIE_DOMAIN': None,
# 'SESSION_COOKIE_PATH': None,
# 'SESSION_COOKIE_HTTPONLY': True,
# 'SESSION_COOKIE_SECURE': False,
# 'SESSION_COOKIE_SAMESITE': None,
# 'SESSION_REFRESH_EACH_REQUEST': True,
# 'MAX_CONTENT_LENGTH': None,
# 'SEND_FILE_MAX_AGE_DEFAULT': datetime.timedelta(0, 43200),
# 'TRAP_BAD_REQUEST_ERRORS': None,
# 'TRAP_HTTP_EXCEPTIONS': False,
# 'EXPLAIN_TEMPLATE_LOADING': False,
# 'PREFERRED_URL_SCHEME': 'http',
# 'JSON_AS_ASCII': True,
# 'JSON_SORT_KEYS': True,
# 'JSONIFY_PRETTYPRINT_REGULAR': False,
# 'JSONIFY_MIMETYPE': 'application/json',
# 'TEMPLATES_AUTO_RELOAD': None,
# 'MAX_COOKIE_SIZE': 4093
# }>,
# 'request': <LocalProxy unbound>,
# 'session': <LocalProxy unbound>,
# 'g': <LocalProxy unbound>
# }
        rv.filters['tojson'] = _json.tojson_filter # HACK (uses the module alias to avoid class-level name mangling)
return rv
def select_jinja_autoescape(self, filename):
"""Returns ``True`` if autoescaping should be active for the given
template name. If no template name is given, returns `True`.
.. versionadded:: 0.5
"""
if filename is None:
return True
return filename.endswith(('.html', '.htm', '.xml', '.xhtml'))
def create_global_jinja_loader(self):
"""Creates the loader for the Jinja2 environment. Can be used to
override just the loader and keeping the rest unchanged. It's
discouraged to override this function. Instead one should override
the :meth:`jinja_loader` function instead.
The global loader dispatches between the loaders of the application
and the individual blueprints.
.. versionadded:: 0.7
"""
return DispatchingJinjaLoader(self)
def request_context(self, environ):
"""Create a :class:`~flask.ctx.RequestContext` representing a
WSGI environment. Use a ``with`` block to push the context,
which will make :data:`request` point at this request.
See :doc:`/reqcontext`.
Typically you should not call this from your own code. A request
context is automatically pushed by the :meth:`wsgi_app` when
handling a request. Use :meth:`test_request_context` to create
an environment and context instead of this method.
:param environ: a WSGI environment
"""
return RequestContext(self, environ)
def try_trigger_before_first_request_functions(self):
"""Called before each request and will ensure that it triggers
the :attr:`before_first_request_funcs` and only exactly once per
application instance (which means process usually).
:internal:
"""
if self._got_first_request:
return
with self._before_request_lock:
if self._got_first_request:
return
for func in self.before_first_request_funcs:
func()
self._got_first_request = True
def preprocess_request(self):
"""Called before the request is dispatched. Calls
:attr:`url_value_preprocessors` registered with the app and the
current blueprint (if any). Then calls :attr:`before_request_funcs`
registered with the app and the blueprint.
If any :meth:`before_request` handler returns a non-None value, the
value is handled as if it was the return value from the view, and
further request handling is stopped.
"""
bp = _request_ctx_stack.top.request.blueprint # pylint: disable=invalid-name
funcs = self.url_value_preprocessors.get(None, ())
if bp is not None and bp in self.url_value_preprocessors:
funcs = chain(funcs, self.url_value_preprocessors[bp])
for func in funcs:
func(request.endpoint, request.view_args)
funcs = self.before_request_funcs.get(None, ())
if bp is not None and bp in self.before_request_funcs:
funcs = chain(funcs, self.before_request_funcs[bp])
for func in funcs:
rv = func() # pylint: disable=invalid-name
if rv is not None:
return rv
def raise_routing_exception(self, request): # pylint: disable=redefined-outer-name
"""Exceptions that are recording during routing are reraised with
this method. During debug we are not reraising redirect requests
for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising
a different error instead to help debug situations.
:internal:
"""
if not self.debug \
or not isinstance(request.routing_exception, RequestRedirect) \
or request.method in ('GET', 'HEAD', 'OPTIONS'):
raise request.routing_exception
from __debughelpers import \
FormDataRoutingRedirect # pylint: disable=import-outside-toplevel
raise FormDataRoutingRedirect(request)
def make_default_options_response(self):
"""This method is called to create the default ``OPTIONS`` response.
This can be changed through subclassing to change the default
behavior of ``OPTIONS`` responses.
.. versionadded:: 0.7
"""
adapter = _request_ctx_stack.top.url_adapter
if hasattr(adapter, 'allowed_methods'):
methods = adapter.allowed_methods()
else:
# fallback for Werkzeug < 0.7
methods = []
try:
adapter.match(method='--')
except MethodNotAllowed as e: # pylint: disable=invalid-name
methods = e.valid_methods
except HTTPException as e: # pylint: disable=invalid-name
pass
rv = self.response_class() # pylint: disable=invalid-name
rv.allow.update(methods)
return rv
def dispatch_request(self):
"""Does the request dispatching. Matches the URL and returns the
return value of the view or error handler. This does not have to
be a response object. In order to convert the return value to a
proper response object, call :func:`make_response`.
.. versionchanged:: 0.7
This no longer does the exception handling, this code was
moved to the new :meth:`full_dispatch_request`.
"""
req = _request_ctx_stack.top.request
if req.routing_exception is not None:
self.raise_routing_exception(req)
rule = req.url_rule
# if we provide automatic options for this URL and the
# request came with the OPTIONS method, reply automatically
if getattr(rule, 'provide_automatic_options', False) \
and req.method == 'OPTIONS':
return self.make_default_options_response()
# otherwise dispatch to the handler for that endpoint
return self.view_functions[rule.endpoint](**req.view_args)
def make_response(self, rv): # pylint: disable=invalid-name
"""Convert the return value from a view function to an instance of
:attr:`response_class`.
:param rv: the return value from the view function. The view function
must return a response. Returning ``None``, or the view ending
without returning, is not allowed. The following types are allowed
for ``view_rv``:
``str`` (``unicode`` in Python 2)
A response object is created with the string encoded to UTF-8
as the body.
``bytes`` (``str`` in Python 2)
A response object is created with the bytes as the body.
``tuple``
Either ``(body, status, headers)``, ``(body, status)``, or
``(body, headers)``, where ``body`` is any of the other types
allowed here, ``status`` is a string or an integer, and
``headers`` is a dictionary or a list of ``(key, value)``
tuples. If ``body`` is a :attr:`response_class` instance,
``status`` overwrites the exiting value and ``headers`` are
extended.
:attr:`response_class`
The object is returned unchanged.
other :class:`~werkzeug.wrappers.Response` class
The object is coerced to :attr:`response_class`.
:func:`callable`
The function is called as a WSGI application. The result is
used to create a response object.
.. versionchanged:: 0.9
Previously a tuple was interpreted as the arguments for the
response object.
"""
status = headers = None
# unpack tuple returns
if isinstance(rv, tuple):
len_rv = len(rv)
# a 3-tuple is unpacked directly
if len_rv == 3:
rv, status, headers = rv
# decide if a 2-tuple has status or headers
elif len_rv == 2:
if isinstance(rv[1], (Headers, dict, tuple, list)):
rv, headers = rv
else:
rv, status = rv
# other sized tuples are not allowed
else:
raise TypeError(
'The view function did not return a valid response tuple.'
' The tuple must have the form (body, status, headers),'
' (body, status), or (body, headers).'
)
# the body must not be None
if rv is None:
raise TypeError(
'The view function did not return a valid response. The'
' function either returned None or ended without a return'
' statement.'
)
# make sure the body is an instance of the response class
if not isinstance(rv, self.response_class):
if isinstance(rv, (text_type, bytes, bytearray)):
# let the response class set the status and headers instead of
# waiting to do it manually, so that the class can handle any
# special logic
rv = self.response_class(rv, status=status, headers=headers)
status = headers = None
else:
# evaluate a WSGI callable, or coerce a different response
# class to the correct type
try:
rv = self.response_class.force_type(rv, request.environ)
except TypeError as e: # pylint: disable=invalid-name
new_error = TypeError(
'{e}\nThe view function did not return a valid'
' response. The return type must be a string, tuple,'
' Response instance, or WSGI callable, but it was a'
' {rv.__class__.__name__}.'.format(e=e, rv=rv)
)
reraise(TypeError, new_error, sys.exc_info()[2])
# prefer the status if it was provided
if status is not None:
if isinstance(status, (text_type, bytes, bytearray)):
rv.status = status
else:
rv.status_code = status
# extend existing headers with provided headers
if headers:
rv.headers.extend(headers)
return rv
def process_response(self, response):
"""Can be overridden in order to modify the response object
before it's sent to the WSGI server. By default this will
call all the :meth:`after_request` decorated functions.
.. versionchanged:: 0.5
As of Flask 0.5 the functions registered for after request
execution are called in reverse order of registration.
:param response: a :attr:`response_class` object.
:return: a new response object or the same, has to be an
instance of :attr:`response_class`.
"""
ctx = _request_ctx_stack.top
bp = ctx.request.blueprint # pylint: disable=invalid-name
funcs = ctx._after_request_functions # pylint: disable=protected-access
if bp is not None and bp in self.after_request_funcs:
funcs = chain(funcs, reversed(self.after_request_funcs[bp]))
if None in self.after_request_funcs:
funcs = chain(funcs, reversed(self.after_request_funcs[None]))
for handler in funcs:
response = handler(response)
if not self.session_interface.is_null_session(ctx.session):
self.session_interface.save_session(self, ctx.session, response)
return response
@locked_cached_property
def logger(self):
"""The ``'flask.app'`` logger, a standard Python
:class:`~logging.Logger`.
In debug mode, the logger's :attr:`~logging.Logger.level` will be set
to :data:`~logging.DEBUG`.
If there are no handlers configured, a default handler will be added.
See :ref:`logging` for more information.
.. versionchanged:: 1.0
Behavior was simplified. The logger is always named
``flask.app``. The level is only set during configuration, it
doesn't check ``app.debug`` each time. Only one format is used,
not different ones depending on ``app.debug``. No handlers are
removed, and a handler is only added if no handlers are already
configured.
.. versionadded:: 0.3
"""
return create_logger(self)
def log_exception(self, exc_info):
"""Logs an exception. This is called by :meth:`handle_exception`
if debugging is disabled and right before the handler is called.
The default implementation logs the exception as error on the
:attr:`logger`.
.. versionadded:: 0.8
"""
self.logger.error('Exception on %s [%s]' % ( # pylint: disable=no-member
request.path,
request.method
), exc_info=exc_info)
def finalize_request(self, rv, from_error_handler=False): # pylint: disable=invalid-name
"""Given the return value from a view function this finalizes
the request by converting it into a response and invoking the
postprocessing functions. This is invoked for both normal
request dispatching as well as error handlers.
Because this means that it might be called as a result of a
failure a special safe mode is available which can be enabled
with the `from_error_handler` flag. If enabled, failures in
response processing will be logged and otherwise ignored.
:internal:
"""
response = self.make_response(rv)
try:
response = self.process_response(response)
request_finished.send(self, response=response)
except Exception: # pylint: disable=broad-except
if not from_error_handler:
raise
self.logger.exception('Request finalizing failed with an ' # pylint: disable=no-member
'error while handling an error')
return response
def full_dispatch_request(self):
"""Dispatches the request and on top of that performs request
pre and postprocessing as well as HTTP exception catching and
error handling.
.. versionadded:: 0.7
"""
self.try_trigger_before_first_request_functions()
try:
request_started.send(self)
rv = self.preprocess_request() # pylint: disable=invalid-name
if rv is None:
rv = self.dispatch_request() # pylint: disable=invalid-name
except Exception as e: # pylint: disable=broad-except,invalid-name
rv = self.handle_user_exception(e) # pylint: disable=invalid-name
return self.finalize_request(rv)
def should_ignore_error(self, error): # pylint: disable=unused-argument
"""This is called to figure out if an error should be ignored
or not as far as the teardown system is concerned. If this
function returns ``True`` then the teardown handlers will not be
passed the error.
.. versionadded:: 0.10
"""
return False
@property
def preserve_context_on_exception(self):
"""Returns the value of the ``PRESERVE_CONTEXT_ON_EXCEPTION``
configuration value in case it's set, otherwise a sensible default
is returned.
.. versionadded:: 0.7
"""
rv = self.config['PRESERVE_CONTEXT_ON_EXCEPTION'] # pylint: disable=invalid-name
if rv is not None:
return rv
return self.debug
def create_url_adapter(self, request): # pylint: disable=redefined-outer-name
"""Creates a URL adapter for the given request. The URL adapter
is created at a point where the request context is not yet set
up so the request is passed explicitly.
.. versionadded:: 0.6
.. versionchanged:: 0.9
This can now also be called without a request object when the
URL adapter is created for the application context.
.. versionchanged:: 1.0
:data:`SERVER_NAME` no longer implicitly enables subdomain
matching. Use :attr:`subdomain_matching` instead.
"""
if request is not None:
# If subdomain matching is disabled (the default), use the
# default subdomain in all cases. This should be the default
# in Werkzeug but it currently does not have that feature.
subdomain = ((self.url_map.default_subdomain or None)
if not self.subdomain_matching else None)
return self.url_map.bind_to_environ(
request.environ,
server_name=self.config['SERVER_NAME'],
subdomain=subdomain)
# We need at the very least the server name to be set for this
# to work.
if self.config['SERVER_NAME'] is not None:
return self.url_map.bind(
self.config['SERVER_NAME'],
script_name=self.config['APPLICATION_ROOT'],
url_scheme=self.config['PREFERRED_URL_SCHEME'])
@staticmethod
def _get_exc_class_and_code(exc_class_or_code):
"""Ensure that we register only exceptions as handler keys"""
if isinstance(exc_class_or_code, integer_types):
exc_class = default_exceptions[exc_class_or_code]
else:
exc_class = exc_class_or_code
assert issubclass(exc_class, Exception)
if issubclass(exc_class, HTTPException):
return exc_class, exc_class.code
else:
return exc_class, None
def _find_error_handler(self, e): # pylint: disable=invalid-name
"""Return a registered error handler for an exception in this order:
blueprint handler for a specific code, app handler for a specific code,
blueprint handler for an exception class, app handler for an exception
class, or ``None`` if a suitable handler is not found.
"""
exc_class, code = self._get_exc_class_and_code(type(e))
for name, c in ( # pylint: disable=invalid-name
(request.blueprint, code), (None, code),
(request.blueprint, None), (None, None)
):
handler_map = self.error_handler_spec.setdefault(name, {}).get(c)
if not handler_map:
continue
for cls in exc_class.__mro__:
handler = handler_map.get(cls)
if handler is not None:
return handler
def handle_http_exception(self, e): # pylint: disable=invalid-name
"""Handles an HTTP exception. By default this will invoke the
registered error handlers and fall back to returning the
exception as response.
.. versionadded:: 0.3
"""
# Proxy exceptions don't have error codes. We want to always return
# those unchanged as errors
if e.code is None:
return e
handler = self._find_error_handler(e)
if handler is None:
return e
return handler(e)
def trap_http_exception(self, e): # pylint: disable=invalid-name
"""Checks if an HTTP exception should be trapped or not. By default
this will return ``False`` for all exceptions except for a bad request
key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It
also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``.
This is called for all HTTP exceptions raised by a view function.
If it returns ``True`` for any exception the error handler for this
exception is not called and it shows up as regular exception in the
traceback. This is helpful for debugging implicitly raised HTTP
exceptions.
.. versionchanged:: 1.0
Bad request errors are not trapped by default in debug mode.
.. versionadded:: 0.8
"""
if self.config['TRAP_HTTP_EXCEPTIONS']:
return True
trap_bad_request = self.config['TRAP_BAD_REQUEST_ERRORS']
# if unset, trap key errors in debug mode
if (
trap_bad_request is None and self.debug
and isinstance(e, BadRequestKeyError)
):
return True
if trap_bad_request:
return isinstance(e, BadRequest)
return False
def handle_user_exception(self, e): # pylint: disable=invalid-name
"""This method is called whenever an exception occurs that should be
        handled. A special case is
        :class:`~werkzeug.exceptions.HTTPException`, which is forwarded to
        the :meth:`handle_http_exception` method. This
function will either return a response value or reraise the
exception with the same traceback.
.. versionchanged:: 1.0
        Key errors raised from request data like ``form`` show the bad
key in debug mode rather than a generic bad request message.
.. versionadded:: 0.7
"""
exc_type, exc_value, tb = sys.exc_info() # pylint: disable=invalid-name
assert exc_value is e
# ensure not to trash sys.exc_info() at that point in case someone
# wants the traceback preserved in handle_http_exception. Of course
# we cannot prevent users from trashing it themselves in a custom
# trap_http_exception method so that's their fault then.
# MultiDict passes the key to the exception, but that's ignored
# when generating the response message. Set an informative
# description for key errors in debug mode or when trapping errors.
if (
(self.debug or self.config['TRAP_BAD_REQUEST_ERRORS'])
and isinstance(e, BadRequestKeyError)
# only set it if it's still the default description
and e.description is BadRequestKeyError.description
):
e.description = "KeyError: '{0}'".format(*e.args)
if isinstance(e, HTTPException) and not self.trap_http_exception(e):
return self.handle_http_exception(e)
handler = self._find_error_handler(e)
if handler is None:
reraise(exc_type, exc_value, tb)
return handler(e)
@property
def propagate_exceptions(self):
"""Returns the value of the ``PROPAGATE_EXCEPTIONS`` configuration
value in case it's set, otherwise a sensible default is returned.
.. versionadded:: 0.7
"""
rv = self.config['PROPAGATE_EXCEPTIONS'] # pylint: disable=invalid-name
if rv is not None:
return rv
return self.testing or self.debug
def handle_exception(self, e): # pylint: disable=invalid-name
"""Default exception handling that kicks in when an exception
occurs that is not caught. In debug mode the exception will
be re-raised immediately, otherwise it is logged and the handler
for a 500 internal server error is used. If no such handler
exists, a default 500 internal server error message is displayed.
.. versionadded:: 0.3
"""
exc_type, exc_value, tb = sys.exc_info() # pylint: disable=invalid-name
got_request_exception.send(self, exception=e)
handler = self._find_error_handler(InternalServerError())
if self.propagate_exceptions:
# if we want to repropagate the exception, we can attempt to
# raise it with the whole traceback in case we can do that
# (the function was actually called from the except part)
# otherwise, we just raise the error again
if exc_value is e:
reraise(exc_type, exc_value,
tb) # pylint: disable=invalid-name
else:
raise e
self.log_exception((exc_type, exc_value, tb))
if handler is None:
return InternalServerError()
return self.finalize_request(handler(e), from_error_handler=True)
def wsgi_app(self, environ, start_response):
"""The actual WSGI application. This is not implemented in
:meth:`__call__` so that middlewares can be applied without
losing a reference to the app object. Instead of doing this::
app = MyMiddleware(app)
It's a better idea to do this instead::
app.wsgi_app = MyMiddleware(app.wsgi_app)
Then you still have the original application object around and
can continue to call methods on it.
.. versionchanged:: 0.7
Teardown events for the request and app contexts are called
even if an unhandled error occurs. Other events may not be
called depending on when an error occurs during dispatch.
See :ref:`callbacks-and-errors`.
:param environ: A WSGI environment.
:param start_response: A callable accepting a status code,
a list of headers, and an optional exception context to
start the response.
"""
ctx = self.request_context(environ)
error = None
try:
try:
ctx.push()
response = self.full_dispatch_request()
except Exception as e: # pylint: disable=invalid-name,broad-except
error = e
response = self.handle_exception(e)
except:
error = sys.exc_info()[1]
raise
return response(environ, start_response)
finally:
if self.should_ignore_error(error):
error = None
ctx.auto_pop(error)
def app_context(self):
"""Create an :class:`~flask.ctx.AppContext`. Use as a ``with``
block to push the context, which will make :data:`current_app`
point at this application.
An application context is automatically pushed by
:meth:`RequestContext.push() <flask.ctx.RequestContext.push>`
when handling a request, and when running a CLI command. Use
this to manually create a context outside of these situations.
::
with app.app_context():
init_db()
See :doc:`/appcontext`.
.. versionadded:: 0.9
"""
return AppContext(self)
def do_teardown_appcontext(self, exc=_sentinel):
"""Called right before the application context is popped.
When handling a request, the application context is popped
after the request context. See :meth:`do_teardown_request`.
This calls all functions decorated with
:meth:`teardown_appcontext`. Then the
:data:`appcontext_tearing_down` signal is sent.
This is called by
:meth:`AppContext.pop() <flask.ctx.AppContext.pop>`.
.. versionadded:: 0.9
"""
if exc is _sentinel:
exc = sys.exc_info()[1]
for func in reversed(self.teardown_appcontext_funcs):
func(exc)
appcontext_tearing_down.send(self, exc=exc)
def do_teardown_request(self, exc=_sentinel):
"""Called after the request is dispatched and the response is
returned, right before the request context is popped.
This calls all functions decorated with
:meth:`teardown_request`, and :meth:`Blueprint.teardown_request`
if a blueprint handled the request. Finally, the
:data:`request_tearing_down` signal is sent.
This is called by
:meth:`RequestContext.pop() <flask.ctx.RequestContext.pop>`,
which may be delayed during testing to maintain access to
resources.
:param exc: An unhandled exception raised while dispatching the
request. Detected from the current exception information if
not passed. Passed to each teardown function.
.. versionchanged:: 0.9
Added the ``exc`` argument.
"""
if exc is _sentinel:
exc = sys.exc_info()[1]
funcs = reversed(self.teardown_request_funcs.get(None, ()))
bp = _request_ctx_stack.top.request.blueprint # pylint: disable=invalid-name
if bp is not None and bp in self.teardown_request_funcs:
funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))
for func in funcs:
func(exc)
request_tearing_down.send(self, exc=exc)
def __call__(self, environ, start_response):
"""The WSGI server calls the Flask application object as the
WSGI application. This calls :meth:`wsgi_app` which can be
        wrapped to apply middleware."""
return self.wsgi_app(environ, start_response)
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.name,
)
```
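The application code above leaves two extension points: error handlers that `_find_error_handler` looks up, and `wsgi_app` as the middleware seam. A minimal usage sketch against the public Flask API (not part of this file; `LoggingMiddleware` and `not_found` are illustrative names):
```python
# Hypothetical sketch; nothing here is from the source file above.
from flask import Flask, jsonify

app = Flask(__name__)

@app.errorhandler(404)
def not_found(e):
    # handle_http_exception() dispatches here instead of returning the
    # raw HTTPException as the response body.
    return jsonify(error=str(e)), 404

class LoggingMiddleware:
    """Tiny WSGI middleware that prints each request path."""
    def __init__(self, wsgi_app):
        self.wsgi_app = wsgi_app

    def __call__(self, environ, start_response):
        print(environ.get('PATH_INFO'))
        return self.wsgi_app(environ, start_response)

# Wrap wsgi_app rather than the app object, as the wsgi_app docstring
# above recommends, so methods on `app` stay reachable.
app.wsgi_app = LoggingMiddleware(app.wsgi_app)
```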
#### File: simple/1.0.2/__templating.py
```python
from jinja2 import Environment as BaseEnvironment, TemplateNotFound, BaseLoader
from __globals import _request_ctx_stack, _app_ctx_stack
class Environment(BaseEnvironment):
"""Works like a regular Jinja2 environment but has some additional
    knowledge of how Flask's blueprints work so that it can prepend the
name of the blueprint to referenced templates if necessary.
"""
def __init__(self, app, **options):
if 'loader' not in options:
options['loader'] = app.create_global_jinja_loader()
BaseEnvironment.__init__(self, **options)
self.app = app
class DispatchingJinjaLoader(BaseLoader):
"""A loader that looks for templates in the application and all
the blueprint folders.
"""
def __init__(self, app):
self.app = app
def get_source(self, environment, template):
if self.app.config['EXPLAIN_TEMPLATE_LOADING']:
return self._get_source_explained(environment, template)
return self._get_source_fast(environment, template)
def _get_source_explained(self, environment, template):
attempts = []
trv = None
for srcobj, loader in self._iter_loaders(template):
try:
# pylint: disable=invalid-name
rv = loader.get_source(
environment, template)
if trv is None:
trv = rv
except TemplateNotFound:
rv = None # pylint: disable=invalid-name
attempts.append((loader, srcobj, rv))
from __debughelpers import explain_template_loading_attempts # pylint: disable=relative-beyond-top-level,import-outside-toplevel
explain_template_loading_attempts(self.app, template, attempts)
if trv is not None:
return trv
raise TemplateNotFound(template)
def _get_source_fast(self, environment, template):
for srcobj, loader in self._iter_loaders(template): # pylint: disable=unused-variable
try:
return loader.get_source(environment, template)
except TemplateNotFound:
continue
raise TemplateNotFound(template)
def _iter_loaders(self, template): # pylint: disable=unused-argument
loader = self.app.jinja_loader
if loader is not None:
yield self.app, loader
for blueprint in self.app.iter_blueprints():
loader = blueprint.jinja_loader
if loader is not None:
yield blueprint, loader
def list_templates(self):
result = set()
loader = self.app.jinja_loader
if loader is not None:
result.update(loader.list_templates())
for blueprint in self.app.iter_blueprints():
loader = blueprint.jinja_loader
if loader is not None:
for template in loader.list_templates():
result.add(template)
return list(result)
def _default_template_ctx_processor():
"""Default template context processor. Injects `request`,
`session` and `g`.
"""
reqctx = _request_ctx_stack.top
appctx = _app_ctx_stack.top
rv = {} # pylint: disable=invalid-name
if appctx is not None:
rv['g'] = appctx.g
if reqctx is not None:
rv['request'] = reqctx.request
rv['session'] = reqctx.session
return rv
```
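Because `_default_template_ctx_processor` injects `request`, `session`, and `g`, templates can reference them without being passed explicitly. A hedged sketch using the public Flask API (values are illustrative):
```python
from flask import Flask, g, render_template_string

app = Flask(__name__)

# test_request_context() pushes both an app and a request context, so the
# default context processor can inject `request` and `g` into the template.
with app.test_request_context('/demo'):
    g.user = 'alice'
    print(render_template_string('{{ request.path }} for {{ g.user }}'))
    # -> "/demo for alice"
```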
|
{
"source": "Jesse3692/Python-High-Performance-Second-Edition",
"score": 3
}
|
#### File: Python-High-Performance-Second-Edition/Chapter01/simul.py
```python
from matplotlib import pyplot as plt
from matplotlib import animation
from random import uniform
import timeit
class Particle:
__slots__ = ('x', 'y', 'ang_speed')
def __init__(self, x, y, ang_speed):
self.x = x
self.y = y
self.ang_speed = ang_speed
class ParticleSimulator:
def __init__(self, particles):
self.particles = particles
def evolve(self, dt):
timestep = 0.00001
nsteps = int(dt/timestep)
for i in range(nsteps):
for p in self.particles:
norm = (p.x**2 + p.y**2)**0.5
v_x = (-p.y)/norm
v_y = p.x/norm
d_x = timestep * p.ang_speed * v_x
d_y = timestep * p.ang_speed * v_y
p.x += d_x
p.y += d_y
# def evolve(self, dt):
# timestep = 0.00001
# nsteps = int(dt/timestep)
# # First, change the loop order
# for p in self.particles:
# t_x_ang = timestep * p.ang_speed
# for i in range(nsteps):
# norm = (p.x**2 + p.y**2)**0.5
# p.x, p.y = p.x - t_x_ang*p.y/norm, p.y + t_x_ang * p.x/norm
def visualize(simulator):
X = [p.x for p in simulator.particles]
Y = [p.y for p in simulator.particles]
fig = plt.figure()
ax = plt.subplot(111, aspect='equal')
line, = ax.plot(X, Y, 'ro')
# Axis limits
plt.xlim(-1, 1)
plt.ylim(-1, 1)
# It will be run when the animation starts
def init():
line.set_data([], [])
return line,
def animate(i):
        # We let the particles evolve for 0.01 time units
simulator.evolve(0.01)
X = [p.x for p in simulator.particles]
Y = [p.y for p in simulator.particles]
line.set_data(X, Y)
return line,
    # Call the animate function every 10 ms
anim = animation.FuncAnimation(fig,
animate,
init_func=init,
blit=True,
interval=10)
plt.show()
def test_visualize():
particles = [Particle( 0.3, 0.5, +1),
Particle( 0.0, -0.5, -1),
Particle(-0.1, -0.4, +3)]
simulator = ParticleSimulator(particles)
visualize(simulator)
def test_evolve():
particles = [Particle( 0.3, 0.5, +1),
Particle( 0.0, -0.5, -1),
Particle(-0.1, -0.4, +3)]
simulator = ParticleSimulator(particles)
simulator.evolve(0.1)
p0, p1, p2 = particles
def fequal(a, b):
return abs(a - b) < 1e-5
assert fequal(p0.x, 0.2102698450356825)
assert fequal(p0.y, 0.5438635787296997)
assert fequal(p1.x, -0.0993347660567358)
assert fequal(p1.y, -0.4900342888538049)
assert fequal(p2.x, 0.1913585038252641)
assert fequal(p2.y, -0.3652272210744360)
def benchmark():
particles = [Particle(uniform(-1.0, 1.0),
uniform(-1.0, 1.0),
uniform(-1.0, 1.0))
for i in range(100)]
simulator = ParticleSimulator(particles)
simulator.evolve(0.1)
def timing():
result = timeit.timeit('benchmark()',
setup='from __main__ import benchmark',
number=10)
# Result is the time it takes to run the whole loop
print(result)
result = timeit.repeat('benchmark()',
setup='from __main__ import benchmark',
number=10,
repeat=3)
# Result is a list of times
print(result)
def benchmark_memory():
particles = [Particle(uniform(-1.0, 1.0),
uniform(-1.0, 1.0),
uniform(-1.0, 1.0))
for i in range(100000)]
simulator = ParticleSimulator(particles)
simulator.evolve(0.001)
if __name__ == '__main__':
benchmark()
```
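`timing()` above passes the benchmark as strings; `timeit.Timer` also accepts a callable directly, which avoids the string-based `setup`. A small alternative sketch (assumes `simul.py` is importable; this is not part of the original file):
```python
import timeit

from simul import benchmark  # assumption: simul.py is on sys.path

timer = timeit.Timer(benchmark)
# repeat() returns one total per run; min() is the usual best-case estimate
print(min(timer.repeat(repeat=3, number=10)))
```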
#### File: Python-High-Performance-Second-Edition/Chapter07/processes.py
```python
import multiprocessing
import time
class Process(multiprocessing.Process):
def __init__(self, id):
super(Process, self).__init__()
self.id = id
def run(self):
time.sleep(1)
print("I'm the process with id: {}".format(self.id))
def square(x):
return x * x
def map_test():
pool = multiprocessing.Pool()
inputs = [0, 1, 2, 3, 4]
outputs = pool.map(square, inputs)
print(outputs)
outputs_async = pool.map_async(square, inputs)
outputs = outputs_async.get()
print(outputs)
if __name__ == '__main__':
processes = Process(1), Process(2), Process(3), Process(4)
[p.start() for p in processes]
map_test()
```
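The `__main__` block above starts the workers but never joins them. A hedged companion sketch showing a pool used as a context manager and `apply_async` (the single-call analogue of `map_async`); `square` is redefined so the snippet runs standalone:
```python
import multiprocessing

def square(x):
    return x * x

if __name__ == '__main__':
    with multiprocessing.Pool(processes=4) as pool:
        async_result = pool.apply_async(square, (10,))  # one task, non-blocking
        print(async_result.get(timeout=1))              # -> 100
```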
|
{
"source": "Jesse3692/python_note",
"score": 3
}
|
#### File: python_note/database/utils.py
```python
def printer(func):
"""打印函数的返回值
Args:
func ([type]): [description]
"""
def inner(*args, **kwargs):
print(func(*args, **kwargs))
return func(*args, **kwargs)
return inner
```
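A hypothetical usage sketch for the decorator above (`add` is an illustrative function, not from the repo):
```python
@printer
def add(a, b):
    return a + b

total = add(2, 3)  # prints 5 once; total == 5
```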
#### File: examples/route_guide_simple/route_guide_server.py
```python
from concurrent import futures
import time
import math
import logging
import grpc
import route_guide_pb2
import route_guide_pb2_grpc
import route_guide_resources
def get_feature(feature_db, point):
"""Returns Feature at given location or None."""
for feature in feature_db:
if feature.location == point:
return feature
return None
class RouteGuideServicer(route_guide_pb2_grpc.RouteGuideServicer):
"""Provides methods that implement functionality of route guide server."""
def __init__(self):
self.db = route_guide_resources.read_route_guide_database()
def GetFeature(self, request, context):
feature = get_feature(self.db, request)
if feature is None:
return route_guide_pb2.Feature(name="", location=request)
else:
return feature
def serve():
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
route_guide_pb2_grpc.add_RouteGuideServicer_to_server(
RouteGuideServicer(), server)
server.add_insecure_port('[::]:50051')
server.start()
server.wait_for_termination()
if __name__ == '__main__':
logging.basicConfig()
serve()
```
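A minimal matching client, following the standard gRPC route_guide example; it assumes the same generated `route_guide_pb2*` modules are importable:
```python
import grpc

import route_guide_pb2
import route_guide_pb2_grpc

def run():
    with grpc.insecure_channel('localhost:50051') as channel:
        stub = route_guide_pb2_grpc.RouteGuideStub(channel)
        point = route_guide_pb2.Point(latitude=409146138, longitude=-746188906)
        feature = stub.GetFeature(point)  # served by GetFeature() above
        print(feature.name or 'no feature found at this point')

if __name__ == '__main__':
    run()
```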
|
{
"source": "Jesse3692/ttskit",
"score": 2
}
|
#### File: ttskit/ttskit/http_server.py
```python
from pathlib import Path
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(Path(__file__).stem)
import sys
if not sys.platform.startswith('win'):
from gevent import monkey
monkey.patch_all()
import os
from multiprocessing import Process
from flask import Flask, request, render_template, Response
import argparse
from gevent import pywsgi as wsgi
import yaml
def set_args():
"""设置所需参数"""
parser = argparse.ArgumentParser()
parser.add_argument('--device', default='_', type=str, help='设置预测时使用的显卡,使用CPU设置成_即可')
parser.add_argument('--host', type=str, default="0.0.0.0", help='IP地址')
parser.add_argument('--port', type=int, default=9000, help='端口号')
parser.add_argument('--processes', type=int, default=1, help='进程数')
return parser.parse_args()
def start_sever():
"""部署网页服务。"""
args = set_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.device
from . import sdk_api
app = Flask(__name__)
@app.route('/')
def index():
return 'hello' # "这是语言合成工具箱网页服务"
@app.route('/ttskit', methods=['GET', 'POST'])
def response_request():
if request.method == 'POST':
content = request.form.get('content')
title = request.form.get('title')
return render_template("index.html")
        content = '欢迎使用语音合成工具箱,请输入需要合成的文本。'  # "Welcome to the speech synthesis toolbox; please enter the text to synthesize."
title = 'format: yaml\nmode: mspk\naudio: 14\nspeaker: Aiyue\nvocoder: melgan\n'
return render_template("index.html", content=content, title=title)
@app.route('/synthesize', methods=['GET', 'POST'])
def synthesize():
if request.method == 'GET':
text = request.args.get('text')
kwargs_str = request.args.get('kwargs')
            kwargs = yaml.safe_load(kwargs_str)  # safe_load: yaml.load without a Loader is deprecated and unsafe
# kwargs['processes'] = 1
wav = sdk_api.tts_sdk(text=text, **kwargs)
return Response(wav, mimetype='audio/wav')
logger.info(f'Http server: http://{args.host}:{args.port}/ttskit'.replace('0.0.0.0', 'localhost'))
server = wsgi.WSGIServer((args.host, args.port), app)
def serve_forever(server):
server.start_accepting()
server._stop_event.wait()
if args.processes == 1:
server.serve_forever()
elif args.processes >= 2:
server.start()
for i in range(args.processes):
p = Process(target=serve_forever, args=(server,))
p.start()
else:
logger.info('Please start http server!')
return server
def serve_forever(server):
server.start_accepting()
server._stop_event.wait()
if __name__ == '__main__':
server = start_sever()
# 单进程
# server.serve_forever()
# 多进程
# server.start()
# for i in range(6):
# # Process(target=serve_forever, args=(server,)).start()
# p = Process(target=serve_forever, args=(server,))
# p.start()
```
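A hedged client sketch for the `/synthesize` endpoint above; the `kwargs` string mirrors the yaml format shown in the page's default `title`, and all values are illustrative:
```python
import requests

resp = requests.get(
    'http://localhost:9000/synthesize',
    params={'text': 'hello', 'kwargs': 'audio: 14\nspeaker: Aiyue\nvocoder: melgan\n'},
)
resp.raise_for_status()
with open('out.wav', 'wb') as f:
    f.write(resp.content)  # the server responds with audio/wav bytes
```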
#### File: melgan/datasets/dataloader.py
```python
import os
import glob
import torch
import random
import numpy as np
from torch.utils.data import Dataset, DataLoader
from ..utils.utils import read_wav_np, load_wav
from ..utils.stft import TacotronSTFT
def create_dataloader(hp, args, train):
dataset = MelFromDisk(hp, args, train)
if train:
return DataLoader(dataset=dataset, batch_size=hp.train.batch_size, shuffle=True,
num_workers=hp.train.num_workers, pin_memory=True, drop_last=True)
else:
        # just sample a single random batch for validation
dataset.wav_list = np.random.choice(dataset.wav_list, hp.train.batch_size)
return DataLoader(dataset=dataset, batch_size=1, shuffle=False,
num_workers=hp.train.num_workers, pin_memory=True, drop_last=False)
class MelFromDisk(Dataset):
def __init__(self, hp, args, train):
self.hp = hp
self.args = args
self.train = train
self.path = hp.data.train if train else hp.data.validation
# self.wav_list = glob.glob(os.path.join(self.path, '**', '*.wav'), recursive=True)
self.load_data()
self.mel_segment_length = hp.audio.segment_length // hp.audio.hop_length + 2
self.mapping = [i for i in range(len(self.wav_list))]
self.stft = TacotronSTFT(filter_length=hp.audio.filter_length,
hop_length=hp.audio.hop_length,
win_length=hp.audio.win_length,
n_mel_channels=hp.audio.n_mel_channels,
sampling_rate=hp.audio.sampling_rate,
mel_fmin=hp.audio.mel_fmin,
mel_fmax=hp.audio.mel_fmax)
def load_data(self):
"""导入语音数据。"""
curdir = os.path.dirname(os.path.abspath(self.path))
filepaths = []
with open(self.path, encoding='utf-8') as f:
for line in f:
tmp = line.strip().split('\t')
if len(tmp) == 2:
tmp.append('0')
fpath = os.path.join(curdir, tmp[0])
filepaths.append(fpath)
self.wav_list = filepaths
def __len__(self):
return len(self.wav_list)
def __getitem__(self, idx):
if self.train:
idx1 = idx
idx2 = self.mapping[idx1]
return self.my_getitem(idx1), self.my_getitem(idx2)
else:
return self.my_getitem(idx)
def shuffle_mapping(self):
random.shuffle(self.mapping)
def my_getitem(self, idx):
wavpath = self.wav_list[idx]
# melpath = wavpath.replace('.wav', '.mel')
# sr, audio = read_wav_np(wavpath)
sr, audio = load_wav(wavpath, self.hp.audio.sampling_rate)
if len(audio) < self.hp.audio.segment_length + self.hp.audio.pad_short:
audio = np.pad(audio, (0, self.hp.audio.segment_length + self.hp.audio.pad_short - len(audio)),
mode='constant', constant_values=0.0)
audio = torch.from_numpy(audio).unsqueeze(0)
# mel = torch.load(melpath).squeeze(0)
mel = self.stft.mel_spectrogram(audio).squeeze(0)
if self.train:
max_mel_start = mel.size(1) - self.mel_segment_length
mel_start = random.randint(0, max_mel_start)
mel_end = mel_start + self.mel_segment_length
mel = mel[:, mel_start:mel_end]
audio_start = mel_start * self.hp.audio.hop_length
audio = audio[:, audio_start:audio_start + self.hp.audio.segment_length]
audio = audio + (1 / 32768) * torch.randn_like(audio)
return mel, audio
```
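The crop in `my_getitem` keeps the mel and waveform views aligned because one mel frame corresponds to `hop_length` audio samples. A standalone sketch of just that arithmetic (tensor sizes are illustrative stand-ins):
```python
import random
import torch

hop_length, segment_length = 256, 16000
mel = torch.randn(80, 400)                # fake (n_mels, frames) spectrogram
audio = torch.randn(1, 400 * hop_length)  # fake matching waveform
mel_segment_length = segment_length // hop_length + 2

mel_start = random.randint(0, mel.size(1) - mel_segment_length)
mel_crop = mel[:, mel_start:mel_start + mel_segment_length]
audio_start = mel_start * hop_length      # frame index -> sample index
audio_crop = audio[:, audio_start:audio_start + segment_length]
```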
#### File: melgan/utils/train.py
```python
import os
import math
import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
import itertools
import traceback
from ..model.generator import Generator
from ..model.multiscale import MultiScaleDiscriminator
from .utils import get_commit_hash
from .validation import validate
def train(args, pt_dir, chkpt_path, trainloader, valloader, writer, logger, hp, hp_str):
model_g = Generator(hp.audio.n_mel_channels).cuda()
model_d = MultiScaleDiscriminator().cuda()
optim_g = torch.optim.Adam(model_g.parameters(),
lr=hp.train.adam.lr,
betas=(hp.train.adam.beta1, hp.train.adam.beta2))
optim_d = torch.optim.Adam(model_d.parameters(),
lr=hp.train.adam.lr,
betas=(hp.train.adam.beta1, hp.train.adam.beta2))
githash = get_commit_hash()
init_epoch = -1
step = 0
if chkpt_path is not None:
logger.info("Resuming from checkpoint: %s" % chkpt_path)
checkpoint = torch.load(chkpt_path)
model_g.load_state_dict(checkpoint['model_g'])
try:
model_d.load_state_dict(checkpoint['model_d'])
optim_g.load_state_dict(checkpoint['optim_g'])
optim_d.load_state_dict(checkpoint['optim_d'])
except AttributeError:
logger.info('Loaded Generator parameters.')
step = checkpoint['step']
init_epoch = checkpoint['epoch']
logger.info(f'Loaded model: step: {step}, epoch: {init_epoch}.')
if hp_str != checkpoint['hp_str']:
logger.warning("New hparams is different from checkpoint. Will use new.")
if githash != checkpoint['githash']:
logger.warning("Code might be different: git hash is different.")
logger.warning("%s -> %s" % (checkpoint['githash'], githash))
else:
logger.info("Starting new training run.")
# this accelerates training when the size of minibatch is always consistent.
# if not consistent, it'll horribly slow down.
torch.backends.cudnn.benchmark = True
try:
model_g.train()
model_d.train()
for epoch in itertools.count(init_epoch + 1):
if epoch % hp.log.validation_interval == 0:
with torch.no_grad():
validate(hp, args, model_g, model_d, valloader, writer, step)
trainloader.dataset.shuffle_mapping()
loader = tqdm.tqdm(trainloader, desc='Loading train data', ncols=100)
for (melG, audioG), (melD, audioD) in loader:
melG = melG.cuda()
audioG = audioG.cuda()
melD = melD.cuda()
audioD = audioD.cuda()
# generator
optim_g.zero_grad()
fake_audio = model_g(melG)[:, :, :hp.audio.segment_length]
disc_fake = model_d(fake_audio)
disc_real = model_d(audioG)
loss_g = 0.0
for (feats_fake, score_fake), (feats_real, _) in zip(disc_fake, disc_real):
loss_g += torch.mean(torch.sum(torch.pow(score_fake - 1.0, 2), dim=[1, 2]))
for feat_f, feat_r in zip(feats_fake, feats_real):
loss_g += hp.model.feat_match * torch.mean(torch.abs(feat_f - feat_r))
loss_g.backward()
optim_g.step()
# discriminator
fake_audio = model_g(melD)[:, :, :hp.audio.segment_length]
fake_audio = fake_audio.detach()
loss_d_sum = 0.0
for _ in range(hp.train.rep_discriminator):
optim_d.zero_grad()
disc_fake = model_d(fake_audio)
disc_real = model_d(audioD)
loss_d = 0.0
for (_, score_fake), (_, score_real) in zip(disc_fake, disc_real):
loss_d += torch.mean(torch.sum(torch.pow(score_real - 1.0, 2), dim=[1, 2]))
loss_d += torch.mean(torch.sum(torch.pow(score_fake, 2), dim=[1, 2]))
loss_d.backward()
optim_d.step()
loss_d_sum += loss_d
step += 1
# logging
loss_g = loss_g.item()
loss_d_avg = loss_d_sum / hp.train.rep_discriminator
loss_d_avg = loss_d_avg.item()
if any([loss_g > 1e8, math.isnan(loss_g), loss_d_avg > 1e8, math.isnan(loss_d_avg)]):
logger.error("loss_g %.01f loss_d_avg %.01f at step %d!" % (loss_g, loss_d_avg, step))
raise Exception("Loss exploded")
if step % hp.log.summary_interval == 0:
writer.log_training(loss_g, loss_d_avg, step)
loader.set_description("g %.04f d %.04f | step %d" % (loss_g, loss_d_avg, step))
if epoch % hp.log.save_interval == 0:
save_path = os.path.join(pt_dir, '%s_%s_%04d.pt' % (os.path.basename(args.name), githash, epoch))
torch.save({
'model_g': model_g.state_dict(),
'model_d': model_d.state_dict(),
'optim_g': optim_g.state_dict(),
'optim_d': optim_d.state_dict(),
'step': step,
'epoch': epoch,
'hp_str': hp_str,
'githash': githash,
}, save_path)
logger.info("Saved checkpoint to: %s" % save_path)
except Exception as e:
logger.info("Exiting due to exception: %s" % e)
traceback.print_exc()
```
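A toy sketch isolating the LSGAN-style objectives computed in the training loop above; the tensors are random stand-ins for discriminator scores:
```python
import torch

score_fake = torch.rand(4, 1, 10)  # illustrative discriminator outputs
score_real = torch.rand(4, 1, 10)

# generator: push fake scores toward 1
loss_g = torch.mean(torch.sum((score_fake - 1.0) ** 2, dim=[1, 2]))
# discriminator: push real scores toward 1 and fake scores toward 0
loss_d = (torch.mean(torch.sum((score_real - 1.0) ** 2, dim=[1, 2]))
          + torch.mean(torch.sum(score_fake ** 2, dim=[1, 2])))
```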
|