metadata (dict) | text (string, lengths 60 to 3.49M)
---|---|
{
"source": "jeppeter/py-obcode",
"score": 2
}
|
#### File: py-obcode/src/cobfilebase.py
```python
import re
import logging
import sys
import os
import random
##importdebugstart
sys.path.append(os.path.abspath(os.path.dirname(__file__)))
from strparser import *
from filehdl import *
from cobattr import *
##importdebugend
##extractcode_start
class COBFileBase(object):
def __init__(self,sfile,dfile,cfg=None):
self.__define_expr = []
self.srcfile = sfile
self.dstfile = dfile
self.__append_define_expr(r'^\s*\#\s*define\s+')
self.__append_define_expr(r'^\s*\#\s*undef\s+')
self.__append_define_expr(r'^\s*\#\s*if\s+')
self.__append_define_expr(r'^\s*\#\s*ifdef\s+')
self.__append_define_expr(r'^\s*\#\s*ifndef\s+')
self.__append_define_expr(r'^\s*\#\s*endif\s+')
self.__append_define_expr(r'^\s*\#\s*else\s+')
self.__append_define_expr(r'^\s*\#\s*elif\s+')
if cfg is not None:
self.base_cfg = cfg
else:
self.base_cfg = CompoundAttr()
if sfile is not None:
self.in_lines = get_file_lines(sfile)
else:
self.in_lines = []
self.cur_line = 0
return
def __append_define_expr(self,exprstr):
expr = re.compile(exprstr)
self.__define_expr.append(expr)
return
def get_filter_expr_not_defined(self,l,expr1):
m = expr1.findall(l)
if m is not None and len(m) > 0:
filtered = False
for expr in self.__define_expr:
m = expr.findall(l)
if m is not None and len(m) > 0:
#logging.info('[match] %s'%(m[0]))
filtered = True
break
if not filtered:
return True
return False
def get_variables(self,l,expr1):
variables = expr1.findall(l)
# we do this on the increment
#logging.info('[%d][%s] variables[%d]'%(self.cur_line,l, len(variables)))
assert(len(variables) == 1)
assert(len(variables[0]) > 1)
cfgattr = self.base_cfg
#logging.info('v [%s]'%(variables[0][1]))
sbyte = string_to_ints(variables[0][1])
params, lbyte = parse_param(sbyte)
before = l.replace(variables[0][0],'',1)
return cfgattr,params,before,ints_to_string(lbyte)
def get_spec_config_variables(self,l,expr1):
leftvars = []
cfgattr = None
variables = expr1.findall(l)
assert(len(variables) == 1)
assert(len(variables[0]) > 1)
sbyte = string_to_ints(variables[0][1])
params,lbyte = parse_param(sbyte)
if len(params) < 1:
raise Exception('at [%d] line [%s] not valid for specific'%(self.cur_line,l))
after = ints_to_string(lbyte)
cfgstr,lbyte = parse_raw_string(string_to_ints(params[0]))
cfgstr = ints_to_string(cfgstr)
#logging.info('cfgstr [%s]'%(cfgstr))
cfgattr = self.base_cfg.get_file_config(cfgstr)
retparams = []
if len(params) > 1:
retparams = params[1:]
before = l.replace(variables[0][0],'',1)
return cfgattr,retparams, before,after
def __format_one_variable_handle(self,varname,ptrvar,obaddrvar,szvar,pcvar,tabs):
cbytes = get_random_bytes(8)
ccidx = []
for i in range(8):
ccidx.append(i)
curbytes = []
curidx = []
rets=''
rets += format_line('', tabs)
rets += format_debug_line('to handle %s variable'%(varname),tabs,3)
rets += format_debug_line('format %s xors'%(format_bytes_c(cbytes)), tabs, 3)
rets += format_line('%s = (void*)&%s;'%(ptrvar,varname), tabs)
rets += format_line('%s = (OB_ADDR)%s;'%(obaddrvar,ptrvar), tabs)
rets += format_line('%s = sizeof(%s);'%(szvar,obaddrvar), tabs)
rets += format_line('%s = (unsigned char*)%s;'%(pcvar,obaddrvar), tabs)
rets += format_line('', tabs)
rets += format_debug_line('encoding', tabs, 3)
while len(cbytes) > 0:
idx = random.randint(0,len(cbytes) - 1)
curidx.append(ccidx[idx])
del ccidx[idx]
curbytes.append(cbytes[idx])
del cbytes[idx]
rets += format_line('if (%d < %s){'%(curidx[-1],szvar), tabs)
rets += format_line('%s[%d] ^= 0x%x;'%(pcvar,curidx[-1],curbytes[-1]), tabs + 1)
rets += format_line('}', tabs)
rets += format_line('', tabs)
rets += format_debug_line('decoding', tabs, 3)
cbytes.extend(curbytes)
curbytes = []
ccidx.extend(curidx)
curidx = []
while len(cbytes) > 0:
idx = random.randint(0,len(cbytes) - 1)
curidx.append(ccidx[idx])
del ccidx[idx]
curbytes.append(cbytes[idx])
del cbytes[idx]
rets += format_line('if (%d < %s){'%(curidx[-1],szvar), tabs)
rets += format_line('%s[%d] ^= 0x%x;'%(pcvar,curidx[-1],curbytes[-1]), tabs + 1)
rets += format_line('}', tabs)
# now assign the decoded bytes back to the variable
rets += format_line('%s = *((OB_TYPEOF(%s)*)%s);'%(varname,varname,pcvar), tabs)
return rets
def expand_code(self,l,params,cfg,before,after):
rets = ''
obaddrvar = get_random_name(random.randint(cfg.namemin,cfg.namemax))
ptrvar = get_random_name(random.randint(cfg.namemin,cfg.namemax))
pcvar = get_random_name(random.randint(cfg.namemin,cfg.namemax))
szvar = get_random_name(random.randint(cfg.namemin,cfg.namemax))
tabs = count_tabs(l)
rets += format_line('do{', tabs)
rets += format_line('void* %s;'%(ptrvar),tabs + 1)
rets += format_line('OB_ADDR %s;'%(obaddrvar), tabs + 1)
rets += format_line('unsigned char* %s;'%(pcvar), tabs + 1)
rets += format_line('unsigned int %s;'%(szvar), tabs + 1)
idx = 0
while idx < len(params):
rets += self.__format_one_variable_handle(params[idx],ptrvar,obaddrvar,szvar,pcvar,tabs + 1)
idx += 1
rets += format_line('', tabs+1)
rets += format_line('}while(0);', tabs)
return rets
##extractcode_end
```
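A minimal usage sketch for the class above (the file names and the marker regex are hypothetical; `get_file_lines` and `CompoundAttr` come from the wildcard imports at the top of the file):

```python
# Hypothetical driver: load a source file and report the lines that contain the
# marker expression but are not preprocessor directives.
base = COBFileBase('input.c', 'output.c')      # hypothetical file names
marker = re.compile(r'OB_GET_CODE')            # hypothetical marker macro
for line in base.in_lines:
    if base.get_filter_expr_not_defined(line, marker):
        print('candidate line: %s' % line)
```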
#### File: py-obcode/src/fmthdl.py
```python
import random
import json
import logging
import re
import sys
import os
##importdebugstart
sys.path.append(os.path.abspath(os.path.dirname(__file__)))
from strparser import *
##importdebugend
##extractcode_start
class Utf8Encode(object):
def __dict_utf8(self,val):
newdict =dict()
for k in val.keys():
newk = self.__encode_utf8(k)
newv = self.__encode_utf8(val[k])
newdict[newk] = newv
return newdict
def __list_utf8(self,val):
newlist = []
for k in val:
newk = self.__encode_utf8(k)
newlist.append(newk)
return newlist
def __encode_utf8(self,val):
retval = val
if sys.version[0]=='2' and isinstance(val,unicode):
retval = val.encode('utf8')
elif isinstance(val,dict):
retval = self.__dict_utf8(val)
elif isinstance(val,list):
retval = self.__list_utf8(val)
return retval
def __init__(self,val):
self.__val = self.__encode_utf8(val)
return
def __str__(self):
return self.__val
def __repr__(self):
return self.__val
def get_val(self):
return self.__val
def format_line(l, tab=0):
retstr = ''
retstr += format_tabs(tab)
retstr += '%s\n'%(l)
return retstr
def format_comment_line(l):
s = ''
idx = 0
while idx < len(l):
if l[idx] == '*':
s += '\\*'
elif l[idx] == '/':
s += '\\/'
else:
s += l[idx]
idx += 1
return s
def format_debug_line(l,tab=0,debug=0):
rets = ''
if debug >= 3:
rets += format_line('/* %s */'%(format_comment_line(l)), tab)
return rets
def format_xor_encode_function(nameprefix='prefix',namelen=10,tabs=0,debug=0):
funcstr = ''
funcname = '%s_%s'%(nameprefix,get_random_name(namelen))
if debug >= 3:
funcstr += format_line('/*********************************************',tabs)
funcstr += format_line('* to make xor encoded functions', tabs)
funcstr += format_line('* it is kept simple for now; it may become more complex later', tabs)
funcstr += format_line('*********************************************/',tabs)
funcstr += format_debug_line('variable:namelen %d variable:prefix [%s]'%(namelen,nameprefix), tabs,debug)
funcstr += format_line('int %s(unsigned char* pbuf,int size,unsigned char* pxorcode, int xorsize)'%(funcname),tabs)
funcstr += format_line('{',tabs)
funcstr += format_line('int i,curi;',tabs+1)
funcstr += format_line('',tabs)
funcstr += format_line('for (i=0;i<size;i++){', tabs + 1)
funcstr += format_line('curi = (i % xorsize);',tabs + 2)
funcstr += format_line('pbuf[i] = (unsigned char)(pbuf[i] ^ pxorcode[curi]);', tabs + 2)
funcstr += format_line('}', tabs + 1)
funcstr += format_line('', tabs)
funcstr += format_line('return size;',tabs + 1)
funcstr += format_line('}',tabs)
return funcstr,funcname
def format_xor_decode_function(nameprefix='prefix_',namelen=10, tabs=0, debug=0):
funcstr = ''
funcname = '%s_%s'%(nameprefix,get_random_name(namelen))
if debug >= 3:
funcstr += format_line('/*********************************************',tabs)
funcstr += format_line('* to make xor decoded functions', tabs)
funcstr += format_line('* it is kept simple for now; it may become more complex later', tabs)
funcstr += format_line('*********************************************/',tabs)
funcstr += format_debug_line('variable:namelen %d variable:prefix [%s]'%(namelen,nameprefix), tabs,debug)
funcstr += format_line('int %s(unsigned char* pbuf,int size,unsigned char* pxorcode, int xorsize)'%(funcname),tabs)
funcstr += format_line('{',tabs)
funcstr += format_line('int i,curi;',tabs+1)
funcstr += format_line('',tabs)
funcstr += format_line('for (i=0;i<size;i++){', tabs + 1)
funcstr += format_line('curi = (i % xorsize);',tabs + 2)
funcstr += format_line('pbuf[i] = (unsigned char)(pbuf[i] ^ pxorcode[curi]);', tabs + 2)
funcstr += format_line('}', tabs + 1)
funcstr += format_line('', tabs)
funcstr += format_line('return size;',tabs + 1)
funcstr += format_line('}',tabs)
return funcstr,funcname
def get_xor_code(cnum=16):
xorcode = []
for i in range(cnum):
xorcode.append(random.randint(0,255))
return xorcode
def format_key_ctr_function(xorcode,nameprefix='prefix', namelen=10, numturns=30, tabs=0,debug=0):
funcstr = ''
funcname = '%s_%s'%(nameprefix,get_random_name(namelen))
presentxor = []
funcstr += format_line('int %s(unsigned char* pbuf,int size)'%(funcname),tabs)
funcstr += format_line('{',tabs)
codestr = ''
for i in range(len(xorcode)):
if i > 0:
codestr += ','
codestr += '0x%02x'%(xorcode[i])
funcstr += format_debug_line('keys %s size %d'%(codestr,len(xorcode)), tabs + 1 , debug)
funcstr += format_line('',tabs)
for i in range(len(xorcode)):
if (i%5) == 0:
funcstr += format_line('',tabs)
curnum = random.randint(0, 255)
funcstr += format_line('if ( %d < size) {'%(i), tabs + 1)
funcstr += format_line('pbuf[%d] = %d;'%(i,curnum), tabs + 2)
funcstr += format_line('}',tabs + 1)
presentxor.append(curnum)
funcstr += format_line('',tabs)
funcstr += format_debug_line('variable:numturns %d'%(numturns), tabs + 1, debug)
for i in range(numturns):
if (i%5) == 0 and i > 0:
funcstr += format_line('',tabs)
curi = random.randint(0, len(xorcode)-1)
curj = random.randint(0, len(xorcode)-1)
funcstr += format_line('if (%d < size && %d < size){'%(curi,curj), tabs + 1)
funcstr += format_debug_line('%d = %d ^ %d'%((presentxor[curi] ^ presentxor[curj]) & 0xff, presentxor[curi],presentxor[curj]), tabs + 2,debug)
presentxor[curi] = (presentxor[curi] ^ presentxor[curj]) & 0xff
funcstr += format_line('pbuf[%d] = (unsigned char)(pbuf[%d] ^ pbuf[%d]);'%(curi, curi, curj),tabs + 2)
funcstr += format_line('}', tabs + 1)
for i in range(len(xorcode)):
if (i%3) == 0:
funcstr += format_line('', tabs)
curi = random.randint(0, len(xorcode)-1)
curv = presentxor[i] ^ presentxor[curi]
curv = xorcode[i] ^ curv
funcstr += format_line('if (%d < size && %d < size){'%(i,curi), tabs + 1)
funcstr += format_debug_line('%d = %d ^ %d'%((presentxor[i] ^ presentxor[curi]) & 0xff, presentxor[i],presentxor[curi]), tabs + 2,debug)
presentxor[i] = (presentxor[i] ^ presentxor[curi]) & 0xff
funcstr += format_debug_line('%d = %d ^ %d'%((presentxor[i] ^ curv) & 0xff, presentxor[i],curv), tabs + 2,debug)
presentxor[i] = (presentxor[i] ^ curv) & 0xff
assert(presentxor[i] == xorcode[i])
funcstr += format_line('pbuf[%d] = (unsigned char)(pbuf[%d] ^ pbuf[%d]);'%(i ,i,curi), tabs+2)
funcstr += format_line('pbuf[%d] = (unsigned char)(pbuf[%d] ^ %d);'%(i,i,curv), tabs+2)
funcstr += format_line('}', tabs + 1)
funcstr += format_line('',tabs)
funcstr += format_line('return %d < size ? %d : size;'%(len(xorcode),len(xorcode)), tabs+1)
funcstr += format_line('}',tabs)
return funcstr,funcname
def format_key_dtr_function(xorcode,nameprefix='prefix', namelen=10, numturns=30, tabs=0,debug=0):
funcstr = ''
funcname = '%s_%s'%(nameprefix,get_random_name(namelen))
funcstr += format_line('void %s(unsigned char* pbuf,int size)'%(funcname),tabs)
funcstr += format_line('{',tabs)
funcstr += format_debug_line('variable:nameprefix %s variable:namelen %d variable:numturns %d'%(nameprefix,namelen,numturns), tabs+1,debug)
funcstr += format_line('',tabs)
storecode = []
for x in xorcode:
storecode.append(x)
for i in range(numturns):
if (i%5) == 0:
funcstr += format_line('',tabs)
curi = random.randint(0, len(xorcode)-1)
curj = random.randint(0, len(xorcode)-1)
funcstr += format_line('if (%d < size && %d < size){'%(curi,curj), tabs+1)
funcstr += format_debug_line('%d = %d ^ %d'%((storecode[curi] ^ storecode[curj]),storecode[curi],storecode[curj]), tabs + 2, debug)
funcstr += format_line('pbuf[%d] = (unsigned char)(pbuf[%d] ^ pbuf[%d]);'%(curi, curi, curj),tabs + 2)
funcstr += format_line('}',tabs + 1)
funcstr += format_line('',tabs)
funcstr += format_line('return;',tabs + 1)
funcstr += format_line('}',tabs)
return funcstr,funcname
def format_printf_func(l,tabs):
#return format_line(l,tabs)
return ''
def format_bytes_set_function(sbyte,nameprefix='prefix', namelen=10, numturns=30, tabs=0,debug=0,line=None):
funcname = '%s_%s'%(nameprefix,get_random_name(namelen))
funcstr = ''
funcstr += format_line('int %s(unsigned char* pbuf, int size)'%(funcname),tabs)
funcstr += format_line('{', tabs)
funcstr += format_printf_func('int i;', tabs + 1)
if line is not None:
funcstr += format_debug_line('first at line [%d]'%(line), tabs + 1 , debug)
funcstr += format_debug_line('variable:nameprefix %s variable:namelen %d variable:numturns %d length(%d)'%(nameprefix,namelen,numturns,len(sbyte)), tabs + 1, debug)
ss = ''
for c in sbyte:
if len(ss) > 0:
ss += ',0x%02x'%(c)
else:
ss += 'sbyte [0x%02x'%(c)
ss += ']'
funcstr += format_debug_line('%s'%(ss), tabs + 1, debug)
funcstr += format_printf_func('for (i=0;i<size;i++){', tabs + 1)
funcstr += format_printf_func('printf("[%d]=[%d:0x%x]\\n",i,pbuf[i],pbuf[i]);', tabs + 2)
funcstr += format_printf_func('}', tabs + 1)
clbits = []
leastmask = []
for i in range(len(sbyte)):
clbits.append(random.randint(0,255))
leastmask.append(0)
ss = ''
for c in clbits:
if len(ss) > 0:
ss += ',0x%x:%d'%(c,c)
else:
ss += 'clbits [0x%x:%d'%(c,c)
ss += ']'
funcstr += format_printf_func('/* %s */'%(ss), tabs+1)
for i in range(len(sbyte)):
curnum = clear_bit(sbyte[i], clbits[i])
funcstr += format_line('',tabs+1)
funcstr += format_debug_line('pbuf[%d] & 0x%x ?=> 0x%x'%(i,curnum,sbyte[i]), tabs + 1, debug)
funcstr += format_line('if (size > %d){'%(i), tabs + 1)
funcstr += format_printf_func('printf("[%d][%%d:0x%%x] & [%%d:0x%%x] = [%%d:0x%%x] [target:0x%x:%d]\\n",pbuf[%d], pbuf[%d], %d,%d, (pbuf[%d] & %d), (pbuf[%d] & %d));'%(i,sbyte[i],sbyte[i],i,i,curnum,curnum,i,curnum,i,curnum), tabs+2)
funcstr += format_line('pbuf[%d] = (unsigned char)(pbuf[%d] & 0x%x);'%(i,i,curnum), tabs + 2)
funcstr += format_line('}',tabs + 1)
# do not set the target bytes directly in this loop; only mix in bits, recording the target bits already set in leastmask
for i in range(numturns):
cidx = random.randint(0, len(sbyte) - 1)
cbit = random.randint(0, 255)
if random.randint(0,1) == 1:
cnum = clear_bit(sbyte[cidx], cbit)
leastmask[cidx] = leastmask[cidx] | cnum
funcstr += format_line('', tabs + 1)
funcstr += format_line('if ( size > %d){'%(cidx), tabs + 1)
funcstr += format_printf_func('printf("||[%d][%%d:0x%%x] | [%%d:0x%%x] = [%%d:0x%%x] [target:0x%x:%d]\\n",pbuf[%d],pbuf[%d],%d,%d,(pbuf[%d] | %d), (pbuf[%d] | %d));'%(cidx,sbyte[cidx],sbyte[cidx],cidx,cidx,cnum,cnum,cidx,cnum,cidx,cnum), tabs + 2)
funcstr += format_line('pbuf[%d] = (unsigned char)(pbuf[%d] | 0x%x);'%(cidx,cidx, cnum), tabs + 2)
funcstr += format_line('}', tabs + 1)
else:
cnum = expand_bit(sbyte[cidx], cbit)
funcstr += format_line('', tabs + 1)
funcstr += format_line('if ( size > %d){'%(cidx), tabs + 1)
funcstr += format_printf_func('printf("&&[%d][%%d:0x%%x] & [%%d:0x%%x] = [%%d:0x%%x] [target:0x%x:%d]\\n",pbuf[%d],pbuf[%d],%d,%d,(pbuf[%d] & %d), (pbuf[%d] & %d));'%(cidx,sbyte[cidx],sbyte[cidx],cidx,cidx,cnum,cnum,cidx,cnum,cidx,cnum), tabs + 2)
funcstr += format_line('pbuf[%d] = (unsigned char)(pbuf[%d] & 0x%x);'%(cidx,cidx, cnum), tabs + 2)
funcstr += format_line('}', tabs + 1)
# now OR in any remaining target bits that have not been set above
for i in range(len(sbyte)):
cnum = sbyte[i] & (~leastmask[i])
if cnum > 0:
funcstr += format_line('',tabs+1)
funcstr += format_debug_line('pbuf[%d] | 0x%x ?=> 0x%x'%(i,cnum, sbyte[i]), tabs + 1, debug)
funcstr += format_line('if (size > %d){'%(i), tabs + 1)
funcstr += format_printf_func('printf("[%d] [%%d:0x%%x] | [%%d:0x%%x] = [%%d:0x%%x] [target:0x%x:%d]\\n", pbuf[%d], pbuf[%d], %d ,%d , (pbuf[%d] | %d), (pbuf[%d] | %d));'%(i,sbyte[i],sbyte[i],i,i,cnum,cnum,i,cnum,i,cnum), tabs + 2)
funcstr += format_line('pbuf[%d] = (unsigned char)(pbuf[%d] | 0x%x);'%(i,i,cnum), tabs + 2)
funcstr += format_line('}',tabs + 1)
funcstr += format_line('', tabs + 1)
funcstr += format_line('return size;', tabs + 1)
funcstr += format_line('}', tabs)
return funcstr,funcname
def format_bytes_xor_function(sbyte,abyte,abytefunc,bbyte,bbytefunc,nameprefix='prefix', namelen=10, numturns=30, tabs=0,debug=0, line=None):
assert(len(sbyte) == len(abyte))
assert(len(abyte) == len(bbyte))
funcname = '%s_%s'%(nameprefix,get_random_name(namelen))
bname = '%s_%s'%(nameprefix, get_random_name(namelen))
retname = '%s_%s'%(nameprefix, get_random_name(namelen))
funcstr = ''
funcstr += format_line('int %s(unsigned char* pbuf, int size)'%(funcname),tabs)
funcstr += format_line('{', tabs)
if line is not None:
funcstr += format_debug_line('first at line [%d]'%(line),tabs + 1, debug)
funcstr += format_debug_line('variable:nameprefix %s variable:namelen %d variable:numturns %d length(%d)'%(nameprefix,namelen,numturns,len(sbyte)), tabs + 1, debug)
ss = ''
for c in sbyte:
if len(ss) > 0:
ss += ',0x%x'%(c)
else:
ss += 'sbyte [0x%x'%(c)
if len(ss) > 0:
ss += ']'
funcstr += format_debug_line('%s'%(ss), tabs + 1, debug)
if len(sbyte) >= 4 and sbyte[-1] == 0 and sbyte[-2] == 0 and sbyte[-3] == 0 and sbyte[-4] == 0:
funcstr += format_debug_line('var wstring:[%s]'%(quote_string(uni32_to_string(sbyte))), tabs + 1, debug)
elif len(sbyte) >= 2 and sbyte[-1] == 0 and sbyte[-2] == 0:
funcstr += format_debug_line('var wstring:[%s]'%(quote_string(uni16_to_string(sbyte))), tabs + 1, debug)
else:
funcstr += format_debug_line('var string:[%s]'%(quote_string(ints_to_string(sbyte))), tabs + 1, debug)
funcstr += format_line('unsigned char %s[%d];'%(bname, len(sbyte)), tabs + 1)
funcstr += format_line('int ret;', tabs + 1)
funcstr += format_line('', tabs + 1)
funcstr += format_line('ret = %s(pbuf,size);'%(abytefunc), tabs + 1)
funcstr += format_line('if ( ret < 0) {' , tabs + 1)
funcstr += format_line('return ret;', tabs + 2)
funcstr += format_line('}', tabs + 1)
funcstr += format_line('', tabs + 1)
funcstr += format_line('ret = %s(%s,%d);'%(bbytefunc, bname, len(sbyte)), tabs + 1)
funcstr += format_line('if ( ret < 0) {' , tabs + 1)
funcstr += format_line('return ret;', tabs + 2)
funcstr += format_line('}', tabs + 1)
# randomly mix pbuf and the helper buffer; abyte/bbyte track the values so sbyte can be recovered below
for i in range(numturns):
cidx = random.randint(0, len(sbyte) - 1)
didx = random.randint(0, len(sbyte) - 1)
hdl = random.randint(0, 5)
funcstr += format_line('', tabs + 1)
if hdl == 0:
# to make abyte[cidx] = abyte[cidx] & bbyte[didx]
funcstr += format_debug_line('abyte[%d] = abyte[%d] & bbyte[%d]'%(cidx,cidx,didx), tabs + 1, debug)
funcstr += format_debug_line('0x%x & 0x%x = 0x%0x'%(abyte[cidx],bbyte[didx], (abyte[cidx] & bbyte[didx])), tabs + 1, debug)
funcstr += format_line('if ( %d < size && %d < size ) {'%(cidx, didx), tabs + 1 )
funcstr += format_line('pbuf[%d] = (unsigned char)(pbuf[%d] & %s[%d]);'%(cidx, cidx, bname, didx), tabs + 2)
funcstr += format_line('}', tabs + 1)
abyte[cidx] = abyte[cidx] & bbyte[didx]
elif hdl == 1:
# to make abyte[cidx] = abyte[cidx] | bbyte[didx]
funcstr += format_debug_line('abyte[%d] = abyte[%d] | bbyte[%d]'%(cidx,cidx,didx), tabs + 1, debug)
funcstr += format_debug_line('0x%x | 0x%x = 0x%0x'%(abyte[cidx],bbyte[didx], (abyte[cidx] | bbyte[didx])), tabs + 1, debug)
funcstr += format_line('if ( %d < size && %d < size ) {'%(cidx, didx), tabs + 1 )
funcstr += format_line('pbuf[%d] = (unsigned char)(pbuf[%d] | %s[%d]);'%(cidx, cidx, bname, didx), tabs + 2)
funcstr += format_line('}', tabs + 1)
abyte[cidx] = abyte[cidx] | bbyte[didx]
elif hdl == 2:
# to make bbyte[didx] = abyte[cidx] & bbyte[didx]
funcstr += format_debug_line('bbyte[%d] = abyte[%d] & bbyte[%d]'%(didx,cidx,didx), tabs + 1, debug)
funcstr += format_debug_line('0x%x & 0x%x = 0x%0x'%(abyte[cidx],bbyte[didx], (abyte[cidx] & bbyte[didx])), tabs + 1, debug)
funcstr += format_line('if ( %d < size && %d < size ) {'%(cidx, didx), tabs + 1 )
funcstr += format_line('%s[%d] = (unsigned char)(pbuf[%d] & %s[%d]);'%(bname,didx, cidx, bname, didx), tabs + 2)
funcstr += format_line('}', tabs + 1)
bbyte[didx] = abyte[cidx] & bbyte[didx]
elif hdl == 3:
# to make bbyte[didx] = abyte[cidx] | bbyte[didx]
funcstr += format_debug_line('bbyte[%d] = abyte[%d] | bbyte[%d]'%(didx,cidx,didx), tabs + 1, debug)
funcstr += format_debug_line('0x%x | 0x%x = 0x%0x'%(abyte[cidx],bbyte[didx], (abyte[cidx] | bbyte[didx])), tabs + 1, debug)
funcstr += format_line('if ( %d < size && %d < size ) {'%(cidx, didx), tabs + 1 )
funcstr += format_line('%s[%d] = (unsigned char)(pbuf[%d] | %s[%d]);'%(bname,didx, cidx, bname, didx), tabs + 2)
funcstr += format_line('}', tabs + 1)
bbyte[didx] = abyte[cidx] | bbyte[didx]
elif hdl == 4:
# to make abyte[cidx] = abyte[cidx] ^ bbyte[didx]
funcstr += format_debug_line('abyte[%d] = abyte[%d] ^ bbyte[%d]'%(cidx,cidx,didx), tabs + 1, debug)
funcstr += format_debug_line('0x%x ^ 0x%x = 0x%0x'%(abyte[cidx],bbyte[didx], (abyte[cidx] ^ bbyte[didx])), tabs + 1, debug)
funcstr += format_line('if ( %d < size && %d < size ) {'%(cidx, didx), tabs + 1 )
funcstr += format_line('pbuf[%d] = (unsigned char)(pbuf[%d] ^ %s[%d]);'%(cidx, cidx, bname, didx), tabs + 2)
funcstr += format_line('}', tabs + 1)
abyte[cidx] = abyte[cidx] ^ bbyte[didx]
elif hdl == 5:
# to make bbyte[didx] = abyte[cidx] ^ bbyte[didx]
funcstr += format_debug_line('bbyte[%d] = abyte[%d] ^ bbyte[%d]'%(didx,cidx,didx), tabs + 1, debug)
funcstr += format_debug_line('0x%x ^ 0x%x = 0x%0x'%(abyte[cidx],bbyte[didx], (abyte[cidx] ^ bbyte[didx])), tabs + 1, debug)
funcstr += format_line('if ( %d < size && %d < size ) {'%(cidx, didx), tabs + 1 )
funcstr += format_line('%s[%d] = (unsigned char)(pbuf[%d] ^ %s[%d]);'%(bname,didx, cidx, bname, didx), tabs + 2)
funcstr += format_line('}', tabs + 1)
bbyte[didx] = abyte[cidx] ^ bbyte[didx]
else:
raise Exception('unexpected random value [%d]'%(hdl))
for i in range(len(sbyte)):
hdl = random.randint(0, 1)
funcstr += format_line('',tabs + 1)
if hdl == 0:
cnum = sbyte[i] ^ abyte[i]
funcstr += format_debug_line('pbuf[%d] = (abyte[%d])0x%x ^ 0x%x'%(i,i,abyte[i], cnum),tabs + 1, debug)
funcstr += format_line('if (%d < size){'%(i), tabs + 1)
funcstr += format_line('pbuf[%d] = (unsigned char)(pbuf[%d] ^ 0x%x);'%(i,i,cnum),tabs + 2)
funcstr += format_line('}',tabs + 1)
elif hdl == 1:
cnum = sbyte[i] ^ bbyte[i]
funcstr += format_debug_line('pbuf[%d] = (bbyte[%d])0x%x ^ 0x%x'%(i,i,bbyte[i], cnum),tabs + 1, debug)
funcstr += format_line('if (%d < size){'%(i), tabs + 1)
funcstr += format_line('pbuf[%d] = (unsigned char)(%s[%d] ^ 0x%x);'%(i,bname,i,cnum),tabs + 2)
funcstr += format_line('}',tabs + 1)
else:
raise Exception('unexpected random value [%d]'%(hdl))
funcstr += format_line('',tabs + 1)
funcstr += format_line('return size;', tabs + 1)
funcstr += format_line('}',tabs)
return funcstr, funcname
##extractcode_end
```
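A quick sketch of exercising the emitters above (illustrative only; `get_random_name`, used internally for the generated function names, comes from the `strparser` wildcard import):

```python
# Emit an XOR encode/decode pair plus a key-constructor function and print the
# generated C source; debug=3 also emits the explanatory C comments.
xorcode = get_xor_code(16)
ctrstr, ctrname = format_key_ctr_function(xorcode, nameprefix='ob', namelen=8, debug=3)
encstr, encname = format_xor_encode_function(nameprefix='ob', namelen=8, debug=3)
decstr, decname = format_xor_decode_function(nameprefix='ob', namelen=8, debug=3)
print('/* key constructor %s, encoder %s, decoder %s */' % (ctrname, encname, decname))
print(ctrstr + encstr + decstr)
```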
#### File: py-obcode/src/obmak_debug.py
```python
import sys
import os
import extargsparse
import re
import time
import logging
##importdebugstart
sys.path.append(os.path.abspath(os.path.dirname(__file__)))
from strparser import *
from filehdl import *
from fmthdl import *
from extract_ob import *
from obmaklib import *
##importdebugend
REPLACE_IMPORT_LIB=1
REPLACE_STR_PARSER=1
REPLACE_FILE_HDL=1
REPLACE_FMT_HDL=1
REPLACE_EXTRACT_OB=1
REPLACE_OBMAK_LIB=1
def main():
commandline='''
{
"verbose|v" : "+",
"version|V" : false,
"dump|D" : "obcode.json",
"benchmark|B" : false,
"makob<makob_handler>##srcfile to give the other code file ,this need environment variable MAKOB_FILE to get the default (makob.json)##" : {
"namemin" : 5,
"namemax" : 20,
"$" : "+"
},
"unmakob<unmakob_handler>##dstfile to give the origin ,this need environment variable MAKOB_FILE to get the default (makob.json)##" : {
"short" : false,
"$" : "+"
},
"basename<basename_handler>##to make basename##" : {
"$" : "+"
},
"obtrans<obtrans_handler>##translate the srcdir to dstdir in makob file##" : {
"srcdir" : "",
"dstdir" : "",
"$" : "+"
},
"oblist<oblist_handler>##to list files ob files##" : {
"$" : "*"
},
"obuntrans<obuntrans_handler>##inputfile [outputfile] to trans file from MAKOB_FILE##" : {
"$" : "+"
},
"obunfunc<obunfunc_handler>##inputfile;outfile;funcs... to set obfuncs##" : {
"$" : "+"
}
}
'''
d = dict()
d['version'] = "VERSION_RELACE_STRING"
options = extargsparse.ExtArgsOptions(d)
stime = time.time()
parser = extargsparse.ExtArgsParse(options)
parser.load_command_line_string(commandline)
args = parser.parse_command_line(None,parser)
if args.version:
sys.stdout.write('%s\n'%(options.version))
sys.exit(0)
if args.benchmark:
etime = time.time()
sys.stderr.write('run %s time %s second\n'%(sys.argv[1:],etime - stime))
return
##importdebugstart
from obrelease import *
import re
def debug_release():
if '-v' in sys.argv[1:]:
#sys.stderr.write('will make verbose\n')
loglvl = logging.DEBUG
if logging.root is not None and len(logging.root.handlers) > 0:
logging.root.handlers = []
logging.basicConfig(level=loglvl,format='%(asctime)s:%(filename)s:%(funcName)s:%(lineno)d\t%(message)s')
topdir = os.path.abspath(os.path.join(os.path.dirname(__file__),'..'))
tofile= os.path.abspath(os.path.join(topdir,'obmak.py'))
curdir = os.path.abspath(os.path.dirname(__file__))
rlfiles = ReleaseFiles(__file__)
rlfiles.add_python_file(os.path.abspath(os.path.join(curdir,'strparser.py')),r'REPLACE_STR_PARSER=1')
rlfiles.add_python_file(os.path.abspath(os.path.join(curdir,'fmthdl.py')),r'REPLACE_FMT_HDL=1')
rlfiles.add_python_file(os.path.abspath(os.path.join(curdir,'filehdl.py')),r'REPLACE_FILE_HDL=1')
rlfiles.add_python_file(os.path.abspath(os.path.join(curdir,'obmaklib.py')),r'REPLACE_OBMAK_LIB=1')
rlfiles.add_python_file(os.path.abspath(os.path.join(curdir,'extract_ob.py')),r'REPLACE_EXTRACT_OB=1')
if len(sys.argv) > 2:
for k in sys.argv[1:]:
if not k.startswith('-'):
tofile = k
break
versionfile = os.path.abspath(os.path.join(topdir,'VERSION'))
if not os.path.exists(versionfile):
raise Exception('can not find VERSION file')
with open(versionfile,'r') as f:
for l in f:
l = l.rstrip('\r\n')
vernum = l
break
#logging.info('str_c\n%s'%(strparser_c))
sarr = re.split(r'\.',vernum)
if len(sarr) != 3:
raise Exception('version (%s) not format x.x.x'%(vernum))
VERSIONNUMBER = vernum
import_rets = fromat_ext_import_files(__file__,rlfiles.get_includes())
logging.info('import_rets\n%s'%(import_rets))
rlfiles.add_repls(r'VERSION_RELACE_STRING',VERSIONNUMBER)
rlfiles.add_repls(r'debug_main','main')
rlfiles.add_repls(r'REPLACE_IMPORT_LIB=1',make_string_slash_ok(import_rets))
#logging.info('repls %s'%(repls.keys()))
disttools.release_file('__main__',tofile,[],[[r'##importdebugstart.*',r'##importdebugend.*']],[],rlfiles.get_repls())
return
def debug_main():
if '--release' in sys.argv[1:]:
debug_release()
return
main()
return
##importdebugend
if __name__ == '__main__':
debug_main()
```
#### File: py-obcode/src/obrelease.py
```python
import sys
import os
import re
import logging
import disttools
sys.path.append(os.path.abspath(os.path.dirname(__file__)))
from strparser import *
from filehdl import *
from pyparser import *
def make_string_slash_ok(s):
sarr = re.split('\n', s)
rets = ''
for l in sarr:
l = l.rstrip('\r\n')
cexpr = None
if len(l) > 0 and l[-1] == '\\':
cexpr = get_random_name(20)
cont = True
while cont:
cont = False
matchl = re.sub(cexpr, '', l)
if matchl != l:
cont = True
cexpr = get_random_name(20)
l = re.sub(r'\\$', cexpr, l)  # swap the trailing backslash for the placeholder
l = re.sub(r'\\', r'\\\\', l)
if cexpr is not None:
l = re.sub(cexpr, r'\\', l)
rets += '%s\n'%(l)
return rets
def get_import_file(fname):
rets = ''
started = False
with open(fname,'r+b') as f:
for l in f:
if sys.version[0] == '3':
l = l.decode('utf8')
l = l.rstrip('\r\n')
if not started:
if l.startswith('##extractcode_start'):
started = True
else:
if l.startswith('##extractcode_end'):
started = False
else:
rets += l
rets += '\n'
return rets
def make_filters_out(ims,files):
cont = True
jdx = 0
idx = 0
while cont:
cont = False
idx = 0
while idx < len(ims) and not cont:
jdx = 0
while jdx < len(files) and not cont:
if ims[idx].frommodule == files[jdx]:
cont = True
logging.info('del [%d] jdx [%d] [%s]'%(idx,jdx,ims[idx]))
del ims[idx]
logging.info('%s'%(ims))
break
jdx += 1
idx += 1
return ims
def fromat_ext_import_files(origfile,files):
curbase = re.sub(r'\.py$','',os.path.basename(origfile))
allims = []
for f in files:
allims.extend(get_import_names(f))
curims= get_import_names(origfile)
curims = packed_import(curims)
curims = make_filters_out(curims, files)
logging.info('curims %s'%(curims))
allims = packed_import(allims)
allims = make_filters_out(allims, files)
logging.info('allims %s'%(allims))
cont = True
seccont = True
while cont:
cont = False
idx = 0
while idx < len(allims) :
jdx = 0
while jdx < len(curims) :
if allims[idx].frommodule == curims[jdx].frommodule and \
allims[idx].module == curims[jdx].module:
cont = True
#logging.info('del [%d] %s'%(idx,allims[idx]))
del allims[idx]
break
jdx += 1
if cont:
break
idx += 1
rets = ''
for m in allims:
rets += '%s\n'%(format_import(m))
return rets
class ReleaseFiles(object):
def __init__(self, basefile=__file__):
self.__includes = []
self.__basefile = basefile
self.__repls = dict()
return
def add_python_file(self,path,rex):
c = get_import_file(path)
self.__repls[rex] = make_string_slash_ok(c)
self.__includes.append(path)
return
def add_repls(self,k,v):
self.__repls[k]= v
return
def get_repls(self):
return self.__repls
def get_includes(self):
return self.__includes
```
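A minimal sketch of the intended flow (the path is hypothetical; compare `debug_release()` in obmak_debug.py above, which drives the same class):

```python
# Collect a helper module to inline into the release script, register a version
# replacement, then inspect what disttools.release_file would receive.
rlfiles = ReleaseFiles(__file__)
rlfiles.add_python_file('strparser.py', r'REPLACE_STR_PARSER=1')   # hypothetical path
rlfiles.add_repls(r'VERSION_RELACE_STRING', '1.0.0')
print(rlfiles.get_includes())
print(sorted(rlfiles.get_repls().keys()))
```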
#### File: test/chkintegrity/chkintegrity.py
```python
import extargsparse
import sys
import os
import zlib
import hashlib
import sha3
import re
from Crypto.Cipher import AES
##importdebugstart
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)),'..','..','src'))
from strparser import *
##importdebugend
def read_bytes(infile):
data = b''
with open(infile,'rb') as fin:
data = fin.read()
return data
def write_bytes(b, outfile):
with open(outfile,'wb') as fout:
fout.write(b)
return
def crc32_calc(infile):
crcval = zlib.crc32(read_bytes(infile))
return crcval
def md5_calc(infile):
m = hashlib.md5()
m.update(read_bytes(infile))
return m.hexdigest()
def sha256_calc(infile):
m = hashlib.sha256()
m.update(read_bytes(infile))
return m.hexdigest()
def sha3_calc(infile):
m = sha3.sha3_512()
m.update(read_bytes(infile))
return m.hexdigest()
def crc32_handler(args,parser):
set_logging_level(args)
for f in args.subnargs:
crcval = crc32_calc(f)
if crcval < 0:
crcval = crcval + 0xffffffff + 1
sys.stdout.write('[%s] crc32 [0x%x:%d]\n'%(f,crcval,crcval))
sys.exit(0)
return
def md5_handler(args,parser):
set_logging_level(args)
for f in args.subnargs:
sys.stdout.write('[%s] md5 %s\n'%(f, md5_calc(f)))
sys.exit(0)
return
def sha256_handler(args,parser):
set_logging_level(args)
for f in args.subnargs:
sys.stdout.write('[%s] sha256 %s\n'%(f, sha256_calc(f)))
sys.exit(0)
return
def sha3_handler(args,parser):
set_logging_level(args)
for f in args.subnargs:
sys.stdout.write('[%s] sha3 %s\n'%(f, sha3_calc(f)))
sys.exit(0)
return
def trans_bytes(infile):
retb = b''
with open(infile,'r') as fin:
for l in fin:
l = l.rstrip('\r\n')
sarr = re.split(':',l)
if len(sarr) < 2:
continue
nsarr = re.split(r'\s{2,}', sarr[1])
if len(nsarr) < 1:
continue
nsarr = re.split(r'\s+',nsarr[0])
logging.info('nsarr %s'%(nsarr))
for b in nsarr:
if len(b) == 0:
continue
if b.startswith('0x') or \
b.startswith('0X'):
b = b[2:]
elif b.startswith('x') or \
b.startswith('X'):
b = b[1:]
curb = int(b,16)
if sys.version[0] == '3':
retb += curb.to_bytes(1,'little')
else:
retb += chr(curb)
return retb
def dump_handler(args,parser):
set_logging_level(args)
b = trans_bytes(args.subnargs[0])
write_bytes(b,args.subnargs[1])
sys.exit(0)
return
def read_ints(infile=None):
keybytes = read_bytes(infile)
s = ''
if sys.version[0] == '3':
s = keybytes.decode('utf-8')
else:
s = str(keybytes)
k = []
idx = 0
while idx < len(s):
curs = s[idx:(idx+2)]
k.append(int(curs,16))
idx += 2
return k
def aesenccbc_handler(args,parser):
set_logging_level(args)
if len(args.subnargs) < 3:
raise Exception('need keyfile ivfile input...')
k = read_ints(args.subnargs[0])
iv = read_ints(args.subnargs[1])
k = ints_to_bytes(k)
iv = ints_to_bytes(iv)
aes = AES.new(k,AES.MODE_CBC,iv)
inputs = read_bytes(args.subnargs[2])
outputs = aes.encrypt(inputs)
write_bytes(outputs,args.output)
sys.exit(0)
return
def aesdeccbc_handler(args,parser):
set_logging_level(args)
if len(args.subnargs) < 3:
raise Exception('need keyfile ivfile input...')
k = read_ints(args.subnargs[0])
iv = read_ints(args.subnargs[1])
k = ints_to_bytes(k)
iv = ints_to_bytes(iv)
aes = AES.new(k,AES.MODE_CBC,iv)
inputs = read_bytes(args.subnargs[2])
outputs = aes.decrypt(inputs)
write_bytes(outputs,args.output)
sys.exit(0)
return
def main():
commandline='''
{
"verbose|v" : "+",
"output|o" : null,
"crc32<crc32_handler>" : {
"$" : "+"
},
"md5<md5_handler>" : {
"$" : "+"
},
"sha256<sha256_handler>" : {
"$" : "+"
},
"sha3<sha3_handler>" : {
"$" : "+"
},
"dump<dump_handler>" : {
"$" : 2
},
"aesenccbc<aesenccbc_handler>" : {
"$" : "+"
},
"aesdeccbc<aesdeccbc_handler>" : {
"$" : "+"
}
}
'''
parser = extargsparse.ExtArgsParse()
parser.load_command_line_string(commandline)
args = parser.parse_command_line(None,parser)
raise Exception('can not parse %s for [%s]'%(sys.argv[1:], args))
return
if __name__ == '__main__':
main()
```
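The digest helpers can also be called directly, without going through the extargsparse subcommands (the input path is hypothetical):

```python
# Direct use of the hash helpers defined above.
path = 'firmware.bin'                                    # hypothetical input file
print('[%s] crc32  0x%x' % (path, crc32_calc(path) & 0xffffffff))
print('[%s] md5    %s' % (path, md5_calc(path)))
print('[%s] sha256 %s' % (path, sha256_calc(path)))
```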
|
{
"source": "Jeppe-T-K/stable-baselines",
"score": 2
}
|
#### File: stable_baselines/common/action_mask_env.py
```python
import gym
import numpy as np
from gym.spaces import Discrete, MultiDiscrete
class DiscreteActionMaskEnv(gym.Env):
metadata = {'render.modes': ['human', 'system', 'none']}
def __init__(self):
self.action_space = Discrete(3)
self.observation_shape = (1, 10, 10)
self.observation_space = gym.spaces.Box(low=0, high=1, shape=self.observation_shape, dtype=np.float16)
self.counter = 0
self.valid_actions = [1, 1, 1]
def reset(self):
self.counter = 0
self.valid_actions = [1, 1, 1]
return self.state()
def step(self, action: int):
valid_actions = [1, 1, 1]
if self.valid_actions[action] == 0:
raise Exception("Invalid action was selected! Valid actions: {}, "
"action taken: {}".format(self.valid_actions, action))
valid_actions[action] = 0
self.counter += 1
self.valid_actions = valid_actions
return self.state(), 0, self.finish(), {'action_mask': self.valid_actions}
def render(self, mode='human'):
pass
def finish(self):
return self.counter == 250
def state(self):
tmp = np.reshape(np.array([*range(100)]), self.observation_shape)
obs = tmp / 100
return obs
class MultiDiscreteActionMaskEnv(gym.Env):
metadata = {'render.modes': ['human', 'system', 'none']}
def __init__(self):
self.action_space = MultiDiscrete([3, 3, 3])
self.observation_shape = (1, 10, 10)
self.observation_space = gym.spaces.Box(low=0, high=1, shape=self.observation_shape, dtype=np.float16)
self.counter = 0
self.valid_actions = [[1, 1, 1],
[1, 1, 1],
[1, 1, 1]]
def reset(self):
self.counter = 0
self.valid_actions = [[1, 1, 1],
[1, 1, 1],
[1, 1, 1]]
return self.state()
def step(self, actions):
valid_actions = [[1, 1, 1],
[1, 1, 1],
[1, 1, 1]]
for i, action in enumerate(actions):
if self.valid_actions[i][action] == 0:
raise Exception("Invalid action was selected! Valid actions: {}, "
"action taken: {}".format(self.valid_actions, actions))
valid_actions[i][action] = 0
self.valid_actions = valid_actions
self.counter += 1
return self.state(), 0, self.finish(), {'action_mask': self.valid_actions}
def render(self, mode='human'):
pass
def finish(self):
return self.counter == 250
def state(self):
tmp = np.reshape(np.array([*range(100)]), self.observation_shape)
obs = tmp / 100
return obs
class MultiDiscreteUnbalancedActionMaskEnv(gym.Env):
metadata = {'render.modes': ['human', 'system', 'none']}
def __init__(self):
self.action_space = MultiDiscrete([2, 3, 4])
self.observation_shape = (1, 10, 10)
self.observation_space = gym.spaces.Box(low=0, high=1, shape=self.observation_shape, dtype=np.float16)
self.counter = 0
self.valid_actions = [[1, 1],
[1, 1, 1],
[1, 1, 1, 1]]
def reset(self):
self.counter = 0
self.valid_actions = [[1, 1],
[1, 1, 1],
[1, 1, 1, 1]]
return self.state()
def step(self, actions):
valid_actions = [[1, 1],
[1, 1, 1],
[1, 1, 1, 1]]
for i, action in enumerate(actions):
if self.valid_actions[i][action] == 0:
raise Exception("Invalid action was selected! Valid actions: {}, "
"action taken: {}".format(self.valid_actions, actions))
valid_actions[i][action] = 0
self.valid_actions = valid_actions
self.counter += 1
return self.state(), 0, self.finish(), {'action_mask': self.valid_actions}
def render(self, mode='human'):
pass
def finish(self):
return self.counter == 250
def state(self):
tmp = np.reshape(np.array([*range(100)]), self.observation_shape)
obs = tmp / 100
return obs
```
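A short sketch of driving one of these environments by hand, always sampling from the mask returned in the `info` dict; this only illustrates the mask contract, not stable-baselines' own training loop:

```python
import random

env = DiscreteActionMaskEnv()
obs = env.reset()
mask = env.valid_actions
done = False
while not done:
    # pick uniformly among the actions the previous step left valid
    action = random.choice([i for i, ok in enumerate(mask) if ok])
    obs, reward, done, info = env.step(action)
    mask = info['action_mask']
```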
|
{
"source": "jeppetrost/plaso",
"score": 2
}
|
#### File: plaso/parsers/mac_securityd.py
```python
from __future__ import unicode_literals
import pyparsing
from dfdatetime import time_elements as dfdatetime_time_elements
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import errors
from plaso.lib import definitions
from plaso.lib import timelib
from plaso.parsers import logger
from plaso.parsers import manager
from plaso.parsers import text_parser
class MacOSSecuritydLogEventData(events.EventData):
"""MacOS securityd log event data.
Attributes:
caller (str): caller, consists of two hex numbers.
facility (str): facility.
level (str): priority level.
message (str): message.
security_api (str): name of securityd function.
sender_pid (int): process identifier of the sender.
sender (str): name of the sender.
"""
DATA_TYPE = 'mac:securityd:line'
def __init__(self):
"""Initializes event data."""
super(MacOSSecuritydLogEventData, self).__init__(data_type=self.DATA_TYPE)
self.caller = None
self.facility = None
self.level = None
self.message = None
self.security_api = None
self.sender = None
self.sender_pid = None
class MacOSSecuritydLogParser(text_parser.PyparsingSingleLineTextParser):
"""Parses the securityd file that contains logs from the security daemon."""
NAME = 'mac_securityd'
DESCRIPTION = 'Parser for MacOS securityd log files.'
_ENCODING = 'utf-8'
_DEFAULT_YEAR = 2012
DATE_TIME = pyparsing.Group(
text_parser.PyparsingConstants.THREE_LETTERS.setResultsName('month') +
text_parser.PyparsingConstants.ONE_OR_TWO_DIGITS.setResultsName('day') +
text_parser.PyparsingConstants.TIME_ELEMENTS)
SECURITYD_LINE = (
DATE_TIME.setResultsName('date_time') +
pyparsing.CharsNotIn('[').setResultsName('sender') +
pyparsing.Literal('[').suppress() +
text_parser.PyparsingConstants.PID.setResultsName('sender_pid') +
pyparsing.Literal(']').suppress() +
pyparsing.Literal('<').suppress() +
pyparsing.CharsNotIn('>').setResultsName('level') +
pyparsing.Literal('>').suppress() +
pyparsing.Literal('[').suppress() +
pyparsing.CharsNotIn('{').setResultsName('facility') +
pyparsing.Literal('{').suppress() +
pyparsing.Optional(pyparsing.CharsNotIn(
'}').setResultsName('security_api')) +
pyparsing.Literal('}').suppress() +
pyparsing.Optional(pyparsing.CharsNotIn(']:').setResultsName(
'caller')) + pyparsing.Literal(']:').suppress() +
pyparsing.SkipTo(pyparsing.lineEnd).setResultsName('message'))
REPEATED_LINE = (
DATE_TIME.setResultsName('date_time') +
pyparsing.Literal('--- last message repeated').suppress() +
text_parser.PyparsingConstants.INTEGER.setResultsName('times') +
pyparsing.Literal('time ---').suppress())
LINE_STRUCTURES = [
('logline', SECURITYD_LINE),
('repeated', REPEATED_LINE)]
def __init__(self):
"""Initializes a parser object."""
super(MacOSSecuritydLogParser, self).__init__()
self._last_month = None
self._previous_structure = None
self._year_use = 0
def _GetTimeElementsTuple(self, structure):
"""Retrieves a time elements tuple from the structure.
Args:
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file.
Returns:
tuple: containing:
year (int): year.
month (int): month, where 1 represents January.
day_of_month (int): day of month, where 1 is the first day of the month.
hours (int): hours.
minutes (int): minutes.
seconds (int): seconds.
"""
month, day, hours, minutes, seconds = structure.date_time
# Note that dfdatetime_time_elements.TimeElements will raise ValueError
# for an invalid month.
month = timelib.MONTH_DICT.get(month.lower(), 0)
if month != 0 and month < self._last_month:
# Gap detected between years.
self._year_use += 1
return (self._year_use, month, day, hours, minutes, seconds)
def _ParseLogLine(self, parser_mediator, structure, key):
"""Parse a single log line and produce an event object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file.
key (str): name of the parsed structure.
"""
time_elements_tuple = self._GetTimeElementsTuple(structure)
try:
date_time = dfdatetime_time_elements.TimeElements(
time_elements_tuple=time_elements_tuple)
except ValueError:
parser_mediator.ProduceExtractionWarning(
'invalid date time value: {0!s}'.format(structure.date_time))
return
self._last_month = time_elements_tuple[1]
if key == 'logline':
self._previous_structure = structure
message = structure.message
else:
message = 'Repeated {0:d} times: {1:s}'.format(
structure.times, self._previous_structure.message)
structure = self._previous_structure
# It uses CharsNotIn structure which leaves whitespaces
# at the beginning of the sender and the caller.
event_data = MacOSSecuritydLogEventData()
event_data.caller = structure.caller.strip() or 'unknown'
event_data.facility = structure.facility
event_data.level = structure.level
event_data.message = message
event_data.security_api = structure.security_api or 'unknown'
event_data.sender_pid = structure.sender_pid
event_data.sender = structure.sender.strip()
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_ADDED)
parser_mediator.ProduceEventWithEventData(event, event_data)
def ParseRecord(self, parser_mediator, key, structure):
"""Parses a log record structure and produces events.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
key (str): name of the parsed structure.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file.
Raises:
ParseError: when the structure type is unknown.
"""
if key not in ('logline', 'repeated'):
raise errors.ParseError(
'Unable to parse record, unknown structure: {0:s}'.format(key))
self._ParseLogLine(parser_mediator, structure, key)
def VerifyStructure(self, parser_mediator, line):
"""Verify that this file is a securityd log file.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
line (str): line from a text file.
Returns:
bool: True if the line is in the expected format, False if not.
"""
self._last_month = 0
self._year_use = parser_mediator.GetEstimatedYear()
try:
structure = self.SECURITYD_LINE.parseString(line)
except pyparsing.ParseException:
logger.debug('Not a MacOS securityd log file')
return False
time_elements_tuple = self._GetTimeElementsTuple(structure)
try:
dfdatetime_time_elements.TimeElements(
time_elements_tuple=time_elements_tuple)
except ValueError:
logger.debug(
'Not a MacOS securityd log file, invalid date and time: {0!s}'.format(
structure.date_time))
return False
self._last_month = time_elements_tuple[1]
return True
manager.ParsersManager.RegisterParser(MacOSSecuritydLogParser)
```
#### File: plaso/parsers/santa.py
```python
from __future__ import unicode_literals
import re
import pyparsing
from dfdatetime import time_elements as dfdatetime_time_elements
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import errors
from plaso.lib import definitions
from plaso.parsers import manager
from plaso.parsers import text_parser
class SantaExecutionEventData(events.EventData):
"""Santa execution event data.
Attributes:
action (str): action recorded by Santa.
decision (str): if the process was allowed or blocked.
reason (str): reason behind santa decision to execute or block a process.
process_hash (str): SHA256 hash for the executed process.
certificate_hash (str): SHA256 hash for the certificate associated with the
executed process.
certificate_common_name (str): certificate common name.
pid (str): process id for the process.
ppid (str): parent process id for the executed process.
uid (str): user id associated with the executed process.
user (str): user name associated with the executed process.
gid (str): group id associated with the executed process.
group (str): group name associated with the executed process.
mode (str): Santa execution mode, for example Monitor or Lockdown.
process_path (str): process file path.
process_arguments (str): executed process with its arguments.
"""
DATA_TYPE = 'santa:execution'
def __init__(self):
"""Initializes event data."""
super(SantaExecutionEventData, self).__init__(data_type=self.DATA_TYPE)
self.action = None
self.decision = None
self.reason = None
self.process_hash = None
self.certificate_hash = None
self.certificate_common_name = None
self.quarantine_url = None
self.pid = None
self.ppid = None
self.uid = None
self.user = None
self.gid = None
self.group = None
self.mode = None
self.process_path = None
self.process_arguments = None
class SantaFileSystemEventData(events.EventData):
"""Santa file system event data.
Attributes:
action (str): event type recorded by Santa.
file_path (str): file path and name for WRITE/DELETE events.
file_new_path (str): new file path and name for RENAME events.
pid (str): process id for the process.
ppid (str): parent process id for the executed process.
process (str): process name.
process_path (str): process file path.
uid (str): user id associated with the executed process.
user (str): user name associated with the executed process.
gid (str): group id associated with the executed process.
group (str): group name associated with the executed process.
"""
DATA_TYPE = 'santa:file_system_event'
def __init__(self):
"""Initializes event data."""
super(SantaFileSystemEventData, self).__init__(data_type=self.DATA_TYPE)
self.action = None
self.file_path = None
self.file_new_path = None
self.pid = None
self.ppid = None
self.process = None
self.process_path = None
self.uid = None
self.user = None
self.gid = None
self.group = None
class SantaMountEventData(events.EventData):
"""Santa mount event data.
Attributes:
action (str): event type recorded by Santa.
mount (str): disk mount point.
volume (str): disk volume name.
bsd_name (str): disk BSD name.
fs (str): disk volume kind.
model (str): disk model.
serial (str): disk serial.
bus (str): device protocol.
dmg_path (str): DMG file path.
appearance (str): disk appearance date.
"""
DATA_TYPE = 'santa:diskmount'
def __init__(self):
"""Initializes event data."""
super(SantaMountEventData, self).__init__(data_type=self.DATA_TYPE)
self.action = None
self.mount = None
self.volume = None
self.bsd_name = None
self.fs = None
self.model = None
self.serial = None
self.bus = None
self.dmg_path = None
self.appearance = None
class SantaParser(text_parser.PyparsingSingleLineTextParser):
"""Parses santa log files"""
NAME = 'santa'
DESCRIPTION = 'Santa Parser'
_ENCODING = 'utf-8'
MAX_LINE_LENGTH = 16384
_SEP_TOKEN = pyparsing.Suppress('|')
_SKIP_SEP = pyparsing.SkipTo('|')
_SKIP_END = pyparsing.SkipTo(pyparsing.lineEnd)
_PYPARSING_COMPONENTS = {
'action': pyparsing.Suppress('action=') +
_SKIP_SEP.setResultsName('action') +_SEP_TOKEN,
'decision': pyparsing.Suppress('decision=') +
_SKIP_SEP.setResultsName('decision') + _SEP_TOKEN,
'reason': pyparsing.Suppress('reason=') +
_SKIP_SEP.setResultsName('reason') + _SEP_TOKEN,
'process': pyparsing.Suppress('process=') +
_SKIP_SEP.setResultsName('process') + _SEP_TOKEN,
'processpath': pyparsing.Suppress('processpath=') +
_SKIP_SEP.setResultsName('processpath') + _SEP_TOKEN,
'sha256': pyparsing.Suppress('sha256=') +
_SKIP_SEP.setResultsName('sha256') + _SEP_TOKEN,
'cert_sha256': pyparsing.Suppress('cert_sha256=') +
_SKIP_SEP.setResultsName('cert_sha256') + _SEP_TOKEN,
'cert_cn': pyparsing.Suppress('cert_cn=') +
_SKIP_SEP.setResultsName('cert_cn') + _SEP_TOKEN,
'quarantine_url': pyparsing.Suppress('quarantine_url=') +
_SKIP_SEP.setResultsName('quarantine_url') + _SEP_TOKEN,
'pid': pyparsing.Suppress('pid=') + _SKIP_SEP.setResultsName('pid') +
_SEP_TOKEN,
'ppid': pyparsing.Suppress('ppid=') + _SKIP_SEP.setResultsName('ppid') +
_SEP_TOKEN,
'uid': pyparsing.Suppress('uid=') + _SKIP_SEP.setResultsName('uid') +
_SEP_TOKEN,
'user': pyparsing.Suppress('user=') + _SKIP_SEP.setResultsName('user') +
_SEP_TOKEN,
'gid': pyparsing.Suppress('gid=') + _SKIP_SEP.setResultsName('gid') +
_SEP_TOKEN,
'group': pyparsing.Suppress('group=') +
(_SKIP_SEP | _SKIP_END).setResultsName('group') +
pyparsing.Optional(_SEP_TOKEN),
'mode': pyparsing.Suppress('mode=') + _SKIP_SEP.setResultsName('mode') +
_SEP_TOKEN,
'newpath': pyparsing.Suppress('newpath=') +
_SKIP_SEP.setResultsName('newpath') + _SEP_TOKEN,
'path': pyparsing.Suppress('path=') +
(_SKIP_SEP | _SKIP_END).setResultsName('path') +
pyparsing.Optional(_SEP_TOKEN),
'args': pyparsing.Suppress('args=') + _SKIP_END.setResultsName('args'),
'mount': pyparsing.Suppress('mount=') +
_SKIP_SEP.setResultsName('mount') + _SEP_TOKEN,
'volume': pyparsing.Suppress('volume=') +
_SKIP_SEP.setResultsName('volume') + _SEP_TOKEN,
'bsd_name': pyparsing.Suppress('bsdname=') +
(_SKIP_SEP | _SKIP_END).setResultsName('bsd_name') +
pyparsing.Optional(_SEP_TOKEN),
'fs': pyparsing.Suppress('fs=') + _SKIP_SEP.setResultsName('fs') +
_SEP_TOKEN,
'model': pyparsing.Suppress('model=') +
_SKIP_SEP.setResultsName('model') + _SEP_TOKEN,
'serial': pyparsing.Suppress('serial=') +
_SKIP_SEP.setResultsName('serial') + _SEP_TOKEN,
'bus': pyparsing.Suppress('bus=') + _SKIP_SEP.setResultsName('bus') +
_SEP_TOKEN,
'dmg_path': pyparsing.Suppress('dmgpath=') +
_SKIP_SEP.setResultsName('dmg_path') + _SEP_TOKEN,
'appearance': pyparsing.Suppress('appearance=') +
_SKIP_END.setResultsName('appearance')
}
_PYPARSING_COMPONENTS['date'] = pyparsing.Combine(
pyparsing.Suppress('[') +
pyparsing.Word(pyparsing.nums, exact=4) + pyparsing.Literal('-') +
pyparsing.Word(pyparsing.nums, exact=2) + pyparsing.Literal('-') +
pyparsing.Word(pyparsing.nums, exact=2) + pyparsing.Literal('T') +
pyparsing.Word(pyparsing.nums, exact=2) + pyparsing.Literal(':') +
pyparsing.Word(pyparsing.nums, exact=2) + pyparsing.Literal(':') +
pyparsing.Word(pyparsing.nums, exact=2) + pyparsing.Literal('.') +
pyparsing.Word(pyparsing.nums, exact=3) + pyparsing.Literal('Z') +
pyparsing.Suppress(']'))
_VERIFICATION_REGEX = re.compile(
r'^\[\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z\] [EACWNID] santad:')
_QUOTA_EXCEEDED_LINE = (
_PYPARSING_COMPONENTS['date'] +
pyparsing.Literal(
'*** LOG MESSAGE QUOTA EXCEEDED - SOME MESSAGES FROM THIS PROCESS '
'HAVE BEEN DISCARDED ***'))
_EXECUTION_LINE = (
_PYPARSING_COMPONENTS['date'].setResultsName('date') +
pyparsing.Suppress('I santad:') +
pyparsing.Suppress('action=') +
pyparsing.Literal('EXEC').setResultsName('action') + _SEP_TOKEN +
_PYPARSING_COMPONENTS['decision'] +
_PYPARSING_COMPONENTS['reason'] +
_PYPARSING_COMPONENTS['sha256'] +
pyparsing.Optional(_PYPARSING_COMPONENTS['cert_sha256']) +
pyparsing.Optional(_PYPARSING_COMPONENTS['cert_cn']) +
pyparsing.Optional(_PYPARSING_COMPONENTS['quarantine_url']) +
_PYPARSING_COMPONENTS['pid'] +
_PYPARSING_COMPONENTS['ppid'] +
_PYPARSING_COMPONENTS['uid'] +
_PYPARSING_COMPONENTS['user'] +
_PYPARSING_COMPONENTS['gid'] +
_PYPARSING_COMPONENTS['group'] +
_PYPARSING_COMPONENTS['mode'] +
_PYPARSING_COMPONENTS['path'] +
pyparsing.Optional(_PYPARSING_COMPONENTS['args']))
_FILE_OPERATION_LINE = (
_PYPARSING_COMPONENTS['date'].setResultsName('date') +
pyparsing.Suppress('I santad:') +
pyparsing.Suppress('action=') +
(pyparsing.Literal('WRITE') ^
pyparsing.Literal('RENAME') ^
pyparsing.Literal('DELETE')).setResultsName('action') +
_SEP_TOKEN +
_PYPARSING_COMPONENTS['path'] +
pyparsing.Optional(_PYPARSING_COMPONENTS['newpath']) +
_PYPARSING_COMPONENTS['pid'] +
_PYPARSING_COMPONENTS['ppid'] +
_PYPARSING_COMPONENTS['process'] +
_PYPARSING_COMPONENTS['processpath'] +
_PYPARSING_COMPONENTS['uid'] +
_PYPARSING_COMPONENTS['user'] +
_PYPARSING_COMPONENTS['gid'] +
_PYPARSING_COMPONENTS['group'])
_DISK_MOUNT_LINE = (
_PYPARSING_COMPONENTS['date'].setResultsName('date') +
pyparsing.Suppress('I santad:') +
pyparsing.Suppress('action=') +
pyparsing.Literal('DISKAPPEAR').setResultsName('action') + _SEP_TOKEN +
_PYPARSING_COMPONENTS['mount'] +
_PYPARSING_COMPONENTS['volume'] +
_PYPARSING_COMPONENTS['bsd_name'] +
_PYPARSING_COMPONENTS['fs'] +
_PYPARSING_COMPONENTS['model'] +
_PYPARSING_COMPONENTS['serial'] +
_PYPARSING_COMPONENTS['bus'] +
_PYPARSING_COMPONENTS['dmg_path'] +
_PYPARSING_COMPONENTS['appearance'])
_DISK_UMOUNT_LINE = (
_PYPARSING_COMPONENTS['date'].setResultsName('date') +
pyparsing.Suppress('I santad:') +
pyparsing.Suppress('action=') +
pyparsing.Literal('DISKDISAPPEAR').setResultsName('action') + _SEP_TOKEN +
_PYPARSING_COMPONENTS['mount'] +
_PYPARSING_COMPONENTS['volume'] +
_PYPARSING_COMPONENTS['bsd_name'])
LINE_STRUCTURES = [
('execution_line', _EXECUTION_LINE),
('file_system_event_line', _FILE_OPERATION_LINE),
('mount_line', _DISK_MOUNT_LINE),
('umount_line', _DISK_UMOUNT_LINE),
('quota_exceeded_line', _QUOTA_EXCEEDED_LINE)]
_SUPPORTED_KEYS = frozenset([key for key, _ in LINE_STRUCTURES])
def ParseRecord(self, parser_mediator, key, structure):
"""Parses a matching entry.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
key (str): name of the parsed structure.
structure (pyparsing.ParseResults): elements parsed from the file.
Raises:
ParseError: when the structure type is unknown.
"""
if key not in self._SUPPORTED_KEYS:
raise errors.ParseError(
'Unable to parse record, unknown structure: {0:s}'.format(key))
if key == 'quota_exceeded_line':
# skip this line
return
date_time = dfdatetime_time_elements.TimeElementsInMilliseconds()
try:
date_time.CopyFromStringISO8601(structure.date)
except ValueError:
parser_mediator.ProduceExtractionWarning(
'invalid date time value: {0:s}'.format(structure.date))
return
if key == 'execution_line':
event_data = SantaExecutionEventData()
event_data.action = structure.action
event_data.decision = structure.decision
event_data.reason = structure.reason
event_data.process_hash = structure.sha256
event_data.certificate_hash = structure.get('cert_sha256', None)
event_data.certificate_common_name = structure.get('cert_cn', None)
event_data.quarantine_url = structure.get('quarantine_url', None)
event_data.pid = structure.pid
event_data.ppid = structure.ppid
event_data.uid = structure.uid
event_data.user = structure.user
event_data.gid = structure.gid
event_data.group = structure.group
event_data.mode = structure.mode
event_data.process_path = structure.path
event_data.process_arguments = structure.get('args', None)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_RUN)
if key == 'file_system_event_line':
event_data = SantaFileSystemEventData()
event_data.action = structure.action
event_data.file_path = structure.path
event_data.file_new_path = structure.get('newpath', None)
event_data.pid = structure.pid
event_data.ppid = structure.ppid
event_data.process = structure.process
event_data.process_path = structure.processpath
event_data.uid = structure.uid
event_data.user = structure.user
event_data.gid = structure.gid
event_data.group = structure.group
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_WRITTEN)
if key == 'umount_line':
event_data = SantaMountEventData()
event_data.action = structure.action
event_data.mount = structure.mount
event_data.volume = structure.volume
event_data.bsd_name = structure.bsd_name
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_WRITTEN)
if key == 'mount_line':
event_data = SantaMountEventData()
event_data.action = structure.action
event_data.mount = structure.mount
event_data.volume = structure.volume
event_data.bsd_name = structure.bsd_name
event_data.fs = structure.fs
event_data.model = structure.model
event_data.serial = structure.serial
event_data.bus = structure.bus
event_data.dmg_path = structure.dmg_path
event_data.appearance = structure.appearance
if event_data.appearance:
new_date_time = dfdatetime_time_elements.TimeElementsInMilliseconds()
try:
new_date_time.CopyFromStringISO8601(event_data.appearance)
new_event = time_events.DateTimeValuesEvent(
new_date_time, definitions.TIME_DESCRIPTION_FIRST_CONNECTED)
parser_mediator.ProduceEventWithEventData(new_event, event_data)
except ValueError:
parser_mediator.ProduceExtractionWarning(
'invalid date time value: {0:s}'.format(event_data.appearance))
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
# pylint: disable=unused-argument
def VerifyStructure(self, parser_mediator, line):
"""Verifies that this is a santa log file.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
line (str): line from the text file.
Returns:
bool: True if this is the correct parser, False otherwise.
"""
return re.match(self._VERIFICATION_REGEX, line) is not None
manager.ParsersManager.RegisterParser(SantaParser)
```
#### File: parsers/winreg_plugins/mrulistex.py
```python
from __future__ import unicode_literals
import abc
from dtfabric.runtime import data_maps as dtfabric_data_maps
from plaso.containers import time_events
from plaso.containers import windows_events
from plaso.lib import errors
from plaso.lib import definitions
from plaso.parsers import logger
from plaso.parsers import winreg
from plaso.parsers.shared import shell_items
from plaso.parsers.winreg_plugins import dtfabric_plugin
from plaso.parsers.winreg_plugins import interface
class MRUListExStringRegistryKeyFilter(
interface.WindowsRegistryKeyWithValuesFilter):
"""Windows Registry key with values filter."""
_IGNORE_KEY_PATH_SEGMENTS = frozenset([
'\\BagMRU\\'.upper(),
'\\Explorer\\ComDlg32\\OpenSavePidlMRU\\'.upper()])
_IGNORE_KEY_PATH_SUFFIXES = frozenset([
'\\BagMRU'.upper(),
'\\Explorer\\StreamMRU'.upper(),
'\\Explorer\\ComDlg32\\OpenSavePidlMRU'.upper()])
_VALUE_NAMES = ['0', 'MRUListEx']
def __init__(self):
"""Initializes Windows Registry key filter object."""
super(MRUListExStringRegistryKeyFilter, self).__init__(self._VALUE_NAMES)
def Match(self, registry_key):
"""Determines if a Windows Registry key matches the filter.
Args:
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
Returns:
bool: True if the Windows Registry key matches the filter.
"""
key_path_upper = registry_key.path.upper()
# Prevent this filter matching non-string MRUListEx values.
for ignore_key_path_suffix in self._IGNORE_KEY_PATH_SUFFIXES:
if key_path_upper.endswith(ignore_key_path_suffix):
return False
for ignore_key_path_segment in self._IGNORE_KEY_PATH_SEGMENTS:
if ignore_key_path_segment in key_path_upper:
return False
return super(MRUListExStringRegistryKeyFilter, self).Match(registry_key)
class BaseMRUListExWindowsRegistryPlugin(
dtfabric_plugin.DtFabricBaseWindowsRegistryPlugin):
"""Class for common MRUListEx Windows Registry plugin functionality."""
_SOURCE_APPEND = ': MRUListEx'
_DEFINITION_FILE = 'mru.yaml'
@abc.abstractmethod
def _ParseMRUListExEntryValue(
self, parser_mediator, registry_key, entry_index, entry_number, **kwargs):
"""Parses the MRUListEx entry value.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key that contains
the MRUListEx value.
entry_index (int): MRUListEx entry index.
entry_number (int): entry number.
Returns:
str: MRUList entry value.
"""
def _ParseMRUListExValue(self, registry_key):
"""Parses the MRUListEx value in a given Registry key.
Args:
registry_key (dfwinreg.WinRegistryKey): Windows Registry key that contains
the MRUListEx value.
Returns:
mrulistex_entries: MRUListEx entries or None if not available.
"""
mrulistex_value = registry_key.GetValueByName('MRUListEx')
# The key exists but does not contain a value named "MRUList".
if not mrulistex_value:
return None
mrulistex_entries_map = self._GetDataTypeMap('mrulistex_entries')
context = dtfabric_data_maps.DataTypeMapContext(values={
'data_size': len(mrulistex_value.data)})
return self._ReadStructureFromByteStream(
mrulistex_value.data, 0, mrulistex_entries_map, context=context)
def _ParseMRUListExKey(
self, parser_mediator, registry_key, codepage='cp1252'):
"""Extract event objects from a MRUListEx Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
codepage (Optional[str]): extended ASCII string codepage.
"""
try:
mrulistex = self._ParseMRUListExValue(registry_key)
except (ValueError, errors.ParseError) as exception:
parser_mediator.ProduceExtractionWarning(
'unable to parse MRUListEx value with error: {0!s}'.format(exception))
return
if not mrulistex:
return
values_dict = {}
found_terminator = False
for entry_index, entry_number in enumerate(mrulistex):
# The MRU list is terminated with -1 (0xffffffff).
if entry_number == -1:
break
if found_terminator:
parser_mediator.ProduceExtractionWarning((
'found additional MRUListEx entries after terminator in key: '
'{0:s}.').format(registry_key.path))
# Only create one parser error per terminator.
found_terminator = False
value_string = self._ParseMRUListExEntryValue(
parser_mediator, registry_key, entry_index, entry_number,
codepage=codepage)
value_text = 'Index: {0:d} [MRU Value {1:d}]'.format(
entry_index + 1, entry_number)
values_dict[value_text] = value_string
event_data = windows_events.WindowsRegistryEventData()
event_data.key_path = registry_key.path
event_data.offset = registry_key.offset
event_data.regvalue = values_dict
event_data.source_append = self._SOURCE_APPEND
event = time_events.DateTimeValuesEvent(
registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
class MRUListExStringWindowsRegistryPlugin(BaseMRUListExWindowsRegistryPlugin):
"""Windows Registry plugin to parse a string MRUListEx."""
NAME = 'mrulistex_string'
DESCRIPTION = 'Parser for Most Recently Used (MRU) Registry data.'
FILTERS = frozenset([MRUListExStringRegistryKeyFilter()])
URLS = [
'http://forensicartifacts.com/2011/02/recentdocs/',
'https://github.com/libyal/winreg-kb/wiki/MRU-keys']
# pylint: disable=arguments-differ
def _ParseMRUListExEntryValue(
self, parser_mediator, registry_key, entry_index, entry_number, **kwargs):
"""Parses the MRUListEx entry value.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key that contains
the MRUListEx value.
entry_index (int): MRUListEx entry index.
entry_number (int): entry number.
Returns:
str: MRUList entry value.
"""
value_string = ''
value = registry_key.GetValueByName('{0:d}'.format(entry_number))
if value is None:
parser_mediator.ProduceExtractionWarning(
'missing MRUListEx value: {0:d} in key: {1:s}.'.format(
entry_number, registry_key.path))
elif value.DataIsString():
value_string = value.GetDataAsObject()
elif value.DataIsBinaryData():
utf16le_string_map = self._GetDataTypeMap('utf16le_string')
try:
value_string = self._ReadStructureFromByteStream(
value.data, 0, utf16le_string_map)
except (ValueError, errors.ParseError) as exception:
parser_mediator.ProduceExtractionWarning((
'unable to parse MRUListEx entry value: {0:d} with error: '
'{1!s}').format(entry_number, exception))
value_string = value_string.rstrip('\x00')
return value_string
# pylint: disable=arguments-differ
def ExtractEvents(
self, parser_mediator, registry_key, codepage='cp1252', **kwargs):
"""Extracts events from a Windows Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
codepage (Optional[str]): extended ASCII string codepage.
"""
self._ParseMRUListExKey(parser_mediator, registry_key, codepage=codepage)
class MRUListExShellItemListWindowsRegistryPlugin(
BaseMRUListExWindowsRegistryPlugin):
"""Windows Registry plugin to parse a shell item list MRUListEx."""
NAME = 'mrulistex_shell_item_list'
DESCRIPTION = 'Parser for Most Recently Used (MRU) Registry data.'
FILTERS = frozenset([
interface.WindowsRegistryKeyPathFilter(
'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\'
'Explorer\\ComDlg32\\OpenSavePidlMRU'),
interface.WindowsRegistryKeyPathFilter(
'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\'
'Explorer\\StreamMRU')])
# pylint: disable=arguments-differ
def _ParseMRUListExEntryValue(
self, parser_mediator, registry_key, entry_index, entry_number,
codepage='cp1252', **kwargs):
"""Parses the MRUListEx entry value.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key that contains
the MRUListEx value.
entry_index (int): MRUListEx entry index.
entry_number (int): entry number.
codepage (Optional[str]): extended ASCII string codepage.
Returns:
str: MRUList entry value.
"""
value_string = ''
value = registry_key.GetValueByName('{0:d}'.format(entry_number))
if value is None:
parser_mediator.ProduceExtractionWarning(
'missing MRUListEx value: {0:d} in key: {1:s}.'.format(
entry_number, registry_key.path))
elif not value.DataIsBinaryData():
logger.debug((
'[{0:s}] Non-binary MRUListEx entry value: {1:d} in key: '
'{2:s}.').format(self.NAME, entry_number, registry_key.path))
elif value.data:
shell_items_parser = shell_items.ShellItemsParser(registry_key.path)
shell_items_parser.ParseByteStream(
parser_mediator, value.data, codepage=codepage)
value_string = 'Shell item path: {0:s}'.format(
shell_items_parser.CopyToPath())
return value_string
# pylint: disable=arguments-differ
def ExtractEvents(
self, parser_mediator, registry_key, codepage='cp1252', **kwargs):
"""Extracts events from a Windows Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
codepage (Optional[str]): extended ASCII string codepage.
"""
if registry_key.name != 'OpenSavePidlMRU':
self._ParseMRUListExKey(parser_mediator, registry_key, codepage=codepage)
if registry_key.name == 'OpenSavePidlMRU':
# For the OpenSavePidlMRU MRUListEx we also need to parse its subkeys
# since the Registry key path does not support wildcards yet.
for subkey in registry_key.GetSubkeys():
self._ParseMRUListExKey(parser_mediator, subkey, codepage=codepage)
class MRUListExStringAndShellItemWindowsRegistryPlugin(
BaseMRUListExWindowsRegistryPlugin):
"""Windows Registry plugin to parse a string and shell item MRUListEx."""
NAME = 'mrulistex_string_and_shell_item'
DESCRIPTION = 'Parser for Most Recently Used (MRU) Registry data.'
FILTERS = frozenset([
interface.WindowsRegistryKeyPathFilter(
'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\'
'Explorer\\RecentDocs')])
# pylint: disable=arguments-differ
def _ParseMRUListExEntryValue(
self, parser_mediator, registry_key, entry_index, entry_number,
codepage='cp1252', **kwargs):
"""Parses the MRUListEx entry value.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key that contains
the MRUListEx value.
entry_index (int): MRUListEx entry index.
entry_number (int): entry number.
codepage (Optional[str]): extended ASCII string codepage.
Returns:
str: MRUList entry value.
"""
value_string = ''
value = registry_key.GetValueByName('{0:d}'.format(entry_number))
if value is None:
parser_mediator.ProduceExtractionWarning(
'missing MRUListEx value: {0:d} in key: {1:s}.'.format(
entry_number, registry_key.path))
elif not value.DataIsBinaryData():
logger.debug((
'[{0:s}] Non-binary MRUListEx entry value: {1:d} in key: '
'{2:s}.').format(self.NAME, entry_number, registry_key.path))
elif value.data:
utf16le_string_map = self._GetDataTypeMap('utf16le_string')
context = dtfabric_data_maps.DataTypeMapContext()
try:
path = self._ReadStructureFromByteStream(
value.data, 0, utf16le_string_map, context=context)
except (ValueError, errors.ParseError) as exception:
parser_mediator.ProduceExtractionWarning((
'unable to parse MRUListEx entry value: {0:d} with error: '
'{1!s}').format(entry_number, exception))
return value_string
path = path.rstrip('\x00')
shell_item_data = value.data[context.byte_size:]
if not shell_item_data:
parser_mediator.ProduceExtractionWarning((
'missing shell item in MRUListEx value: {0:d} in key: '
'{1:s}.').format(entry_number, registry_key.path))
value_string = 'Path: {0:s}'.format(path)
else:
shell_items_parser = shell_items.ShellItemsParser(registry_key.path)
shell_items_parser.ParseByteStream(
parser_mediator, shell_item_data, codepage=codepage)
value_string = 'Path: {0:s}, Shell item: [{1:s}]'.format(
path, shell_items_parser.CopyToPath())
return value_string
# pylint: disable=arguments-differ
def ExtractEvents(
self, parser_mediator, registry_key, codepage='cp1252', **kwargs):
"""Extracts events from a Windows Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
codepage (Optional[str]): extended ASCII string codepage.
"""
self._ParseMRUListExKey(parser_mediator, registry_key, codepage=codepage)
if registry_key.name == 'RecentDocs':
# For the RecentDocs MRUListEx we also need to parse its subkeys
# since the Registry key path does not support wildcards yet.
for subkey in registry_key.GetSubkeys():
self._ParseMRUListExKey(parser_mediator, subkey, codepage=codepage)
class MRUListExStringAndShellItemListWindowsRegistryPlugin(
BaseMRUListExWindowsRegistryPlugin):
"""Windows Registry plugin to parse a string and shell item list MRUListEx."""
NAME = 'mrulistex_string_and_shell_item_list'
DESCRIPTION = 'Parser for Most Recently Used (MRU) Registry data.'
FILTERS = frozenset([
interface.WindowsRegistryKeyPathFilter(
'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\'
'Explorer\\ComDlg32\\LastVisitedPidlMRU')])
# pylint: disable=arguments-differ
def _ParseMRUListExEntryValue(
self, parser_mediator, registry_key, entry_index, entry_number,
codepage='cp1252', **kwargs):
"""Parses the MRUListEx entry value.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key that contains
the MRUListEx value.
entry_index (int): MRUListEx entry index.
entry_number (int): entry number.
codepage (Optional[str]): extended ASCII string codepage.
Returns:
str: MRUList entry value.
"""
value_string = ''
value = registry_key.GetValueByName('{0:d}'.format(entry_number))
if value is None:
parser_mediator.ProduceExtractionWarning(
'missing MRUListEx value: {0:d} in key: {1:s}.'.format(
entry_number, registry_key.path))
elif not value.DataIsBinaryData():
logger.debug((
'[{0:s}] Non-binary MRUListEx entry value: {1:d} in key: '
'{2:s}.').format(self.NAME, entry_number, registry_key.path))
elif value.data:
utf16le_string_map = self._GetDataTypeMap('utf16le_string')
context = dtfabric_data_maps.DataTypeMapContext()
try:
path = self._ReadStructureFromByteStream(
value.data, 0, utf16le_string_map, context=context)
except (ValueError, errors.ParseError) as exception:
parser_mediator.ProduceExtractionWarning((
'unable to parse MRUListEx entry value: {0:d} with error: '
'{1!s}').format(entry_number, exception))
return value_string
path = path.rstrip('\x00')
shell_item_list_data = value.data[context.byte_size:]
if not shell_item_list_data:
parser_mediator.ProduceExtractionWarning((
'missing shell item in MRUListEx value: {0:d} in key: '
'{1:s}.').format(entry_number, registry_key.path))
value_string = 'Path: {0:s}'.format(path)
else:
shell_items_parser = shell_items.ShellItemsParser(registry_key.path)
shell_items_parser.ParseByteStream(
parser_mediator, shell_item_list_data, codepage=codepage)
value_string = 'Path: {0:s}, Shell item path: {1:s}'.format(
path, shell_items_parser.CopyToPath())
return value_string
# pylint: disable=arguments-differ
def ExtractEvents(
self, parser_mediator, registry_key, codepage='cp1252', **kwargs):
"""Extracts events from a Windows Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
codepage (Optional[str]): extended ASCII string codepage.
"""
self._ParseMRUListExKey(parser_mediator, registry_key, codepage=codepage)
winreg.WinRegistryParser.RegisterPlugins([
MRUListExStringWindowsRegistryPlugin,
MRUListExShellItemListWindowsRegistryPlugin,
MRUListExStringAndShellItemWindowsRegistryPlugin,
MRUListExStringAndShellItemListWindowsRegistryPlugin])
```
#### File: tests/parsers/java_idx.py
```python
from __future__ import unicode_literals
import unittest
from plaso.formatters import java_idx as _ # pylint: disable=unused-import
from plaso.lib import definitions
from plaso.parsers import java_idx
from tests import test_lib as shared_test_lib
from tests.parsers import test_lib
class IDXTest(test_lib.ParserTestCase):
"""Tests for Java Cache IDX file parser."""
@shared_test_lib.skipUnlessHasTestFile(['java_602.idx'])
def testParse602(self):
"""Tests the Parse function on a version 602 IDX file."""
parser = java_idx.JavaIDXParser()
storage_writer = self._ParseFile(['java_602.idx'], parser)
self.assertEqual(storage_writer.number_of_warnings, 0)
self.assertEqual(storage_writer.number_of_events, 2)
events = list(storage_writer.GetEvents())
event = events[0]
self.CheckTimestamp(event.timestamp, '2010-05-05 01:34:19.720000')
self.assertEqual(event.idx_version, 602)
expected_url = 'http://www.gxxxxx.com/a/java/xxz.jar'
self.assertEqual(event.url, expected_url)
description_expected = 'File Hosted Date'
self.assertEqual(event.timestamp_desc, description_expected)
# Parse second event. Same metadata; different timestamp event.
event = events[1]
self.CheckTimestamp(event.timestamp, '2010-05-05 03:52:31.000000')
self.assertEqual(event.idx_version, 602)
expected_url = 'http://www.gxxxxx.com/a/java/xxz.jar'
self.assertEqual(event.url, expected_url)
description_expected = definitions.TIME_DESCRIPTION_FILE_DOWNLOADED
self.assertEqual(event.timestamp_desc, description_expected)
@shared_test_lib.skipUnlessHasTestFile(['java.idx'])
def testParse605(self):
"""Tests the Parse function on a version 605 IDX file."""
parser = java_idx.JavaIDXParser()
storage_writer = self._ParseFile(['java.idx'], parser)
self.assertEqual(storage_writer.number_of_warnings, 0)
self.assertEqual(storage_writer.number_of_events, 2)
events = list(storage_writer.GetEvents())
event = events[0]
self.CheckTimestamp(event.timestamp, '2001-07-26 05:00:00.000000')
self.assertEqual(event.idx_version, 605)
self.assertEqual(event.ip_address, '10.7.119.10')
expected_url = (
'http://xxxxc146d3.gxhjxxwsf.xx:82/forum/dare.php?'
'hsh=6&key=<KEY>')
self.assertEqual(event.url, expected_url)
description_expected = 'File Hosted Date'
self.assertEqual(event.timestamp_desc, description_expected)
# Parse second event. Same metadata; different timestamp event.
event = events[1]
self.CheckTimestamp(event.timestamp, '2013-01-13 16:22:01.000000')
self.assertEqual(event.idx_version, 605)
self.assertEqual(event.ip_address, '10.7.119.10')
expected_url = (
'http://xxxxc146d3.gxhjxxwsf.xx:82/forum/dare.php?'
'hsh=6&key=<KEY>')
self.assertEqual(event.url, expected_url)
description_expected = definitions.TIME_DESCRIPTION_FILE_DOWNLOADED
self.assertEqual(event.timestamp_desc, description_expected)
if __name__ == '__main__':
unittest.main()
```
#### File: parsers/olecf_plugins/summary.py
```python
from __future__ import unicode_literals
import unittest
from plaso.formatters import olecf # pylint: disable=unused-import
from plaso.parsers.olecf_plugins import summary
from tests import test_lib as shared_test_lib
from tests.parsers.olecf_plugins import test_lib
class TestSummaryInformationOLECFPlugin(test_lib.OLECFPluginTestCase):
"""Tests for the OLECF summary information plugin."""
@shared_test_lib.skipUnlessHasTestFile(['Document.doc'])
def testProcess(self):
"""Tests the Process function on a Summary Information stream."""
plugin = summary.SummaryInformationOLECFPlugin()
storage_writer = self._ParseOLECFFileWithPlugin(['Document.doc'], plugin)
self.assertEqual(storage_writer.number_of_warnings, 0)
self.assertEqual(storage_writer.number_of_events, 3)
events = list(storage_writer.GetSortedEvents())
event = events[0]
self.CheckTimestamp(event.timestamp, '2012-12-10 18:38:00.000000')
self.assertEqual(event.timestamp_desc, 'Document Creation Time')
self.assertEqual(event.name, 'Summary Information')
self.assertEqual(event.title, 'Table of Context')
self.assertEqual(event.author, '<NAME>')
self.assertEqual(event.template, 'Normal.dotm')
self.assertEqual(event.last_saved_by, 'Nides')
self.assertEqual(event.revision_number, '4')
self.assertEqual(event.number_of_characters, 18)
self.assertEqual(event.application, 'Microsoft Office Word')
self.assertEqual(event.security, 0)
expected_message = (
'Title: Table of Context '
'Author: <NAME> '
'Template: Normal.dotm '
'Revision number: 4 '
'Last saved by: Nides '
'Number of pages: 1 '
'Number of words: 3 '
'Number of characters: 18 '
'Application: Microsoft Office Word '
'Security: 0')
expected_short_message = (
'Title: Table of Context '
'Author: <NAME> '
'Revision number: 4')
# TODO: add support for:
# 'Total edit time (secs): 0 '
self._TestGetMessageStrings(event, expected_message, expected_short_message)
class TestDocumentSummaryInformationOLECFPlugin(test_lib.OLECFPluginTestCase):
"""Tests for the OLECF document summary information plugin."""
@shared_test_lib.skipUnlessHasTestFile(['Document.doc'])
def testProcess(self):
"""Tests the Process function on a Document Summary Information stream."""
plugin = summary.DocumentSummaryInformationOLECFPlugin()
storage_writer = self._ParseOLECFFileWithPlugin(['Document.doc'], plugin)
self.assertEqual(storage_writer.number_of_warnings, 0)
self.assertEqual(storage_writer.number_of_events, 1)
events = list(storage_writer.GetSortedEvents())
event = events[0]
self.assertEqual(event.name, 'Document Summary Information')
self.assertEqual(event.number_of_lines, 1)
self.assertEqual(event.number_of_paragraphs, 1)
self.assertEqual(event.company, 'KPMG')
self.assertFalse(event.shared_document)
self.assertEqual(event.application_version, '14.0')
# TODO: add support for:
# self.assertEqual(event.is_shared, False)
expected_message = (
'Number of lines: 1 '
'Number of paragraphs: 1 '
'Company: KPMG '
'Shared document: False '
'Application version: 14.0')
expected_short_message = (
'Company: KPMG')
self._TestGetMessageStrings(event, expected_message, expected_short_message)
if __name__ == '__main__':
unittest.main()
```
#### File: parsers/sqlite_plugins/chrome_extension_activity.py
```python
from __future__ import unicode_literals
import unittest
from plaso.formatters import chrome_extension_activity as _ # pylint: disable=unused-import
from plaso.lib import definitions
from plaso.parsers.sqlite_plugins import chrome_extension_activity
from tests import test_lib as shared_test_lib
from tests.parsers.sqlite_plugins import test_lib
class ChromeExtensionActivityPluginTest(test_lib.SQLitePluginTestCase):
"""Tests for the Google Chrome extension activity database plugin."""
@shared_test_lib.skipUnlessHasTestFile(['Extension Activity'])
def testProcess(self):
"""Tests the Process function on a Chrome extension activity database."""
plugin = chrome_extension_activity.ChromeExtensionActivityPlugin()
storage_writer = self._ParseDatabaseFileWithPlugin(
['Extension Activity'], plugin)
self.assertEqual(storage_writer.number_of_warnings, 0)
self.assertEqual(storage_writer.number_of_events, 56)
events = list(storage_writer.GetEvents())
event = events[0]
self.CheckTimestamp(event.timestamp, '2014-11-25 21:08:23.698737')
self.assertEqual(
event.timestamp_desc, definitions.TIME_DESCRIPTION_UNKNOWN)
expected_extension_id = 'ognampngfcbddbfemdapefohjiobgbdl'
self.assertEqual(event.extension_id, expected_extension_id)
self.assertEqual(event.action_type, 1)
self.assertEqual(event.activity_id, 48)
self.assertEqual(event.api_name, 'browserAction.onClicked')
expected_message = (
'Chrome extension: ognampngfcbddbfemdapefohjiobgbdl '
'Action type: API event callback (type 1) '
'Activity identifier: 48 '
'API name: browserAction.onClicked')
expected_short_message = (
'ognampngfcbddbfemdapefohjiobgbdl browserAction.onClicked')
self._TestGetMessageStrings(event, expected_message, expected_short_message)
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jepret/jepret-backend",
"score": 3
}
|
#### File: jepret-backend/core/util.py
```python
from string import ascii_letters
from flask import jsonify
from random import choice
def respond_data(data, status_code=200):
return jsonify({"data": data, "code": status_code}), status_code
def generate_random_str(length=30):
return "".join([choice(ascii_letters) for _ in range(length)])
```
#### File: jepret-backend/core/validator.py
```python
from cerberus.validator import Validator
from flask import request
from core.middleware import Middleware
from core.error import BaseError
class BaseValidator(Middleware):
def __init__(self, schema):
Middleware.__init__(self)
self.next = None
self.validator = Validator()
self.schema = schema
def check(self):
return self.validator.validate(request.json, self.schema)
def default(self):
raise BaseError(message=self.validator.errors, status_code=400)
```
#### File: jepret-backend/handler/file.py
```python
import os
from flask import request, g, send_file
from werkzeug.utils import secure_filename
from core.util import *
from core.error import BaseError
from model import File, User
ALLOWED_EXTENSIONS = ["png", "jpg", "jpeg", "gif"]
def upload_file():
if "file[]" not in request.files:
raise BaseError("No file uploaded", 400)
user = User.get_or_none(User.id == g.user["id"])
if not user:
raise BaseError("Unauthorized", 401)
files = request.files.getlist("file[]")
for file in files:
if file.filename == "":
raise BaseError("No filename uploaded", 400)
filename = secure_filename(file.filename)
extension = filename.split(".")[-1]
if extension not in ALLOWED_EXTENSIONS:
raise BaseError(extension + " not allowed", 400)
result = []
for file in files:
filename = secure_filename(file.filename)
db_file = File(filename=filename, owner=user)
db_file.save()
file.save(os.path.join(os.environ["UPLOAD_DIRECTORY"], db_file.unique_id))
result.append(db_file.to_dict())
return respond_data(result, 201)
def get_file(unique_id):
file = File.get_or_none(File.unique_id == unique_id)
if not file:
raise BaseError("File not found", 404)
extension = file.filename.split(".")[-1]
if extension == "jpg":
extension = "jpeg"
mime_type = "image/" + extension
return send_file(
os.path.join(os.environ["UPLOAD_DIRECTORY"], file.unique_id), mime_type
)
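# Hedged client-side sketch (added for illustration, not part of the original
# handler module): uploading two images to upload_file above. The route URL
# and the Authorization header are assumptions, since routing and the auth
# middleware live elsewhere; only the "file[]" multipart field name and the
# allowed image extensions come from this file.
def _example_upload(base_url, token):
    import requests
    files = [
        ("file[]", ("a.png", open("a.png", "rb"), "image/png")),
        ("file[]", ("b.jpg", open("b.jpg", "rb"), "image/jpeg")),
    ]
    # Hypothetical endpoint for upload_file; adjust to the real route table.
    response = requests.post(
        base_url + "/file/",
        headers={"Authorization": token},  # assumption: token-based auth header
        files=files,
    )
    return response.json()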
```
#### File: jepret-backend/handler/statistic.py
```python
from flask import request, g
from core.error import BaseError
from core.util import *
from model import UMKMStatistic, UMKM
def get_umkm_statistic(umkm_id):
umkm = UMKM.get_or_none((UMKM.id == umkm_id) & (UMKM.owner == g.user['id']))
if not umkm:
raise BaseError("UMKM not found", 404)
stat = UMKMStatistic.get_or_none(UMKMStatistic.umkm == umkm)
if not stat:
stat = UMKMStatistic(umkm=umkm)
stat.save()
return respond_data(stat.to_dict())
```
#### File: jepret-backend/handler/transaction.py
```python
from flask import request, g
from core.error import BaseError
from core.util import *
from model import Transaction, UMKM, User
def create_transaction():
data = request.json
umkm = UMKM.get_or_none(UMKM.unique_id == data['umkm_uid'])
if not umkm:
raise BaseError("UMKM not found", 404)
user = User.get_or_none(User.id == g.user['id'])
if not user:
raise BaseError("Unauthorized", 401)
if data['amount'] > user.balance:
raise BaseError("Not enough balance", 400)
user.balance -= data['amount']
user.save()
umkm.balance += data['amount']
umkm.save()
transaction = Transaction(
sender=user,
receiver=umkm,
amount=data['amount']
)
transaction.save()
return respond_data(transaction.to_dict())
```
#### File: jepret-backend/route/UMKM_detail.py
```python
from core.router import Router
from core.validator import BaseValidator
import handler.UMKM_detail as handler
from middleware.auth import AuthMiddleware
def validate_gender(field, value, error):
    if value not in ("m", "f"):
        error(field, "Gender must be 'm' or 'f'")
def validate_rate(field, value, error):
    if value <= 0 or value > 5:
        error(field, "Rate must be between 1 and 5")
create_umkm_detail = AuthMiddleware()
create_umkm_detail = create_umkm_detail.add_next(
BaseValidator({
"owner_name": {
"type": "string",
"required": True,
"empty": False
},
"position": {
"type": "string",
"required": True,
"empty": False
},
"gender": {
"type": "string",
"required": True,
"empty": False,
"check_with": validate_gender
},
"birth_date": {
"type": "string",
"required": True,
"empty": False
},
"expert_count": {
"type": "integer",
"required": True,
},
"worker_count": {
"type": "integer",
"required": True,
},
"gross_revenue": {
"type": "integer",
"required": True
},
"average_price": {
"type": "integer",
"required": True
},
"operational_cost": {
"type": "integer",
"required": True
},
"need_funding": {
"type": "boolean",
"required": True
},
"funding_amount": {
"type": "integer",
"required": True
},
"funding_month_count": {
"type": "integer",
"required": True
},
"money_eq_success": {
"type": "integer",
"required": True,
"check_with": validate_rate
},
"money_eq_competence": {
"type": "integer",
"required": True,
"check_with": validate_rate
},
"do_care_money": {
"type": "integer",
"required": True,
"check_with": validate_rate
},
})
)
create_umkm_detail = create_umkm_detail.add_next(handler.create_umkm_detail)
router = Router("/umkm-detail")
router.route("/", create_umkm_detail, methods=["POST"])
```
|
{
"source": "jepst/Anubis",
"score": 2
}
|
#### File: views/pipeline/pipeline.py
```python
import json
from typing import Union
from flask import Blueprint, request
from parse import parse
from anubis.models import AssignmentTest, Submission, SubmissionTestResult, db
from anubis.utils.http import success_response
from anubis.utils.data import MYSQL_TEXT_MAX_LENGTH
from anubis.utils.http.decorators import json_endpoint, json_response
from anubis.utils.logging import logger
from anubis.utils.pipeline.decorators import check_submission_token
pipeline = Blueprint("pipeline", __name__, url_prefix="/pipeline")
@pipeline.route("/report/panic/<string:submission_id>", methods=["POST"])
@check_submission_token
@json_response
def pipeline_report_panic(submission: Submission):
"""
Pipeline workers will hit this endpoint if there was
a panic that needs to be reported. This view function
should mark the submission as processed, and update
its state.
POSTed json should be of the shape:
{
"message": "yikers this was a bad error",
"traceback": "optional traceback",
}
:param submission:
:return:
"""
    # Log the panic
logger.error(
"submission panic reported",
extra={
"type": "panic_report",
"submission_id": submission.id,
"assignment_id": submission.assignment_id,
"owner_id": submission.owner_id,
"data": json.dumps(request.json),
},
)
# Set the submission state
submission.processed = True
submission.state = "Whoops! There was an error on our end. The error has been logged."
submission.errors = {"panic": request.json}
# commit the changes to the session
db.session.add(submission)
db.session.commit()
return success_response("Panic successfully reported")
@pipeline.route("/report/build/<string:submission_id>", methods=["POST"])
@check_submission_token
@json_endpoint([("stdout", str), ("passed", bool)])
def pipeline_report_build(submission: Submission, stdout: str, passed: bool, **_):
"""
POSTed json should be of the shape:
{
"stdout": "build logs...",
"passed": True
}
:param submission:
:param stdout:
:param passed:
:return:
"""
if len(stdout) > MYSQL_TEXT_MAX_LENGTH:
stdout = stdout[:MYSQL_TEXT_MAX_LENGTH]
# Log the build being reported
logger.info(
"submission build reported",
extra={
"type": "build_report",
"submission_id": submission.id,
"assignment_id": submission.assignment_id,
"owner_id": submission.owner_id,
"passed": passed,
"stdout": stdout,
},
)
# Update submission build
submission.build.stdout = stdout
submission.build.passed = passed
    # If the build did not pass, then the
# submission pipeline is done
if passed is False:
submission.processed = True
submission.state = "Build did not succeed"
# Add and commit
db.session.add(submission)
db.session.add(submission.build)
db.session.commit()
# Report success
return success_response("Build successfully reported.")
@pipeline.route("/report/test/<string:submission_id>", methods=["POST"])
@check_submission_token
@json_endpoint([("test_name", str), ("passed", bool), ("message", str), ('output_type', str), ("output", str)])
def pipeline_report_test(submission: Submission, test_name: str, passed: bool, message: str, output_type: str, output: str, **_):
"""
Submission pipelines will hit this endpoint when there
is a test result to report.
POSTed json should be of the shape:
{
"test_name": "name of the test",
"passed": True,
"message": "This test worked",
"output_type": "diff",
"output": "--- \n\n+++ \n\n@@ -1,3 +1,3 @@\n\n a\n-c\n+b\n d"
}
:param submission:
:param test_name:
:param passed:
:param message:
:param output:
:param output_type:
:return:
"""
if len(output) > MYSQL_TEXT_MAX_LENGTH:
output = output[:MYSQL_TEXT_MAX_LENGTH]
# Log the build
logger.info(
"submission test reported",
extra={
"type": "test_result",
"submission_id": submission.id,
"assignment_id": submission.assignment_id,
"owner_id": submission.owner_id,
"test_name": test_name,
"test_message": message,
"passed": passed,
"output_type": output_type,
"output": output,
},
)
submission_test_result: Union[SubmissionTestResult, None] = None
# Look for corresponding submission_test_result based on given name
for result in submission.test_results:
# Compare name
if result.assignment_test.name == test_name:
submission_test_result = result
break
# Verify we got a match
if submission_test_result is None:
logger.error("Invalid submission test result reported", extra={"request": request.json})
return success_response({"status": "invalid test name"})
# Update the fields
submission_test_result.passed = passed
submission_test_result.message = message
submission_test_result.output_type = output_type
submission_test_result.output = output
# Add and commit
db.session.add(submission_test_result)
db.session.commit()
return success_response("Test data successfully added.")
@pipeline.route("/report/state/<string:submission_id>", methods=["POST"])
@check_submission_token
@json_endpoint(required_fields=[("state", str)])
def pipeline_report_state(submission: Submission, state: str, **kwargs):
"""
When a submission pipeline wants to report a state, it
hits this endpoint. If there is a ?processed=1 in the
http query, then the submission will also be marked as
processed.
POSTed json should be of the shape:
{
"state": "",
"processed": True # optional
}
:param submission:
:param state:
:return:
"""
# Log the state update
logger.info(
"submission state update",
extra={
"type": "state_report",
"submission_id": submission.id,
"assignment_id": submission.assignment_id,
"owner_id": submission.owner_id,
"state": state,
},
)
# Get the processed option if it was specified
processed = request.args.get("processed", default="0")
# Set the processed field if it was specified
submission.processed = processed != "0"
# Figure out if the test is hidden
# We do this by checking the state that was given,
# to read the name of the test. If the assignment
# test that was found is marked as hidden, then
# we should not update the state of the submission
# model.
#
# If we were to update the state of the submission
# when a hidden test is reported, then it would be
# visible to the students in the frontend.
hidden_test = False
# Do a basic match on the expected test
match = parse("Running test: {}", state)
# If we got a match
if match:
# Get the parsed assignment test name
test_name = match[0]
# Try to get the assignment test
assignment_test = AssignmentTest.query.filter(
AssignmentTest.assignment_id == submission.assignment_id,
AssignmentTest.name == test_name,
).first()
# Set hidden_test to True if the test exists, and if it is marked as hidden
hidden_test = assignment_test is not None and assignment_test.hidden
# Update state field if the state report is not for a hidden test
if not hidden_test:
submission.state = state
# If processed was specified and is of type bool, then update that too
if "processed" in request.json and isinstance(request.json["processed"], bool):
submission.processed = request.json["processed"]
# Add and commit
db.session.add(submission)
db.session.commit()
return success_response("State successfully updated.")
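# Hedged usage sketch (added for illustration, not part of the original view
# module): how a pipeline worker might call the report endpoints above. Only
# the URL paths and JSON shapes come from the routes and docstrings in this
# file; the base URL and the "token" query parameter name are assumptions.
def _example_worker_report(base_url, submission_id, token):
    import requests
    # Report a single (hypothetical) test result, shape per pipeline_report_test
    requests.post(
        "{}/pipeline/report/test/{}".format(base_url, submission_id),
        params={"token": token},  # assumption: how check_submission_token reads the token
        json={
            "test_name": "long file test",
            "passed": True,
            "message": "This test worked",
            "output_type": "text",
            "output": "test passed",
        },
    )
    # Report a state update and mark the submission as processed,
    # shape per pipeline_report_state
    requests.post(
        "{}/pipeline/report/state/{}".format(base_url, submission_id),
        params={"token": token, "processed": "1"},
        json={"state": "Tests completed", "processed": True},
    )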
```
#### File: migrations/versions/df5aa182233f_add_test_points.py
```python
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "df<PASSWORD>"
down_revision = "0<PASSWORD>"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column(
"assignment_test", sa.Column("points", sa.Integer(), nullable=True)
)
conn = op.get_bind()
conn.execute('update assignment_test set points = 10;')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column("assignment_test", "points")
# ### end Alembic commands ###
```
#### File: api/tests/test_assignments_public.py
```python
import copy
from datetime import datetime, timedelta
from utils import Session, with_context
sample_sync = {
"name": "CS-UY 3224 TEST PUBLIC HIDDEN 1",
"course": "CS-UY 3224",
"unique_code": "aaaaa1",
"pipeline_image": "registry.digitalocean.com/anubis/assignment/aa11bb2233",
"tests": [{"name": "abc123", "hidden": False, "points": 10}],
}
sample_sync_2 = copy.deepcopy(sample_sync)
sample_sync_2["name"] = "CS-UY 3224 TEST PUBLIC HIDDEN 2"
sample_sync_2["unique_code"] = "aaaaa2"
@with_context
def update_hidden_assignments(aid1, aid2):
from anubis.models import db, Assignment
db.session.expire_all()
db.session.expunge_all()
assignment1: Assignment = Assignment.query.filter(Assignment.id == aid1).first()
assignment2: Assignment = Assignment.query.filter(Assignment.id == aid2).first()
assignment1.hidden = True
assignment1.release_date = datetime.now() - timedelta(hours=2)
assignment2.hidden = False
assignment2.release_date = datetime.now() + timedelta(hours=2)
db.session.commit()
def create_hidden_assignments():
s = Session("superuser")
d1 = s.post_json("/admin/assignments/sync", json={"assignment": sample_sync})
d2 = s.post_json("/admin/assignments/sync", json={"assignment": sample_sync_2})
update_hidden_assignments(d1['assignment']['id'], d2['assignment']['id'])
def test_assignment_public():
create_hidden_assignments()
s = Session("student")
s.get("/public/assignments")
r = s.get("/public/assignments/list")
assert all(map(lambda a: a["name"].startswith("CS-UY 3224"), r["assignments"]))
assert any(map(lambda a: a["name"] != "CS-UY 3224 TEST PUBLIC HIDDEN 1", r["assignments"]))
assert any(map(lambda a: a["name"] != "CS-UY 3224 TEST PUBLIC HIDDEN 2", r["assignments"]))
s = Session("ta")
r = s.get("/public/assignments")
assert all(map(lambda a: a["name"].startswith("CS-UY 3224"), r["assignments"]))
assert any(map(lambda a: a["name"] == "CS-UY 3224 TEST PUBLIC HIDDEN 1", r["assignments"]))
assert any(map(lambda a: a["name"] == "CS-UY 3224 TEST PUBLIC HIDDEN 2", r["assignments"]))
s = Session("superuser")
r = s.get("/public/assignments")
assert any(map(lambda a: a["name"].startswith("CS-UY 3224"), r["assignments"]))
assert any(map(lambda a: a["name"].startswith("CS-UY 3843"), r["assignments"]))
assert any(map(lambda a: a["name"] == "CS-UY 3224 TEST PUBLIC HIDDEN 1", r["assignments"]))
assert any(map(lambda a: a["name"] == "CS-UY 3224 TEST PUBLIC HIDDEN 2", r["assignments"]))
```
#### File: anubis/assignment/utils.py
```python
import collections
import difflib
import functools
import json
import logging
import os
import subprocess
import typing
import warnings
registered_tests: typing.Dict[str, typing.Callable[[], "TestResult"]] = {}
build_function = None
CompareFuncReturnT = typing.Tuple[bool, typing.List[str]]
CompareFuncT = typing.Callable[[typing.List[str], typing.List[str], bool], CompareFuncReturnT]
class TestResult(object):
def __init__(self):
self.output_type = 'text'
self.output = ""
self.message = ""
self.passed = True
def __repr__(self):
return "<TestResult\n passed={}\n message='{:5.5}'\n output_type='{}'\n output='{:5.5}'\n>".format(
self.passed,
self.message,
self.output_type,
self.output,
)
class BuildResult(object):
def __init__(self):
self.stdout = ""
self.passed = True
def __repr__(self):
return "<BuildResult\n passed={}\n stdout='{:5.5}'>".format(
self.passed,
self.stdout
)
class Panic(Exception):
pass
def exec_as_student(cmd, timeout=60) -> typing.Tuple[str, int]:
"""
Run a command as the student. Any and all times that student
code is run, it should be done through this function. Any other
way would be incredibly insecure.
:param cmd: Command to run
:param timeout: Timeout for command
:return: bytes output, int return code
"""
if os.getcwd() == '/home/anubis':
os.chdir('./student')
return_code = 0
try:
print('{} {}'.format(os.getcwd(), ["env", "-i", "su", "student", "-c", cmd]))
stdout = subprocess.check_output(
["env", "-i", "PATH={}".format(os.environ["PATH"]), "su", "student", "-c", cmd],
timeout=timeout,
stderr=subprocess.STDOUT,
)
except subprocess.CalledProcessError as e:
stdout = e.output
return_code = e.returncode
# Normalize stdout to string
if isinstance(stdout, bytes):
stdout = stdout.decode('utf-8', 'ignore')
logging.info('exec_as_student command={} return_code={} stdout={}'.format(
cmd, return_code, stdout
))
return stdout, return_code
def fix_permissions():
"""
Fix the file permissions of the student repo
* DEPRECATED *
:return:
"""
    warnings.warn('DEPRECATED WARNING: fix_permissions no longer has any effect')
def register_test(test_name):
def decorator(func):
@functools.wraps(func)
def wrapper():
result = TestResult()
func(result)
return result
if wrapper.__dict__.get('test', None) is None:
wrapper.test = {}
wrapper.test['name'] = test_name
if 'hidden' not in wrapper.test:
wrapper.test['hidden'] = False
if 'points' not in wrapper.test:
wrapper.test['points'] = 10
registered_tests[test_name] = wrapper
return wrapper
return decorator
def hide_test():
def decorator(func):
if func.__dict__.get('test', None) is None:
func.test = {}
func.test['hidden'] = True
return func
return decorator
def points_test(points: int):
def decorator(func):
if func.__dict__.get('test', None) is None:
func.test = {}
func.test['points'] = points
return func
return decorator
def register_build(func):
@functools.wraps(func)
def wrapper():
result = BuildResult()
func(result)
return result
global build_function
build_function = wrapper
return wrapper
def trim(stdout: str) -> typing.List[str]:
"""
This mess of a function is where we parse out the
pieces we want from the xv6 output.
A parsed list of string lines is returned.
:param stdout:
:return:
"""
stdout_lines = stdout.split('\n')
try:
stdout_lines = stdout_lines[stdout_lines.index('init: starting sh') + 1:]
    except (ValueError, IndexError):
return stdout_lines
while len(stdout_lines) != 0 and (len(stdout_lines[-1].strip()) == 0 or stdout_lines[-1].strip() == '$'):
stdout_lines.pop()
if len(stdout_lines) != 0 and stdout_lines[-1].endswith('$'):
stdout_lines[-1] = stdout_lines[-1].rstrip('$')
if len(stdout_lines) != 0 and stdout_lines[0].startswith('$'):
stdout_lines[0] = stdout_lines[0].lstrip('$').strip()
for index in range(len(stdout_lines)):
stdout_lines[index] = stdout_lines[index].strip()
if len(stdout_lines) != 0 and 'terminating on signal 15' in stdout_lines[-1]:
stdout_lines.pop()
if len(stdout_lines) != 0:
stdout_lines[-1] = stdout_lines[-1].strip('$')
print(json.dumps(stdout_lines, indent=2))
return stdout_lines
def search_lines(
stdout_lines: typing.List[str],
expected_lines: typing.List[str],
case_sensitive: bool = True
) -> CompareFuncReturnT:
"""
Search lines for expected lines. This will return true if all expected lines are in the
student standard out lines in order. There can be interruptions in the student standard out.
This function has the advantage of allowing students to still print out debugging lines
while their output is still accurately checked for the expected result. The diff is not
available for this.
>>> search_lines(['a', 'b', 'c'], ['a', 'b', 'c']) -> (True, [])
>>> search_lines(['a', 'debugging', 'b', 'c'], ['a', 'b', 'c']) -> (True, [])
>>> search_lines(['a', 'b'], ['a', 'b', 'c']) -> (False, [])
* Optionally specify if the equality comparison should be case sensitive *
:param stdout_lines:
:param expected_lines:
:param case_sensitive:
:return:
"""
if not case_sensitive:
stdout_lines = list(map(lambda x: x.lower(), stdout_lines))
found = []
for line in expected_lines:
l = line.strip()
if not case_sensitive:
l = l.lower()
for _aindex, _aline in enumerate(stdout_lines):
if l in _aline:
found.append(_aindex)
break
else:
found.append(-1)
if -1 in found:
return False, []
return list(sorted(found)) == found, []
def test_lines(
stdout_lines: typing.List[str],
expected_lines: typing.List[str],
case_sensitive: bool = True,
context_length: int = 5,
) -> CompareFuncReturnT:
"""
Test lines for exact equality. Whitespace will be stripped off each line automatically.
* Optionally specify if the equality comparison should be case sensitive *
>>> test_lines(['a', 'b', 'c'], ['a', 'b', 'c']) -> (True, [])
>>> test_lines(['a', 'debugging', 'b', 'c'], ['a', 'b', 'c'])
# -> (False, ['--- \n', '+++ \n', '@@ -1,3 +1,4 @@\n', ' a', '+debugging', ' b', ' c'])
>>> test_lines(['a', 'b'], ['a', 'b', 'c'])
# -> (False, ['--- \n', '+++ \n', '@@ -1,3 +1,2 @@\n', ' a', ' b', '-c'])
:param stdout_lines: students standard out lines as a list of strings
:param expected_lines: expected lines as a list of strings
:param case_sensitive: optional boolean to indicate if comparison should be case sensitive
:param context_length: the length of the context of the generated diff (the smaller the faster)
:return: True and an empty list if exact match was found, False with the unified diff otherwise
"""
# A rolling deque containing the lines of context of the diff that occurs (if any)
context = collections.deque(maxlen=context_length)
    # Record the first occurrence of a mismatch
mismatch_index = -1
    # The remaining offset until the first occurrence of a mismatch is centralized
# within the context
context_remaining_offset = context_length // 2
# A general preprocessor function for text
if case_sensitive:
preprocess_func = lambda *texts: tuple(text.strip() for text in texts)
else:
preprocess_func = lambda *texts: tuple(text.strip().lower() for text in texts)
for index, (_a, _b) in enumerate(zip(expected_lines, stdout_lines)):
# We defer text preprocessing until we need the lines
_a, _b = preprocess_func(_a, _b)
context.append((_a, _b))
# When there is a mismatch already, we are only motivated to fill up
# the appropriate context lines
if mismatch_index != -1:
# Break when the context is full and the mismatched line is
# centralized
if len(context) == context_length and context_remaining_offset <= 0:
break
# Continue until we fill up the context
context_remaining_offset -= 1
continue
elif _a != _b:
mismatch_index = index
# unzip the context as tuples
expected_context, stdout_context = zip(*context)
if mismatch_index == -1:
if len(expected_lines) == len(stdout_lines):
return True, []
# When there is no mismatch and the length of the lines are different,
# we fill the context with the leading part of the lines that
# only present in the longer list of lines
start = min(len(expected_lines), len(stdout_lines))
end = start + context_length
if len(expected_lines) > len(stdout_lines):
expected_context += preprocess_func(*expected_lines[start:end])
else:
stdout_context += preprocess_func(*stdout_lines[start:end])
return False, list(difflib.unified_diff(expected_context, stdout_context))
def verify_expected(
stdout_lines: typing.List[str],
expected_lines: typing.List[str],
test_result: TestResult,
case_sensitive: bool = True,
search: bool = False
):
"""
Check to lists of strings for quality. Will strip off whitespace from each line
before checking for equality. The stdout_lines should be from the student code.
The expected_lines should then be whichever lines are expected for this test.
* The fields on the test_result object will be set automatically based on if the
expected output was found. *
:param stdout_lines: students lines as a list of strings
:param expected_lines: expected lines as a list of strings
:param test_result: TestResult object for this test
:param case_sensitive: boolean to indicate if the comparison should be case sensitive
:param search: boolean to indicate if the stdout should be searched instead of
directly compared for equality
:return:
"""
compare_func: CompareFuncT = search_lines if search else test_lines
passed, diff = compare_func(stdout_lines, expected_lines, case_sensitive=case_sensitive)
if not passed:
if diff:
test_result.output_type = 'diff'
test_result.output += '\n'.join(diff)
else:
# If diff is not available, fall back to the old way of displaying outputs
test_result.output_type = 'text'
test_result.output += 'your lines:\n' + '\n'.join(stdout_lines) + '\n\n' \
+ 'we expected:\n' + '\n'.join(expected_lines)
test_result.message = 'Did not receive expected output'
test_result.passed = False
else:
test_result.output_type = 'text'
test_result.output += 'test passed, we received the expected output'
test_result.message = 'Expected output found'
test_result.passed = True
def xv6_run(cmd: str, test_result: TestResult, timeout=5) -> typing.List[str]:
"""
    Start xv6 and run the command specified. The test_result.output will
    be appended with a message saying what command is being run.
We return a list of the lines parsed
:param cmd:
:param test_result:
:param timeout:
:return:
"""
command = 'timeout {} qemu-system-i386 -serial mon:stdio ' \
'-drive file=./xv6.img,media=disk,index=0,format=raw ' \
'-drive file=./fs.img,media=disk,index=1,format=raw ' \
'-smp 1 -m 512 -display none -nographic'.format(timeout)
    test_result.output += 'Running "{}" in xv6\n\n'.format(cmd)
with open('command', 'w') as f:
f.write('\n' + cmd + '\n')
stdout, retcode = exec_as_student(command + ' < command', timeout=timeout + 1)
stdout = stdout.split('\n')
boot_line = None
for index, line in enumerate(stdout):
if line.endswith('xv6...'):
boot_line = index
break
if boot_line is not None:
stdout = stdout[boot_line:]
stdout = '\n'.join(stdout)
return trim(stdout)
def did_xv6_crash(stdout_lines: typing.List[str], test_result: TestResult):
"""
Will check output to see if xv6 crashed. We look for cpu0: panic, and
or unexpected traps.
If a crash is detected, the test_result will be set with and True will
be returned,
:param stdout_lines:
:param test_result:
:return:
"""
if any('cpu0: panic' in line for line in stdout_lines):
test_result.output_type = 'text'
test_result.output += 'xv6 did not boot\n\n' + '-' * 20 \
+ '\nstdout:\n' + '\n'.join(stdout_lines)
test_result.passed = False
test_result.message = 'xv6 does not boot!\n'
return True
passed = True
for line in stdout_lines:
passed = 'unexpected trap' not in line and passed
if not passed:
test_result.output_type = 'text'
test_result.output += 'trap error detected\n\n' + '-' * 20 \
+ '\nstdout:\n' + '\n'.join(stdout_lines)
test_result.passed = False
test_result.message = 'xv6 does not boot!\n'
return True
return False
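# Hedged example (added for illustration, not part of the original module):
# a minimal assignment build/test written against the helpers above. The
# build command, test name, xv6 command and expected output are hypothetical;
# only the decorator and helper APIs come from this file.
@register_build
def build(build_result: BuildResult):
    # exec_as_student returns (stdout, return_code)
    stdout, retcode = exec_as_student('make xv6.img fs.img')
    build_result.stdout = stdout
    build_result.passed = retcode == 0
@points_test(5)
@register_test('hello test')
def test_hello(test_result: TestResult):
    # Boot xv6, run a (hypothetical) student program, and capture its output
    stdout_lines = xv6_run('hello', test_result)
    # Bail out early if xv6 panicked or hit an unexpected trap
    if did_xv6_crash(stdout_lines, test_result):
        return
    # Search the output for the expected line, tolerating extra debug prints
    verify_expected(stdout_lines, ['Hello, xv6!'], test_result, search=True)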
```
|
{
"source": "jepster/python_advanced_techniques",
"score": 4
}
|
#### File: jepster/python_advanced_techniques/decorator_2.py
```python
def make_divisibility_test(n):
def divisible_by_n(m):
return m % n == 0
return divisible_by_n
div_by_3 = make_divisibility_test(3)
tuple(filter(div_by_3, range(10))) # => (0, 3, 6, 9)
print(make_divisibility_test(5)(10)) # => True
```
#### File: python_advanced_techniques/exercise_runtime_type_checker/checker.py
```python
import functools
import helper
import inspect
def check_types(severity=1):
if severity == 0:
return lambda function: function
def message(msg):
if severity == 1:
print(msg)
else:
raise TypeError(msg)
def checker(function):
expected = function.__annotations__
assert(all(map(lambda exp: isinstance(exp, type), expected.values())))
if not expected:
return function
@functools.wraps(function)
def wrapper(*args, **kwargs):
bound_arguments = helper.bind_args(function, *args, **kwargs)
for arg, val in bound_arguments.items():
if arg not in expected:
continue
if not isinstance(val, expected[arg]):
message(f"Bad Argument! Received {arg}={val}, expecting object of type {expected[arg]}")
retval = function(*args, **kwargs)
if 'return' in expected and not isinstance(retval, expected['return']):
message(f"Bad Return Value! Received {retval}, but expected value of type {expected['return']}")
return retval
return wrapper
return checker
@check_types(severity=1)
def foo(a: int, b: str) -> bool:
return b[a] == 'X'
foo('WXYZ', 1)
```
#### File: python_advanced_techniques/exercise_runtime_type_checker/helper.py
```python
import inspect
def bind_args(function, *args, **kwargs):
return inspect.signature(function).bind(*args, **kwargs).arguments
```
#### File: python_advanced_techniques/object_oriented_programming/class_test_2.py
```python
class Dog:
    def __init__(self, name, tricks=None):
        self.name = name
        # Give every dog its own set of tricks; a shared mutable default
        # would leak tricks between instances.
        self.tricks = set(tricks) if tricks else set()
    def teach(self, trick):
        self.tricks.add(trick)
# Change the broken code above so that the following lines work:
#
buddy = Dog('Buddy')
pascal = Dog('Pascal')
kimber = Dog('Kimber', tricks={'lie down', 'shake'})
buddy.teach('roll over')
buddy.teach('sit')
pascal.teach('fetch')
kimber.teach('fetch')
print(buddy.tricks) # {'sit', 'roll over'}
print(pascal.tricks) # {'fetch'}
print(kimber.tricks) # {'lie down', 'shake', 'fetch'}
```
#### File: python_advanced_techniques/object_oriented_programming/class_test_3.py
```python
class User:
    # An (intentionally shared) collection storing users who sign up for some hypothetical service.
    # There's only one set of members, so it lives at the class level!
    members = set()
    def __init__(self, name):
        self.name = name  # Not signed up to begin with.
    def sign_up(self):
        self.members.add(self.name)
# Change the code above so that the following lines work:
#
sarah = User('sarah')
heather = User('heather')
cristina = User('cristina')
print(User.members) # {}
heather.sign_up()
cristina.sign_up()
print(User.members) # {'heather', 'cristina'}
```
#### File: object_oriented_programming/exercise_online_shopping/product.py
```python
class Product:
def __init__(self, name, description, seller, price, availability):
self.name = name
self.description = description
self.seller = seller
self.reviews = []
self.price = price
self.availability = availability
def __str__(self):
return f"Product({self.name}, {self.description}) at ${self.price}"
```
#### File: object_oriented_programming/exercise_online_shopping/review.py
```python
class Review:
def __init__(self, content, user, product):
self.content = content
self.user = user
self.product = product
def __str__(self):
return f"Review of {self.product} by {self.user}: '{self.content}'"
```
|
{
"source": "jepster/python_basics_learning_scripts",
"score": 4
}
|
#### File: jepster/python_basics_learning_scripts/exception-handling.py
```python
def party_planner(cookies, people):
leftovers = None
num_each = None
# TODO: Add a try-except block here to
# make sure no ZeroDivisionError occurs.
try:
num_each = cookies // people
leftovers = cookies % people
except (ZeroDivisionError, ValueError):
print('Try again. Wrong value.')
return(num_each, leftovers)
# The main code block is below; do not edit this
lets_party = 'y'
while lets_party == 'y':
cookies = int(input("How many cookies are you baking? "))
people = int(input("How many people are attending? "))
cookies_each, leftovers = party_planner(cookies, people)
if cookies_each: # if cookies_each is not None
message = "\nLet's party! We'll have {} people attending, they'll each get to eat {} cookies, and we'll have {} left over."
print(message.format(people, cookies_each, leftovers))
lets_party = input("\nWould you like to party more? (y or n) ")
```
#### File: jepster/python_basics_learning_scripts/generator-function-1.py
```python
lessons = ["Why Python Programming", "Data Types and Operators", "Control Flow", "Functions", "Scripting"]
def my_enumerate(iterable, start=0):
    # Lazily yield (index, item) pairs for any iterable, counting from start.
    count = start
    for item in iterable:
        yield count, item
        count += 1
for i, lesson in my_enumerate(lessons, 1):
print("Lesson {}: {}".format(i, lesson))
```
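A short check, assuming the `my_enumerate` generator above: it is lazy and works on any iterable, not just the `lessons` list:
```python
gen = my_enumerate(['a', 'b', 'c'], start=10)
print(next(gen))   # (10, 'a') -- values are produced one at a time
print(list(gen))   # [(11, 'b'), (12, 'c')]
```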
#### File: jepster/python_basics_learning_scripts/generator-function-2.py
```python
def chunker(iterable, size):
"""Yield successive chunks from iterable of length size."""
for i in range(0, len(iterable), size):
yield iterable[i:i + size]
for chunk in chunker(range(25), 4):
print(list(chunk))
```
|
{
"source": "jepster/python_for_web",
"score": 3
}
|
#### File: jepster/python_for_web/app.py
```python
from flask import Flask, request
app = Flask(__name__)
"""
Run like this:
> flask run --host 0.0.0.0 --port 3000
"""
@app.route('/')
def home():
return 'hello world!'
```
|
{
"source": "jepster/python_meme_generator",
"score": 3
}
|
#### File: jepster/python_meme_generator/app.py
```python
import random
import os
import requests
from flask import Flask, render_template, abort, request
from meme import MemeEngine
from ingestors import Ingestor
app = Flask(__name__)
meme = MemeEngine('./static')
def setup():
""" Load all resources """
quote_files = ["./_data/DogQuotes/DogQuotesTXT.txt",
"./_data/DogQuotes/DogQuotesDOCX.docx",
"./_data/DogQuotes/DogQuotesPDF.pdf",
"./_data/DogQuotes/DogQuotesCSV.csv"]
all_quotes = []
for f in quote_files:
try:
all_quotes.extend(Ingestor.parse(f))
except ValueError as error:
print(f"ValueError: {error}")
images_path = "./_data/Photos/Dog/"
all_images = []
for root, dirs, files in os.walk(images_path):
all_images = [os.path.join(root, name) for name in files]
return all_quotes, all_images
quotes, images = setup()
@app.route('/')
def meme_rand():
""" Generate a random meme """
img = random.choice(images)
quote = random.choice(quotes)
path = meme.make_meme(img, quote.body, quote.author)
return render_template("meme.html", path=path)
@app.route('/create', methods=['GET'])
def meme_form():
""" User input for meme information """
return render_template('meme_form.html')
def is_image_downloadable(url):
"""
Does the url contain a downloadable resource
"""
h = requests.head(url, allow_redirects=True)
header = h.headers
content_type = header.get('content-type')
    if content_type and 'image' in content_type.lower():
return True
return False
@app.route('/create', methods=['POST'])
def meme_post():
""" Create a user defined meme """
img = "./temp_image.jpg"
image_url = request.form.get("image_url")
    if not is_image_downloadable(image_url):
return render_template('meme_form.html', error_message='The provided url does not point to a valid image. Please try again.')
img_data = requests.get(image_url, stream=True).content
with open(img, "wb") as f:
f.write(img_data)
body = request.form.get("body", "")
author = request.form.get("author", "")
path = meme.make_meme(img, body, author)
print(path)
os.remove(img)
return render_template("meme.html", path=path)
if __name__ == "__main__":
app.run()
```
#### File: python_meme_generator/ingestors/ingestor_interface.py
```python
from abc import ABC, abstractmethod
extensions = {
"TEXT": ".txt",
"CSV": ".csv",
"PDF": ".pdf",
"DOCX": ".docx",
}
class IngestorInterface(ABC):
@classmethod
def verify(cls, file_extension):
return file_extension in extensions.values()
    @classmethod
    @abstractmethod
    def parse(cls, path):
        pass
```
#### File: python_meme_generator/ingestors/text_ingestor.py
```python
from models import QuoteModel
from ingestors.ingestor_interface import IngestorInterface
class TextIngestor(IngestorInterface):
@classmethod
def parse(cls, path):
        with open(path, "r", encoding="utf-8-sig") as file:
            lines = file.readlines()
return [QuoteModel(*quote.rstrip("\n").split(" - ")) for quote in lines]
```
|
{
"source": "jepster/python_project_near_earth_objects",
"score": 3
}
|
#### File: jepster/python_project_near_earth_objects/close_approach.py
```python
from helpers import cd_to_datetime, datetime_to_str
class CloseApproach:
"""A close approach to Earth by an NEO.
A `CloseApproach` encapsulates information about the NEO's close approach
to Earth, such as the date and time (in UTC) of closest approach, the
nominal approach distance in astronomical units, and the relative approach
velocity in kilometers per second.
A `CloseApproach` also maintains a reference to its `NearEarthObject` -
initially, this information (the NEO's primary designation) is saved in a
private attribute, but the referenced NEO is eventually replaced in the
`NEODatabase` constructor.
"""
def __init__(self, **info):
"""Create a new `CloseApproach`.
:param string time: The date and time (in UTC) of closest approach.
NASA's format, at least in the `cd`
field of close approach data, uses the English locale's month names.
For example, December 31st, 2020 at noon
is: 2020-Dec-31 12:00
:param float distance: The nominal approach distance in astronomical
units.
:param float velocity: The relative approach velocity in kilometers per
second.
:param NearEarthObject neo: Reference to its `NearEarthObject` -
initially, this information
(the NEO's primary designation) is saved in a private attribute, but
the referenced NEO is
eventually replaced in the `NEODatabase` constructor.
"""
        for key, value in info.items():
            if key.lower() == 'des':
                # Primary designation of the NEO.
                try:
                    self._designation = str(value)
                except ValueError:
                    print(f'The type of {key} is not string')
            elif key.lower() == 'cd':
                # Date and time (UTC) of closest approach.
                try:
                    self.time = cd_to_datetime(str(value))
                except ValueError:
                    print(f'The type of {key} is not string')
            elif key.lower() == 'dist':
                # Nominal approach distance in astronomical units.
                try:
                    self.distance = float(value)
                except ValueError:
                    print(f'The type of {key} is not float')
            elif key.lower() == 'v_rel':
                # Relative approach velocity in kilometers per second.
                try:
                    self.velocity = float(value)
                except ValueError:
                    print(f'The type of {key} is not float')
        self.neo = self._designation
@property
def time_str(self):
"""Return a formatted representation of this `CloseApproach`'s
approach time.
The value in `self.time` should be a Python `datetime` object. While a
`datetime` object has a string representation, the default
representation includes seconds - significant figures that don't
exist in our input data set.
The `datetime_to_str` method converts a `datetime` object to a
formatted string that can be used in human-readable representations and
in serialization to CSV and JSON files.
"""
return f"Approach time of {self._designation} was at " \
f"{datetime_to_str(self.time)}"
def get_neo_primary_designation(self) -> str:
return self._designation
@property
def designation(self):
"""To access to the self._designation.
:return: self._designation
"""
return self._designation
def __str__(self):
"""Return `str(self)`."""
return f"A CloseApproach time={self.time_str} " \
f"distance={self.distance} velocity={self.velocity} " \
f"neo={self.neo}"
def __repr__(self):
"""Return `repr(self)`, a computer-readable string representation of
this object."""
return (
f"CloseApproach(time={self.time_str!r}, "
f"distance={self.distance:.2f}, "f"velocity={self.velocity:.2f}, "
f"neo={self.neo!r})")
```
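A hedged construction sketch for `CloseApproach`, using made-up record values; it assumes `helpers.cd_to_datetime` parses NASA's `YYYY-Mon-DD HH:MM` format as the docstring describes:
```python
approach = CloseApproach(des='433', cd='2020-Dec-31 12:00', dist='0.025', v_rel='5.62')
print(approach.designation)  # '433'
print(approach.distance)     # 0.025
print(approach.time_str)     # e.g. "Approach time of 433 was at 2020-12-31 12:00"
```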
#### File: jepster/python_project_near_earth_objects/extract.py
```python
import csv
import json
from near_earth_object import NearEarthObject
from close_approach import CloseApproach
def load_neos(neo_csv_path) -> list:
"""Read near-Earth object information from a CSV file.
:param neo_csv_path: A path to a CSV file containing data about
near-Earth objects.
:return: A collection of `NearEarthObject`s.
"""
csv_list = []
with open(neo_csv_path, 'r') as csv_to_read:
data_from_csv = csv.DictReader(csv_to_read)
for each_row_csv in data_from_csv:
csv_list.append(NearEarthObject(**each_row_csv))
return csv_list
def load_approaches(cad_json_path) -> list:
"""Read close approach data from a JSON file.
:param str cad_json_path: A path to a JSON file containing data about close
approaches.
:return list: A collection of `CloseApproach`es.
"""
json_list = []
with open(cad_json_path) as json_to_read:
data_from_json = json.load(json_to_read)
for each_row_json in data_from_json['data']:
each_row_json = dict(zip(data_from_json['fields'], each_row_json))
json_list.append(CloseApproach(**each_row_json))
return json_list
def _string_to_float(value: str) -> float:
diameter_default = float('nan')
try:
return float(value)
except ValueError:
return diameter_default
```
|
{
"source": "jepster/python_using_libraries",
"score": 3
}
|
#### File: complex_strategy/ImportEngine/DocxImporter.py
```python
from typing import List
import docx
from .ImportInterface import ImportInterface
from .Cat import Cat
class DocxImporter(ImportInterface):
allowed_extensions = ['docx']
@classmethod
def parse(cls, path: str) -> List[Cat]:
if not cls.can_ingest(path):
raise Exception('cannot ingest exception')
cats = []
doc = docx.Document(path)
for para in doc.paragraphs:
if para.text != "":
parse = para.text.split(',')
new_cat = Cat(parse[0], int(parse[1]), bool(parse[2]))
cats.append(new_cat)
return cats
```
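One caveat on the `bool(parse[2])` call above: any non-empty third field, including the literal text "False", is truthy. A hedged sketch of the assumed "name,age,flag" paragraph format:
```python
line = "Whiskers,3,False"                   # hypothetical docx paragraph content
parts = line.split(',')
print(bool(parts[2]))                       # True -- non-empty string, regardless of its text
print(parts[2].strip().lower() == 'true')   # False -- an explicit comparison behaves as expected
```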
|
{
"source": "jeptechnology/jude",
"score": 2
}
|
#### File: generator/printers/enum_printer.py
```python
class EnumPrinter:
enum_object_template = '''/* Autogenerated Code - do not edit directly */
#pragma once
#include <stdint.h>
#include <jude/core/c/jude_enum.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef uint32_t %ENUM%_t;
extern const jude_enum_map_t %ENUM%_enum_map[];
#if defined(__cplusplus)
}
namespace jude
{
namespace %ENUM%
{
enum Value
{
%VALUES%,
COUNT,
////////////////////////////////////////////////////////
// Protobuf backwards compatibility
%PROTOBUF_VALUES%,
////////////////////////////////////////////////////////
__INVALID_VALUE = COUNT
};
const char* GetString(Value value);
const char* GetDescription(Value value);
const Value* FindValue(const char* name);
Value GetValue(const char* name);
// Protobuf backwards compatibility
static auto AsText(Value value) { return GetString(value); };
};
}
////////////////////////////////////////////////////////
// Protobuf backwards compatibility
using %ENUM%Enum = jude::%ENUM%::Value;
%USING_VALUES%
static constexpr %ENUM%Enum %ENUM%_COUNT = jude::%ENUM%::Value::COUNT;
////////////////////////////////////////////////////////
#endif // __cplusplus
'''
enum_source_template = '''
#include "%ENUM%.h"
extern "C" const jude_enum_map_t %ENUM%_enum_map[] =
{
%VALUES%,
JUDE_ENUM_MAP_END
};
namespace jude
{
constexpr jude_size_t %ENUM%_COUNT = (jude_size_t)(sizeof(%ENUM%_enum_map) / sizeof(%ENUM%_enum_map[0]));
const char* %ENUM%::GetString(%ENUM%::Value value)
{
return jude_enum_find_string(%ENUM%_enum_map, value);
}
const char* %ENUM%::GetDescription(%ENUM%::Value value)
{
return jude_enum_find_description(%ENUM%_enum_map, value);
}
const %ENUM%::Value* %ENUM%::FindValue(const char* name)
{
return (const %ENUM%::Value*)jude_enum_find_value(%ENUM%_enum_map, name);
}
%ENUM%::Value %ENUM%::GetValue(const char* name)
{
return (%ENUM%::Value)jude_enum_get_value(%ENUM%_enum_map, name);
}
}
'''
def __init__(self, importPrefix, name, enum_def):
print("Parsing enum: ", name, "...")
self.name = name
self.importPrefix = importPrefix
self.elements = []
for label, data in enum_def.items():
value = 0
description = ''
            if isinstance(data, dict):
                if 'value' not in data:
                    raise SyntaxError("enum value defined as dictionary but no 'value' given: " + str(data))
                value = int(data['value'])
                if 'description' in data:
                    description = data['description']
            elif isinstance(data, int):
                value = data
            else:
                raise SyntaxError("enum element not defined as dictionary or int: " + str(data))
# Tidy up label for code
varname = label
if not varname[0].isalpha():
varname = "_" + varname
varname = varname.replace("-", "_")
self.elements.append((varname, label, value, description))
def create_object(self):
c_values = ',\n'.join([" %s = %d" % (w, y) for (w,x,y,z) in self.elements])
protobuf_values = ',\n'.join([" %s_%s = %s" % (self.name, w, w) for (w,x,y,z) in self.elements])
using_values = '\n'.join(["static constexpr jude::%s::Value %s_%s = jude::%s::%s;" % (self.name, self.name, w, self.name, w) for (w,x,y,z) in self.elements])
return self.enum_object_template.replace("%VALUES%", str(c_values)) \
.replace("%USING_VALUES%", str(using_values)) \
.replace("%PROTOBUF_VALUES%", str(protobuf_values)) \
.replace("%ENUM%", str(self.name)) \
.replace("%FILE%", str(self.name).upper())
def create_source(self):
values = ',\n'.join([' JUDE_ENUM_MAP_ENTRY(%s, %s, "%s")' % (x,y,z) for (w,x,y,z) in self.elements])
return self.enum_source_template.replace("%VALUES%", str(values)) \
.replace("%ENUM%", str(self.name)) \
.replace("%FILE%", str(self.name).upper())
```
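A hedged sketch of the `enum_def` shape the constructor parses: each label maps either to a plain int or to a dict with a `value` and optional `description`. The `Colour` enum and its labels are hypothetical:
```python
colour_def = {
    "red":   0,                                  # plain int value
    "green": {"value": 1, "description": "go"},  # dict form with a description
    "blue":  {"value": 2},                       # description is optional
}
printer = EnumPrinter("import/", "Colour", colour_def)
header_text = printer.create_object()   # C/C++ header text with the enum values
source_text = printer.create_source()   # matching source text with the enum map
```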
#### File: generator/templates/ObjectAccessors.py
```python
ObjectAccessorPrototypes = '''
bool Has_%NAME%() const { return Has(%INDEX%); }
%CLASS%& Clear_%NAME%() { Clear(%INDEX%); return *this; }
%TYPE% Get_%NAME%();
const %TYPE% Get_%NAME%() const;
%CLASS%& Set_%NAME%(const %TYPE%& value);
'''
ObjectAccessorImplementations = '''
// Accessors for %NAME%
%TYPE% %CLASS%::Get_%NAME%()
{
return GetChild<%TYPE%>(m_pData->%MEMBER%);
}
const %TYPE% %CLASS%::Get_%NAME%() const
{
return const_cast<%CLASS%*>(this)->Get_%NAME%();
}
%CLASS%& %CLASS%::Set_%NAME%(const %TYPE%& value)
{
bool alwaysNotify = RTTI()->field_list[%INDEX%].always_notify; // if we have to always notify, force a "change" bit
if (alwaysNotify || !Has_%NAME%())
{
Get_%NAME%().OverwriteData(value);
MarkFieldSet(%INDEX%, true);
}
else
{
auto subObj = Get_%NAME%();
bool hasChanged = (value != subObj);
if (hasChanged)
{
subObj.OverwriteData(value);
}
MarkFieldSet(%INDEX%, hasChanged);
}
return *this;
}
'''
ObjectArrayAccessorPrototypes = '''
ObjectArray<%TYPE%> Get_%PLURAL_NAME%();
const ObjectArray<%TYPE%> Get_%PLURAL_NAME%() const;
auto Add_%NAME%() { return Get_%PLURAL_NAME%().Add(); }
auto Add_%NAME%(jude_id_t id) { return Get_%PLURAL_NAME%().Add(id); }
%TYPE% Get_%NAME%(jude_size_t index) { return Get_%PLURAL_NAME%()[index]; }
const %TYPE% Get_%NAME%(jude_size_t index) const { return Get_%PLURAL_NAME%()[index].Clone(); }
std::optional<%TYPE%> Find_%NAME%(jude_id_t id) { return Get_%PLURAL_NAME%().Find(id); };
std::optional<const %TYPE%> Find_%NAME%(jude_id_t id) const { return Get_%PLURAL_NAME%().Find(id); };
'''
ObjectArrayAccessorImplementations = '''
// Accessors for %NAME%
ObjectArray<%TYPE%> %CLASS%::Get_%PLURAL_NAME%()
{
return ObjectArray<%TYPE%>(*this, %INDEX%);
}
const ObjectArray<%TYPE%> %CLASS%::Get_%PLURAL_NAME%() const
{
return const_cast<%CLASS%*>(this)->Get_%PLURAL_NAME%();
}
'''
def ObjectTemplateMap():
return {
'cpp': {
'objects': {
'single':ObjectAccessorPrototypes,
'repeated':ObjectArrayAccessorPrototypes
},
'source': {
'single':ObjectAccessorImplementations,
'repeated':ObjectArrayAccessorImplementations
}
}
}
```
|
{
"source": "jeptoong/bap-checkin",
"score": 3
}
|
#### File: main/crypto/crypto.py
```python
import main.share as share
import os
import logging as log
from cryptography.fernet import Fernet
from util.yaml_util import save_config
#---------------
# Crypto class
#---------------
class Crypto:
def __init__(self):
if not os.path.isdir('keys'):
log.debug('Create keys folder')
os.makedirs('keys')
if not os.path.isfile('keys/key_crypto'):
log.debug('Generate crypto key')
# generate key
self.key = Fernet.generate_key()
with open('keys/key_crypto', 'wb') as binary_stream:
# write bytes to file
binary_stream.write(self.key)
else:
with open('keys/key_crypto', 'rb') as binary_stream:
self.key = binary_stream.read()
# Encrypt password in config file
def encrypt_pass(self):
log.debug('Encrypt password')
if share.config:
share.config['user']['password'] = self.encrypt(share.config['user']['password'])
share.config['mail']['password'] = self.encrypt(share.config['mail']['password'])
save_config(share.config)
# Encrypt value
def encrypt(self, value:str):
has_encrypt = False
try:
self.decrypt(value)
has_encrypt = True
        except Exception:
            # Decryption failed, so the value is not encrypted yet.
            pass
if not has_encrypt:
fernet = Fernet(self.key)
return fernet.encrypt(value.encode()).decode()
return value
# Decrypt value
def decrypt(self, value):
fernet = Fernet(self.key)
return fernet.decrypt(value.encode()).decode()
```
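A minimal round-trip sketch for the `Crypto` class above, assuming it runs from the project root so the `keys/` folder can be created or read:
```python
crypto = Crypto()
token = crypto.encrypt('secret-password')
print(crypto.decrypt(token))            # 'secret-password'
print(crypto.encrypt(token) == token)   # True -- already-encrypted values pass through unchanged
```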
#### File: main/mail/mail.py
```python
import smtplib, ssl
import logging as log
import main.share as share
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from util.file_util import read_template
from main.timez import TimeZ
from main.crypto.crypto import Crypto
#-------------
# Mail class
#-------------
class Mail(TimeZ):
def __init__(self):
self.crypto = Crypto()
def send(self, attendance, late_time = 0.0):
if attendance:
log.debug('Send email')
# Prepare data for email
check_in = self.convert_tz(attendance['check_in'])
check_out = self.convert_tz(attendance['check_out'])
# Prepare template data
template_data = {
'WORKING_DAY': check_in.date().strftime('%d-%m-%Y'),
'CHECK_IN_TIME': check_in.time(),
'CHECK_OUT_TIME': check_out.time(),
'WORKING_TIME': self.convert_to_hours(attendance['actual_working_hours'])
}
# Calculate late time of today
today_late_time = attendance['hours_arrive_late'] + attendance['hours_leave_soon']
if today_late_time > 0.0:
template_data['TODAY_LATE_TIME'] = self.convert_to_hours(today_late_time)
# Calculate late time for month
late_time += today_late_time
if late_time > 0.0:
template_data['LATE_TIME'] = self.convert_to_hours(late_time)
# Get template name
template_name = 'check-in'
if check_in != check_out:
template_name = 'check-out'
# Read template
message_template = read_template(template_name)
smtp_server = share.config['mail']['smtp-server']
port = share.config['mail']['port']
sender_email = share.config['mail']['sender-email']
sender_name = share.config['mail']['sender-name']
password = self.crypto.decrypt(share.config['mail']['password'])
receiver_email = share.config['mail']['receiver-email']
# Create a secure SSL context
context = ssl.create_default_context()
# Create message
message = MIMEMultipart('alternative')
message['Subject'] = f'CHECKIN at {template_data["CHECK_IN_TIME"]}' if template_name == 'check-in' else f'CHECKOUT at {template_data["CHECK_OUT_TIME"]}'
message['From'] = f'{sender_name} <{sender_email}>'
message['To'] = receiver_email
html = message_template.render(**template_data)
# part1 = MIMEText(text, 'plain')
part2 = MIMEText(html, 'html')
# message.attach(part1)
message.attach(part2)
context = ssl.create_default_context()
with smtplib.SMTP(smtp_server, port) as server:
server.ehlo() # Can be omitted
server.starttls(context=context)
server.ehlo() # Can be omitted
server.login(sender_email, password)
server.sendmail(sender_email, receiver_email, message.as_string())
# Convert number to hours
def convert_to_hours(self, value:float):
hours = int(value)
minutes = round((value - hours) * 60)
return f'{hours}:{minutes:0>2}'
```
#### File: bap-checkin/main/request_payload.py
```python
import random
#------------------------
# Request Payload class
#------------------------
class RequestPayload:
def __init__(self):
self.__model = None
self.__domain = None
self.__fields = None
self.__limit = None
self.__user_info = None
self.__partner_ids = None
# self.__ip_client = None
self.__args = None
self.__method = None
self.__context = {}
# Set model
def model(self, model:str):
self.__model = model
return self
# Set domain
def domain(self, domain:list):
self.__domain = domain
return self
# Set fields
def fields(self, fields:list):
self.__fields = fields
return self
# Set limit
def limit(self, limit:int):
self.__limit = limit
return self
# Set user information
def user_info(self, user_info):
self.__user_info = user_info
return self
# Set Partner ids
def partner_ids(self, partner_ids):
self.__partner_ids = partner_ids
return self
# Set ip client
# def ip_client(self, ip_client):
# self.__ip_client = ip_client
# return self
# Set args
def args(self, args):
self.__args = args
return self
# Set method
def method(self, method):
self.__method = method
return self
# Add context
def add_context(self, context_value:dict):
self.__context = self.__context | context_value
return self
# Build Request payload
def build(self, context_kwargs=False):
id = random.randint(0, 999999999)
context = {}
if self.__user_info and 'user_context' in self.__user_info:
context = self.__user_info['user_context'] | {'bin_size': True, 'allowed_company_ids': [self.__user_info['user_companies']['allowed_companies'][0][0]]}
# if self.__ip_client:
# context['ip_client'] = self.__ip_client
context = context | self.__context
payload = {}
payload['jsonrpc'] = '2.0'
payload['method'] = 'call'
payload['params'] = {}
if self.__model:
payload['params']['model'] = self.__model
if self.__domain:
payload['params']['domain'] = self.__domain
if self.__fields:
payload['params']['fields'] = self.__fields
if self.__limit:
payload['params']['limit'] = self.__limit
if context:
if not context_kwargs:
payload['params']['context'] = context
else:
payload['params']['kwargs'] = {'context': context}
if self.__partner_ids:
payload['params']['partner_ids'] = self.__partner_ids
        if self.__args is not None:
payload['params']['args'] = self.__args
if self.__method:
payload['params']['method'] = self.__method
payload['id'] = id
return payload, id
```
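A hedged sketch of how the builder chains; the model, domain, field and method names are hypothetical placeholders:
```python
payload, request_id = (RequestPayload()
                       .model('hr.attendance')
                       .domain([['employee_id', '=', 7]])
                       .fields(['check_in', 'check_out'])
                       .limit(1)
                       .method('search_read')
                       .build())
print(payload['jsonrpc'])           # '2.0'
print(payload['params']['model'])   # 'hr.attendance'
```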
|
{
"source": "Jepty/Google-build",
"score": 2
}
|
#### File: Jepty/Google-build/build.py
```python
import telebot # pip3 install pyTelegramBotAPI
from telebot import types
import datetime
import traceback
import logging
import threading
import time as time
from keyboard import config as config
from keyboard import lang as lang
# from keyboard import con
import functions
import cherrypy
bot = functions.bot
config.bot_name = bot.get_me().username
logging.basicConfig(filename="exeption.log", level = logging.INFO)
finished = False
# class WebhookServer(object):
# # index corresponds to "/", i.e. to there being nothing after the IP address (roughly speaking)
# @cherrypy.expose
# def index(self):
# length = int(cherrypy.request.headers['content-length'])
# json_string = cherrypy.request.body.read(length).decode("utf-8")
# update = telebot.types.Update.de_json(json_string)
# bot.process_new_updates([update])
# return ''
# def timer():
# print('timer')
# while not(finished):
# with con:
# cur = con.cursor()
# cur.execute(f"SELECT data, chat_id FROM {config.user_tb}")
# ls = cur.fetchall()
# now = datetime.datetime.now()
# for i in ls:
# times = datetime.datetime.strptime(i[0], '%Y-%m-%d %H:%M:%S')
# try:
# if now.date() == times.date() and now.hour == times.hour:
# bot.send_message(i[1], lang.text_notif_today)
# except:
# print(traceback.format_exc())
# time.sleep(3600)
# threading.Thread(target=timer, daemon=True).start()
def main():
print('start')
try:
# # bot.polling(none_stop=True)
# cherrypy.config.update({
# 'server.socket_host': '127.0.0.1',
# 'server.socket_port': config.WEBHOOK_PORT,
# 'engine.autoreload.on': False
# })
# cherrypy.quickstart(WebhookServer(), '/', {'/': {}})
finished = False
bot.remove_webhook()
bot.polling(none_stop=True)
except Exception as e:
# print(e)
# logging.error(str(datetime.datetime.now()), e)
print(traceback.format_exc())
main()
finished = True
print('stop')
if __name__ == '__main__':
main()
```
|
{
"source": "jepz20/meetupplanner",
"score": 3
}
|
#### File: jepz20/meetupplanner/main.py
```python
from datetime import datetime, timedelta
import os
from flask import Flask, g, send_file, request, redirect, url_for, jsonify
from werkzeug.security import generate_password_hash, check_password_hash
import requests
import logging
from functools import wraps
from urlparse import parse_qs, parse_qsl
from urllib import urlencode
from requests_oauthlib import OAuth1
from google.appengine.api import urlfetch
import jwt
from jwt import DecodeError, ExpiredSignature
try:
# For c speedups
from simplejson import loads, dumps
except ImportError:
from json import loads, dumps
app = Flask(__name__)
app.config.from_object('config')
firebaseUrl = 'https://popping-heat-5589.firebaseio.com/'
def create_token(user):
payload = {
'sub': user.id,
'iat': datetime.utcnow(),
'exp': datetime.utcnow() + timedelta(days=14)
}
token = jwt.encode(payload, app.config['TOKEN_SECRET'])
return token.decode('unicode_escape')
def parse_token(req):
token = req.headers.get('Authorization').split()[1]
return jwt.decode(token, app.config['TOKEN_SECRET'])
def login_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if not request.headers.get('Authorization'):
response = jsonify(message='Missing authorization header')
response.status_code = 401
return response
try:
payload = parse_token(request)
except DecodeError:
response = jsonify(message='Token is invalid')
response.status_code = 401
return response
except ExpiredSignature:
response = jsonify(message='Token has expired')
response.status_code = 401
return response
g.user_id = payload['sub']
return f(*args, **kwargs)
return decorated_function
# Routes
@app.route('/auth/login', methods=['POST'])
def login():
user = User.query.filter_by(email=request.json['email']).first()
if not user or not user.check_password(request.json['password']):
response = jsonify(message='Wrong Email or Password')
response.status_code = 401
return response
token = create_token(user)
return jsonify(token=token)
@app.route('/auth/signup', methods=['POST'])
def signup():
userEmail = request.json['email']
userGet = getUserFromFirebase(userEmail)
if userGet is not None:
print userGet
existingUser = userGet
existingUser['name'] = request.json['name']
existingUser['password'] = request.json['password']
userData = existingUser
else:
userData = {"name": request.json['name'], "email": request.json['email'],
'password': request.json['password']}
userEmail = request.json['email']
return putUserInFirebase(dumps(userData), userEmail)
# /jsonify(token=token)
@app.route('/auth/facebook', methods=['POST'])
def facebook():
access_token_url = 'https://graph.facebook.com/v2.5/oauth/access_token'
graph_api_url = 'https://graph.facebook.com/v2.5/me?fields=email,name,picture'
try:
params = {
'client_id': request.json['clientId'],
'redirect_uri': request.json['redirectUri'],
'client_secret': app.config['FACEBOOK_SECRET'],
'code': request.json['code']
}
except Exception, e:
return "Error: %s" % e
encoded_params = urlencode(params)
access_token_url_with_params = '%s?%s' %(access_token_url, encoded_params)
# Step 1. Exchange authorization code for access token.
r = urlfetch.fetch(url=access_token_url_with_params, method=urlfetch.GET)
access_token = loads(r.content)
encoded_access_token = urlencode(access_token)
graph_api_url_with_params = '%s&%s' %(graph_api_url, encoded_access_token)
# Step 2. Retrieve information about the current user.
r = urlfetch.fetch(url=graph_api_url_with_params, method=urlfetch.GET)
# try:
profile = loads(r.content)
userEmail = profile['email']
userGet = getUserFromFirebase(userEmail)
if userGet is not None:
print userGet
existingUser = userGet
existingUser['name'] = profile['name']
existingUser['email'] = profile['email']
existingUser['facebook_id'] = profile['id']
userData = existingUser
else:
userData = {"name": profile['name'], "email": profile['email'],
'facebook_id': profile['id']}
userEmail = profile['email']
return putUserInFirebase(dumps(userData),userEmail)
@app.route('/auth/google', methods=['POST'])
def google():
access_token_url = 'https://accounts.google.com/o/oauth2/token'
people_api_url = 'https://www.googleapis.com/plus/v1/people/me/openIdConnect'
print request.json
payload = dict(client_id=request.json['clientId'],
redirect_uri=request.json['redirectUri'],
client_secret=app.config['GOOGLE_SECRET'],
code=request.json['code'],
grant_type='authorization_code')
# Step 1. Exchange authorization code for access token.
r = requests.post(access_token_url, data=payload)
token = loads(r.text)
headers = {'Authorization': 'Bearer {0}'.format(token['access_token'])}
# Step 2. Retrieve information about the current user.
r = requests.get(people_api_url, headers=headers)
profile = loads(r.text)
userEmail = profile['email']
userGet = getUserFromFirebase(userEmail)
if userGet is not None:
print userGet
existingUser = userGet
existingUser['name'] = profile['name']
existingUser['email'] = profile['email']
existingUser['google_id'] = profile['sub']
userData = existingUser
else:
userData = {"name": profile['name'], "email": profile['email'],
'google_id': profile['sub']}
userEmail = profile['email']
return putUserInFirebase(dumps(userData), userEmail)
def getUserFromFirebase(userEmail):
userUrl = '%susers/%s.json' %(firebaseUrl, userEmail.replace('@','at').replace('.','dot'))
fetchResult = urlfetch.fetch(url=userUrl, method=urlfetch.GET)
try:
user = loads(fetchResult.content)
except Exception, e:
user = None
return user
def putUserInFirebase(payload,userEmail):
userUrl = '%susers/%s.json' %(firebaseUrl, userEmail.replace('@','at').replace('.','dot'))
fetchResult = urlfetch.fetch(url=userUrl, method=urlfetch.PUT, payload=payload)
try:
user = loads(fetchResult.content)
try:
del user['password']
user = dumps(user)
except Exception, e:
user = dumps(user)
except Exception, e:
user = ""
return user
```
|
{
"source": "jeqb/RaspberryFlask",
"score": 3
}
|
#### File: jeqb/RaspberryFlask/app.py
```python
import os
import signal
from flask import Flask, render_template
import RPi.GPIO as GPIO
ledPin = 11
app = Flask(__name__)
def setup():
GPIO.setmode(GPIO.BOARD)
GPIO.setup(ledPin, GPIO.OUT)
GPIO.output(ledPin, GPIO.LOW)
print('setup complete for pin {}'.format(ledPin))
@app.route('/on', methods=['GET'])
def turn_on():
GPIO.output(ledPin, GPIO.HIGH)
status = 'Led has been turned on'
return render_template('index.html', status=status)
@app.route('/off', methods=['GET'])
def turn_off():
GPIO.output(ledPin, GPIO.LOW)
status = 'Led has been turned off'
return render_template('index.html', status=status)
@app.route('/', methods=['GET'])
def main():
return render_template('menu.html')
@app.route('/terminate', methods=['GET'])
def destroy():
GPIO.output(ledPin, GPIO.LOW)
GPIO.cleanup()
os.kill(os.getpid(), signal.SIGTERM)
if __name__ == '__main__':
setup()
# dev server is fine for something this simple
app.run(host='0.0.0.0')
```
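A hedged client-side sketch for driving the endpoints above from another machine; the Pi's address and port are assumptions:
```python
import requests

base = 'http://192.168.1.50:5000'   # hypothetical address of the Pi running the dev server
requests.get(f'{base}/on')          # LED on
requests.get(f'{base}/off')         # LED off
```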
|
{
"source": "jeqcho/kopitiam-maths",
"score": 3
}
|
#### File: kopitiam-maths/Functions/functions.py
```python
from manim import *
from manim.mobject.geometry import ArrowTriangleFilledTip
class Intro(Scene):
def construct(self):
who = Tex("Who is your mother?")
name = MathTex(r"\rightarrow \text{A unique human}")
txt = VGroup(who, name).arrange(RIGHT)
self.play(
Write(who)
)
self.wait()
self.play(
Write(name)
)
self.wait()
func = Text("Function")
self.play(
ReplacementTransform(txt, func)
)
self.wait(2)
ex_func = MathTex(r"\text{mother} : \text{A} \rightarrow \text{B}").scale(0.5).next_to(func, DOWN)
self.play(
Write(ex_func)
)
self.wait(2)
mother = Text("Fatiha")
aiman = Text("Aiman")
faizal = Text("Faizal")
mother_sons = VGroup(mother, aiman, faizal).arrange(DOWN)
self.play(
ReplacementTransform(VGroup(func, ex_func), mother_sons)
)
self.wait(3)
dis = MathTex(r"\text{mother} : \text{Aiman} \rightarrow \text{Fatiha}").move_to(aiman.get_center())
self.play(
ReplacementTransform(aiman, dis)
)
self.wait(3)
dis = MathTex(r"\text{mother} : \text{Faizal} \rightarrow \text{Fatiha}").move_to(faizal.get_center())
self.play(
ReplacementTransform(faizal, dis)
)
self.wait(3)
son_function = MathTex(r"\text{son} : \text{Fatiha} \rightarrow")
to_aiman = MathTex(r"\text{Aiman}").next_to(son_function, RIGHT)
son_function_full = VGroup(son_function, to_aiman.copy()).move_to(mother.get_center())
to_aiman = MathTex(r"\text{Aiman}").next_to(son_function, RIGHT)
to_faizal = MathTex(r"\text{Faizal}").next_to(son_function, RIGHT)
self.play(
ReplacementTransform(mother, son_function_full)
)
self.wait(2)
self.play(
Transform(son_function_full[1], to_faizal.copy())
)
self.play(
Transform(son_function_full[1], to_aiman.copy())
)
self.play(
Transform(son_function_full[1], to_faizal.copy())
)
self.play(
Transform(son_function_full[1], to_aiman.copy())
)
self.play(
Transform(son_function_full[1], to_faizal.copy())
)
self.play(
Transform(son_function_full[1], to_aiman.copy())
)
self.wait()
self.play(
*[FadeOut(x) for x in self.mobjects]
)
self.wait()
class Terminology(Scene):
def construct(self):
self.wait()
tapir = SVGMobject('img/tapir.svg')
four = MathTex(r'4')
element_group = VGroup(four, tapir)
element_group.arrange(buff=2)
element = Title("Elements")
self.play(
Write(element)
)
self.wait()
dis = Tex("A thing").next_to(element, DOWN)
self.play(
Write(dis)
)
self.wait()
self.play(
Write(four)
)
self.wait()
self.play(
FadeIn(tapir)
)
self.wait(2)
self.play(
FadeOut(element_group),
FadeOut(element),
FadeOut(dis)
)
relation = Title("Relation")
self.play(
Write(relation)
)
self.wait()
dis = Tex("A connection").next_to(relation, DOWN)
self.play(
Write(dis)
)
self.wait()
arrow_1 = Arrow(start=ORIGIN + LEFT * 3, end=ORIGIN + RIGHT * 3).add_tip(tip_shape=ArrowTriangleFilledTip,
at_start=True)
three = MathTex(r'3').next_to(ORIGIN + LEFT * 3, LEFT)
seven = MathTex(r'7').next_to(ORIGIN + RIGHT * 3, RIGHT)
both_odd = Tex(r'odd numbers').next_to(ORIGIN, UP)
number_relation = VGroup(three, seven, arrow_1, both_odd)
self.play(
Create(number_relation)
)
self.wait()
self.play(
FadeOut(number_relation)
)
self.wait()
you = Tex("You").next_to(ORIGIN + LEFT * 3, LEFT)
mom = Tex("Your mom").next_to(ORIGIN + RIGHT * 3, RIGHT)
child_of = Tex('family').next_to(ORIGIN, UP)
child_relation = VGroup(you, mom, arrow_1, child_of)
self.play(
Create(child_relation)
)
self.wait()
self.play(
FadeOut(child_relation)
)
self.wait()
banana = SVGMobject("img/banana.svg")
yellow = Tex("Yellow", color=YELLOW)
Group(banana, yellow).arrange(buff=3)
arrow_1 = Arrow(start=banana.get_right(), end=yellow.get_left())
colour_of = Tex('colour of').next_to(arrow_1, UP)
colour_relation = VGroup(yellow, arrow_1, colour_of)
self.play(
Write(banana),
Create(colour_relation)
)
self.wait(3)
self.play(
FadeOut(banana),
FadeOut(colour_relation)
)
self.wait()
self.play(
FadeOut(relation),
FadeOut(dis)
)
self.wait()
function_title = Title('Function')
self.play(
Write(function_title)
)
self.wait()
dis = Text("A special relation where an element is mapped to exactly one element").scale(0.7).next_to(
function_title, DOWN)
self.play(
Write(dis),
run_time=4
)
self.wait(6)
condition_1 = Text("Each input cannot have no corresponding output").scale(0.6)
condition_2 = Text("Each input cannot have more than one output").scale(0.6)
conditions = VGroup(condition_1, condition_2).arrange(DOWN)
self.play(
Write(condition_1)
)
self.wait(2)
self.play(
Write(condition_2)
)
self.wait(2)
condition = Text("Each input must have exactly one output").scale(0.6)
self.play(
ReplacementTransform(conditions, condition)
)
self.wait(4)
self.play(
FadeOut(dis),
FadeOut(condition)
)
X = Ellipse(2, 5, color=WHITE)
Y = Ellipse(2, 5, color=WHITE)
VGroup(X, Y).arrange(buff=3)
set_x = VGroup(*[MathTex(x) for x in 'abcd'])
set_x.arrange(DOWN, buff=1).move_to(X)
set_y = VGroup(*[MathTex(x) for x in 'pqrs'])
set_y.arrange(DOWN, buff=1).move_to(Y)
self.play(
Create(X),
Create(Y),
Create(set_x),
Create(set_y),
)
arrows = VGroup()
for i in range(3):
arrow = Arrow(start=set_x[i].get_center(), end=set_y[i].get_center())
self.play(
Create(arrow)
)
arrows += arrow
self.wait(2)
arrow = Arrow(start=set_x[3].get_center(), end=set_y[2].get_center())
valid = Text("Still a function", color=GREEN).to_edge(DOWN)
self.play(
Create(arrow),
Write(valid)
)
self.wait(2)
self.play(
Uncreate(arrow),
FadeOut(valid)
)
self.wait()
invalid = Text("NOT a function", color=RED).to_edge(DOWN)
arrow = Arrow(start=set_x[2].get_center(), end=set_y[3].get_center())
self.play(
Create(arrow),
Write(invalid)
)
self.wait()
self.play(
Uncreate(arrow),
FadeOut(invalid)
)
self.wait()
self.play(
FadeOut(function_title),
FadeOut(set_x[3])
)
self.wait()
objects = Text("Objects").next_to(X, DOWN)
self.play(
Write(objects),
Indicate(set_x[0])
)
self.play(
Indicate(set_x[1])
)
self.play(
Indicate(set_x[2])
)
self.wait()
self.play(
FadeOut(objects)
)
self.wait()
images = Text("Images").next_to(Y, DOWN)
self.play(
Write(images),
Indicate(set_y[0])
)
self.play(
Indicate(set_y[1])
)
self.play(
Indicate(set_y[2])
)
self.wait()
self.play(
FadeOut(images)
)
self.wait()
domain = Text("Domain").next_to(X, DOWN)
x_title = MathTex('X').next_to(X, UP)
self.play(
Write(domain),
Write(x_title)
)
self.wait()
self.play(
Indicate(X)
)
self.wait()
self.play(
FadeOut(domain)
)
self.wait()
codomain = Text("Codomain").next_to(Y, DOWN)
y_title = MathTex('Y').next_to(Y, UP)
self.play(
Write(codomain),
Write(y_title)
)
self.wait()
self.play(
Indicate(Y)
)
self.wait()
self.play(
FadeOut(codomain)
)
self.wait()
range_brace = Brace(Line(set_y[0].get_top(), set_y[2].get_bottom()), direction=RIGHT).shift(RIGHT)
range_title = Text("Range").next_to(range_brace, RIGHT)
self.play(
Write(range_brace),
Write(range_title)
)
self.wait(5)
self.play(
FadeOut(range_brace),
FadeOut(range_title)
)
self.wait()
f_note = MathTex(r"y=f(x)")
a_note = MathTex(r"f:X \rightarrow Y")
notations = VGroup(f_note, a_note)
notations.arrange(DOWN).to_edge(UP)
self.play(
Write(f_note)
)
self.wait(3)
self.play(
Write(a_note)
)
self.wait(3)
self.play(
FadeOut(arrows),
FadeOut(set_x[0:3]),
FadeOut(set_y),
FadeOut(X),
FadeOut(Y),
FadeOut(x_title),
FadeOut(y_title),
FadeOut(notations)
)
self.wait()
class Analogy(Scene):
def construct(self):
self.wait()
mfm_title = VGroup(*[Text(x) for x in ['Magical', 'Fruit', 'Machine']]).arrange(DOWN)
mfm_title[0].set_color(PINK).set_sheen(-0.1, DR)
mfm_title[1].set_color(PURPLE).set_sheen(-0.1, DR)
mfm_title[2].set_color(TEAL).set_sheen(-0.1, DR)
mfm_box = Rectangle(width=mfm_title.width * 1.1, height=mfm_title.height * 1.1).set_fill(BLACK).set_opacity(100)
mfm = VGroup(mfm_box, mfm_title)
self.bring_to_front(mfm)
self.play(
Create(mfm),
run_time=2
)
self.wait(2)
orange = SVGMobject('img/orange.svg').to_edge(LEFT).shift(RIGHT)
orange_juice = SVGMobject('img/orange_juice.svg').to_edge(RIGHT).shift(LEFT)
self.play(
Write(orange)
)
self.wait()
self.bring_to_front(mfm)
self.play(
ReplacementTransform(orange, orange_juice)
)
self.wait()
self.play(
FadeOut(orange_juice)
)
melon = SVGMobject('img/melon.svg').scale(0.7).to_edge(LEFT).shift(RIGHT)
melon_juice = SVGMobject('img/melon_juice.svg').to_edge(RIGHT).shift(LEFT)
self.play(
Write(melon)
)
self.bring_to_front(mfm)
self.play(
ReplacementTransform(melon, melon_juice)
)
self.wait()
self.play(
FadeOut(melon_juice)
)
self.wait()
blender = SVGMobject('img/blender.svg').scale(1.2)
self.play(
ReplacementTransform(mfm, blender)
)
self.wait(3)
function_title = Text("Function").next_to(blender, DOWN)
self.play(
Write(function_title)
)
self.wait(2)
orange = SVGMobject('img/orange.svg').to_edge(LEFT).shift(RIGHT)
self.play(
Write(orange)
)
self.wait()
objects = Text("Object").next_to(orange, DOWN)
self.play(
Write(objects)
)
self.wait()
orange_juice = SVGMobject('img/orange_juice.svg').to_edge(RIGHT).shift(LEFT)
self.play(
Write(orange_juice)
)
self.wait()
images = Text("Image").next_to(orange_juice, DOWN)
self.play(
Write(images)
)
self.wait()
self.play(
FadeOut(objects),
FadeOut(images),
FadeOut(function_title)
)
self.wait()
melon = SVGMobject('img/melon.svg').scale(0.7)
fruits = VGroup(orange.copy(), melon).arrange(DOWN).to_edge(LEFT).shift(RIGHT)
melon_juice = SVGMobject('img/melon_juice.svg')
juices = VGroup(orange_juice.copy(), melon_juice).arrange(DOWN).to_edge(RIGHT).shift(LEFT)
self.play(
orange.animate.move_to(fruits[0])
)
self.wait()
self.play(
Write(melon)
)
self.wait()
domain = Text("Domain").next_to(fruits, DOWN)
set_fruit = SurroundingRectangle(fruits)
self.play(
Create(set_fruit),
Write(domain)
)
self.wait()
self.play(
orange_juice.animate.move_to(juices[0])
)
self.wait()
self.play(
Write(melon_juice)
)
self.wait()
range_title = Text("Range").next_to(juices, DOWN)
set_juices = SurroundingRectangle(juices)
self.play(
Create(set_juices),
Write(range_title)
)
self.wait()
self.play(
Uncreate(set_fruit),
FadeOut(domain),
Uncreate(set_juices),
FadeOut(range_title)
)
self.wait()
coconut = SVGMobject('img/coconut.svg').scale(1.2)
coconut_juice = SVGMobject('img/coconut_juice.svg')
ordered_fruits = fruits.copy()
ordered_fruits += coconut
ordered_fruits.arrange(DOWN).to_edge(LEFT).shift(RIGHT)
ordered_juices = juices.copy()
ordered_juices += coconut_juice
ordered_juices.arrange(DOWN).to_edge(RIGHT).shift(LEFT)
self.play(
orange.animate.move_to(ordered_fruits[0]),
melon.animate.move_to(ordered_fruits[1]),
orange_juice.animate.move_to(ordered_juices[0]),
melon_juice.animate.move_to(ordered_juices[1])
)
self.wait()
self.play(
Write(coconut),
Write(coconut_juice)
)
self.wait()
arrow = Arrow(start=coconut.get_right(), end=coconut_juice.get_left())
slash = Cross(arrow.copy().scale(0.1))
self.play(
Create(arrow),
Create(slash)
)
self.wait()
codomain = Text('Codomain').next_to(ordered_juices, DOWN)
set_codomain = SurroundingRectangle(ordered_juices)
self.play(
Write(codomain),
Create(set_codomain)
)
self.wait(4)
self.play(
FadeOut(codomain),
Uncreate(set_codomain)
)
self.play(
*[FadeOut(mob) for mob in self.mobjects]
)
self.wait()
class DiscreteVSContinuous(Scene):
def construct(self):
self.wait(2)
linear = MathTex(r"f(x)=mx+c")
self.play(
Write(linear)
)
self.wait(2)
binomial = MathTex(r"P(X=r)=\binom{n}{r}p^r q^{n-r}")
self.play(
Transform(linear, binomial)
)
self.wait(2)
self.play(
FadeOut(linear)
)
func = Text("Function")
self.play(
Write(func)
)
self.wait()
txt = VGroup(Text("Discrete"), Text("Continuous")).arrange(RIGHT, buff=2)
self.play(
Transform(func, txt)
)
self.wait()
self.play(
FadeOut(func)
)
continuous_title = Title("Continuous Functions")
self.play(
Write(continuous_title)
)
self.wait()
number_line = NumberLine().to_edge(DOWN)
tracker = ValueTracker(0)
pointer = Vector(DOWN).next_to(number_line.n2p(0), UP)
pointer.add_updater(
lambda m: m.next_to(
number_line.n2p(tracker.get_value()),
UP
)
)
def upd():
return MathTex('{0:.1f}'.format(tracker.get_value())).next_to(pointer, UP)
label = always_redraw(upd)
self.play(
Create(number_line),
Create(pointer),
Write(label)
)
self.play(tracker.animate.set_value(5))
self.play(tracker.animate.set_value(-3))
self.play(tracker.animate.increment_value(+2))
self.wait(0.5)
fx = MathTex(r"f(x)", r" = 2", r"x+1").shift(UP)
self.play(
Write(fx)
)
self.wait()
result = MathTex(r"=", "5").next_to(fx, DOWN).align_to(fx[1], LEFT)
self.play(
Transform(fx[0], MathTex(r"f(2)").move_to(fx[0].get_center()).align_to(fx[0], RIGHT)),
Transform(fx[2], MathTex(r"(2)+1").move_to(fx[2].get_center()).align_to(fx[2], LEFT)),
tracker.animate.set_value(2),
Write(result)
)
self.wait()
self.play(
Transform(fx[0], MathTex(r"f(1)").move_to(fx[0].get_center()).align_to(fx[0], RIGHT)),
Transform(fx[2], MathTex(r"(1)+1").move_to(fx[2].get_center()).align_to(fx[2], LEFT)),
tracker.animate.set_value(1),
Transform(result[1], MathTex(r"3").move_to(result[1].get_center()).align_to(result[1], LEFT))
)
self.wait()
self.play(
Transform(fx[0], MathTex(r"f(1.5)").move_to(fx[0].get_center()).align_to(fx[0], RIGHT)),
Transform(fx[2], MathTex(r"(1.5)+1").move_to(fx[2].get_center()).align_to(fx[2], LEFT)),
tracker.animate.set_value(1.5),
Transform(result[1], MathTex(r"4").move_to(result[1].get_center()).align_to(result[1], LEFT))
)
self.wait()
self.play(
FadeOut(continuous_title),
FadeOut(fx),
FadeOut(result),
FadeOut(number_line),
FadeOut(pointer),
FadeOut(label)
)
self.wait()
discrete_title = Title("Discrete Functions")
self.play(
Write(discrete_title)
)
blender = SVGMobject('img/blender.svg').shift(UP)
orange = SVGMobject('img/orange.svg').shift(DOWN)
melon = SVGMobject('img/melon.svg').scale(0.7).shift(DOWN)
oron = orange.copy()
orange.to_edge(LEFT, buff=1)
melon.to_edge(RIGHT, buff=1)
self.play(
Write(blender)
)
self.wait(2)
self.play(
Write(orange),
Write(melon)
)
self.wait()
self.play(
ReplacementTransform(melon.copy(), oron)
)
self.play(
Transform(oron, melon.copy().move_to(oron.get_center()))
)
self.play(
Transform(oron, orange.copy().move_to(oron.get_center()))
)
self.play(
Transform(oron, melon.copy().move_to(oron.get_center()))
)
self.play(
FadeOut(blender),
FadeOut(oron),
FadeOut(orange),
FadeOut(melon),
)
des = MathTex(r"\text{Let }f(x)\text{ be the number of ways to arrange }x\text{ people in a line}") \
.next_to(discrete_title, DOWN)
dis = MathTex(r"f(x)=x!").to_edge(DL)
self.play(
Write(des),
FadeIn(dis)
)
self.wait()
fx = MathTex(r"f(4)", r"=", r"24")
self.play(
Write(fx)
)
self.wait(2)
nfx = MathTex(r"f(7.5)", r"???")
self.play(
Transform(fx[0], nfx[0].move_to(fx[0].get_center()).align_to(fx[0], RIGHT)),
Transform(fx[2], nfx[1].move_to(fx[2].get_center()).align_to(fx[2], LEFT))
)
self.wait(3)
nfx = MathTex(r"f(???)", r"2.3")
self.play(
Transform(fx[0], nfx[0].move_to(fx[0].get_center()).align_to(fx[0], RIGHT)),
Transform(fx[2], nfx[1].move_to(fx[2].get_center()).align_to(fx[2], LEFT))
)
self.wait(2)
self.play(
*[FadeOut(x) for x in self.mobjects]
)
self.wait()
class Mapping(Scene):
def construct(self):
self.wait()
function_title = MathTex("f(x)=2x+1")
self.play(
Write(function_title)
)
self.play(
function_title.animate.to_edge(UP)
)
x_min = 0
x_max = 6
axes = Axes(x_range=[x_min, x_max, 1],
y_range=[0, 11, 1],
axis_config={"include_numbers": True}
)
x = ValueTracker(0)
def f(x1):
return 2 * x1 + 1
g = axes.get_graph(f, [x_min, x_max])
mx_obj = 0
mx_img = 0
def eq_text():
return MathTex('{0:.1f}'.format(f(x.get_value())), r"=",
"2(" + '{0:.2f}'.format(x.get_value()) + ")+1").to_corner(UR)
def trace_dot():
nonlocal mx_obj, mx_img
mx_obj = max(mx_obj, x.get_value())
mx_img = max(mx_img, f(x.get_value()))
return axes.get_line_graph([0, mx_obj], [f(0), mx_img], add_vertex_dots=False)
def trace_x():
return axes.get_T_label(x.get_value(), g, label=Tex('{0:.2f}'.format(x.get_value())))
def trace_y():
pointy = axes.coords_to_point(0, f(x.get_value()))
label_y = Tex('{0:.1f}'.format(f(x.get_value()))).next_to(pointy, UR)
return VGroup(label_y, axes.get_horizontal_line(axes.coords_to_point(x.get_value(), f(x.get_value()))))
eq_line = always_redraw(trace_dot)
object_line = always_redraw(trace_x)
image_line = always_redraw(trace_y)
eq = always_redraw(eq_text)
obj = Tex("Object").to_corner(DR)
img = Tex("Image").move_to(axes.get_axes()[1].get_center()).to_edge(UP, buff=0.2)
self.play(
Create(axes),
Write(obj),
Write(img)
)
self.play(
Write(eq),
Create(eq_line),
Create(object_line),
Create(image_line),
run_time=3
)
self.play(
x.animate.set_value(4.5),
run_time=7
)
self.wait(3)
self.play(
x.animate.set_value(1),
run_time=2
)
self.play(
x.animate.set_value(3),
run_time=2
)
self.play(
x.animate.set_value(2)
)
self.wait(2)
self.play(
x.animate.set_value(3.5)
)
self.wait(2)
self.play(
*[FadeOut(x) for x in self.mobjects]
)
class VerticalLineTest(Scene):
def construct(self):
plane = Axes()
top = plane.get_graph(lambda x: 3.5)
bottom = plane.get_graph(lambda x: -3.5)
title = Text("Vertical Line Test").scale(1.5)
self.play(
Write(title)
)
self.wait()
self.play(
title.animate.scale(1 / 1.5).to_corner(UL)
)
self.play(
Create(plane)
)
self.wait()
a = 1 / 3
c = -2
def f(x):
return a * x ** 2 + c
equation = MathTex(r"y=\frac{1}{3}x^2-2").to_corner(UR)
graph = plane.get_graph(f, [-4, 4]).set_color(GREEN)
self.play(
Write(equation)
)
self.wait()
self.play(
Create(graph)
)
self.wait()
vl1 = plane.get_vertical_lines_to_graph(top, [-4, 4], 5, line_func=Line, color=YELLOW, stroke_width=3)
vl2 = plane.get_vertical_lines_to_graph(bottom, [-4, 4], 5, line_func=Line, color=YELLOW,
stroke_width=3)
self.play(
Create(vl1),
Create(vl2)
)
self.wait(4)
ok = Text("It's a function!", color=GREEN).to_corner(DR)
self.play(
Write(ok)
)
self.wait(2)
self.play(
Uncreate(graph),
Uncreate(equation),
Uncreate(vl1),
Uncreate(vl2),
FadeOut(ok)
)
self.wait()
a = 4
b = 2
equation = MathTex(r"\frac{x^2}{16}+\frac{y^2}{4}=1").to_corner(UR)
def f(x):
return b * (1 - (x / a) ** 2) ** 0.5
def f2(x):
return -b * (1 - (x / a) ** 2) ** 0.5
graph = plane.get_graph(f, [-a, a]).set_color(RED)
graph2 = plane.get_graph(f2, [-a, a]).set_color(RED)
self.play(
Write(equation)
)
self.wait()
self.play(
Create(graph),
Create(graph2)
)
self.wait(2)
vl1 = plane.get_vertical_lines_to_graph(top, [-5, 5], 5, line_func=Line, color=YELLOW, stroke_width=3)
vl2 = plane.get_vertical_lines_to_graph(bottom, [-5, 5], 5, line_func=Line, color=YELLOW,
stroke_width=3)
self.play(
Create(vl1),
Create(vl2)
)
self.wait(3)
arrow1 = Arrow(end=plane.coords_to_point(0, 2))
arrow2 = Arrow(end=plane.coords_to_point(0, -2))
self.play(
Create(arrow1),
Create(arrow2)
)
self.wait(4)
nope = Text("NOT a function!", color=RED).to_corner(DR)
self.play(
Write(nope)
)
self.wait(5)
self.play(
*[FadeOut(x) for x in self.mobjects]
)
class Outro(Scene):
def construct(self):
title = Title("The Golden Rule of Functions")
self.play(
Write(title)
)
self.wait()
rule = Tex("Each object maps to exactly one image").set_color(GOLD).next_to(title, DOWN)
self.play(
Write(rule)
)
self.wait(2)
ex_title = VGroup()
ex_func = VGroup()
ex_title += Tex("Wavefunction of a free particle, ")
ex_func += MathTex(r"\Psi(x,t)=Ae^{i(kx-\omega t)}")
ex_title += Tex("Euler's totient function, ")
ex_func += MathTex(r"\varphi(n) =n \prod_{p\mid n} \left(1-\frac{1}{p}\right)")
ex_title += Tex("Riemann zeta function, ")
ex_func += MathTex(r"\zeta(s) = \frac{1}{\Gamma(s)} \int_0^\infty \frac{x ^ {s-1}}{e ^ x - 1} dx\,")
txt = VGroup()
for x in range(3):
txt += VGroup(ex_title[x], ex_func[x]).arrange(RIGHT)
txt.arrange(DOWN)
for x in range(3):
self.play(
Write(txt[x])
)
self.wait()
self.wait(2)
self.play(
*[FadeOut(x) for x in self.mobjects]
)
self.wait()
class Ending(Scene):
def construct(self):
kopi = SVGMobject("img/kopi.svg")
self.play(
Write(kopi),
run_time=2
)
kopi_2 = SVGMobject("img/kopi.svg")
title = Tex("Kopi", "tiam Maths").scale(2)
# title[0].set_color("#905e2e")
title[0].set_color("#e1dbca")
title[1].set_color("#e1dbca")
closing = VGroup(
kopi_2, title
)
closing.arrange()
title.align_to(kopi, DOWN)
self.play(
Transform(kopi, kopi_2)
)
self.play(
Write(title)
)
self.wait(8)
class Thumbnail(Scene):
def construct(self):
function_title = Tex('Functions').scale(2).to_edge(UP)
self.play(
Write(function_title)
)
self.wait(2)
X = Ellipse(2, 5, color=WHITE)
Y = Ellipse(2, 5, color=WHITE)
VGroup(X, Y).arrange(buff=3)
set_x = VGroup(*[MathTex(x) for x in 'abcd'])
set_x.arrange(DOWN, buff=1).move_to(X)
set_y = VGroup(*[MathTex(x) for x in 'pqrs'])
set_y.arrange(DOWN, buff=1).move_to(Y)
self.play(
Create(X),
Create(Y),
Create(set_x),
Create(set_y),
)
arrows = VGroup()
for i in range(3):
arrow = Arrow(start=set_x[i].get_center(), end=set_y[i].get_center())
self.play(
Create(arrow)
)
arrows += arrow
```
|
{
"source": "jeqinchooi/Stereophonic-hearing",
"score": 3
}
|
#### File: jeqinchooi/Stereophonic-hearing/config.py
```python
import visualisation_lab as vl
import random
import pygame
import math
# IMPORTANT: SPECIFY TO LOAD DATA OR TO CREATE FROM SCRATCH
# If set to true, the neural networks and related data from data file will be loaded
# IF SET TO FALSE, THE DATA IN data_file WILL BE OVERIDED
# A corresponding json file will be created for human reading
load_data = True
data_file = 'network_data.db'
# the top percentage where entities in each species are allowed to randomly mate
merit_bias = .5
# initialise
# ticks_per_test = 500
tick_per_animation = 500
population_limit = 200
num_sensory = 4
num_effector = 2
new_species_id = 1
excess_c = 1.0
disjoint_c = 1.0
weight_diff_c = 3.0
comp_threshold = 1.5
# Environment
num_speaker = 6
speaker_pos = []
def to_deg(y, x):
angle = math.atan2(y, x)
angle = math.degrees(angle)
return angle
def preprocess(response, answer):
angle1 = to_deg(response[1], response[0])
angle2 = to_deg(answer[1], answer[0])
return angle1, angle2
def grade(angle1, angle2):
# 2 responses, for each axes on Cartesian plane
score = 100
diff = abs(angle1-angle2)
if diff > 180:
diff = 360 - diff
score *= 1 - diff/180
return score
def test(network, animate_flag=False):
# clock = pygame.time.Clock()
running = True
speaker_counter = 0
score = 0
host = vl.Host((200, 0, 0))
while running:
# clock.tick(60)
if speaker_counter == num_speaker:
return score
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
break
# intensity 1, intensity 2, time 1, time 2
speaker = vl.Speaker(speaker_pos[speaker_counter][0], speaker_pos[speaker_counter][1])
sq_dist_to_ear1 = pow(host.ear1['x']-speaker.x, 2) + pow(host.ear1['y']-speaker.y, 2)
intensity1 = speaker.intensity / sq_dist_to_ear1
time1 = math.sqrt(sq_dist_to_ear1) / speaker.velocity
sq_dist_to_ear2 = pow(host.ear2['x']-speaker.x, 2) + pow(host.ear2['y']-speaker.y, 2)
intensity2 = speaker.intensity / sq_dist_to_ear2
time2 = math.sqrt(sq_dist_to_ear2) / speaker.velocity
sense = [
intensity1,
intensity2,
time1,
time2
]
# think
response = network.think(sense)
# perform
answer = [speaker.x-host.x, -(speaker.y-host.y)]
angles = preprocess(response, answer)
this_score = grade(angles[0], angles[1])
score += this_score
# refresh the screen
tick_counter = 0
if animate_flag:
print("SENSE OF CHAMPION")
print(sense)
print("RESPONSE OF CHAMPION")
print(response)
print("ANSWER")
print(answer)
print("ANGLE OF CHAMPION")
print(angles[0])
print("ANGLE OF ANSWER")
print(angles[1])
print("THIS SCORE")
print(this_score)
vl.reset()
host = vl.Host((200, 0, 0))
speaker = vl.Speaker(speaker_pos[speaker_counter][0], speaker_pos[speaker_counter][1])
vl.screen.blit(vl.background, (0, 0))
vl.all_sprites.draw(vl.screen)
vl.draw_response(response, vl.screen)
vl.draw_answer(speaker_pos[speaker_counter], vl.screen)
pygame.display.update()
tick_counter += 1
input('Press enter to continue')
# count the number of ticks
speaker_counter += 1
def start_round():
global speaker_pos
speaker_pos = []
for i in range(0, num_speaker):
x = random.random() * (vl.screen_width - vl.speaker_width)
y = random.random() * (vl.screen_height - vl.speaker_height)
speaker_pos.append([x, y])
```
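A worked check of the scoring, assuming the `to_deg` and `grade` helpers above: two direction estimates 90 degrees apart lose half the marks:
```python
a1 = to_deg(1, 0)      # straight up    -> 90.0 degrees
a2 = to_deg(0, 1)      # straight right -> 0.0 degrees
print(grade(a1, a2))   # 50.0 -- the score falls linearly with angular error up to 180 degrees
```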
|
{
"source": "jeqo/peru-constitucion",
"score": 3
}
|
#### File: peru-constitucion/scripts/extract.py
```python
import os
import PyPDF4
class Constitucion:
def __init__(self, titulos):
self.titulos = titulos
def __str__(self):
txt = ""
for i in self.titulos:
txt = txt + str(i) + "\n"
return txt
class Titulo:
def __init__(self, nombre, capitulos, articulos):
self.nombre = nombre
self.numero = nombre.split(" ")[1]
self.capitulos = capitulos
if (len(capitulos) == 0):
self.articulos = articulos
else:
self.articulos = []
def __str__(self):
txt = self.nombre + "\n"
for i in self.capitulos:
txt = txt + str(i) + "\n"
for i in self.articulos:
txt = txt + str(i) + "\n"
return txt
class Capitulo:
def __init__(self, nombre, articulos):
self.nombre = nombre
self.numero = nombre.split(" ")[1]
self.articulos = articulos
def __str__(self):
txt = self.nombre + "\n"
for i in self.articulos:
txt = txt + str(i) + "\n"
return txt
class Articulo:
def __init__(self, texto):
parts = texto.split(" ", 2)
self.nombre = (parts[0] + " " + parts[1]).replace(".-", "")
self.numero = self.nombre.split(" ")[1]
self.texto = parts[2]
def __str__(self):
return self.texto
def pdf_text(pdf_path):
# open the pdf file
pdf_file = open(pdf_path, 'rb')
# read pdf
read_pdf = PyPDF4.PdfFileReader(pdf_file)
all_text = ""
for num in range(0, read_pdf.numPages - 1):
page = read_pdf.getPage(num)
txt = page.extractText()
all_text = all_text + txt
    all_text = all_text.replace("\n", "").replace("  ", " ").split("DISPOSICIONES FINALES Y TRANSITORIAS")[0]
return all_text
def parse_constitucion(constitucion_text):
constitucion = Constitucion([])
for it, titulo in enumerate(constitucion_text.split("TITULO")):
if (it > 0):
nombre_titulo = ""
capitulos = []
articulos = []
if ("CAPITULO" in titulo):
nombre_titulo = ("TITULO" + titulo.split("CAPITULO", 1)[0]).strip()
for ic, capitulo in enumerate(titulo.split("CAPITULO")):
if (ic > 0):
nombre_capitulo = ("CAPITULO" + capitulo.split("Artículo", 1)[0]).strip()
articulos = []
for ia, articulo in enumerate(capitulo.split("Artículo")):
if (ia > 0):
articulos.append(Articulo("Artículo" + articulo))
capitulos.append(Capitulo(nombre_capitulo, articulos))
else:
nombre_titulo = ("TITULO" + titulo.split("Artículo", 1)[0]).strip()
articulos = []
for ia, articulo in enumerate(titulo.split("Artículo")):
if (ia > 0):
articulos.append(Articulo("Artículo" + articulo))
constitucion.titulos.append(Titulo(nombre_titulo, capitulos, articulos))
return constitucion
def write_constitucion(year, constitucion, constitucion_path):
summary = "- [Constitución Política de " + year +"](./" + year + "/README.md)\n"
constitucion_file = open(constitucion_path + "/README.md", "a")
for titulo in constitucion.titulos:
titulo_dir = "titulo-" + titulo.numero.lower()
summary = summary + " - [" + titulo.nombre + "](./" + year + "/" + titulo_dir + "/README.md)\n"
titulo_path = constitucion_path + "/" + titulo_dir
try:
os.makedirs(titulo_path)
except OSError:
print(titulo_path + " already exists")
if (len(titulo.capitulos) > 0):
titulo_file = open(titulo_path + "/README.md", "a")
titulo_file.write("# "+ titulo.nombre )
for capitulo in titulo.capitulos:
capitulo_filename = "capitulo-" + capitulo.numero.lower() + ".md"
summary = summary + " - [" + capitulo.nombre + "](./" + year + "/" + titulo_dir + "/" + capitulo_filename + ")\n"
capitulo_path = titulo_path + "/" + capitulo_filename
capitulo_file = open(capitulo_path, "a")
capitulo_file.write("# " + capitulo.nombre)
for articulo in capitulo.articulos:
capitulo_file.write("\n" + "## " + articulo.nombre + "\n")
capitulo_file.write(articulo.texto + "\n" + "\n")
capitulo_file.close()
titulo_file.close()
if (len(titulo.articulos) > 0):
titulo_file = open(titulo_path + "/README.md", "a")
titulo_file.write("# "+ titulo.nombre)
for articulo in titulo.articulos:
titulo_file.write("\n" + "## " + articulo.nombre + "\n")
titulo_file.write(articulo.texto)
titulo_file.close()
constitucion_file.close()
return summary
path = os.getcwd()
constitucion_year = "1993"
constitucion_path = path + "/src/" + constitucion_year
os.mkdir(constitucion_path)
constitucion_pdf_path = 'static/Texto_actualizado_CONS_1993.pdf'
constitucion_text = pdf_text(constitucion_pdf_path)
constitucion = parse_constitucion(constitucion_text)
summary = write_constitucion(constitucion_year, constitucion, constitucion_path)
print(summary)
```
|
{
"source": "Jeqqe/GitClips",
"score": 3
}
|
#### File: GitClips/api/twitch.py
```python
import requests
# Simple function to download the mp4 file based on
# the clip's url.
def downloadClip(clip):
name = str(clip['id']) + '.mp4'
response = requests.get(clip['mp4'])
file = open(f'clips/{name}', 'wb')
for chunk in response.iter_content(chunk_size=255):
if chunk:
file.write(chunk)
file.close()
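# Hypothetical usage sketch (the clip dict shape is an assumption, based only on the keys
# used above; the 'clips/' folder must already exist, as created by main.setup()):
# downloadClip({'id': 'SomeClipSlug', 'mp4': 'https://example.com/some_clip.mp4'})
# -> writes clips/SomeClipSlug.mp4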
```
#### File: Jeqqe/GitClips/main.py
```python
import os
from api import reddit, twitch, youtube
def setup():
if not os.path.exists('clips'):
os.mkdir('clips')
print('GitClips > Clips folder created (/clips)')
if not os.path.exists('data'):
os.mkdir('data')
print('GitClips > Data folder created (/data)')
if not os.path.exists('data/description.txt'):
with open('data/description.txt', 'w') as description:
description.write('Default description.')
description.close()
print('GitClips > Default description created. (/data/description.txt)')
if not os.path.exists('data/tags.txt'):
with open('data/tags.txt', 'w') as description:
description.write('gitclips,default,tags')
description.close()
print('GitClips > Default tags created. (/data/tags.txt)')
if __name__ == '__main__':
setup()
# Setup the youtube API service
service = youtube.setupService()
# Check if the secret file was successfully found; if not, end the program.
if not service:
exit()
# Request (6) clips from reddit [r/LiveStreamFail]
clips = reddit.getClips(6)
# Go through each fetched clip, download it and upload to youtube
# with title, description and tag
for clip in clips:
twitch.downloadClip(clip)
youtube.initUpload(service, clip)
```
|
{
"source": "jer123se12/fakecountry",
"score": 3
}
|
#### File: jer123se12/fakecountry/main.py
```python
import discord
import os
from discord.ext import commands
import json
import datetime
import math
from dotenv import load_dotenv
load_dotenv()
TOKEN=os.getenv("TOKEN")
prefix = "$"
bot = commands.Bot(command_prefix=prefix)
@bot.event
async def on_message(message):
global msg
msg = message
await bot.process_commands(message)
@bot.command(
name="ping",
help = "Sends the ping of the bot back"
)
async def pingReply(ctx, *args):
timeSent = msg.created_at.strftime("%S.%f")
timeNow = datetime.datetime.utcnow().strftime("%S.%f")
timeDiff = float(timeNow) - float(timeSent)
response = math.floor(timeDiff*1000000)/1000
response = "Ping: **" + str(response) + "ms" +"**"
await ctx.channel.send(response)
@bot.command(
name="sort",
help = "sorts an array seperated by ','"
)
async def sorting(ctx, message):
message=message.split(',')
l=[]
for i in message:
if i.isdigit():
l.append(int(i))
await ctx.channel.send(','.join([str(a) for a in sorted(l)]))
#Join event
@bot.event
async def on_member_join(member):
print(f'{member} has joined the country.')
#kick command
@bot.command()
async def kick(ctx, member : discord.Member, *, reason=None):
await member.kick(reason=reason)
await ctx.send(f'{member} has been deported from this country.')
#Leave event
@bot.event
async def on_member_remove(member):
print(f'{member} has left this country')
#Ban/Unban command
@bot.command()
async def ban(ctx, member : discord.Member, *, reason=''):
await member.ban(reason=reason)
if reason == '':
await ctx.send(f'{member} has been exiled from this country for treason.')
else:
await ctx.send(f'{member} has been exiled from this country for being {reason}.')
@bot.command()
async def unban(ctx, *, member):
banned_users = await ctx.guild.bans()
member_name, member_discriminator = member.split('#')
for ban_entry in banned_users:
user = ban_entry.user
if (user.name, user.discriminator) == (member_name, member_discriminator):
await ctx.guild.unban(user)
await ctx.send(f'{user.mention} has been accepted back into the country.')
return
bot.run(TOKEN)
```
|
{
"source": "jer8856/acme",
"score": 3
}
|
#### File: acme/tests/test_Payment.py
```python
import acme
import unittest
class TestPayment(unittest.TestCase):
__test = {
'ceil' : {
'input' : [1.5, 2.7, 45.8, 400.01],
'output': [2,3,46,401]
},
'range' : {
'input' : ["00:50", "23:50",
"18:01"],
'output': ["early", "later", "later"]
},
'salary' : {
'input' : [("00:50", "01:00"),
("10:00", "12:00")],
'output': {
'weekday' : [4.17, 30],
'weekend' : [5, 40]
}
}
}
def setUp(self):
self.payWeekdays = acme.Payment("weekdays")
self.payWeekend = acme.Payment("weekend")
def test_ceil(self, _input = "input", output = "output"):
for index, value in enumerate(self.__test["ceil"][_input]):
ceilValue: int = acme.Payment.ceil(value)
self.assertEqual(ceilValue, self.__test["ceil"][output][index])
def test_checkRange(self, _input = "input", output = "output"):
for index, value in enumerate(self.__test["range"][_input]):
_range: str = acme.Payment.checkRange(acme.DayTime.strptime(value))
self.assertEqual(_range, self.__test["range"][output][index])
def test_getSalary(self, _input = "input", output = "output"):
for index, (v1, v2) in enumerate(self.__test["salary"][_input]):
v1t = acme.DayTime.strptime(v1)
v2t = acme.DayTime.strptime(v2)
# print(f'range {v1t} - {v2t}')
_salaryWeekday: float = self.payWeekdays.getSalary(v1t, v2t)
_salaryWeekend: float = self.payWeekend.getSalary(v1t, v2t)
# print(f'salario: end{_salaryWeekend}, day,{_salaryWeekday}')
self.assertEqual(_salaryWeekend, self.__test["salary"][output]['weekend'][index])
self.assertEqual(_salaryWeekday, self.__test["salary"][output]['weekday'][index])
```
|
{
"source": "jerabaul29/config_scripts_snippets",
"score": 4
}
|
#### File: python/asyncio_async_await/asyncio_hello_world.py
```python
import time
import asyncio
"""A small hello world for asyncio async await
The idea here is to show "how to think about async await". I like to have the following mental model:
- in Python, "usual" async await allows to get concurrency within a single thread; i.e., switch between different slow
tasks (when we know that something is slow and that we may do something else in the meantime), using only 1 CPU on the
physical machine. This is like having 1 bartender, serving several clients in the pub.
- i.e. this is very useful for using 1 OS thread / 1 machine physical CPU for doing several slow tasks
concurrently, i.e. switching between these tasks to make all of them advance as fast as possible together,
even though each task may have some forced waiting time now and then (you do not serve a new beer to a
client before the ongoing one is finished).
- i.e., in python, "usual" async await asyncio is a way to share a thread / machine CPU time between several
concurrent tasks.
- for this, one needs to:
-- "schedule" some tasks with asyncio.create_task or similar
-- give asyncio the chance to switch between tasks; this is done through the await keyword; when hitting
an await keyword, if the corresponding function call is not ready yet, the asyncio runner is free to look
for / jump to other async tasks and spend time on these
-- be CAREFUL not to use non-async (i.e. blocking) slow functions when trying to formulate some async code: these do not get
awaited and cannot give time for the runner to do anything else asynchronously.
The async keyword "adds some syntactic sugar to make this function return a future".
The await keyword says to the asyncio executor to "feel free to attend other futures, somewhere else in the code, while this thing
resolves and gets ready". In practise, when an await function call is hit, the async executor is free to jump to another await
call and process it while the first call is resolving, before coming back to the initial await location.
The program will not progress past a given await call before it is fully resolved, but may do some other useful tasks (like, resolving
other awaits), while waiting for the initial await call to resolve.
Be sure to hit the await keyword often, and to put all potentially lengthy calls behind an await.
The async await model of Python is well adapted for concurrency, i.e. code with slow IO / network / database access, but not for CPU-
intensive tasks.
So the whole async / await system is here to make it easier to work with futures that are used under the hood.
"""
def print_hello() -> None:
"""A non async hello world function cannot be called with an await
keyword."""
time.sleep(0.5)
print("hello")
async def async_print_hello(time_start: float) -> int:
"""An async hello world function can be called with an await keyword,
but if it makes no await call to slow async functions, this will not give
us any gain and is not very helpful."""
print(f"start async hello without internal awaitable awaited calls, elapsed: {time.time() - time_start}s")
time.sleep(0.5)
print(f"done async hello without internal awaitable awaited calls, elapsed: {time.time() - time_start}s")
return 0
async def slow_function(input: int, time_start: float) -> int:
"""A slow function, for which the slow operations are provided with a future API,
through asyncio calls or our own future-returning code, and are awaited, will be executed asynchronously
in an effective way in the context of asyncio."""
print(f"start slow_function with input {input}, elapsed: {time.time() - time_start}s")
await asyncio.sleep(input)
print(f"done waiting slow_function with input {input}, elapsed: {time.time() - time_start}s")
return input
async def main_async() -> None:
time_start = time.time()
print(f"*** start scheduling some slow functions, elapsed: {time.time()-time_start}s")
task_1 = asyncio.create_task(slow_function(1, time_start))
task_2 = asyncio.create_task(slow_function(2, time_start))
task_3 = asyncio.create_task(slow_function(3, time_start))
task_4 = asyncio.create_task(slow_function(4, time_start))
print(f"*** all slow functions scheduled, elapsed: {time.time()-time_start}s")
# at this step, we have "scheduled" the async tasks, but never got the time to start executing code in them
# none of the following 2 commands will work: cannot await a function, neither a result, need to (under the hood)
# await a future (possibly created for us by declaring a function as async)
# await print_hello
# await print_hello()
# we await a function that contains no await on any awaitable call, so the asyncio runner gets no chance
# to look at other tasks and we get no gain
await async_print_hello(time_start)
# calling an async function directly without scheduling it as a task or awaiting it only creates a coroutine object;
# the corresponding function body will never get executed
# slow_function(5, time_start)
# we never hit an await where we actually had something to await for until now, and will not look for executing other
# tasks until we do so; for example, this will not let us do any progress on our async tasks for now
print("*** do something slow sync, that does not await (i.e. cannot wait)")
time.sleep(1.5)
print(f"*** done something slow sync, elapsed: {time.time()-time_start}s")
# now we finally do hit an await, on something that does have some slow awaitable parts,
# so asyncio will try to execute as many async tasks as possible
# concurrently, jumping between "non ready" await calls in its own smart way :)
print("*** do something slow async")
await asyncio.sleep(1.5)
print(f"*** done something slow async, elapsed: {time.time()-time_start}s")
# one way to run all the tasks async
# rest_1, res_2, res_3, res_4 = await asyncio.gather(task_1, task_2, task_3, task_4)
# another way, using list and unpacking, that may be more elegant
list_tasks = [task_1, task_2, task_3, task_4]
# if we do not await here, we will get an error since we exit the function given to asyncio.run without await-ing that
# all tasks are ready: i.e. need to await all tasks and collect them before moving out of async
# list_res = asyncio.gather(*list_tasks)
list_res = await asyncio.gather(*list_tasks)
print(f"*** done awaiting the gather, got results {list_res}, elapsed: {time.time()-time_start}s")
# the line below cannot be run: cannot await outside of an async block or asyncio executor
# await main_async
# There are different kinds of executors, based on threads or processes; the default executor (if not specified) will be
# a concurrent.futures.ThreadPoolExecutor with some default settings; due to the Python GIL, this poses some limitations
# (the GIL means, that for a given Python process, only one given thread can execute code at a time). There are other
# executors available (based on processes in particular), this is discussed in the asyncio_process_pool_executor examples.
# For now, let's use the default thread pool executor, and not care too much about the GIL :) .
# this will run the async function without problem: we explicitly ask asyncio to run tasks for us
# once this is done, the script will finish
asyncio.run(main_async())
# this is another way to do the same; this makes it clear that there is actually an asyncio "loop" running,
# that keeps checking for which async task can be performed every time an await is hit.
# once this is done, the script will finish
# loop = asyncio.get_event_loop()
# loop.run_until_complete(main_async())
# we can also force this asyncio loop to run forever: this script will then never finish, the asyncio loop will keep trying
# to find some async work to do, even if there is none; this is "like" the old way of having a main event loop looking for
# tasks / callbacks / work to do, again and again.
# loop = asyncio.get_event_loop()
# loop.create_task(main_async())
# loop.run_forever()
```
#### File: python/extract_colormap_rgb/extract_colormap_rgb.py
```python
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import cm
class ColormapMapper:
"""A mapper from values to RGB colors using built in colormaps
and scaling these."""
def __init__(self, cmap, vmin, vmax, warn_saturated=False):
"""cmap: the matplotlib colormap to use, min: the min value to be plotted,
max: the max value to be plotted."""
self.vmin = vmin
self.vmax = vmax
self.warn_saturated = warn_saturated
norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
self.normalized_colormap = cm.ScalarMappable(norm=norm, cmap=cmap)
def get_rgb(self, val):
"""Get the RGB value associated with val given the normalized colormap
settings."""
if self.warn_saturated:
if val < self.vmin:
print("ColormapMapper warning: saturated low value")
if val > self.vmax:
print("ColormapMapper warning: saturated high value")
return self.normalized_colormap.to_rgba(val)
if __name__ == "__main__":
import numpy as np
arange = np.arange(0, 2, 0.10)
all_zeros = np.zeros(arange.shape)
colormap_mapper = ColormapMapper(plt.get_cmap("viridis"), 0, 2)
colors = np.transpose(np.vectorize(colormap_mapper.get_rgb)(arange))
plt.figure()
sc = plt.scatter(x=arange, y=all_zeros, s=300, c=colors)
cbar = plt.colorbar()
cbar.set_label("some_information ")
sc.set_clim(0, 2)
plt.show()
```
#### File: python/pipe_functional_programming/pipe_example.py
```python
from pipe import Pipe
from pipe import select as pmap
from pipe import where as filter
from pipe import take
import functools
from icecream import ic
ic.configureOutput(prefix="", outputFunction=print)
"""
For my part, I like to stick to the usual functional programming terminology:
take
map
filter
reduce
"""
# add a reduce value
@Pipe
def preduce(iterable, function):
return functools.reduce(function, iterable)
def dummy_func(x):
print(f"processing at value {x}")
return x
print("----- test using a range() as input -----")
res_with_range = (range(100) | pmap(dummy_func)
| filter(lambda x: x % 2 == 0)
| take(2) )
print("*** what is the resulting object ***")
ic(res_with_range)
print("*** what happens when we force evaluation ***")
ic(list(res_with_range))
"""
This prints:
----- test using a range() as input -----
*** what is the resulting object ***
res_with_range: <generator object take at 0x7f60bd506d60>
*** what happens when we force evaluation ***
processing at value 0
processing at value 1
processing at value 2
processing at value 3
processing at value 4
list(res_with_range): [0, 2]
"""
print()
print("----- test using a range() as input but outputing a value not iterator -----")
res_with_reduce = (range(100) | pmap(dummy_func)
| filter(lambda x: x % 3 == 1)
| take(2)
| preduce(lambda x, y: x + y))
ic(res_with_reduce)
```
#### File: python/typing/test_incorrect_typing.py
```python
def perform_addition(a: int, b: int) -> int:
return a + b
res_int = perform_addition(1, 2)
print("got through a full int operation correctly, result: {}".format(res_int))
res_float = perform_addition(1.5, 2.0)
print("got through a float operation while this is an int typed function, result: {}".format(res_float))
print("i.e., typing is only an annotation, this does get executed!")
print("at present, my linter does not even catch anything")
```
|
{
"source": "jerabaul29/EffectFoldAngleAutorotatingSeeds",
"score": 3
}
|
#### File: EffectFoldAngleAutorotatingSeeds/code_experiments/select_valid_range.py
```python
from scipy import misc
import os
import fnmatch
import numpy as np
import matplotlib.pyplot as plt
from scipy import ndimage
from scipy import signal
from tqdm import tqdm
import pickle
# %matplotlib inline
#################################################################################
# all functions ----------------------------------------------------------------
class generateDataOnClick:
def __init__(self, verbose=0):
self.position_on_click_accumulator = []
self.verbose = verbose
def position_on_click(self, event):
x, y = event.x, event.y
if event.button == 1:
if event.inaxes is not None:
if self.verbose > 0:
print 'data coords:' + str(event.xdata) + " , " + str(event.ydata)
self.position_on_click_accumulator.append((event.xdata, event.ydata))
plt.axvline(event.xdata, color='r')
plt.show()
def return_positions(self):
return self.position_on_click_accumulator
def select_valid_range(verbose=0):
pos_seed = np.asarray(list_pos_seed)
plt.figure()
plt.plot(pos_seed[:, 0], label='x position')
plt.plot(pos_seed[:, 1], label='y position')
plt.xlabel('Frame number')
plt.ylabel('Position seed (pxl)')
plt.legend(loc=2)
generate_data_on_click_object = generateDataOnClick()
plt.connect('button_press_event', generate_data_on_click_object.position_on_click)
plt.show()
selected_positions_pixels = generate_data_on_click_object.return_positions()
x_position_1 = int(np.floor(selected_positions_pixels[0][0]))
x_position_2 = int(np.floor(selected_positions_pixels[1][0]))
data_valid_range = np.array([x_position_1, x_position_2])
# save the valid range
np.savetxt(path + list_cases[ind_case] + '/' + "valid_range.csv", data_valid_range, delimiter=",")
def save_one_result(result_data, result_name):
with open(path + list_cases[ind_case] + '/' + result_name + '.pkl', 'w') as crrt_file:
pickle.dump(result_data, crrt_file, pickle.HIGHEST_PROTOCOL)
def load_one_result(result_name):
with open(path + list_cases[ind_case] + '/' + result_name + '.pkl', 'r') as crrt_file:
result_data = pickle.load(crrt_file)
return result_data
#################################################################################
# analysis of the data
path = '/home/richaraf/Desktop/stroboscopic_strouhal/'
sampling_frequency = 30
# loads the calibration --------------------------------------------------------
poly_fit_calibration = np.load(path + 'poly_fit_calibration.npy')
# load list of all cases -------------------------------------------------------
list_cases = []
for file_name in os.listdir(path):
if fnmatch.fnmatch(file_name, 'seed_35mm_2*'):
list_cases.append(file_name)
print "Cases to process:"
for crrt_case in list_cases:
print crrt_case
print " "
nbr_cases = len(list_cases)
print "Number of cases: " + str(nbr_cases)
# select range on all cases ----------------------------------------------------
for ind_case in range(nbr_cases):
print ""
print "------------------------------------------------------------"
print "Analysing case: " + str(list_cases[ind_case])
path_to_images = path + list_cases[ind_case] + '/'
print "Load generated data"
list_pos_seed = load_one_result('list_pos_seed')
list_width_data_seed = load_one_result('list_width_data_seed')
list_true_wing_tip = load_one_result('list_true_wing_tip')
print ""
print "Select valid range"
print "Click on the figure to select the range to use for later analysis"
print "then close the figure."
select_valid_range()
```
|
{
"source": "jerabaul29/example_python_package",
"score": 2
}
|
#### File: example_package/some_other_sub_module/some_other_sub_module_1.py
```python
def some_other_sub_module_2_hello():
print("hello from some_other_sub_module_2")
```
|
{
"source": "jerabaul29/fenics-calc",
"score": 3
}
|
#### File: fenics-calc/test/test_interpreter.py
```python
from xcalc.interpreter import Eval
from dolfin import *
import unittest
def error(true, me):
mesh = me.function_space().mesh()
return sqrt(abs(assemble(inner(me - true, me - true)*dx(domain=mesh))))
class TestCases(unittest.TestCase):
'''UnitTest for (some of) xcalc.interpreter (no timeseries)'''
def test_sanity0(self):
mesh = UnitSquareMesh(4, 4)
V = FunctionSpace(mesh, 'CG', 1)
f = Expression('x[0]', degree=1)
g = Expression('x[1]', degree=1)
a = 3
b = -2
u = interpolate(f, V)
v = interpolate(g, V)
expr = a*u + b*v
me = Eval(expr)
true = Expression('a*f+b*g', f=f, g=g, a=a, b=b, degree=1)
e = error(true, me)
self.assertTrue(e < 1E-14)
def test_sanity1(self):
mesh = UnitSquareMesh(5, 5)
T = TensorFunctionSpace(mesh, 'DG', 0)
u = interpolate(Expression((('x[0]', 'x[1]'),
('2*x[0]+x[1]', 'x[0]+3*x[1]')), degree=1), T)
expr = sym(u) + skew(u)
me = Eval(expr)
true = u
e = error(true, me)
self.assertTrue(e < 1E-14)
def test_sanity2(self):
mesh = UnitSquareMesh(5, 5)
T = TensorFunctionSpace(mesh, 'CG', 1)
A = interpolate(Expression((('x[0]', 'x[1]'),
('2*x[0]+x[1]', 'x[0]+3*x[1]')), degree=1), T)
expr = tr(sym(A) + skew(A))
me = Eval(expr)
true = Expression('x[0] + x[0] + 3*x[1]', degree=1)
e = error(true, me)
self.assertTrue(e < 1E-14)
def test_sanity3(self):
mesh = UnitSquareMesh(5, 5)
T = TensorFunctionSpace(mesh, 'CG', 1)
A = interpolate(Expression((('x[0]', 'x[1]'),
('2*x[0]+x[1]', 'x[0]+3*x[1]')), degree=1), T)
expr = (sym(A) + skew(A))[0, 0]
me = Eval(expr)
true = Expression('x[0]', degree=1)
e = error(true, me)
self.assertTrue(e < 1E-14)
def test_sanity4(self):
mesh = UnitSquareMesh(5, 5)
T = TensorFunctionSpace(mesh, 'CG', 1)
A = interpolate(Expression((('x[0]', 'x[1]'),
('2*x[0]+x[1]', 'x[0]+3*x[1]')), degree=1), T)
expr = (sym(A) + skew(A))[:, 0]
me = Eval(expr)
true = Expression(('x[0]', '2*x[0]+x[1]'), degree=1)
e = error(true, me)
self.assertTrue(e < 1E-14)
def test_sanity5(self):
mesh = UnitSquareMesh(5, 5)
T = TensorFunctionSpace(mesh, 'CG', 1)
A = interpolate(Expression((('1', 'x[0]'),
('2', 'x[1]')), degree=1), T)
expr = det(A)
me = Eval(expr)
true = Expression('x[1]-2*x[0]', degree=1)
e = error(true, me)
self.assertTrue(e < 1E-14)
def test_sanity6(self):
mesh = UnitCubeMesh(5, 5, 5)
T = TensorFunctionSpace(mesh, 'CG', 1)
A = interpolate(Expression((('x[0]', '0', '1'),
('0', '1', 'x[1]'),
('x[2]', '0', '1')), degree=1), T)
expr = det(A)
me = Eval(expr)
true = Expression('x[0]-x[2]', degree=1)
e = error(true, me)
self.assertTrue(e < 1E-14)
def test_sanity7(self):
mesh = UnitSquareMesh(5, 5)
T = TensorFunctionSpace(mesh, 'CG', 1)
A = interpolate(Expression((('1', 'x[0]'),
('2', 'x[1]')), degree=1), T)
V = VectorFunctionSpace(mesh, 'CG', 1)
v = interpolate(Expression(('x[0]+x[1]', '1'), degree=1), V)
me = Eval(dot(A, v))
true = Expression(('x[1]+2*x[0]', '2*x[0]+3*x[1]'), degree=1)
e = error(true, me)
self.assertTrue(e < 1E-14)
def test_sanity8(self):
mesh = UnitSquareMesh(5, 5)
T = TensorFunctionSpace(mesh, 'CG', 1)
A = interpolate(Expression((('1', 'x[0]'),
('2', 'x[1]')), degree=1), T)
V = VectorFunctionSpace(mesh, 'CG', 1)
v = interpolate(Expression(('x[0]+x[1]', '1'), degree=1), V)
me = Eval(dot(v, transpose(A)))
true = Expression(('x[1]+2*x[0]', '2*x[0]+3*x[1]'), degree=1)
e = error(true, me)
self.assertTrue(e < 1E-14)
def test_sanity9(self):
mesh = UnitSquareMesh(5, 5)
V = VectorFunctionSpace(mesh, 'CG', 1)
v0 = interpolate(Expression(('x[0]+x[1]', '1'), degree=1), V)
v1 = interpolate(Expression(('1', 'x[0]'), degree=1), V)
me = Eval(inner(v0, v1))
true = Expression('x[1]+2*x[0]', degree=1)
e = error(true, me)
self.assertTrue(e < 1E-14)
```
#### File: fenics-calc/test/test_utils.py
```python
from xcalc.utils import (space_of, common_sub_element, coefs_of, numpy_op_indices,
find_first, find_last, clip_index)
from dolfin import *
import numpy as np
import unittest
class TestCases(unittest.TestCase):
'''UnitTest for (some of) xcalc.utils'''
def test_spaces_of_ok(self):
mesh = UnitSquareMesh(3, 3)
V = FunctionSpace(mesh, 'CG', 1)
f = Function(V)
V_ = space_of((f, ))
self.assertEqual(mesh.id(), V_.mesh().id())
self.assertEqual(V.ufl_element(), V_.ufl_element())
def test_spaces_of_fail(self):
mesh = UnitSquareMesh(3, 3)
V = FunctionSpace(mesh, 'CG', 1)
f = Function(V)
V = FunctionSpace(mesh, 'CG', 2)
g = Function(V)
with self.assertRaises(AssertionError):
space_of((f, g))
def test_common_sub_element_ok(self):
mesh = UnitSquareMesh(3, 3)
V = FunctionSpace(mesh, 'CG', 1)
self.assertEqual(V.ufl_element(), common_sub_element((V, )))
p = FiniteElement('Lagrange', triangle, 1)
V = VectorFunctionSpace(mesh, 'CG', 1)
X = FunctionSpace(mesh, MixedElement([p, p]))
self.assertEqual(p, common_sub_element((V, X)))
def test_common_sub_element_fail_mixed(self):
mesh = UnitSquareMesh(3, 3)
p = FiniteElement('Lagrange', triangle, 1)
q = FiniteElement('Lagrange', triangle, 2)
X = FunctionSpace(mesh, MixedElement([p, q]))
with self.assertRaises(ValueError):
common_sub_element((X, ))
def test_common_sub_element_fail_no_common(self):
mesh = UnitSquareMesh(3, 3)
V = FunctionSpace(mesh, 'CG', 1)
W = VectorFunctionSpace(mesh, 'CG', 2)
with self.assertRaises(AssertionError):
common_sub_element((V, W))
def test_coef_ok(self):
a = 1
self.assertEqual(coefs_of(a), a)
def test_coef_fail(self):
a = Constant(1)
with self.assertRaises(AssertionError):
coefs_of(a)
def test_indices(self):
mesh = UnitSquareMesh(3, 3)
W = VectorFunctionSpace(mesh, 'CG', 2)
true = np.column_stack([W.sub(i).dofmap().dofs() for i in range(2)])
me = np.zeros_like(true)
for row, row_values in enumerate(numpy_op_indices(W, (2, ))):
me[row] = row_values
error = np.linalg.norm(me - true)
self.assertEqual(error, 0)
def test_clipping(self):
a = (-2, -1, 0, 2, 3, 5, 10, 12, 14, 23, 49, 65, 79)
self.assertTrue(find_first(a, lambda x: x < 0) == 0)
self.assertTrue(find_first(a, lambda x: x > 0 and x % 2 == 1) == 4)
self.assertTrue(find_last(a, lambda x: x == 10) == -7)
self.assertTrue(find_last(a, lambda x: x > 0) == -1)
f, l = 2, 20
i = clip_index(a, f, l)
self.assertTrue(all(f < x < l for x in a[i]))
```
#### File: fenics-calc/xcalc/timeseries.py
```python
import xml.etree.ElementTree as ET
from function_read import (read_h5_function, read_vtu_function,
read_h5_mesh, read_vtu_mesh)
from dolfin import (Function, XDMFFile, HDF5File, FunctionSpace,
VectorFunctionSpace, TensorFunctionSpace, warning)
from utils import space_of, clip_index
import numpy as np
import itertools
import os
class TempSeries(Function):
'''Collection of snapshots that are function in same V'''
def __init__(self, ft_pairs):
# NOTE: this is derived from Function just to allow nice
# interplay with the interpreter. If there were space time
# elements then we could have eval f(t, x) support
functions, times = list(zip(*ft_pairs))
# Check that every f lives in the same space V
V = space_of(functions)
# Time interval check
dt = np.diff(times)
assert (dt > 0).all()
self.functions = functions
self.times = times
self.V = V
Function.__init__(self, V)
def __iter__(self):
# op(series) = series(op(functions))
for f in self.functions: yield f
def __len__(self):
return len(self.functions)
def __getitem__(self, index):
# NOTE: by having this [] is access to functions not a new time series
# of the components
if isinstance(index, int):
return self.functions[index]
else:
return TempSeries(zip(self.functions[index], self.times[index]))
def stream(series, f):
'''Pipe series through f'''
space_of((series, f))
for f_ in series.functions: # Get your own iterator
f.vector().set_local(f_.vector().get_local())
yield f
def clip(series, t0, t1):
'''A view of the series with times such that t0 < times < t1'''
index = clip_index(series.times, t0, t1)
functions = series.functions[index]
times = series.times[index]
return TempSeries(zip(functions, times))
def common_interval(series):
'''Series are compatible if they over same V and cover same interval'''
series = filter(lambda s: isinstance(s, TempSeries), series)
interval, V = [], None
for s in series:
V_ = s.V
assert V is None or (V.mesh().id() == V_.mesh().id()
and
V.ufl_element() == V_.ufl_element())
interval_ = np.array(s.times)
assert not len(interval) or np.linalg.norm(interval - interval_) < 1E-14
V = V_
interval = interval_
return interval
def get_P1_space(V):
'''Get the Lagrange CG1 space corresponding to V'''
# This is how in essence FEniCS 2017.2.0 dumps data, i.e. there is
# no support for higher order spaces
assert V.ufl_element().family() != 'Discontinuous Lagrange' # Cell data needed
mesh = V.mesh()
elm = V.ufl_element()
if elm.value_shape() == ():
return FunctionSpace(mesh, 'CG', 1)
if len(elm.value_shape()) == 1:
return VectorFunctionSpace(mesh, 'CG', 1)
return TensorFunctionSpace(mesh, 'CG', 1)
def PVDTempSeries(path, V=None, first=0, last=None):
'''
Read in the temp series of functions in V from PVD file. If V is not
a function space then a finite element has to be provided for constructing
the space on the recovered mesh.
'''
_, ext = os.path.splitext(path)
assert ext == '.pvd'
tree = ET.parse(path)
collection = list(tree.getroot())[0]
path = os.path.dirname(os.path.abspath(path))
# Read in paths/timestamps for VTUs. NOTE: as this is supposed to be serial
# assert part 0
vtus, times = [], []
for dataset in collection:
assert dataset.attrib['part'] == '0'
vtus.append(os.path.join(path, dataset.attrib['file']))
times.append(float(dataset.attrib['timestep']))
vtus, times = vtus[slice(first, last, None)], times[slice(first, last, None)]
# path.vtu -> function. But vertex values!!!!
if not isinstance(V, FunctionSpace):
warning('Setting up P1 space on the recovered mesh')
cell_type = V.cell() # Dangerously assuming this is a UFL element
mesh = read_vtu_mesh(vtus[0], cell_type)
V = FunctionSpace(mesh, V)
V = get_P1_space(V)
functions = read_vtu_function(vtus, V)
ft_pairs = zip(functions, times)
return TempSeries(ft_pairs)
def XDMFTempSeries(path, V, first=0, last=None):
'''
Read in the temp series of functions in V from XDMF file. If V is not
a function space then a finite element has to be provided for constructing
the space on the recovered mesh.
'''
# NOTE: in 2017.2.0 fenics only stores vertex values so CG1 functions
# is what we go for
_, ext = os.path.splitext(path)
assert ext == '.xdmf'
tree = ET.parse(path)
domain = list(tree.getroot())[0]
grid = list(domain)[0]
times = [] # Only collect time stamps so that we access in right order
h5_file = '' # Consistency of piece as VisualisationVector ...
for item in grid:
_, __, time, attrib = list(item)
time = time.attrib['Value']
times.append(time)
piece = list(attrib)[0]
h5_file_, fdata = piece.text.split(':/')
assert not h5_file or h5_file == h5_file_
h5_file = h5_file_
times = times[slice(first, last, None)]
# We read visualization vector from this
h5_file = os.path.join(os.path.dirname(os.path.abspath(path)), h5_file)
if not isinstance(V, FunctionSpace):
warning('Setting up P1 space on the recovered mesh')
cell_type = V.cell() # Dangerously assuming this is a UFL element
mesh = read_h5_mesh(h5_file, cell_type)
V = FunctionSpace(mesh, V)
V = get_P1_space(V)
functions = read_h5_function(h5_file, times, V)
ft_pairs = zip(functions, map(float, times))
return TempSeries(ft_pairs)
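# Hypothetical usage sketch (the file name and element are assumptions, not from this repository):
# from dolfin import FiniteElement, triangle
# series = XDMFTempSeries('u.xdmf', FiniteElement('Lagrange', triangle, 1))
# last_snapshot, last_time = series[-1], series.times[-1]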
```
|
{
"source": "jerabaul29/kartverket_storm_surge_data",
"score": 2
}
|
#### File: kartverket_stormsurge/helper/datetimes_test.py
```python
import datetime
import pytz
from kartverket_stormsurge.helper.datetimes import datetime_range
from kartverket_stormsurge.helper.datetimes import datetime_segments
def test_datetime_range_1():
datetime_start = datetime.datetime(2020, 1, 1, 0, 0, 0, tzinfo=pytz.utc)
datetime_end = datetime.datetime(2020, 1, 3, 0, 0, 0, tzinfo=pytz.utc)
step_timedelta = datetime.timedelta(days=1)
result = datetime_range(datetime_start, datetime_end, step_timedelta)
correct_result = [datetime.datetime(2020, 1, 1, 0, 0, 0, tzinfo=pytz.utc),
datetime.datetime(2020, 1, 2, 0, 0, 0, tzinfo=pytz.utc),
]
assert list(result) == correct_result
def test_datetime_range_2():
datetime_start = datetime.datetime(2020, 1, 1, 0, 0, 0, tzinfo=pytz.utc)
datetime_end = datetime.datetime(2020, 1, 5, 0, 0, 0, tzinfo=pytz.utc)
step_timedelta = datetime.timedelta(days=2)
result = datetime_range(datetime_start, datetime_end, step_timedelta)
correct_result = [datetime.datetime(2020, 1, 1, 0, 0, 0, tzinfo=pytz.utc),
datetime.datetime(2020, 1, 3, 0, 0, 0, tzinfo=pytz.utc),
]
assert list(result) == correct_result
def test_datetime_segments_1():
datetime_start = datetime.datetime(2020, 1, 1, 0, 0, 0, tzinfo=pytz.utc)
datetime_end = datetime.datetime(2020, 1, 3, 0, 0, 0, tzinfo=pytz.utc)
step_timedelta = datetime.timedelta(days=1)
result = datetime_segments(datetime_start, datetime_end, step_timedelta)
correct_result = [(datetime.datetime(2020, 1, 1, 0, 0, 0, tzinfo=pytz.utc),
datetime.datetime(2020, 1, 2, 0, 0, 0, tzinfo=pytz.utc)),
(datetime.datetime(2020, 1, 2, 0, 0, 0, tzinfo=pytz.utc),
datetime.datetime(2020, 1, 3, 0, 0, 0, tzinfo=pytz.utc)),
]
assert list(result) == correct_result
def test_datetime_segments_2():
datetime_start = datetime.datetime(2020, 1, 1, 0, 0, 0, tzinfo=pytz.utc)
datetime_end = datetime.datetime(2020, 1, 4, 0, 0, 0, tzinfo=pytz.utc)
step_timedelta = datetime.timedelta(days=2)
result = datetime_segments(datetime_start, datetime_end, step_timedelta)
correct_result = [(datetime.datetime(2020, 1, 1, 0, 0, 0, tzinfo=pytz.utc),
datetime.datetime(2020, 1, 3, 0, 0, 0, tzinfo=pytz.utc)),
(datetime.datetime(2020, 1, 3, 0, 0, 0, tzinfo=pytz.utc),
datetime.datetime(2020, 1, 4, 0, 0, 0, tzinfo=pytz.utc)),
]
assert list(result) == correct_result
```
#### File: kartverket_stormsurge/helper/last_thursday_of_the_month.py
```python
import datetime
import pytz
from kartverket_stormsurge.helper.datetimes import assert_is_utc_datetime
def get_last_thursday_in_month(datetime_in):
assert_is_utc_datetime(datetime_in)
thursday_index = 3
crrt_month = datetime_in.month
crrt_year = datetime_in.year
crrt_day = datetime.datetime(crrt_year, crrt_month, 1, 0, 0, tzinfo=pytz.utc)
last_thursday = None
while True:
if crrt_day.month != crrt_month:
break
elif crrt_day.weekday() == thursday_index:
last_thursday = crrt_day
crrt_day += datetime.timedelta(days=1)
return(last_thursday)
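# Illustrative check (not part of the original module): the last Thursday of January 2020 was the 30th,
# so get_last_thursday_in_month(datetime.datetime(2020, 1, 15, tzinfo=pytz.utc))
# returns datetime.datetime(2020, 1, 30, 0, 0, tzinfo=pytz.utc)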
```
|
{
"source": "jerabaul29/LoggerWavesInIce",
"score": 3
}
|
#### File: LoggerWavesInIce/Logger_GPS_SD_VN_Binary_output_Parser/parser_logger.py
```python
from crcmod import mkCrcFun
crc16 = mkCrcFun(0x11021, 0x0000, False, 0x0000)
import numpy as np
from binascii import hexlify, unhexlify
from struct import unpack
import os
import fnmatch
# length of one binary dataframe
length_binary_frame = 124
# number of 4 bits floating points in one frame
number_of_VN100_fields = 29
def checksum_vn100(binary_data):
"""
checksum binary_data
"""
crc1 = np.int('0x' + binary_data[-4:], 16)
crc2 = crc16(unhexlify(binary_data[2:-4]))
if crc1 == crc2:
return True
else:
return False
def add_entry_list_strings(list_string, entry):
"""
add a new entry to a list of strings, including some newlines
"""
list_string.append(entry)
list_string.append('\n')
return list_string
def parse_frame_vn100(data_frame, verbose=0):
"""
parse one VN100 checksummed frame
"""
output = np.zeros((number_of_VN100_fields,))
for ind_data in range(number_of_VN100_fields):
ind_float_start = 8 + 4 + 4 * 2 * ind_data
ind_float_final = ind_float_start + 4 * 2
current_binary_bytes = data_frame[ind_float_start:ind_float_final]
if verbose > 1:
print "Parsing bytes:"
display_binary_data(current_binary_bytes)
current_value = np.float(unpack('<f', unhexlify(current_binary_bytes))[0])
output[ind_data] = current_value
if verbose > 1:
print "Parsed VN100 data frame"
print output
return output
def print_parsed_frame_vn100(output):
"""
user friendly print of VN100 parsed data
"""
# Mag
print output[0:3]
# Accel
print output[3:6]
# Gyro
print output[6:9]
# Temp
print output[9]
# Pres
print output[10]
# YawPitchRoll
print output[11:14]
# DCM
print output[14:23].reshape((3, 3))
# MagNed
print output[23:26]
# AccNed
print output[26:29]
def convert_numpy_to_scientific_string(numpy_array):
"""
converts a numpy array in a string composed of scientific notation numbers
"""
list_strings = []
for value in numpy_array:
list_strings.append("%e" % value)
list_strings.append(",")
string_result = "".join(list_strings)
return string_result
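# Illustrative example (not from the original parser): with the "%e" format used above,
# convert_numpy_to_scientific_string(np.array([1.0, 2.5])) returns "1.000000e+00,2.500000e+00,"
# (note the trailing comma, matching the comma-separated lines written to the _B output file)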
def display_binary_data(binary_data):
"""
Display some binary data in hex format
"""
print "Print binary data"
length_data = len(binary_data)
print "Length of binary data as ASCII: " + str(length_data)
str_print = ""
for ind in range(int(length_data / 2)):
str_print += binary_data[2 * ind:2 * ind + 2]
str_print += " "
print str_print
def load_until_end_line(data, start_index):
"""
Loads the data in an accumulator starting at start_index until hit end of line character
Return the accumulator
"""
accumulator = []
current_index = start_index
hit_end_of_line = False
while not hit_end_of_line:
try:
current_char = data[current_index]
except IndexError:
print "bad end of line at the end of the file; probably due to power disconnect"
return (accumulator, current_index - 1)
current_index += 1
if current_char == '\n':
return (accumulator, current_index)
else:
accumulator.append(current_char)
class Parser_logger():
"""
A class for helping to parse output from the Waves in ice loggers
"""
def __init__(self, path=None, path_output=None):
self.path = path
self.path_output = path_output
def process_folder(self, verbose=0):
"""
Process all the files in the self.path folder
"""
for file_crrt in os.listdir(self.path):
if fnmatch.fnmatch(file_crrt, 'F*'):
print "Processing file: " + str(file_crrt)
self.load_file(self.path + file_crrt, verbose=verbose)
self.parse_current_data(self.path_output + file_crrt, verbose=verbose)
def load_file(self, path_to_file, verbose=0):
with open(path_to_file, 'r') as file:
self.current_data = file.read()
self.current_data_length = len(self.current_data)
if verbose > 0:
print "Total data length: " + str(self.current_data_length)
def parse_current_data(self, path_output, verbose=0):
current_data_index = 0
list_strings_log_S = []
list_strings_log_R = []
list_strings_log_R_time = []
list_strings_log_C = []
list_strings_log_C_time = []
list_strings_log_G = []
list_strings_log_G_time = []
list_strings_log_B = ['MagX, MagY, MagZ, AccX, AccY, AccZ, GyroX, GyroY, GyroZ, Temp, Pres, Yaw, Pitch, Roll, DCM1, DCM2, DCM3, DCM4, DCM5, DCM6, DCM7, DCM8, DCM9, MagNED1, MagNED2, MagNED3, AccNED1, AccNED2, AccNED3, \n']
list_strings_log_B_time = []
list_strings_log_broken = []
list_strings_log_broken_time = []
# expected_next_timestamp says what if a timestamp is expected
# 0: no timestamp expected
expected_next_timestamp = 0
# while some data to analyse in the file, go through it
while current_data_index < self.current_data_length:
if verbose > 1:
print "Current index: " + str(current_data_index)
# coming next may be just an empty line
if self.current_data[current_data_index] == '\n' or self.current_data[current_data_index] == '\r':
current_data_index += 1
if verbose > 0:
print "Newline char"
else:
# at this point, look for the indication of which type of data to expect
next_two_chars = self.current_data[current_data_index:current_data_index + 2]
current_data_index += 2
if verbose > 1:
print "Current next two chars: " + str(next_two_chars)
# case information about timestamp in milliseconds
if next_two_chars == 'M,':
if verbose > 0:
print "Hit start of a milliseconds timestamp"
(message, current_data_index) = load_until_end_line(self.current_data, current_data_index)
message_string = ''.join(message)
if verbose > 0:
print "Message: " + message_string
if expected_next_timestamp == 0:
print "Expected no timestamp!"
elif expected_next_timestamp == 2:
list_strings_log_R_time = add_entry_list_strings(list_strings_log_R_time, message_string)
list_strings_log_C_time = add_entry_list_strings(list_strings_log_C_time, message_string)
elif expected_next_timestamp == 3:
list_strings_log_G_time = add_entry_list_strings(list_strings_log_G_time, message_string)
elif expected_next_timestamp == 4:
list_strings_log_B_time = add_entry_list_strings(list_strings_log_B_time, message_string)
elif expected_next_timestamp == 5:
list_strings_log_broken_time = add_entry_list_strings(list_strings_log_broken_time, message_string)
# case start message of a file
elif next_two_chars == 'S,':
if verbose > 0:
print "Hit start of a file"
(message, current_data_index) = load_until_end_line(self.current_data, current_data_index)
message_string = ''.join(message)
if verbose > 0:
print "Message: " + message_string
expected_next_timestamp = 0
list_strings_log_S = add_entry_list_strings(list_strings_log_S, message_string)
# case information about battery level (raw)
elif next_two_chars == 'R,':
if verbose > 0:
print "Hit start of a raw reading battery message"
(message, current_data_index) = load_until_end_line(self.current_data, current_data_index)
message_string = ''.join(message)
if verbose > 0:
print "Message: " + message_string
expected_next_timestamp = 0
list_strings_log_R = add_entry_list_strings(list_strings_log_R, message_string)
# case information about converted battery level
elif next_two_chars == 'C,':
if verbose > 0:
print "Hit start of a converted level battery message"
(message, current_data_index) = load_until_end_line(self.current_data, current_data_index)
message_string = ''.join(message)
if verbose > 0:
print "Message: " + message_string
expected_next_timestamp = 2
list_strings_log_C = add_entry_list_strings(list_strings_log_C, message_string)
# case GPS data
elif next_two_chars == '$G':
if verbose > 0:
print "Hit start of a GPS data string"
(message, current_data_index) = load_until_end_line(self.current_data, current_data_index)
message_string = ''.join(message)
if verbose > 0:
print "Message: G" + message_string
expected_next_timestamp = 3
list_strings_log_G.append('G')
list_strings_log_G = add_entry_list_strings(list_strings_log_G, message_string)
# case binary data
elif next_two_chars == 'B,':
if verbose > 0:
print "Hit start of a binary data frame"
current_data_index += 3
current_binary_data = hexlify(self.current_data[current_data_index:current_data_index + length_binary_frame])
current_data_index += length_binary_frame
if verbose > 0:
display_binary_data(current_binary_data)
validity_checksum = checksum_vn100(current_binary_data)
if verbose > 0:
print "Validity checksum " + str(validity_checksum)
if validity_checksum:
output = parse_frame_vn100(current_binary_data, verbose=verbose)
if verbose > 0:
print_parsed_frame_vn100(output)
else:
output = np.array([0])
expected_next_timestamp = 4
list_strings_log_B = add_entry_list_strings(list_strings_log_B, convert_numpy_to_scientific_string(output))
# or broken message
else:
if verbose > 0:
print "Broken message, read until next line break"
(message, current_data_index) = load_until_end_line(self.current_data, current_data_index)
message_string = ''.join(message)
if verbose > 0:
print "Message: " + message_string
expected_next_timestamp = 5
list_strings_log_broken = add_entry_list_strings(list_strings_log_broken, message_string)
# generate the data strings and save them
S_data = "".join(list_strings_log_S)
with open(path_output + "_S", "w") as text_file:
text_file.write(S_data)
R_data = "".join(list_strings_log_R)
Rt_data = "".join(list_strings_log_R_time)
with open(path_output + "_R", "w") as text_file:
text_file.write(R_data)
with open(path_output + "_Rt", "w") as text_file:
text_file.write(Rt_data)
C_data = "".join(list_strings_log_C)
Ct_data = "".join(list_strings_log_C_time)
with open(path_output + "_C", "w") as text_file:
text_file.write(C_data)
with open(path_output + "_Ct", "w") as text_file:
text_file.write(Ct_data)
G_data = "".join(list_strings_log_G)
Gt_data = "".join(list_strings_log_G_time)
with open(path_output + "_G", "w") as text_file:
text_file.write(G_data)
with open(path_output + "_Gt", "w") as text_file:
text_file.write(Gt_data)
B_data = "".join(list_strings_log_B)
Bt_data = "".join(list_strings_log_B_time)
with open(path_output + "_B", "w") as text_file:
text_file.write(B_data)
with open(path_output + "_Bt", "w") as text_file:
text_file.write(Bt_data)
br_data = "".join(list_strings_log_broken)
brt_data = "".join(list_strings_log_broken_time)
with open(path_output + "_P", "w") as text_file:
text_file.write(br_data)
with open(path_output + "_Pt", "w") as text_file:
text_file.write(brt_data)
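# Hypothetical usage sketch (the paths are assumptions, not from the original deployment):
# parser = Parser_logger(path="/data/waves_in_ice/raw/", path_output="/data/waves_in_ice/parsed/")
# parser.process_folder(verbose=1)
# this would parse every file matching 'F*' in path and write the _S, _R, _C, _G, _B and _P outputs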
```
#### File: LoggerWavesInIce/processing_scripts/example_fit_from_JoG_article.py
```python
import numpy as np
import scipy.optimize

# note: listFrequenciesForFit and listDampingForFit (arrays of fit frequencies and of the
# measured damping coefficients) are expected to be defined earlier in the full analysis script
g = 9.81
def funcFitDecay(frq, nuFit):
return (nuFit**0.5) * ((2 * np.pi * frq)**(7. / 2.)) / (2**0.5) / (g**2)
# fit without taking into account uncertainty on spectra
viscosity, covariance = scipy.optimize.curve_fit(funcFitDecay, listFrequenciesForFit, listDampingForFit,p0=0.01,sigma=0.1)
perr = np.sqrt(np.diag(covariance))
print " "
print "Parameters and fit quality from machine fit"
print "Viscosity from machine fit: " + str(viscosity)
print "1 sigma confidence: " + str(perr)
# residuals in the fitting of the model
residuals = listDampingForFit - funcFitDecay(listFrequenciesForFit, viscosity)
# check the quality statistics
# from http://stackoverflow.com/questions/19189362/getting-the-r-squared-value-using-curve-fit
ss_res = np.sum(residuals**2)
ss_tot = np.sum((listDampingForFit-np.mean(listDampingForFit))**2)
r_squared = 1 - (ss_res / ss_tot)
print "R2: " + str(r_squared)
# compute MAE (Mean Absolute Error) and RMSE (Root Mean Square Error)
MAE = np.mean(np.abs(residuals))
RMSE = np.sqrt(np.mean(residuals**2))
print "MAE of residuals damping: " + str(MAE)
print "RMSE of residuals damping: " + str(RMSE)
```
#### File: LoggerWavesInIce/processing_scripts/filter_resample_csvWrite_acceleration.py
```python
from __future__ import print_function
import numpy as np
import pickle
import matplotlib.pyplot as plt
from scipy.signal import butter, lfilter
import datetime
# TODO: maybe would be cleaner to put this as functions rather than script
class BandPass(object):
"""A class to perform bandpass filtering using Butter filter."""
def __init__(self, lowcut=0.05, highcut=0.25, fs=10.0, order=3):
"""lowcut, highcut and fs are in Hz."""
nyq = 0.5 * fs
low = lowcut / nyq
high = highcut / nyq
self.b, self.a = butter(order, [low, high], btype='band')
def filter_data(self, data):
"""filter the data."""
result = lfilter(self.b, self.a, data)
return(result)
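# Quick illustration (not part of the original script): with the 10 Hz sampling rate used below,
# the default band (0.05-0.25 Hz) keeps a 0.1 Hz component and suppresses a 2 Hz one, e.g.
# _t = np.arange(0, 600, 0.1)
# _noisy = np.sin(2 * np.pi * 0.1 * _t) + np.sin(2 * np.pi * 2.0 * _t)
# _filtered = BandPass(lowcut=0.05, highcut=0.25, fs=10.0, order=3).filter_data(_noisy)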
path_IMU_data = "/home/jrlab/Desktop/Data/Data_For_Aleksey_Harbor_2019/Out/3/_IMU_TNEDXYZ"
path_output = "/home/jrlab/Desktop/Data/Data_For_Aleksey_Harbor_2019/Out/3/_IMU_TNEDXYZ.csv"
# load the saved data
with open(path_IMU_data, "rb") as crrt_file:
dict_data_loaded_IMU = pickle.load(crrt_file)
list_IMUs_for_plot = ["3"]
for crrt_IMU in list_IMUs_for_plot:
size_data = np.size(dict_data_loaded_IMU[crrt_IMU].D)
print("IMU {}".format(crrt_IMU))
print("Number of points: {}".format(size_data))
print("Corresponding duration (hr): {}".format(size_data / 10.0 / 3600))
print("Corresponding numbe of 15 minutes files read: {}".format(size_data / 10 / 3600 * 4.0))
dict_filtered_resampled_data = {}
FS = 10
band_pass_filter = BandPass(lowcut=0.03, highcut=0.25, order=2)
str_time_min = "2019-04-02 09:30:00.000"
time_min = datetime.datetime.strptime(str_time_min, "%Y-%m-%d %H:%M:%S.%f")
time_max = datetime.datetime.strptime("2019-04-02 14:00:00.000", "%Y-%m-%d %H:%M:%S.%f")
time_base_start = 0
time_base_duration = (time_max - time_min).total_seconds() - time_base_start
time_base = np.arange(start=time_base_start, stop=time_base_duration, step=1.0 / FS)
for crrt_IMU in list_IMUs_for_plot:
print("Look at instrument {}".format(crrt_IMU))
acc_D_filtered = band_pass_filter.filter_data(dict_data_loaded_IMU[crrt_IMU].D)
acc_N_filtered = band_pass_filter.filter_data(dict_data_loaded_IMU[crrt_IMU].N)
acc_E_filtered = band_pass_filter.filter_data(dict_data_loaded_IMU[crrt_IMU].E)
acc_X_filtered = band_pass_filter.filter_data(dict_data_loaded_IMU[crrt_IMU].X)
acc_Y_filtered = band_pass_filter.filter_data(dict_data_loaded_IMU[crrt_IMU].Y)
acc_Z_filtered = band_pass_filter.filter_data(dict_data_loaded_IMU[crrt_IMU].Z)
Yaw_filtered = band_pass_filter.filter_data(dict_data_loaded_IMU[crrt_IMU].Yaw)
Pitch_filtered = band_pass_filter.filter_data(dict_data_loaded_IMU[crrt_IMU].Pitch)
Roll_filtered = band_pass_filter.filter_data(dict_data_loaded_IMU[crrt_IMU].Roll)
time_sec_since_time_min = []
for crrt_timestamp in dict_data_loaded_IMU[crrt_IMU].T:
time_sec_since_time_min.append((crrt_timestamp - time_min).total_seconds())
time_sec_since_time_min = np.array(time_sec_since_time_min)
delta_time = time_sec_since_time_min[1:] - time_sec_since_time_min[0:-1]
delta_time_anomaly = delta_time - 0.1
missing_points = np.where(delta_time_anomaly > 0.06)
number_missing_points = np.sum(delta_time_anomaly[missing_points])
total_number_points = time_sec_since_time_min.size
percentage_missing_points = number_missing_points * 100.0 / total_number_points
print("percentage missing points: {}".format(percentage_missing_points))
acc_D_filtered_resampled = np.interp(time_base, time_sec_since_time_min, acc_D_filtered)
acc_N_filtered_resampled = np.interp(time_base, time_sec_since_time_min, acc_N_filtered)
acc_E_filtered_resampled = np.interp(time_base, time_sec_since_time_min, acc_E_filtered)
acc_X_filtered_resampled = np.interp(time_base, time_sec_since_time_min, acc_X_filtered)
acc_Y_filtered_resampled = np.interp(time_base, time_sec_since_time_min, acc_Y_filtered)
acc_Z_filtered_resampled = np.interp(time_base, time_sec_since_time_min, acc_Z_filtered)
Yaw_filtered_resampled = np.interp(time_base, time_sec_since_time_min, Yaw_filtered)
Pitch_filtered_resampled = np.interp(time_base, time_sec_since_time_min, Pitch_filtered)
Roll_filtered_resampled = np.interp(time_base, time_sec_since_time_min, Roll_filtered)
plt.figure()
plt.plot(time_sec_since_time_min, acc_D_filtered, label="filtered D")
plt.plot(time_base, acc_D_filtered_resampled, label="filtered resampled D")
plt.plot(time_base, acc_N_filtered_resampled, label="filtered resampled N")
plt.plot(time_base, acc_E_filtered_resampled, label="filtered resampled E")
# plt.xlim([3600 * 10 - 120, 3600 * 10 + 120])
ampl_acc_plot = 0.01
# plt.ylim([-ampl_acc_plot, ampl_acc_plot])
plt.legend()
plt.tight_layout()
plt.show()
plt.figure()
plt.plot(time_sec_since_time_min, acc_D_filtered, label="filtered D")
plt.plot(time_base, acc_X_filtered_resampled, label="filtered resampled X")
plt.plot(time_base, acc_Y_filtered_resampled, label="filtered resampled Y")
plt.plot(time_base, acc_Z_filtered_resampled, label="filtered resampled Z")
# plt.xlim([3600 * 10 - 120, 3600 * 10 + 120])
ampl_acc_plot = 0.01
# plt.ylim([-ampl_acc_plot, ampl_acc_plot])
plt.legend()
plt.tight_layout()
plt.show()
plt.figure()
plt.plot(time_sec_since_time_min, acc_D_filtered, label="filtered D")
plt.plot(time_base, Yaw_filtered_resampled, label="filtered resampled Yaw")
plt.plot(time_base, Pitch_filtered_resampled, label="filtered resampled Pitch")
plt.plot(time_base, Roll_filtered_resampled, label="filtered resampled Roll")
# plt.xlim([3600 * 10 - 120, 3600 * 10 + 120])
ampl_acc_plot = 2
# plt.ylim([-ampl_acc_plot, ampl_acc_plot])
plt.legend()
plt.tight_layout()
plt.show()
# TODO: add quality check figure yaw pitch roll
data_IMU_filtered_resampled = np.zeros((time_base.size, 10))
data_IMU_filtered_resampled[:, 0] = time_base
data_IMU_filtered_resampled[:, 1] = acc_X_filtered_resampled
data_IMU_filtered_resampled[:, 2] = acc_Y_filtered_resampled
data_IMU_filtered_resampled[:, 3] = acc_Z_filtered_resampled
data_IMU_filtered_resampled[:, 4] = acc_N_filtered_resampled
data_IMU_filtered_resampled[:, 5] = acc_E_filtered_resampled
data_IMU_filtered_resampled[:, 6] = acc_D_filtered_resampled
data_IMU_filtered_resampled[:, 7] = Yaw_filtered_resampled
data_IMU_filtered_resampled[:, 8] = Pitch_filtered_resampled
data_IMU_filtered_resampled[:, 9] = Roll_filtered_resampled
# TODO: add yaw pitch roll
crrt_file_save = path_output + "CSV_DATA_" + str(crrt_IMU) + ".csv"
header = "Seconds_since_{} ACC_X ACC_Y ACC_Z ACC_N ACC_E ACC_D YAW PITCH ROLL".format(str_time_min)
np.savetxt(crrt_file_save, data_IMU_filtered_resampled, header=header)
end = True
```
|
{
"source": "jerabaul29/logging_ultrasonic_gauges",
"score": 3
}
|
#### File: logging_ultrasonic_gauges/log_gauges/log_gauges.py
```python
from __future__ import division
from __future__ import print_function
import glob
import struct
import time
import serial
import numpy as np
from datetime import datetime
import os
from matplotlib import animation
import matplotlib.pyplot as plt
import multiprocessing
# // define all functions /////////////////////////////////////////////////////
def look_for_available_ports(verbose=0):
"""Find available serial ports that can potentially be Arduino cards.
"""
available_ports = glob.glob('/dev/ttyACM*')
if verbose > 0:
print("Available porst: ")
print(available_ports)
return available_ports
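# Hypothetical usage sketch (baud rate and timeout are assumptions, not taken from this script):
# ports = look_for_available_ports(verbose=1)
# if ports:
#     arduino_port = serial.Serial(ports[0], baudrate=57600, timeout=1.0)
#     reader = ReadFromArduino(arduino_port, verbose=1)
#     reader.read_continuously()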
def get_time_micros():
return(int(round(time.time() * 1000000)))
def get_time_millis():
return(int(round(time.time() * 1000)))
def get_time_seconds():
return(int(round(time.time() * 1)))
def print_values(print_function, times, measurements, number, ID):
"""Print the logged values and timestamps obtained by the program from the
Arduino logger."""
print_function("")
print_function("Logger ID: " + str(ID))
print_function("Measurement number: " + str(number))
for crrt_measurement, crrt_time in zip(measurements, times):
print_function("%4i - uS %10i" % (crrt_measurement, crrt_time))
# list_colors should agree with the keys of bcolor_print function under
list_colors = ['OKBLUE', 'OKGREEN', 'WARNING', 'FAIL']
def bcolor_print(string_in, bcolor='WARNING'):
"""note: maybe this would be better with clearer color names. Color names are
the keys of dict_colors."""
dict_colors = {'HEADER': '\033[95m',
'OKBLUE': '\033[94m',
'OKGREEN': '\033[92m',
'WARNING': '\033[93m',
'FAIL': '\033[91m',
'ENDC': '\033[0m',
'BOLD': '\033[1m',
'UNDERLINE': '\033[4m'}
print(dict_colors[bcolor] + string_in + dict_colors['ENDC'])
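# Illustrative call (not from the original script): print a message in red.
# bcolor_print("logging stopped", bcolor='FAIL')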
class ReadFromArduino(object):
"""A class to read the serial messages from Arduino."""
def __init__(self, port, SIZE_STRUCT=29, verbose=0, print_color='ENDC', nbr_points_animate_plot=2000, filename=None, nbr_gauges=4, refresh_rate=0.050):
self.port = port
self.uS_last_measurement = get_time_micros()
self.SIZE_STRUCT = SIZE_STRUCT
self.verbose = verbose
self.latest_values = ()
self.t_reference = get_time_micros()
self.time_since_start_logging_uS = 0
self.time_elapsed_uS = 0
self.logged_data = []
self.utc_time_start = 0
self.utc_time_finish = 0
self.print_color = print_color
self.read_and_plot_status = -1
self.current_logged_data = [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]
self.nbr_points_animate_plot = nbr_points_animate_plot
self.filename = filename
self.crrt_file = None
self.nbr_gauges = nbr_gauges
self.fig = None
self.ax = None
self.mode_interactive_plot = None
self.refresh_rate = refresh_rate
self.latest_measurement_utc = None
self.port.flushInput()
def read_next(self):
"""Read the next serial message from the Arduino: eiter wait, or a full
data structure. Return a flag about which kind of result was obtained."""
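# Reading aid on the wire format (derived from the unpack call and the header
# built in read_and_plot below): each sample is framed as the ASCII byte 'S',
# followed by the packed struct, followed by 'E'. The struct layout '<IIIIhhhhIB' is
#   4 x uint32 : Arduino timestamps, one per gauge (uS)
#   4 x int16  : raw ADC readings, one per gauge
#   1 x uint32 : measurement number
#   1 x uint8  : logger ID
# i.e. 4*4 + 4*2 + 4 + 1 = 29 bytes, which is why SIZE_STRUCT defaults to 29.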
myByte = self.port.read(1)
def print_in_color(string_in):
bcolor_print(string_in, self.print_color)
if myByte == 'S': # start of a struct packet
data = self.port.read(self.SIZE_STRUCT) # read a whole structure in length
myByte = self.port.read(1)
if myByte == 'E': # check if it actually were a struct packet
current_time_uS = get_time_micros()
self.time_since_start_logging_uS = (current_time_uS - self.t_reference)
self.time_elapsed_uS = current_time_uS - self.uS_last_measurement
self.uS_last_measurement = current_time_uS
# is a valid message struct: unpack the data
self.latest_values = list(struct.unpack('<IIIIhhhhIB', data))
if self.verbose > 1:
print("Python time elapsed since start logging (uS): " + str(self.time_since_start_logging_uS))
print("Python time elapsed since start logging (S): " + str(int(round(self.time_since_start_logging_uS) / 1000000)))
print("Python time elapsed since last logging (uS): " + str(self.time_elapsed_uS))
if self.verbose > 0:
print_values(print_in_color, self.latest_values[0:4], self.latest_values[4:8], self.latest_values[8], self.latest_values[9])
return('V') # valid flag
else:
return('E') # error: got the beginning of a struct, but not the end of it
elif myByte == 'W':
return('W') # wait flag
else:
return('M') # misaligned
return('T') # no input flag
def read_continuously(self, timeout_S=None):
"""Log continuously, with serial plotting on terminal but no plotting."""
if self.verbose > 0:
bcolor_print("start read_continuously")
continue_logging = True
logging = False
self.logged_data = []
time_start = get_time_seconds()
while continue_logging:
return_flag = self.read_next()
# take care of error flags
if return_flag == 'E':
bcolor_print("received flag E: error, found a Start but no End of struct")
pass
elif return_flag == 'M':
pass
# bcolor_print("received flag M: missaligned input")
elif return_flag == 'T':
bcolor_print("received flag T: serial read timeout")
pass
# take care of normal program execution
if logging is False and return_flag == 'V':
logging = True
self.utc_time_start = datetime.utcnow()
self.logged_data.append(self.latest_values)
bcolor_print("start logging")
elif logging and return_flag == 'V':
self.logged_data.append(self.latest_values)
elif logging and return_flag == 'W':
logging = False
self.utc_time_finish = datetime.utcnow()
continue_logging = False
bcolor_print("done logging")
# take care of function timeout
if timeout_S is not None:
if (get_time_seconds() - time_start) > timeout_S:
bcolor_print("read_continuously timeout: stop logging")
continue_logging = False
def read_and_plot(self, timeout_S=None):
"""Log all messages to be processed, until near exhaustion of serial port
data, and prepare the data for real-time plotting."""
if self.read_and_plot_status < 0:
self.read_and_plot_status += 1
if self.verbose > 0:
bcolor_print("start read_and_plot: current status " + str(self.read_and_plot_status))
continue_logging = True
logging = False
if self.read_and_plot_status == 1:
logging = True
while continue_logging and self.port.in_waiting > self.SIZE_STRUCT + 2: # +2 is to make sure we will have the S and E flags
self.latest_measurement_utc = datetime.utcnow()
# if necessary, pop out some of the data to plot to keep it at max self.nbr_points_animate_plot
while len(self.current_logged_data) > self.nbr_points_animate_plot:
self.current_logged_data.pop(0)
return_flag = self.read_next()
# take care of error flags
if return_flag == 'E':
bcolor_print("received flag E: error, found a Start but no End of struct")
pass
elif return_flag == 'M':
pass
# bcolor_print("received flag M: missaligned input")
elif return_flag == 'T':
bcolor_print("received flag T: serial read timeout")
pass
# take care of normal program execution
if logging is False and return_flag == 'V':
# update the logging and status flags
logging = True
self.read_and_plot_status = 1
self.utc_time_start = datetime.utcnow()
self.current_logged_data.append(self.latest_values)
self.logged_data.append(self.latest_values)
if self.crrt_file is not None:
self.crrt_file.write("Computer UTC timestamp start logging: ")
self.crrt_file.write(str(self.utc_time_start))
self.crrt_file.write('\n')
# generate the header
header = ""
for ind_gauge in range(self.nbr_gauges):
header += "Arduino time Gauge " + str(ind_gauge) + " (uS)"
header += " | "
for ind_gauge in range(self.nbr_gauges):
header += "Gauge " + str(ind_gauge) + " (raw ADC)"
header += " | "
header += "Measurement nbr"
header += " | "
header += "Logger ID\n"
self.crrt_file.write(header)
self.crrt_file.write(str(self.latest_values)[1:-1])
self.crrt_file.write('\n')
bcolor_print("start logging")
elif logging and return_flag == 'V':
self.current_logged_data.append(self.latest_values)
self.logged_data.append(self.latest_values)
if self.crrt_file is not None:
self.crrt_file.write(str(self.latest_values)[1:-1])
self.crrt_file.write('\n')
elif logging and return_flag == 'W':
logging = False
continue_logging = False
self.read_and_plot_status = 2
self.utc_time_finish = datetime.utcnow()
if self.crrt_file is not None:
self.crrt_file.write("Computer UTC timestamp finished logging: ")
self.crrt_file.write(str(self.utc_time_finish))
bcolor_print("done logging")
# generate the frames data
current_logged_data_as_numpy = np.array(self.current_logged_data)
list_plots = []
list_colors = ['k', 'b', 'g', 'r', 'c', 'm', 'y']
for ind_gauge in range(self.nbr_gauges):
crrt_color = list_colors[ind_gauge]
if self.mode_interactive_plot == 'ANIMATE':
self.fig.clear()
list_plots.append(plt.plot(current_logged_data_as_numpy[:, ind_gauge + self.nbr_gauges], color=crrt_color, label='gauge ' + str(ind_gauge)))
elif self.mode_interactive_plot == 'DRAW':
list_plots.append((current_logged_data_as_numpy[:, ind_gauge + self.nbr_gauges], crrt_color, 'gauge ' + str(ind_gauge)))
return(list_plots)
def animate_logging(self):
self.mode_interactive_plot = 'ANIMATE'
self.fig = plt.figure()
self.ax = self.fig.add_subplot(1, 1, 1)
self.ax.set_xlim([0, self.nbr_points_animate_plot])
self.ax.set_ylim([0, 1024])
with open(self.filename, 'w') as self.crrt_file:
anim = animation.FuncAnimation(
self.fig,
self.read_and_plot,
blit=False,
interval=self.refresh_rate * 1000)
plt.show()
def log_and_draw(self):
self.mode_interactive_plot = 'DRAW'
plt.ion()
fig, ax = plt.subplots(1)
ax.set_xlim([0, self.nbr_points_animate_plot])
ax.set_ylim([0, 1024])
averaged_fps = 3
with open(self.filename, 'w') as self.crrt_file:
while self.read_and_plot_status < 2:
crrt_utc = datetime.utcnow()
title_string = ""
if self.read_and_plot_status > -1:
averaged_fps = 0.8 * averaged_fps + 0.2 * 1.0 / (crrt_utc - last_draw_utc).total_seconds()
title_string += str(averaged_fps)[0: 3]
title_string += " averaged fps"
last_draw_utc = crrt_utc
if self.read_and_plot_status > 0:
crrt_time_elapsed_S = (self.latest_measurement_utc - self.utc_time_start).total_seconds()
title_string += " | logging time "
title_string += str(crrt_time_elapsed_S)[0: -4]
title_string += " s"
list_plots = self.read_and_plot()
fig.clear()
for crrt_plot in list_plots:
plt.plot(crrt_plot[0], color=crrt_plot[1], label=crrt_plot[2])
ax.set_xlim([0, self.nbr_points_animate_plot])
ax.set_ylim([0, 1024])
plt.title(title_string)
plt.legend(loc=2)
plt.draw()
plt.pause(self.refresh_rate)
class perform_several_loggings(object):
"""A class to perform several loggins simultaneously."""
def __init__(self, baud_rate=2000000, verbose=0, mode_detect_usb_port='AUTOMATIC', nbr_gauges=4, path_to_save=None, case_name="logging_"):
self.baud_rate = baud_rate
self.dict_logging_instances = {}
self.dict_threads = {}
self.verbose = verbose
self.dict_all_data = {}
self.dict_utc_timestamps = {}
self.mode_detect_usb_port = mode_detect_usb_port
self.nbr_gauges = nbr_gauges
self.path_to_save = path_to_save
self.case_name = case_name
self.list_filenames = []
if self.path_to_save is None:
self.path_to_save = os.getcwd()
all_ports = look_for_available_ports()
nbr_logging = 0
utc_start_logging = datetime.utcnow()
list_usb_ports = []
if mode_detect_usb_port == 'SELECT_PORT':
for crrt_port in all_ports:
print("Showing the output of port: " + crrt_port + " at baud rate " + str(self.baud_rate))
print("-----------------------------")
usb_port = serial.Serial(crrt_port, baudrate=baud_rate, timeout=0.1)
usb_port.flushInput()
for i in range(5):
crrt_char = usb_port.read()
print(crrt_char)
print("-----------------------------")
print("Log this port? [y]es, [n]o")
wait_for_answer = True
while wait_for_answer:
answer = raw_input()
if answer == 'y':
list_usb_ports.append((usb_port, crrt_port))
wait_for_answer = False
elif answer == 'n':
wait_for_answer = False
else:
print("[y]es or [n]o")
# idea here: catch the 'W' wait flags, this is specific to the loggers
elif mode_detect_usb_port == 'AUTOMATIC':
for crrt_port in all_ports:
usb_port = serial.Serial(crrt_port, baudrate=baud_rate, timeout=0.1)
usb_port.flushInput()
for i in range(5):
crrt_char = usb_port.read()
if crrt_char == 'W':
print("Adding " + crrt_port + " to list ports to use")
list_usb_ports.append((usb_port, crrt_port))
break
else:
print("mode_detect_usb_port " + self.mode_detect_usb_port + " is not implemented!")
for crrt_usb_port in list_usb_ports:
# determine the filename to use
filename_crrt = self.path_to_save + "/" + case_name + str(utc_start_logging) + "_" + str(nbr_logging)
filename_crrt = filename_crrt.replace(" ", "_")
filename_crrt = filename_crrt.replace(".", "")
filename_crrt += ".logdat"
print("Using filename: " + filename_crrt)
self.list_filenames.append(filename_crrt)
self.dict_logging_instances[crrt_usb_port[1]] = ReadFromArduino(crrt_usb_port[0], verbose=self.verbose,
print_color=list_colors[nbr_logging],
filename=filename_crrt, nbr_gauges=self.nbr_gauges)
nbr_logging += 1
def perform_logging(self, mode='DRAW'):
print("create all logging instances")
for crrt_logging in self.dict_logging_instances:
if mode == 'ANIMATE':
crrt_thread = multiprocessing.Process(target=self.dict_logging_instances[crrt_logging].animate_logging)
elif mode == 'MINIMAL':
crrt_thread = multiprocessing.Process(target=self.dict_logging_instances[crrt_logging].read_continuously)
elif mode == 'DRAW':
crrt_thread = multiprocessing.Process(target=self.dict_logging_instances[crrt_logging].log_and_draw)
else:
print("mode " + mode + " in perform_several_loggings.perform_logging not implemented")
self.dict_threads[crrt_logging] = crrt_thread
print("start all threads")
for crrt_thread in self.dict_threads:
self.dict_threads[crrt_thread].start()
for crrt_thread in self.dict_threads:
self.dict_threads[crrt_thread].join()
print("joined all threads")
def return_filenames(self):
return(self.list_filenames)
# // use the code //////////////////////////////////////////////////////////////
instance_perform_all_logging = perform_several_loggings(verbose=0, path_to_save='/home/jrlab/Desktop/Data/DataHSVA/')
instance_perform_all_logging.perform_logging()
list_loggedd_filenames = instance_perform_all_logging.return_filenames()
```
|
{
"source": "jerabaul29/OpenMetBuoy-v2021a",
"score": 2
}
|
#### File: legacy_firmware/utils/script_plot_spectra.py
```python
import math
import pickle as pkl
import matplotlib.pyplot as plt
import datetime
import numpy as np
import matplotlib.dates as mdates
import matplotlib.colors as mcolors
from utils import sliding_filter_nsigma
import os
import time
# ------------------------------------------------------------------------------------------
print("***** Put the interpreter in UTC, to make sure no TZ issues")
os.environ["TZ"] = "UTC"
time.tzset()
# ------------------------------------------------------------------------------------------
print("***** configure matplotlib")
plt.rcParams.update({'font.size': 14})
list_colors = list(mcolors.TABLEAU_COLORS)
list_colors.append("w")
list_colors.append("k")
with open("./dict_all_data.pkl", "rb") as fh:
dict_data_each_logger = pkl.load(fh)
list_instruments_with_spectra = dict_data_each_logger.keys()
show_spectrum = "normalized_elevation"
fig, axes = plt.subplots(nrows=len(list_instruments_with_spectra), ncols=1)
for ind, crrt_instrument in enumerate(list_instruments_with_spectra):
val = 100 * len(list_instruments_with_spectra) + 10 + ind + 1
plt.subplot(val)
list_spectra_data = dict_data_each_logger[crrt_instrument]["spectra"]
list_frequencies = list_spectra_data[0].list_frequencies
list_datetimes = [list_spectra_data[0].datetime_fix]
list_spectra = [list_spectra_data[0].list_elevation_energies]
shape_spectrum = (len(list_spectra[0]), )
for crrt_entry in list_spectra_data[1:]:
if (crrt_entry.datetime_fix - list_datetimes[-1] > datetime.timedelta(hours=6)):
list_datetimes.append(list_datetimes[-1] + datetime.timedelta(hours=2))
list_datetimes.append(crrt_entry.datetime_fix - datetime.timedelta(hours=2))
list_spectra.append(np.full(shape_spectrum, np.nan))
list_spectra.append(np.full(shape_spectrum, np.nan))
list_datetimes.append(crrt_entry.datetime_fix)
if show_spectrum == "normalized_elevation":
crrt_spectrum = crrt_entry.list_elevation_energies
list_spectra.append(crrt_spectrum)
pclr = plt.pcolor(list_datetimes, list_frequencies, np.log10(np.transpose(np.array(list_spectra))))
# pclr = plt.pcolor(list_datetimes, list_frequencies, np.log10(np.transpose(np.array(list_spectra))), vmin=vmin_pcolor, vmax=vmax_pcolor)
# plt.xlim([date_start_md, date_end_md])
plt.ylim([0.05, 0.30])
if ind < len(list_instruments_with_spectra)-1:
plt.xticks([])
plt.xticks(rotation=30)
plt.ylabel("f [Hz]\n({})".format(crrt_instrument))
plt.tight_layout()
plt.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.15, 0.02, 0.7])
cbar = fig.colorbar(pclr, cax=cbar_ax)
if show_spectrum == "normalized_elevation":
cbar.set_label('log$_{10}$(S) [m$^2$/Hz]')
plt.savefig("spectra_" + show_spectrum + ".png")
plt.show()
fig, axes = plt.subplots(1, ncols=2)
for ind, crrt_instrument in enumerate(list_instruments_with_spectra):
crrt_color = list_colors[ind]
list_spectra_data = dict_data_each_logger[crrt_instrument]["spectra"]
list_datetimes = [crrt_data.datetime_fix for crrt_data in list_spectra_data]
list_swh = [crrt_data.Hs for crrt_data in list_spectra_data]
list_tp = [crrt_data.Tz for crrt_data in list_spectra_data]
list_spectra = [crrt_data.list_elevation_energies for crrt_data in list_spectra_data]
list_frequencies = [crrt_data.list_frequencies for crrt_data in list_spectra_data]
def compute_spectral_moment(list_frequencies, list_elevation_energies, order):
list_to_integrate = [
math.pow(crrt_freq, order) * crrt_energy for (crrt_freq, crrt_energy) in zip(list_frequencies, list_elevation_energies)
]
moment = np.trapz(list_to_integrate, list_frequencies)
return moment
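# Reading aid: compute_spectral_moment approximates the spectral moment
#   m_n = integral( f**n * S(f) df )
# with the trapezoidal rule. The loop below then uses the standard estimates
#   Hs  = 4 * sqrt(m0)     (significant wave height, [m])
#   f_z = sqrt(m2 / m0)    (mean zero-crossing frequency, [Hz])
# which are compared against the Hs / Tz fields stored for each instrument.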
list_processed_swh = []
list_processed_tp = []
for crrt_entry in list_spectra_data:
m0 = compute_spectral_moment(crrt_entry.list_frequencies, crrt_entry.list_elevation_energies, 0)
m2 = compute_spectral_moment(crrt_entry.list_frequencies, crrt_entry.list_elevation_energies, 2)
m4 = compute_spectral_moment(crrt_entry.list_frequencies, crrt_entry.list_elevation_energies, 4)
list_processed_tp.append(math.sqrt(m2/m0))
list_processed_swh.append(4.0 * math.sqrt(m0))
plt.subplot(121)
plt.plot(list_datetimes, sliding_filter_nsigma(np.array(list_swh), nsigma=2.0), color=crrt_color, marker=".", label="swh {}".format(crrt_instrument))
plt.plot(list_datetimes, sliding_filter_nsigma(0.001 + np.array(list_processed_swh), nsigma=2.0), marker="o", color=crrt_color, label="swh processed")
plt.ylabel("[m]")
plt.subplot(122)
plt.plot(list_datetimes, sliding_filter_nsigma(np.array(list_tp), nsigma=2.0), color=crrt_color, label="fp", marker="+")
plt.plot(list_datetimes, sliding_filter_nsigma(0.001 + np.array(list_processed_tp), nsigma=2.0), color=crrt_color, marker="*", label="fp processed")
plt.ylabel("[Hz]")
plt.subplot(121)
plt.legend(loc="lower left")
plt.xticks(rotation=30)
plt.subplot(122)
plt.legend(loc="lower left")
plt.xticks(rotation=30)
plt.tight_layout()
plt.savefig("swh_tp.png")
plt.show()
list_sliding_averaged_tz = list(sliding_filter_nsigma(np.array(list_tp), nsigma=2.0))
list_sliding_averaged_swh = list(sliding_filter_nsigma(np.array(list_swh), nsigma=2.0))
print()
print()
print("---------- DUMP ALL WAVE INFO START ----------")
print("format: timestamp, tz[Hz], swh[m]")
for crrt_datetime, crrt_tz, crrt_swh in zip(list_datetimes, list_sliding_averaged_tz, list_sliding_averaged_swh):
print(f"{crrt_datetime.isoformat()}, {crrt_tz:4.2f}, {crrt_swh:4.2f}")
print("---------- DONE DUMP ALL WAVE INFO ----------")
print()
print()
```
|
{
"source": "jerabaul29/PaddleAndUltrasonicGauges",
"score": 3
}
|
#### File: PaddleActuator_versionDUE/ComputerSide/CommunicationSerialBinary.py
```python
import serial
import math
import numpy as np
import matplotlib.pyplot as plt
import glob
import time
from StringIO import StringIO
################################################################################
################################################################################
# properties of the protocol
# define the number of bits of the int to be sent as two bytes
# note: this should be 14 bits at max (otherwise, need more than
# 2 bytes with this technique)
NUMBER_OF_BITS = 12
# corresponding mean position of the paddle
MEAN_POSITION = 2**(NUMBER_OF_BITS-1)
################################################################################
################################################################################
# properties of the PID values transfer
# defines the number of digits used in the int mantissa (2 to be enough with one
#byte)
NDIGITS_MANTISSA = 2
# define the shifting in the power of 10 to use for transmission of pid
# constants through serial. 128 to be half of one byte.
SHIFT_POWER_OF_10_PID_CSTTS = 128
################################################################################
################################################################################
# properties of the time loops
# frequency of the control signal (scan rate on old paddle)
FREQUENCY_CONTROL = 500
# number of points to send to the board upon buffer request
# NOTE: CAUTION!! the size of the real buffer to send is bigger: if use two bytes, it will
# be twice as many bytes as this number of points!
# the NUMBER_OF_POINTS_PER_BUFFER should be typically half the HALF_INPUT_BUFFER in the Arduino program
# in practice, take a few points less to avoid issues
NUMBER_OF_POINTS_PER_BUFFER = 2038
################################################################################
################################################################################
# put on off additional outputs
DEBUGGING_FLAG = True
################################################################################
################################################################################
# diverse
ONE_MICRO_SECOND = 0.000001
################################################################################
def look_for_available_ports():
'''
find available serial ports to Arduino
'''
available_ports = glob.glob('/dev/ttyACM*')
return available_ports
def convert_to_list_bits(value_in,type_out=np.uint16):
"""
convert value_in into a numpy array of bits
type_out indicates how many bits are wanted: uint8, uint16
"""
# convert the input into a numpy array if needed and
# choose the level of precision
a = np.array([value_in],dtype=type_out)
# convert to byte vision
b = a.view(np.uint8)
# flip: all less significant bit right
b = np.flipud(b)
# unpack into bits
c = np.unpackbits(b)
return c
def convert_to_two_bytes(value_bit_format):
"""
take a value in bit format (16 bits), extract a NUMBER_OF_BITS bit value out
of it (the less significant bits), and put it in the format corresponding to
the protocol for transmission of one NUMBER_OF_BITS-bit int:
----------------------------------------------------------------------------
16 bits number to be translated in byte1 & byte2
0 *(16-NUMBER_OF_BITS) | [0 or 1]*NUMBER_OF_BITS
Max number of bits to be transmitted on two bytes with this method is 14
byte1:
1 | 0 * (14 - NUMBER_OF_BITS) (empty bits) | [0 or 1] * (NUMBER_OF_BITS - 7) (first bits of the integer to transmit)
byte 2:
0 [0 or 1] * 7 (following bits of the integer to transmit)
convention: most significant bits first
"""
# initialize with zeros
first_byte = np.unpackbits(np.array(0,dtype=np.uint8))
second_byte = np.unpackbits(np.array(0,dtype=np.uint8))
# start of a new packet
first_byte[0] = 1
# data
pivot_1 = 15-NUMBER_OF_BITS
pivot_2 = 16-NUMBER_OF_BITS
first_byte[pivot_1:8] = value_bit_format[pivot_2:9]
second_byte[1:8] = value_bit_format[9:16]
return np.concatenate((np.packbits(first_byte), np.packbits(second_byte)))
def convert_to_protocol_format(value):
"""
convert an int into a couple of
bytes that follow the protocol implementation
"""
bit_array = convert_to_list_bits(value)
bytes_array = convert_to_two_bytes(bit_array)
return bytes_array
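# Worked example (assuming the default NUMBER_OF_BITS = 12): the set point 1234
# is 0b0100_1101_0010, and convert_to_protocol_format(1234) packs it as
#   byte 1: 0b10001001 (0x89) -> marker bit '1', two padding zeros, then the 5
#           most significant payload bits 01001
#   byte 2: 0b01010010 (0x52) -> leading '0', then the 7 least significant
#           payload bits 1010010
# so the receiver can re-align on the marker bit and rebuild 01001|1010010 = 1234.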
"""vectorized version for handling complete buffers directly"""
vectorized_convert_to_protocol_format = np.vectorize(convert_to_protocol_format, otypes=[np.ndarray])
def pid_constant_serial_format(value):
"""
generate value as a couple mantissa (2 digits int) exponent (base 10):
value = mantissa * 10 ** exponent
"""
# power of 10 as an int
power_of_ten = int(math.floor(math.log10(value*10)))
# exponent for PID transmission
# the -NDIGITS_MANTISSA is here because we want a mantissa which is an
# integer with a given number of digits
exponent_serial_transmission = power_of_ten-NDIGITS_MANTISSA
# mantissa
mantissa = int(math.floor(value*10**(-exponent_serial_transmission)))
return mantissa, exponent_serial_transmission
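# Worked example: pid_constant_serial_format(1.5) returns (15, -1), i.e.
# 1.5 = 15 * 10**-1 with a two-digit mantissa (NDIGITS_MANTISSA = 2). Before
# going on the wire the exponent is shifted by SHIFT_POWER_OF_10_PID_CSTTS, so
# -1 is transmitted as the unsigned byte 127 (see send_PID_parameters below).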
def convert_list_feedback(list_feedback):
"""convert a list feedback into a numpy array"""
# remove the first item: comma
list_feedback.pop(0)
# join the list in a string
list_as_string = ''.join(list_feedback)
# generate the string
string_feedback = StringIO(''.join(list_feedback))
# generate as numpy table
numpy_feedback = np.genfromtxt(string_feedback, delimiter=",")
return numpy_feedback
################################################################################
class Paddle_Actuator(object):
"""
class for interacting with the Arduino controlled paddle actuator
"""
serial_ready = False
def set_serial_port(self,serial_port):
"""
sets the serial port to used
"""
self.serial_port = serial_port
self.serial_ready = True
# wait a bit because the board will reboot when connection is established
time.sleep(1)
def connect_to_board(self):
"""
connect to the board on USB
"""
# find available port
port = look_for_available_ports()
# if none stop here
if not port:
print "No board available"
return
# if some, take the first
#print "Using port: "+str(port[0])
port = '/dev/ttyACM0'
print "Using port: "+str(port)
#usb_port = serial.Serial(port,baudrate=57600,timeout=0.5)
usb_port = serial.Serial(port,baudrate=115200,timeout=0.5)
#usb_port = serial.Serial(port,baudrate=47600,timeout=0.5)
usb_port.flushInput()
print "Port imported"
# set port in library
self.set_serial_port(usb_port)
# return the port for external use if needed
return(usb_port)
############################################################################
# all methods for transmitting PID parameters
pid_coefficients_loaded = False
pid_ready = False
def set_PID_parameters(self,kp,ki,kd,sign_actuation):
"""
sets the Kp, Ki, Kd, sign_actuation parameters for the PID controller
sign_actuation is 0 or 1
"""
self.kp = kp
self.ki = ki
self.kd = kd
self.sign_actuation = sign_actuation
self.pid_coefficients_loaded = True
def send_one_PID_parameter(self,mantissa,exponent,parameterKey):
"""
send one PID parameter
"""
self.serial_port.write(parameterKey)
char_received = self.serial_port.read()
if not (char_received == parameterKey):
print "Received: " + str(char_received)
print "Problem transmitting PID parameter!"
#NOTE: do something in that case!!
self.serial_port.write(chr(mantissa))
self.serial_port.write(chr(exponent))
if DEBUGGING_FLAG:
print "Transmitted: "+str(parameterKey)
print "Get back:"
time.sleep(0.1)
while (self.serial_port.inWaiting() > 0):
char_feedback = self.serial_port.read()
if DEBUGGING_FLAG:
print str(char_feedback)
if DEBUGGING_FLAG:
print "Done transmitting"
def send_sign_actuation(self,sign):
"""
send actuation sign
"""
self.serial_port.write('S')
char_received = self.serial_port.read()
if not (char_received == 'S'):
print "Received: " + str(char_received)
print "Problem transmitting PID parameter!"
#NOTE: do something in that case!!
self.serial_port.write(chr(sign))
if DEBUGGING_FLAG:
print "Transmitted: S"
print "Get back:"
time.sleep(0.1)
while (self.serial_port.inWaiting() > 0):
char_feedback = self.serial_port.read()
if DEBUGGING_FLAG:
print str(char_feedback)
if DEBUGGING_FLAG:
print "Done transmitting"
def send_PID_parameters(self):
"""
send all the PID parameters to the Arduino Paddle
"""
if not self.pid_coefficients_loaded:
print "PID coefficients were not given to the software!!"
return False
# to go through the serial, the constants are expressed as:
# coefficient (10 to 99) * 10^(exponent+SHIFT_POWER_OF_10_PID_CSTTS)
# note: transmit exponent + SHIFT_POWER_OF_10_PID_CSTTS to avoid
# uint8 sign problems when going through serial!
mantissa, exponent = pid_constant_serial_format(self.kp)
self.send_one_PID_parameter(mantissa,exponent+SHIFT_POWER_OF_10_PID_CSTTS,'P')
#
mantissa, exponent = pid_constant_serial_format(self.ki)
self.send_one_PID_parameter(mantissa,exponent+SHIFT_POWER_OF_10_PID_CSTTS,'I')
#
mantissa, exponent = pid_constant_serial_format(self.kd)
self.send_one_PID_parameter(mantissa,exponent+SHIFT_POWER_OF_10_PID_CSTTS,'D')
#
# send the sign of actuation
self.send_sign_actuation(self.sign_actuation)
# NOTE: maybe add a check on some feedback from the actuator here?
self.pid_ready = True
return True
############################################################################
# all methods for generation and transmission of buffer
def set_buffer(self,buffer_values):
"""
load numpy array values in the buffer
"""
self.buffer_values = buffer_values
def generate_buffer_as_bytes(self):
"""
generate a bytes version of the buffer, ready to transmit to Arduino
following the communication protocol
"""
self.buffer_as_bytes = vectorized_convert_to_protocol_format(self.buffer_values)
def make_next_buffer_ready(self):
"""
Make the next buffer ready in advance
"""
# compute end of buffer to generate and check that not out of range
self.pointer_end_buffer = self.pointer_position_buffer + NUMBER_OF_POINTS_PER_BUFFER
# -1 to take care of > and 0 starting indexing
if self.pointer_end_buffer > (self.signal_length-1):
print 'Hit end of the signal'
self.end_signal_buffer = True
self.pointer_end_buffer = self.signal_length-1
# generate next buffer in advance
self.set_buffer(self.control_signal[self.pointer_position_buffer:self.pointer_end_buffer])
self.generate_buffer_as_bytes()
# update start of next buffer
self.pointer_position_buffer = self.pointer_position_buffer + NUMBER_OF_POINTS_PER_BUFFER
# next buffer is ready!
self.next_buffer_is_ready = True
def transmit_buffer_bytes_through_serial(self):
"""
send the buffer bytes (must have been computed before) through the
serial port
"""
# for debugging
#print self.buffer_as_bytes.shape
#print self.buffer_as_bytes[0]
for value_to_send in self.buffer_as_bytes:
# print for debugging
#print bytes(value_to_send[0])
#print bytes(value_to_send[1])
# NOTE: bytes(char(x)) returns x as a byte when 0<=x<256
self.serial_port.write(bytes(chr(value_to_send[0])))
self.serial_port.write(bytes(chr(value_to_send[1])))
# we just transmitted next buffer: next buffer is not ready
self.next_buffer_is_ready = False
############################################################################
# all methods for loading the signal to be sent to the board
# the signal to send is stored in an array
def set_signal(self,signal_array):
"""
load a numpy array as a control signal
and add the beginning and ends to have smooth paddle motion
"""
self.control_signal = signal_array
self.add_beginning_signal()
self.add_end_signal()
self.check_signal()
self.signal_length = np.size(self.control_signal)
print 'Length of the signal: '+str(self.signal_length)
self.signal_ready = True
# to prevent effect of a distracted user, start smoothly from mean position
def add_beginning_signal(self):
"""
add a beginning to the signal so that the paddle starts from the mean
position smoothly
"""
#NOTE: to improve if powerful paddle: should then be based on
# acceleration
# how much time to go to mean
# default
#number_points_go_to_zero = FREQUENCY_CONTROL * 10
# reduced for faster testing / actuation with good signal
number_points_go_to_zero = FREQUENCY_CONTROL * 3
# the offset to correct for
excess = self.control_signal[0] - MEAN_POSITION
# how fast go to mean
decay_base = np.linspace(0,number_points_go_to_zero-1,number_points_go_to_zero)
time_constant = 5. / number_points_go_to_zero
decay_array = np.exp(-time_constant*decay_base)
# decaying return to first set point
exponention_decay = excess*(1-decay_array) + MEAN_POSITION
# set first point exact
exponention_decay[0] = np.array(MEAN_POSITION)
# concatenate to control signal
self.control_signal = np.concatenate([exponention_decay,self.control_signal])
# to prevent effect of a distracted user, end smoothly at mean position
def add_end_signal(self):
"""
add an end to the signal so that the paddle comes back to the mean
position smoothly
"""
#NOTE: to improve if powerful paddle: should then be based on
# acceleration
# how much time to go to mean
# default
#number_points_go_to_zero = FREQUENCY_CONTROL * 10
# reduced for faster testing / actuation with good signal
number_points_go_to_zero = FREQUENCY_CONTROL * 3
# the offset to correct for
excess = self.control_signal[-1]-MEAN_POSITION
# how fast go to mean
decay_base = np.linspace(1,number_points_go_to_zero,number_points_go_to_zero)
time_constant = 5. / number_points_go_to_zero
decay_array = np.exp(-time_constant*decay_base)
# decaying return to mean
exponention_decay = excess*decay_array + MEAN_POSITION
# set last point exact
exponention_decay[-1] = np.array(MEAN_POSITION)
# concatenate to control signal
self.control_signal = np.concatenate([self.control_signal,exponention_decay])
def check_signal(self):
"""
perform basic checks on the signal (range, sign, could add acceleration)
"""
if np.min(self.control_signal) < 0:
print "Some negative values in the signal!!"
return False
if np.max(self.control_signal) > (2**NUMBER_OF_BITS-1):
print "Going out of range!!"
return False
if not self.control_signal[0] == MEAN_POSITION:
print "Not starting from mean position!!"
return False
if not self.control_signal[-1] == MEAN_POSITION:
print "Not finishing at mean position!!"
return False
# NOTE: could do other checks: acceleration, mean position etc
# especially if very powerful hydraulic systems
print "Signal checked: valid"
return True
def plot_control_signal(self):
plt.figure()
number_points_control = np.size(self.control_signal)
time = np.linspace(0,number_points_control-1,number_points_control)/FREQUENCY_CONTROL
#
plt.plot(time, self.control_signal)
plt.xlabel('Time (s)')
plt.ylabel('Signal (int value control)')
plt.show(block=True)
############################################################################
# all methods for performing one actuation cycle
def check_ready(self):
"""
Check that everything is ready for starting one actuation
Return True if everything is fine
"""
print "-----------------------------------------------------------------"
print "PERFORM CHECKS"
print "-----------------------------------------------------------------"
if not self.serial_ready:
print "Serial port not set!!"
return False
if not self.pid_coefficients_loaded:
print "PID coefficients not set in Python code!!"
return False
if not self.signal_ready:
print "Signal not ready!!"
return False
#self.serial_port.flushInput()
print 'Check_ready: everything ready'
return True
def perform_setup_and_start(self):
"""
Perform setup of the Arduino controller for next actuation
and start by sending the two first buffers
Return True if everything went fine
"""
print "-----------------------------------------------------------------"
print "PERFORM SETUP LOOP"
print "-----------------------------------------------------------------"
print 'Starting setup...'
# first flush buffer
self.serial_port.flushInput()
# check that the Arduino board is ready to start a new actuation cycle
self.serial_port.write('R')
char_answer = self.serial_port.read()
if not char_answer == 'R':
print "Received: "+str(char_answer)
print 'Arduino is not ready for a new actuation cycle!!'
return False
print 'Ready for starting a new actuation cycle'
# transmit PID parameters
print 'Send PID parameters'
self.send_PID_parameters()
print 'Check that ready to receive first buffer'
# check that board waiting for the first buffer
self.serial_port.write('X')
char_answer = self.serial_port.read()
if not char_answer == 'X':
print 'Arduino is not ready to receive first buffer!!'
return False
print "Initialize lists for processing feedback"
# initialize list for storing feedback
self.dict_feedback = {}
self.dict_feedback["feedback_set_point"] = []
self.dict_feedback["feedback_position"] = []
self.dict_feedback["feedback_control"] = []
self.dict_feedback["feedback_time_ms"] = []
self.dict_feedback["error_msg"] = []
self.dict_feedback["benign_msg"] = []
self.dict_feedback["init_trash"] = []
self.dict_feedback["post_actuation"] = []
self.dict_feedback["post_actuation_total_actuation_time"] = []
self.dict_feedback["post_actuation_number_of_updates"] = []
self.dict_feedback["post_actuation_number_of_loop_calls"] = []
self.dict_feedback["post_actuation_error_msg"] = []
self.dict_feedback["number_of_feedback_send"] = []
print 'Send first double buffer and start actuation'
# initialize the index for the start of the next buffer
self.pointer_position_buffer = 0
# NOTE: In arduino, receive the first two buffers in setup to avoid need
# for flushing buffer B here
# make first buffer ready and send it
self.make_next_buffer_ready()
self.transmit_buffer_bytes_through_serial()
# make the second buffer ready and send it
self.make_next_buffer_ready()
self.transmit_buffer_bytes_through_serial()
# reset buffer to avoid trash effect
#self.serial_port.flushInput()
# NOW ARDUINO CAN END SETUP AND START LOOP
def perform_actuation(self):
"""
core of the actuation, once the actuation has been started through
perform_setup_and_start
"""
# NOTE: feedback implemented using ASCII on the way back
# (Arduino to computer). Not very efficient, but otherwise
# need protocol
print "-----------------------------------------------------------------"
print "PERFORM ACTUATION"
print "-----------------------------------------------------------------"
print "Entering actuation core"
# start actuation
not_finished = True
# flag for logging error message
error_message = False
# flag for end of the signal buffer
self.end_signal_buffer = False
# number of error messages
self.number_error_messages = 0
# number of buffers transmitted
self.number_buffers_transmitted = 2
# number of feedback received
self.number_feedback_ms_received = 0
# read init trash
current_key = "init_trash"
while not_finished:
# check if need to pre generate next buffer and not end of signal yet
if not self.next_buffer_is_ready:
if not self.end_signal_buffer:
print "A: make next buffer ready"
self.make_next_buffer_ready()
# if a char available on serial, process information from arduino
if self.serial_port.inWaiting() > 0:
# read one char at a time
char_read = self.serial_port.read()
#print "*"+str(char_read)
# ignore newline
if char_read == '\n':
pass
# ignore return
elif char_read == '\r':
pass
# feedback set point follows
elif char_read == 'A':
current_key = "feedback_set_point"
self.dict_feedback[current_key].append(',')
error_message = False
# feedback position follows
elif char_read == 'B':
current_key = "feedback_position"
self.dict_feedback[current_key].append(',')
error_message = False
# feedback control follows
elif char_read == 'C':
current_key = "feedback_control"
self.dict_feedback[current_key].append(',')
error_message = False
# feedback time micro seconds follows
elif char_read == 'D':
current_key = "feedback_time_ms"
self.dict_feedback[current_key].append(',')
error_message = False
self.number_feedback_ms_received = self.number_feedback_ms_received + 1
# request buffer: serve buffer if not arrived at the end
elif char_read == 'T' and self.next_buffer_is_ready:
print "A: Transmit pre computed buffer"
# transmit pre computed buffer
self.transmit_buffer_bytes_through_serial()
error_message = False
self.number_buffers_transmitted = self.number_buffers_transmitted + 1
# error message (note: use E in the Arduino program only to say error follows)
elif char_read == 'E':
current_key = "error_msg"
self.dict_feedback[current_key].append(',')
error_message = True
self.number_error_messages = self.number_error_messages + 1
print "---------------------------- !!RECEIVED ERROR MESSAGE!! ----------------------------"
# benign message
elif char_read == 'M':
current_key = "benign_msg"
self.dict_feedback[current_key].append(',')
error_message = False
# check if the board says actuation is finished
elif char_read =='Z':
not_finished = False
error_message = False
# data about a signal to log in the right list
else:
self.dict_feedback[current_key].append(char_read)
if error_message:
#print "Error!!"
pass
print "Finished actuation and feedback logging!"
print "Number of error messages received: "+str(self.number_error_messages)
print "Number of buffers transmitted: "+str(self.number_buffers_transmitted)
# log post actuation information ----------------------------------------------------
# wait a bit to let time to post actuation information to arrive
time.sleep(0.1)
while (self.serial_port.inWaiting() > 0):
## log
#self.dict_feedback["post_actuation"].append(self.serial_port.read())
# read one char at a time
char_read = self.serial_port.read()
# ignore newline
if char_read == '\n':
pass
# ignore return
elif char_read == '\r':
pass
# wait for total actuation time
elif char_read == 'T':
current_key = "post_actuation_total_actuation_time"
self.dict_feedback[current_key].append(',')
error_message = False
elif char_read == 'U':
current_key = "post_actuation_number_of_updates"
self.dict_feedback[current_key].append(',')
error_message = False
elif char_read == 'V':
current_key = "post_actuation_number_of_loop_calls"
self.dict_feedback[current_key].append(',')
error_message = False
elif char_read == 'W':
current_key = "number_of_feedback_send"
self.dict_feedback[current_key].append(',')
error_message = False
# error message (note: use E in the Arduino program only to say error follows)
elif char_read == 'E':
current_key = "post_actuation_error_msg"
self.dict_feedback[current_key].append(',')
error_message = True
self.number_error_messages = self.number_error_messages + 1
print "---------------------------- !!RECEIVED ERROR MESSAGE!! ----------------------------"
# data about a signal to log in the right list
else:
self.dict_feedback[current_key].append(char_read)
if error_message:
print char_read
print "Finished post actuation logging!"
############################################################################
# all diagnosis and post processing methods
def convert_feedback_data(self):
"""
convert feedback into numpy format
"""
self.feedback_set_point = convert_list_feedback(self.dict_feedback["feedback_set_point"])
self.feedback_position = convert_list_feedback(self.dict_feedback["feedback_position"])
self.feedback_control = convert_list_feedback(self.dict_feedback["feedback_control"])
self.feedback_time_ms = convert_list_feedback(self.dict_feedback["feedback_time_ms"])
self.post_actuation_total_actuation_time = convert_list_feedback(self.dict_feedback["post_actuation_total_actuation_time"])
self.post_actuation_number_of_updates = convert_list_feedback(self.dict_feedback["post_actuation_number_of_updates"])
self.post_actuation_number_of_loop_calls = convert_list_feedback(self.dict_feedback["post_actuation_number_of_loop_calls"])
self.number_of_feedback_send = convert_list_feedback(self.dict_feedback["number_of_feedback_send"])
def analyze_performed_actuation(self):
"""
analyze of the feedback data and plot it
"""
print "-----------------------------------------------------------------"
print "FEEDBACK ANALYSIS"
print "-----------------------------------------------------------------"
print "Total actuation time (milli seconds): "+str(self.post_actuation_total_actuation_time)
print "Total number of set point updates: "+str(self.post_actuation_number_of_updates)
print "Total number of loop calls: "+str(self.post_actuation_number_of_loop_calls)
print "Total number of ms feedback send by Arduino: "+str(self.number_of_feedback_send)
print "Corresponding to a theoretical signal duration (s): "+str(self.number_of_feedback_send/20.)
print "Total number of ms feedback received: "+str(self.number_feedback_ms_received)
print "Corresponding to a theoretical signal duration (s): "+str(float(self.number_feedback_ms_received)/20.)
mean_update_time = self.post_actuation_total_actuation_time / self.post_actuation_number_of_updates
print "Mean update time (milli seconds): "+str(mean_update_time)
print "Theory: 2 milli seconds for scan rate 500 Hz"
mean_loop_time = 1000 * self.post_actuation_total_actuation_time / self.post_actuation_number_of_loop_calls
print "Mean loop time (micro seconds): "+str(mean_loop_time)
print "Plot graphical output"
plt.figure(figsize=(20,10))
plt.plot(self.feedback_time_ms*ONE_MICRO_SECOND,self.feedback_set_point,label="set point")
plt.plot(self.feedback_time_ms*ONE_MICRO_SECOND,self.feedback_position,label="position")
#plt.plot(self.feedback_time_ms*ONE_MICRO_SECOND,self.feedback_control,label="control")
plt.xlabel('time (s)')
plt.ylabel('feedback from arduino')
plt.legend(loc=3)
plt.show(block=True)
```
|
{
"source": "jerabaul29/platform-apollo3blue",
"score": 2
}
|
#### File: platform-apollo3blue/builder/main.py
```python
import sys
import os
from os.path import join, isdir
from SCons.Script import AlwaysBuild, Builder, Default, DefaultEnvironment, BUILD_TARGETS
from platform import system
from platformio.project.config import ProjectOptions
env = DefaultEnvironment()
# The Apollo3bluePlatform object from this projects platform.py file.
platform_apollo3blue = env.PioPlatform()
currently_configured_board = env.BoardConfig()
# The project configuration, derived from the projects platform.ini file.
project_config = env.GetProjectConfig()
# A list of all of the specified build targets
all_build_targets = BUILD_TARGETS
# The currently building build target
build_type = env.GetBuildType()
# The env:<NAME> from the projects platformio.ini file
build_environment = env["PIOENV"]
# The options specified in the `build_environment` section of the platform.ini file.
options = env.GetProjectOptions()
# I have found that using the debug_build_flags defaults of
# ["-Og", "-g2", "-ggdb2"] produces some serious
# "Spooky Action At A Distance", specifically the "-Og" flag.
# It seems to happen when calling functions in libmbed-os.a,
# which has not been compiled with the "-Og". So we clear
# out the default values of debug_build_flags and set it to "-g".
debug_build_flags = ProjectOptions.get("env.debug_build_flags")
debug_build_flags.default.clear()
debug_build_flags.default.append("-g")
debug_build_flags.default.append("-ggdb")
# This lets us run the auto-port-detect to find an upload port
# just before we issue the upload command.
def BeforeUpload(target, source, env):
upload_port = env.subst("$UPLOAD_PORT")
if len(upload_port) == 0:
env.AutodetectUploadPort()
upload_actions = [
env.VerboseAction(BeforeUpload, "Looking for upload port..."),
env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE"),
]
upload_flags = []
upload_protocol = env.subst("$UPLOAD_PROTOCOL")
upload_speed = env.subst("$UPLOAD_SPEED")
if upload_protocol == "svl":
if len(upload_speed) == 0:
upload_speed = "921600"
valid_svl_baud = ["57600", "115200", "230400", "460800", "921600"]
if upload_speed not in valid_svl_baud:
sys.stderr.write(
"Error: Invalid SVL baud rate specified: {}. \nSelect one of: {}\n".format(upload_speed, valid_svl_baud)
)
env.Exit(1)
upload_flags = ["$UPLOAD_PORT", "-b", "$UPLOAD_SPEED", "-f", "$SOURCES", "-v"],
elif upload_protocol == "asb":
upload_speed = env.subst("$UPLOAD_SPEED")
if len(upload_speed) == 0:
upload_speed = "115200"
valid_asb_baud = ["115200"]
if upload_speed not in valid_asb_baud:
sys.stderr.write(
"Error: Invalid ASB baud rate specified: {}. \n Select one of: {}\n".format(upload_speed,
valid_asb_baud)
)
env.Exit(1)
upload_flags = [
"--bin", "$SOURCES",
"--load-address-blob", "0x20000",
"--magic-num", "0xCB",
"-o", "${SOURCES}.ASB",
"--version", "0x0",
"--load-address-wired", "0xC000",
"-i", "6",
"--options", "0x1",
"-b", "$UPLOAD_SPEED",
"-port", "$UPLOAD_PORT", "-r", "2", "-v"]
elif upload_protocol.startswith("jlink"):
# ------------------START------------------------
# Code segment borrowed and modified from:
# https://github.com/platformio/platform-atmelsam/blob/798b40a14807e2e9874b2f39c50b0b89781d29ae/builder/main.py#L179
#
# The original code (as well as this project) is distributed under
# an Apache2.0 License: https://www.apache.org/licenses/LICENSE-2.0.html
def __jlink_cmd_script(env, source):
build_dir = env.subst("$BUILD_DIR")
if not isdir(build_dir):
os.makedirs(build_dir)
script_path = join(build_dir, "upload.jlink")
commands = [
"h",
"loadbin %s, %s" % (source, currently_configured_board.get(
"upload.jlink_offset_address")),
"r",
"q"
]
with open(script_path, "w") as fp:
fp.write("\n".join(commands))
return script_path
UPLOADER="JLinkExe"
debug = currently_configured_board.get("debug", {})
if system() == "Windows":
UPLOADER="JLink.exe"
upload_flags = [
"-device", debug.get("jlink_device"),
"-speed", "4000",
"-if", "swd",
"-autoconnect", "1",
"-CommanderScript", '"${__jlink_cmd_script(__env__, SOURCE)}"'
]
env.Replace(
__jlink_cmd_script=__jlink_cmd_script,
UPLOADER=join(platform_apollo3blue.get_package_dir("tool-jlink"), UPLOADER)
)
# -------------------END-------------------------
else:
sys.stderr.write("Error: Unknown Upload Protocol: {}. \nSelect one of: {}\n".format(
upload_protocol,
currently_configured_board.get("upload.protocols")))
env.Exit(1)
# A full list with the available variables
# http://www.scons.org/doc/production/HTML/scons-user.html#app-variables
env.Replace(
AR="arm-none-eabi-ar",
AS="arm-none-eabi-as",
CC="arm-none-eabi-gcc",
CXX="arm-none-eabi-g++",
GDB="arm-none-eabi-gdb",
OBJCOPY="arm-none-eabi-objcopy",
RANLIB="arm-none-eabi-ranlib",
SIZETOOL="arm-none-eabi-size",
ARFLAGS=["rc"],
UPLOADERFLAGS = upload_flags,
UPLOAD_SPEED=upload_speed,
UPLOADCMD="$UPLOADER $UPLOADERFLAGS"
)
env.Append(
ARFLAGS=[],
ASFLAGS=[],
CCFLAGS=[],
CXXFLAGS=[],
LINKFLAGS=[],
CPPDEFINES=[],
LIBS=[],
BUILDERS=dict(
ElfToBin=Builder(
action=" ".join([
"$OBJCOPY",
"-O",
"binary",
"$SOURCES",
"$TARGET"]),
suffix=".bin"
)
)
)
#
# Target: Build executable and linkable firmware
#
target_elf = env.BuildProgram()
#
# Target: Build the .bin file
#
target_bin = env.ElfToBin(join("$BUILD_DIR", "firmware"), target_elf)
#
# Target: Upload firmware
#
upload = env.AddPlatformTarget("upload", target_bin, upload_actions, "Upload")
#
# Target: Define targets
#
Default([target_bin, upload])
```
|
{
"source": "jerabaul29/python_huffman",
"score": 3
}
|
#### File: python_huffman/test/test_example_build_tree.py
```python
from __future__ import print_function
from bitarray import bitarray
import pyhuffman.pyhuffman as pyhuffman
"""
A test case that can also be used as example, about how to build trees.
"""
def test_valid_dicts():
# example of data: frequencies in the alphabet for typical english text
# this data is from: https://stackoverflow.com/questions/11587044/how-can-i-create-a-tree-for-huffman-encoding-and-decoding
freq = [
(8.167, 'a'), (1.492, 'b'), (2.782, 'c'), (4.253, 'd'),
(12.702, 'e'), (2.228, 'f'), (2.015, 'g'), (6.094, 'h'),
(6.966, 'i'), (0.153, 'j'), (0.747, 'k'), (4.025, 'l'),
(2.406, 'm'), (6.749, 'n'), (7.507, 'o'), (1.929, 'p'),
(0.095, 'q'), (5.987, 'r'), (6.327, 's'), (9.056, 't'),
(2.758, 'u'), (1.037, 'v'), (2.365, 'w'), (0.150, 'x'),
(1.974, 'y'), (0.074, 'z')]
# build the Huffman tree, dictionary and reverse dictionary
huffman_tree = pyhuffman.HuffmanTree(frequency_data=freq)
assert len(huffman_tree.huffman_dict.keys()) == 26
valid_dict = {'a': '1110',
'b': '110000',
'c': '01001',
'd': '11111',
'e': '100',
'f': '00100',
'g': '110011',
'h': '0110',
'i': '1011',
'j': '001011011',
'k': '0010111',
'l': '11110',
'm': '00111',
'n': '1010',
'o': '1101',
'p': '110001',
'q': '001011001',
'r': '0101',
's': '0111',
't': '000',
'u': '01000',
'v': '001010',
'w': '00110',
'x': '001011010',
'y': '110010',
'z': '001011000'}
assert huffman_tree.huffman_dict == valid_dict
valid_bitarray_tree_ = {'a': bitarray('1110'),
'b': bitarray('110000'),
'c': bitarray('01001'),
'd': bitarray('11111'),
'e': bitarray('100'),
'f': bitarray('00100'),
'g': bitarray('110011'),
'h': bitarray('0110'),
'i': bitarray('1011'),
'j': bitarray('001011011'),
'k': bitarray('0010111'),
'l': bitarray('11110'),
'm': bitarray('00111'),
'n': bitarray('1010'),
'o': bitarray('1101'),
'p': bitarray('110001'),
'q': bitarray('001011001'),
'r': bitarray('0101'),
's': bitarray('0111'),
't': bitarray('000'),
'u': bitarray('01000'),
'v': bitarray('001010'),
'w': bitarray('00110'),
'x': bitarray('001011010'),
'y': bitarray('110010'),
'z': bitarray('001011000')}
assert huffman_tree.bitarray_dict == valid_bitarray_tree_
test_valid_dicts()
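# Usage sketch (not executed here): the bitarray code dict built above can be fed
# straight to bitarray's encode/decode, e.g.
#   encoded = bitarray()
#   encoded.encode(huffman_tree.bitarray_dict, "hello")
#   decoded = "".join(encoded.decode(huffman_tree.bitarray_dict))
# assuming every character of the message appears in the frequency table.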
```
|
{
"source": "jerabaul29/python_printind",
"score": 3
}
|
#### File: python_printind/tests/test_function.py
```python
from printind.printind_function import printi, printiv
def test_printi_functions():
class TestClass(object):
def __init__(self):
self.some_var = "some_var"
def print_self(self):
printiv(self.some_var)
def f_1(debug=0):
printi('start f_1', debug=debug)
printi('f_1 again!')
f_2()
f_3()
printi('end f_1')
def f_2():
printi('start f_2')
f_3()
printi('f_2, this is the end!')
def f_3():
printi('start f_3')
a = 4
printiv(a)
printi('this is the script')
printi('the script is executing')
f_1()
printi('end of the script')
test_class = TestClass()
test_class.print_self()
if __name__ == "__main__":
test_printi_functions()
```
|
{
"source": "jeradM/openwater",
"score": 2
}
|
#### File: openwater/openwater/ow_http.py
```python
import logging
from typing import TYPE_CHECKING, Callable, Type
from starlette.applications import Starlette
from starlette.endpoints import HTTPEndpoint, WebSocketEndpoint
from starlette.middleware import Middleware
from starlette.middleware.cors import CORSMiddleware
from starlette.staticfiles import StaticFiles
from uvicorn import Server, Config
from openwater.utils.decorator import nonblocking
if TYPE_CHECKING:
from openwater.core import OpenWater
_LOGGER = logging.getLogger(__name__)
async def setup_http(ow: "OpenWater"):
middleware = [Middleware(CORSMiddleware, allow_origins=["*"], allow_methods=["*"])]
app = Starlette(on_shutdown=[ow.stop], middleware=middleware)
setattr(app, "ow", ow)
config = Config(app, host="0.0.0.0")
ow.http = OWHttp(config)
class OWHttp:
def __init__(self, config: Config):
self.config = config
self.app: Starlette = config.app
@nonblocking
def register_endpoint(self, endpoint: Type[HTTPEndpoint]) -> None:
assert hasattr(endpoint, "path"), "Endpoint must define its own path"
self.app.add_route(getattr(endpoint, "path"), endpoint)
@nonblocking
def register_websocket_endpoint(self, endpoint: Type[WebSocketEndpoint]):
assert hasattr(endpoint, "path"), "Endpoint must define its own path"
self.app.add_websocket_route(getattr(endpoint, "path"), endpoint)
@nonblocking
def register_route(self, path: str, endpoint: Callable, **kwargs) -> None:
self.app.add_route(path, endpoint, **kwargs)
@nonblocking
def register_websocket_route(self, path: str, endpoint: Callable, **kwargs) -> None:
self.app.add_websocket_route(path, endpoint, **kwargs)
@nonblocking
def register_static_directory(
self, path: str, dir: str, html: bool = False, name: str = None
) -> None:
self.app.mount(
path=path,
app=StaticFiles(directory=dir, html=html, check_dir=False),
name=name,
)
async def run(self):
_LOGGER.info("Starting OpenWater HTTP server")
server = Server(self.config)
await server.serve()
```
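As a reading aid, here is a minimal sketch of how a plugin can register an endpoint with `OWHttp`. The `PingEndpoint` class and the `/api/ping` path are made up for illustration, but the pattern (a class attribute `path` plus `register_endpoint`) is the one the REST API plugin below relies on.
```python
from starlette.endpoints import HTTPEndpoint
from starlette.requests import Request
from starlette.responses import JSONResponse


class PingEndpoint(HTTPEndpoint):
    # register_endpoint() asserts that the class defines its own path
    path = "/api/ping"

    async def get(self, request: Request) -> JSONResponse:
        return JSONResponse({"pong": True})


def setup(ow):
    # called from a plugin's setup code, once ow.http has been created
    ow.http.register_endpoint(PingEndpoint)
```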
#### File: plugins/rest_api/helpers.py
```python
import datetime
import json
from typing import Any
from starlette.responses import JSONResponse
from openwater.utils.decorator import nonblocking
@nonblocking
def respond(data=None, status_code=200):
return ToDictJSONResponse(content=data, status_code=status_code)
class ToDictJSONEncoder(json.JSONEncoder):
def default(self, o: Any) -> Any:
try:
if isinstance(o, list):
return [i.to_dict() for i in o]
if isinstance(o, datetime.datetime):
return o.isoformat()
if isinstance(o, datetime.date):
return str(o)
return o.to_dict()
except AttributeError:
return json.JSONEncoder.default(self, o)
class ToDictJSONResponse(JSONResponse):
def render(self, content: Any) -> bytes:
return json.dumps(
content,
ensure_ascii=False,
allow_nan=False,
indent=None,
separators=(",", ":"),
cls=ToDictJSONEncoder,
).encode("utf-8")
```
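The encoder above falls back to `to_dict()` for any object the standard JSON encoder cannot serialize, which is what lets the endpoints below pass model objects (or lists of them) straight to `respond()`. A minimal sketch of that behaviour, using a hypothetical `FakeZone` class that is not part of the openwater code base:
```python
from dataclasses import dataclass

from openwater.plugins.rest_api.helpers import respond


@dataclass
class FakeZone:
    id: int
    name: str

    def to_dict(self):
        # any object exposing to_dict() can be handed to respond()
        return {"id": self.id, "name": self.name}


# a single object, or a list of them, serializes through ToDictJSONEncoder
response = respond([FakeZone(1, "front lawn"), FakeZone(2, "garden")])
# response.body == b'[{"id":1,"name":"front lawn"},{"id":2,"name":"garden"}]'
```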
#### File: plugins/rest_api/schedule.py
```python
import logging
from json.decoder import JSONDecodeError
from typing import TYPE_CHECKING
from starlette.endpoints import HTTPEndpoint
from starlette.requests import Request
from starlette.responses import Response
from starlette.status import *
from openwater.errors import ScheduleValidationException
from openwater.plugins.rest_api.helpers import respond
from openwater.schedule.model import ScheduleType
if TYPE_CHECKING:
from openwater.core import OpenWater
_LOGGER = logging.getLogger(__name__)
def register_endpoints(ow: "OpenWater") -> None:
ow.http.register_endpoint(ScheduleEndpoint)
ow.http.register_endpoint(SchedulesEndpoint)
ow.http.register_route("/api/schedule_types", get_schedule_types, methods=["GET"])
class ScheduleEndpoint(HTTPEndpoint):
path = "/api/schedules/{id:int}"
async def get(self, request: Request) -> Response:
ow: "OpenWater" = request.app.ow
schedule = ow.schedules.store.get(request.path_params["id"])
if not schedule:
return respond(status_code=HTTP_404_NOT_FOUND)
return respond(schedule)
async def put(self, request: Request) -> Response:
ow: "OpenWater" = request.app.ow
data: dict = await request.json()
try:
status_code = HTTP_200_OK
if not await ow.schedules.store.update(data):
status_code = HTTP_400_BAD_REQUEST
return respond(status_code=status_code)
except ScheduleValidationException as e:
return respond({"errors": e.errors}, HTTP_400_BAD_REQUEST)
except Exception as e:
return respond({"error": e.args[0]}, HTTP_400_BAD_REQUEST)
class SchedulesEndpoint(HTTPEndpoint):
path = "/api/schedules"
async def get(self, request: Request) -> Response:
ow: "OpenWater" = request.app.ow
return respond(ow.schedules.store.all)
async def post(self, request: Request) -> Response:
"""
description: Create a new schedule
responses:
200:
description: Successfully created schedule
400:
description: Invalid data sent
"""
ow: "OpenWater" = request.app.ow
try:
data = await request.json()
zone = await ow.schedules.store.create(data)
return respond(zone)
except JSONDecodeError as e:
return respond({"error": e.msg}, HTTP_400_BAD_REQUEST)
except ScheduleValidationException as e:
return respond({"errors": e.errors}, HTTP_400_BAD_REQUEST)
async def get_schedule_types(request: Request) -> Response:
return respond([st.value for st in ScheduleType])
# async def get_schedules_for_program(request: Request) -> Response:
# ow: "OpenWater" = request.app.ow
# id_ = request.path_params["id"]
# return respond(await ow.programs.store.get_schedules(id_))
```
#### File: plugins/rest_api/zone.py
```python
import logging
import sys
from typing import TYPE_CHECKING, Callable
from starlette.endpoints import HTTPEndpoint
from starlette.requests import Request
from openwater.errors import ZoneException, ZoneValidationException
from openwater.plugins.rest_api.helpers import ToDictJSONResponse, respond
if TYPE_CHECKING:
from openwater.core import OpenWater
_LOGGER = logging.getLogger(__name__)
def register_endpoints(ow: "OpenWater"):
ow.http.register_endpoint(Zone)
ow.http.register_route("/api/zones", create_zone, methods=["POST"])
ow.http.register_route("/api/zones", get_zones, methods=["GET"])
ow.http.register_route(
"/api/zones/{zone_id:int}/{cmd:str}", zone_cmd, methods=["POST"]
)
class Zone(HTTPEndpoint):
path = "/api/zones/{zone_id:int}"
async def get(self, request: Request):
"""
description: Get a zone by id
parameters:
- in: path
name: zone_id:int
description: Numeric id of requested zone
required: true
schema:
type: integer
responses:
200:
description: A zone
"""
ow: "OpenWater" = request.app.ow
zone = ow.zones.store.get(request.path_params["zone_id"])
return respond(zone)
async def put(self, request):
"""Update an existing zone"""
ow: "OpenWater" = request.app.ow
data = await request.json()
try:
zone = await ow.zones.store.update(data)
except ZoneValidationException as e:
_LOGGER.error("Zone update failed validation")
return respond({"errors": e.errors, "msg": "Invalid zone data"}, 400)
return respond(zone)
async def delete(self, request):
ow: "OpenWater" = request.app.ow
res = await ow.zones.store.delete(request.path_params["zone_id"])
sc = 204 if res else 400
return respond(status_code=sc)
async def get_zones(request):
"""
description: Get a list of all zones
responses:
200:
description: A list of zones.
"""
ow: "OpenWater" = request.app.ow
return respond(ow.zones.store.all)
async def create_zone(request: Request):
"""
description: Create a new zone
responses:
200:
description: Successfully created zone
400:
description: Invalid data sent
"""
ow: "OpenWater" = request.app.ow
data = await request.json()
try:
zone = await ow.zones.store.create(data)
except ZoneValidationException as e:
_LOGGER.error("Create zone failed validation")
return ToDictJSONResponse(
{"errors": e.errors, "msg": "Invalid zone data"}, status_code=400,
)
return respond(zone)
async def zone_cmd(request: Request):
"""
description: Execute a zone command
parameters:
- in: path
name: zone_id:int
description: Numeric id of target zone
required: true
schema:
type: integer
- in: path
name: cmd:str
description: the command to execute
required: true
schema:
type: string
responses:
200:
description: successfully executed command
500:
description: unable to execute given command
"""
ow: "OpenWater" = request.app.ow
zone_id = request.path_params["zone_id"]
cmd = request.path_params["cmd"]
try:
if cmd == "on":
await ow.zones.controller.open_zone(zone_id)
elif cmd == "off":
await ow.zones.controller.close_zone(zone_id)
else:
raise ZoneException("Unknown zone command: {}".format(cmd))
return respond(status_code=200)
except ZoneException as e:
_LOGGER.error(e)
return respond(status_code=500)
```
#### File: plugins/shift_register/__init__.py
```python
from typing import TYPE_CHECKING, Optional
from cerberus import Validator
from openwater.constants import EVENT_ZONE_STATE
from openwater.errors import ZoneException, ZoneValidationException
from openwater.plugins.gpio import DATA_GPIO, OWGpio
from openwater.zone.model import BaseZone
if TYPE_CHECKING:
from openwater.core import OpenWater
DATA_SHIFT_REGISTER = "SHIFT_REGISTER"
ZONE_TYPE_SHIFT_REGISTER = "SHIFT_REGISTER"
def setup_plugin(ow: "OpenWater", config: Optional[dict] = None):
    # Avoid a mutable default argument and tolerate a missing gpio entry.
    config = config or {}
    gpio: Optional[OWGpio] = ow.data.get(DATA_GPIO)
if not gpio:
raise ZoneException("Required plugin(s) not enabled: {}", ["gpio"])
num_reg = config.get("num_reg", 8)
data_pin = config.get("data_pin", 0)
clock_pin = config.get("clock_pin", 0)
oe_pin = config.get("oe_pin", 0)
latch_pin = config.get("latch_pin", 0)
if not (data_pin and clock_pin and oe_pin and latch_pin):
raise ZoneException("Must define all required shift register pins")
gpio.set_output([data_pin, clock_pin, oe_pin, latch_pin])
active_high = config.get("active_high", True)
sr = ShiftRegister(
ow, gpio, data_pin, clock_pin, oe_pin, latch_pin, num_reg, active_high
)
ow.data[DATA_SHIFT_REGISTER] = sr
ow.zones.registry.register_zone_type(
ZONE_TYPE_SHIFT_REGISTER, ShiftRegisterZone, create_zone
)
def create_zone(ow: "OpenWater", zone_data: dict) -> "ShiftRegisterZone":
attr_schema = {"sr_idx": {"type": "integer", "required": True}}
v: Validator = Validator(attr_schema)
v.allow_unknown = True
if not v.validate(zone_data["attrs"]):
raise ZoneValidationException("ShiftRegisterZone validation failed", v.errors)
return ShiftRegisterZone(ow, **zone_data)
class ShiftRegisterZone(BaseZone):
ATTR_SCHEMA = {"sr_idx": {"type": "integer", "required": True}}
def __init__(self, ow: "OpenWater", **kwargs: dict):
super().__init__(ow=ow, **kwargs)
self._sr: "ShiftRegister" = ow.data[DATA_SHIFT_REGISTER]
self._sr_idx = kwargs.get("attrs").get("sr_idx")
@property
def extra_attrs(self):
return {"sr_idx": self._sr_idx}
def is_open(self) -> bool:
return bool(self._sr.get_reg_status(self._sr_idx))
async def open(self) -> None:
await self._sr.async_turn_on(self._sr_idx)
self._ow.bus.fire(EVENT_ZONE_STATE, self)
async def close(self) -> None:
await self._sr.async_turn_off(self._sr_idx)
self._ow.bus.fire(EVENT_ZONE_STATE, self)
def get_zone_type(self) -> str:
return "SHIFT_REGISTER"
@staticmethod
def get_additional_config(ow: "OpenWater"):
sr = ow.data[DATA_SHIFT_REGISTER]
num_reg = sr.num_regs
return {
"sr_idx": {
"type": "select",
"label": "Physical Shift Register Index",
"options": list(range(num_reg)),
}
}
class ShiftRegister:
def __init__(
self,
ow: "OpenWater",
gpio: "OWGpio",
data_pin: int,
clock_pin: int,
oe_pin: int,
latch_pin: int,
num_regs: int,
active_high: bool = True,
):
self.ow = ow
self.g = gpio
self.data_pin = data_pin
self.clock_pin = clock_pin
self.oe_pin = oe_pin
self.latch_pin = latch_pin
self.num_regs = num_regs
self.active_high = active_high
self._reg_mask = 0
def write_registers(self) -> None:
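        # Shift the cached mask out one bit per clock pulse, starting from
        # register 0, then raise the latch so every output updates at once.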
self.g.low([self.clock_pin, self.latch_pin])
for reg in range(self.num_regs):
self.g.low(self.clock_pin)
if 1 & (self._reg_mask >> reg):
self.g.high(self.data_pin)
else:
self.g.low(self.data_pin)
self.g.high(self.clock_pin)
self.g.high(self.latch_pin)
async def async_write_registers(self) -> None:
await self.g.async_low([self.clock_pin, self.latch_pin])
for reg in range(self.num_regs):
await self.g.async_low(self.clock_pin)
if 1 & (self._reg_mask >> reg):
await self.g.async_high(self.data_pin)
else:
await self.g.async_low(self.data_pin)
await self.g.async_high(self.clock_pin)
await self.g.async_high(self.latch_pin)
def turn_on(self, reg: int) -> None:
if reg > self.num_regs - 1:
raise ZoneException(
"Attempted to turn on register {}, but SR only has {} registers",
reg,
self.num_regs,
)
        if self.active_high:
            self._reg_mask |= 1 << reg
        else:
            # Active-low: "on" means driving the output low, so clear the bit
            # (idempotent, unlike toggling with XOR).
            self._reg_mask &= ~(1 << reg)
self.write_registers()
async def async_turn_on(self, reg: int) -> None:
if reg > self.num_regs - 1:
raise ZoneException(
"Attempted to turn on register {}, but SR only has {} registers",
reg,
self.num_regs,
)
        if self.active_high:
            self._reg_mask |= 1 << reg
        else:
            self._reg_mask &= ~(1 << reg)
await self.async_write_registers()
def turn_off(self, reg: int) -> None:
if reg > self.num_regs - 1:
raise ZoneException(
"Attempted to turn off register {}, but SR only has {} registers",
reg,
self.num_regs,
)
        if self.active_high:
            # Clear the bit rather than toggling it, so turning off an
            # already-off register is a no-op.
            self._reg_mask &= ~(1 << reg)
        else:
            self._reg_mask |= 1 << reg
self.write_registers()
async def async_turn_off(self, reg: int) -> None:
if reg > self.num_regs - 1:
raise ZoneException(
"Attempted to turn off register {}, but SR only has {} registers",
reg,
self.num_regs,
)
        if self.active_high:
            self._reg_mask &= ~(1 << reg)
        else:
            self._reg_mask |= 1 << reg
await self.async_write_registers()
def disable_output(self) -> None:
self.g.high(self.oe_pin)
async def async_disable_outputs(self):
await self.g.async_high(self.oe_pin)
def get_reg_status(self, reg: int):
return 1 & (self._reg_mask >> reg)
```
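The zone state is tracked as a plain integer bit mask, one bit per register. A standalone sketch of the same arithmetic used by `turn_on`, `turn_off` and `get_reg_status` (active-high case, no hardware or openwater install needed):
```python
# Standalone sketch of the ShiftRegister bit-mask bookkeeping;
# register indices map directly to bit positions.
reg_mask = 0

# turn_on(3): set bit 3
reg_mask |= 1 << 3

# get_reg_status(3) -> 1, get_reg_status(2) -> 0
assert 1 & (reg_mask >> 3) == 1
assert 1 & (reg_mask >> 2) == 0

# turn_off(3): clear bit 3
reg_mask &= ~(1 << 3)
assert reg_mask == 0
```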
#### File: plugins/websocket/handlers.py
```python
from typing import TYPE_CHECKING
from openwater.constants import (
EVENT_ZONE_STATE,
EVENT_PROGRAM_STATE,
EVENT_SCHEDULE_STATE,
)
from openwater.plugins.websocket.response import (
ZonesResponse,
ProgramsResponse,
SchedulesResponse,
)
from openwater.utils.decorator import nonblocking
if TYPE_CHECKING:
from openwater.core import OpenWater, Event
from openwater.plugins.websocket import WebSocketApi
@nonblocking
def setup_handlers(ow: "OpenWater", ws: "WebSocketApi"):
handler = WSEventHandler(ow, ws)
ow.bus.listen(EVENT_ZONE_STATE, handler.zone_state)
ow.bus.listen(EVENT_PROGRAM_STATE, handler.program_state)
ow.bus.listen(EVENT_SCHEDULE_STATE, handler.schedule_state)
class WSEventHandler:
def __init__(self, ow: "OpenWater", ws: "WebSocketApi"):
self._ow = ow
self.ws = ws
@nonblocking
def program_state(self, event: "Event") -> None:
programs = self._ow.programs.store.all
steps = self._ow.programs.store.steps
self.ws.respond(ProgramsResponse(programs, steps))
@nonblocking
def schedule_state(self, event: "Event") -> None:
self.ws.respond(SchedulesResponse(self._ow.schedules.store.all))
@nonblocking
def zone_state(self, event: "Event") -> None:
self.ws.respond(ZonesResponse(self._ow.zones.store.all))
```
#### File: openwater/program/helpers.py
```python
import logging
from typing import TYPE_CHECKING, Collection, Any
from databases.core import Transaction
from openwater.database import model
from openwater.database.model import program_step, program_step_zones
from openwater.errors import OWError
from openwater.program.model import ProgramStep
if TYPE_CHECKING:
from openwater.core import OpenWater
_LOGGER = logging.getLogger(__name__)
async def load_programs(ow: "OpenWater"):
if not ow.db:
raise OWError("OpenWater database not initialized")
steps = [ProgramStep(**dict(s)) for s in await ow.db.list(model.program_step)]
ow.programs.store.set_steps(steps)
step_zones: Any = await ow.db.list(model.program_step_zones)
for step in steps:
step.zones = [
ow.zones.store.get(sz.zone_id) for sz in step_zones if sz.step_id == step.id
]
programs: Any = await ow.db.list(model.program)
for row in programs:
program = dict(row)
program_type = ow.programs.registry.get_program_for_type(
program["program_type"]
)
if program_type is None:
continue
p = program_type.create(ow, program)
ow.programs.store.add(p)
p.steps = [s for s in steps if s.program_id == p.id]
async def insert_program(ow: "OpenWater", data: dict) -> int:
tx = await ow.db.connection.transaction()
steps = data.pop("steps")
res = await ow.db.insert(model.program, data)
for s in steps:
if await insert_step(ow, s, res) == -1:
await tx.rollback()
return 0
await tx.commit()
return res
async def update_program(ow: "OpenWater", data: dict) -> bool:
tx = await ow.db.connection.transaction()
steps = data.pop("steps")
res = await ow.db.update(model.program, data)
if not res:
await tx.rollback()
return False
for s in steps:
if "id" in s:
await update_step(ow, s)
else:
await insert_step(ow, s, data["id"])
await tx.commit()
return True
async def delete_program(ow: "OpenWater", id_: int) -> int:
return await ow.db.delete(model.program, id_)
async def get_program_schedules(ow: "OpenWater", program_id: int) -> Collection[dict]:
conn = ow.db.connection
query = model.schedule.select().where(model.schedule.c.program_id == program_id)
rows = await conn.fetch_all(query)
return [dict(row) for row in rows]
async def insert_steps(ow: "OpenWater", data: list) -> bool:
tx: Transaction = await ow.db.connection.transaction()
for step in data:
res = await insert_step(ow, step)
if res == -1:
await tx.rollback()
return False
await tx.commit()
return True
async def insert_step(ow: "OpenWater", data: dict, program_id: int = None) -> int:
zones = data.pop("zones")
if program_id is not None:
data["program_id"] = program_id
try:
id_ = await ow.db.insert(program_step, data)
for zone in zones:
await ow.db.insert(program_step_zones, {"step_id": id_, "zone_id": zone})
    except Exception as e:
        _LOGGER.error("Error inserting step: %s", e)
        return -1
    return id_
async def update_steps(ow: "OpenWater", data: list) -> bool:
tx: Transaction = await ow.db.connection.transaction()
for step in data:
res = await update_step(ow, step)
if res == -1:
await tx.rollback()
return False
await tx.commit()
return True
async def update_step(ow: "OpenWater", data: dict) -> int:
zones = data.pop("zones")
try:
ins_res = await ow.db.update(program_step, data)
del_res = await ow.db.delete_many(
program_step_zones, program_step_zones.c.step_id == data["id"]
)
if del_res == -1:
raise Exception("Failed to delete existing step_zones")
for zone in zones:
await ow.db.insert(
program_step_zones, {"step_id": data["id"], "zone_id": zone}
)
return ins_res
except Exception as e:
_LOGGER.error(e)
return -1
```
#### File: openwater/program/store.py
```python
from typing import TYPE_CHECKING, Dict, List, Optional
from openwater.constants import EVENT_PROGRAM_STATE
from openwater.errors import ProgramException, ProgramValidationException
from openwater.program.helpers import (
insert_program,
update_program,
delete_program,
)
from openwater.program.model import BaseProgram, ProgramStep
from openwater.program.registry import ProgramRegistry
from openwater.program.validation import validate_program
from openwater.utils.decorator import nonblocking
if TYPE_CHECKING:
from openwater.core import OpenWater
class ProgramStore:
def __init__(self, ow: "OpenWater", registry: ProgramRegistry):
self._ow = ow
self._registry = registry
self.programs_: dict = dict()
self.steps_: dict = dict()
@nonblocking
def to_dict(self):
return {"programs": self.all, "steps": self.steps}
@property
def all(self) -> List[BaseProgram]:
return list(self.programs_.values())
@property
def steps(self) -> List[ProgramStep]:
return list(self.steps_.values())
@nonblocking
def get(self, id_: int) -> Optional[BaseProgram]:
"""Get a program from the store by id"""
return self.programs_.get(id_, None)
@nonblocking
def add(self, program: BaseProgram) -> None:
"""Add a new program to the store"""
self.programs_[program.id] = program
self._ow.bus.fire(EVENT_PROGRAM_STATE, program)
@nonblocking
def remove(self, id_: int) -> None:
"""Remove a program from the store"""
try:
res = self.programs_.pop(id_)
            self._ow.bus.fire(EVENT_PROGRAM_STATE, res)
return res
except KeyError:
return None
async def create(self, data: Dict) -> BaseProgram:
"""Create a new zone and insert database record"""
errors = validate_program(data)
if errors:
raise ProgramValidationException("Program validation failed", errors)
id_ = await insert_program(self._ow, data)
data["id"] = id_
program_type = self._registry.get_program_for_type(data["program_type"])
program = program_type.create(self._ow, data)
self.add(program)
return program
async def update(self, data: Dict) -> BaseProgram:
"""Update an existing program and update database record"""
errors = validate_program(data)
if errors:
raise ProgramValidationException("Program validation failed", errors)
await update_program(self._ow, data)
program_type = self._registry.get_program_for_type(data["program_type"])
program = program_type.create(self._ow, data)
self.add(program)
return program
async def delete(self, program_id: int):
"""Delete a program from the store and remove database record"""
        success = await delete_program(self._ow, program_id)
if success:
self.remove(program_id)
return success
def set_steps(self, steps: List[ProgramStep]) -> None:
self.steps_ = {s.id: s for s in steps}
```
#### File: openwater/utils/decorator.py
```python
from typing import Callable
def blocking(func: Callable):
setattr(func, "_ow_blocking", True)
return func
def is_blocking(func: Callable):
return getattr(func, "_ow_blocking", False) is True
def nonblocking(func: Callable) -> Callable:
setattr(func, "_ow_nonblocking", True)
return func
def is_nonblocking(func: Callable) -> bool:
return getattr(func, "_ow_nonblocking", False) is True
```
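These decorators only tag a function with an attribute; callers inspect the flag later to decide how to schedule the callback. A short sketch, assuming the module is importable as shown and using a hypothetical callback:
```python
from openwater.utils.decorator import nonblocking, is_nonblocking, is_blocking


@nonblocking
def publish_state() -> None:
    """Hypothetical callback used only for illustration."""


print(is_nonblocking(publish_state))  # True
print(is_blocking(publish_state))     # False
```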
#### File: openwater/zone/model.py
```python
from abc import ABC, abstractmethod
from datetime import datetime
from typing import TYPE_CHECKING, Dict, Any, List, Optional
if TYPE_CHECKING:
from openwater.core import OpenWater
class ZoneRun:
def __init__(self, id: int, zone_id: int, start: datetime, duration: int):
self.id = id
self.zone_id = zone_id
self.start = start
self.duration = duration
def to_dict(self) -> dict:
return {
"id": self.id,
"zone_id": self.zone_id,
"start": self.start,
"duration": self.duration,
}
def to_db(self) -> dict:
return self.to_dict()
class BaseZone(ABC):
def __init__(
self,
ow: "OpenWater",
id: int,
name: str,
zone_type: str,
is_master: bool,
attrs: dict,
open_offset: int = 0,
close_offset: int = 0,
last_run: Optional[ZoneRun] = None,
):
self._ow = ow
self.id = id
self.name = name
self.zone_type = zone_type
self.is_master = is_master
self.attrs = attrs
self.open_offset = open_offset
self.close_offset = close_offset
self.last_run = last_run
self.master_zones: Optional[List[BaseZone]] = None
@classmethod
def of(cls, ow: "OpenWater", data: Dict[str, Any]):
return cls(
ow=ow,
id=data.get("id"),
name=data["name"],
zone_type=data["zone_type"],
is_master=data["is_master"],
open_offset=data["open_offset"],
close_offset=data["close_offset"],
attrs=data["attrs"],
)
def to_dict(self):
return {
"id": self.id,
"name": self.name,
"zone_type": self.zone_type,
"is_master": self.is_master,
"open_offset": self.open_offset,
"close_offset": self.close_offset,
"open": self.is_open(),
"attrs": dict(self.attrs, **self.extra_attrs),
"last_run": self.last_run,
"master_zones": self.master_zones,
}
def to_db(self):
return {
"id": self.id,
"name": self.name,
"zone_type": self.zone_type,
"is_master": self.is_master,
"open": self.is_open(),
"attrs": dict(self.attrs, **self.extra_attrs),
}
@abstractmethod
def is_open(self) -> bool:
pass
@abstractmethod
async def open(self) -> None:
pass
@abstractmethod
async def close(self) -> None:
pass
@abstractmethod
def get_zone_type(self) -> str:
pass
@property
def extra_attrs(self) -> dict:
return {}
def __eq__(self, other):
return self.id == other.id
def __hash__(self):
return hash(self.id)
```
#### File: openwater/zone/store.py
```python
from typing import TYPE_CHECKING, Dict, Optional
from openwater.constants import EVENT_ZONE_STATE
from openwater.errors import ZoneException, ZoneValidationException
from openwater.zone.helpers import insert_zone, update_zone, delete_zone
from openwater.zone.model import BaseZone
from openwater.zone.validation import validate_zone, validate_attrs
if TYPE_CHECKING:
from openwater.core import OpenWater
from openwater.zone.registry import ZoneRegistry
class ZoneStore:
def __init__(self, ow: "OpenWater", registry: "ZoneRegistry"):
self._ow = ow
self._registry = registry
self.zones_ = dict()
@property
def all(self):
return list(self.zones_.values())
def get(self, id_: int) -> Optional[BaseZone]:
"""Get a zone from the store by id"""
return self.zones_.get(id_, None)
def add(self, zone: BaseZone):
"""Add a new zone to the store"""
self.zones_[zone.id] = zone
def remove(self, zone: BaseZone):
"""Remove a zone from the store"""
try:
return self.zones_.pop(zone.id)
except KeyError:
return None
async def create(self, data: Dict) -> BaseZone:
"""Create a new zone and insert database record"""
errors = validate_zone(data)
if errors:
raise ZoneValidationException("Zone validation failed", errors)
zone_type = self._registry.get_zone_for_type(data["zone_type"])
errors = {"attrs": validate_attrs(zone_type.cls, data["attrs"])}
if errors["attrs"]:
raise ZoneValidationException("Zone attribute validation failed", errors)
id_ = await insert_zone(self._ow, data)
data["id"] = id_
zone = zone_type.create(self._ow, data)
self.zones_[zone.id] = zone
self._ow.bus.fire(EVENT_ZONE_STATE, zone)
return zone
async def update(self, data: Dict) -> BaseZone:
"""Update an existing zone and update database record"""
errors = validate_zone(data)
if errors:
raise ZoneValidationException("Zone validation failed", errors)
zone_type = self._registry.get_zone_for_type(data["zone_type"])
errors = validate_attrs(zone_type.cls, data["attrs"])
if errors:
raise ZoneValidationException("Zone validation failed", errors)
await update_zone(self._ow, data)
zone = zone_type.create(self._ow, data)
zone_ = self.get(data["id"])
zone.last_run = zone_.last_run
self.zones_[zone.id] = zone
self._ow.bus.fire(EVENT_ZONE_STATE, zone)
return zone
async def delete(self, zone_id: int) -> int:
"""Delete a zone from the store and remove database record"""
result = await delete_zone(self._ow, zone_id)
self.remove(self.get(zone_id))
self._ow.bus.fire(EVENT_ZONE_STATE, {"zone_id": zone_id})
return result
```
|
{
"source": "Jerakin/DefBuild",
"score": 2
}
|
#### File: DefBuild/defbuild/bob.py
```python
import os
import sys
import requests
import logging
import defbuild.versions as versions
class Project:
def __init__(self, config):
self.config = config
self.cache_dir = os.path.join(os.path.expanduser("~"), ".builder", "cache")
self.bob = config.get("config", "bob", fallback="")
def final(self):
self.config.set("config", "bob", self.bob)
with open(os.path.join(self.cache_dir, "session"), 'w') as f:
self.config.write(f)
def exists(project, sha):
target = os.path.join(project.cache_dir, "bob", "bob_{}.jar".format(sha))
if os.path.exists(target):
return target
return
def update(project, sha, force):
target = os.path.join(project.cache_dir, "bob", "bob_{}.jar".format(sha))
bob_exists = exists(project, sha)
if bob_exists and not force:
project.bob = bob_exists
logging.info("Using cached version {}".format(get_version_from_sha(sha)))
else:
download(project.cache_dir, sha)
project.bob = target
logging.info("Bob set to {}".format(get_version_from_sha(sha)))
def download(cache, sha):
    if requests.head("http://d.defold.com/archive/{}/bob/bob.jar".format(sha)).status_code >= 400:
logging.error("Can't find bob version {}".format(sha))
sys.exit(1)
logging.info("Downloading new bob {}".format(get_version_from_sha(sha)))
bob_directory = os.path.join(cache, "bob")
bob_url = "http://d.defold.com/archive/{}/bob/bob.jar".format(sha)
if not os.path.exists(bob_directory):
os.makedirs(bob_directory, exist_ok=True)
target = os.path.join(bob_directory, "bob_{}.jar".format(sha))
r = requests.get(bob_url, stream=True)
with open(target, "wb") as f:
total_size = int(r.headers.get('content-length', 0))
if total_size:
dl = 0
for data in r.iter_content(chunk_size=4096):
dl += len(data)
f.write(data)
# Progressbar
done = int(50 * dl / total_size)
sys.stdout.write("\r[%s%s]" % ('=' * done, ' ' * (50 - done)))
sys.stdout.flush()
sys.stdout.write("\n")
else:
f.write(r.content)
def beta():
return versions.beta()
def get_version_from_sha(sha):
json_data = versions.get(True)
for x in json_data["versions"]:
if x["sha1"] == sha:
return x["version"]
beta_sha, beta_version = beta()
if beta_sha == sha:
logging.info("Using beta version")
return beta_version
return "unknown"
def get_sha_from_version(version):
json_data = versions.get(True)
for x in json_data["versions"]:
if x["version"] == version:
return x["sha1"]
def get_version_from_file_name(file_name):
return get_version_from_sha(file_name.replace(".jar", "").split("bob_")[-1])
```
#### File: DefBuild/defbuild/__init__.py
```python
import os
import sys
import logging
import defbuild.commands as commands
logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
if not sys.version_info >= (3, 3):
logging.error("Python version needs to be of 3.3 or higher")
sys.exit(1)
import shutil
import configparser
import argparse
try:
import requests
except ImportError:
ver = sys.version_info
logging.info("Python version is {}.{}.{}".format(ver.major, ver.minor, ver.micro))
logging.error("requests not found, install with `pip install requests`")
sys.exit(1)
__version__ = "2.1.1"
class Project:
def __init__(self, arguments):
self.arguments = arguments
_project_path = arguments.project if hasattr(arguments, 'project') and arguments.project else "."
self.source_directory = os.path.abspath(_project_path)
self.cache_dir = os.path.join(os.path.expanduser("~"), ".builder", "cache")
self.project_file = _get_project_file(self.source_directory)
self.output = os.path.join(self.cache_dir, "output")
self.platform = arguments.platform if hasattr(arguments, 'platform') else None
self.report = arguments.report if hasattr(arguments, 'report') else None
self.quick = arguments.quick if hasattr(arguments, 'quick') else None
self.force = arguments.force if hasattr(arguments, 'force') else None
self.variant = arguments.variant if hasattr(arguments, 'variant') else None
self.resolve = arguments.resolve if hasattr(arguments, 'resolve') else None
self.verbose = arguments.verbose if hasattr(arguments, 'verbose') else None
self.name = None
self.provision = None
self.identity = None
self.certificate = None
self.private_key = None
self.bob = None
self.ios_id = None
self.android_id = None
self.ios_build = None
self.android_build = None
if hasattr(arguments, 'options') and arguments.options:
path, name = os.path.split(self.project_file)
shutil.copy(self.project_file, os.path.join(path, "{}_old".format(name)))
options_file = os.path.abspath(arguments.options)
_merge_properties(self.project_file, options_file)
self.load()
def load(self):
config = _load_config()
game_config = _load_game_config(self.project_file)
self.name = game_config.get("project", "title")
self.ios_id = game_config.get("ios", "bundle_identifier", fallback="com.example.todo")
self.android_id = game_config.get("android", "package", fallback="com.example.todo")
self.bob = config.get("config", "bob", fallback="")
id_name = config.get(self.name, "identity", fallback="")
self.identity = id_name if id_name else config.get("config", "identity", fallback="")
prov_name = config.get(self.name, "provision", fallback="")
self.provision = prov_name if prov_name else config.get("config", "provision", fallback="")
cert_name = config.get(self.name, "certificate", fallback=None)
self.certificate = cert_name if cert_name else config.get("config", "certificate", fallback="")
pk_name = config.get(self.name, "private-key", fallback=None)
self.private_key = pk_name if pk_name else config.get("config", "private-key", fallback="")
self.output = config.get("config", "output") if config.has_option("config", "output") else self.output
self.platform = self.platform if self.platform else config.get("config", "platform") if config.has_option(
"config", "platform") else None
self.platform = "armv7-android" if self.platform in ["android", "armv7-android"] else \
"armv7-darwin" if self.platform in ["ios", "armv7-darwin"] else ""
if not self.platform:
logging.info("No platform found, specify ios or android")
if self.name not in config.sections():
return
self.ios_build = config.get(self.name, "ios_build", fallback="")
self.android_build = config.get(self.name, "android_build", fallback="")
def final(self):
# Final clean up
if hasattr(self.arguments, 'options') and self.arguments.options:
path, name = os.path.split(self.project_file)
os.remove(self.project_file)
os.rename(os.path.join(path, "{}_old".format(name)), self.project_file)
self.save()
def save(self):
config = _load_config()
if not config.has_section(self.name):
config.add_section(self.name)
config.set(self.name, "ios_id", self.ios_id)
config.set(self.name, "android_id", self.android_id)
if self.android_build:
config.set(self.name, "android_build", self.android_build)
if self.ios_build:
config.set(self.name, "ios_build", self.ios_build)
if self.identity:
config.set("config", "identity", self.identity)
if self.provision:
config.set("config", "provision", self.provision)
config.set("config", "platform", self.platform)
config.set("config", "bob", self.bob)
config.set("config", "output", self.output)
with open(os.path.join(self.cache_dir, "session"), 'w') as f:
config.write(f)
def _load_config():
cache_dir = os.path.join(os.path.expanduser("~"), ".builder", "cache")
path = os.path.join(cache_dir, "session")
if not os.path.exists(path):
if not os.path.exists(cache_dir):
os.makedirs(cache_dir, exist_ok=True)
config = configparser.RawConfigParser()
config.add_section('config')
with open(path, 'w') as f:
config.write(f)
config = configparser.ConfigParser()
config.read(path)
return config
def _load_game_config(project_file):
config = configparser.ConfigParser()
config.read(project_file)
return config
def _get_project_file(folder):
for x in os.listdir(folder):
if x.endswith(".project"):
return os.path.join(folder, x)
logging.error("Can not find project file in this location {}".format(folder))
sys.exit(1)
def _merge_properties(project_file, properties_file):
project = configparser.ConfigParser()
project.read_file(open(project_file))
properties = configparser.ConfigParser()
properties.read_file(open(properties_file))
for section_name, section_proxy in properties.items():
for name, value in properties.items(section_name):
if not project.has_section(section_name):
project.add_section(section_name)
project.set(section_name, name, value)
with open(project_file, "w") as f:
project.write(f)
def init():
parser = argparse.ArgumentParser(description='Commandline tool to build a Defold project')
sub_parsers = parser.add_subparsers(dest="command")
parser.add_argument('--version', action='version', version="DefBuild {}".format(__version__))
sub_build = sub_parsers.add_parser("build", help="Build a Defold project")
sub_build.add_argument("project", help="source directory of project")
sub_build.add_argument("-p", "--platform", help="which platform to build, 'ios' or 'android'", dest="platform",
choices=["android", "ios"])
sub_build.add_argument("-q", "--quick", help="option to do a quick build by skipping distclean",
action='store_true', dest="quick")
sub_build.add_argument("-o", "--options", help="Read options from properties file. Options specified on the "
"commandline will be given precedence over the ones read from "
"the properties file", dest="options")
sub_build.add_argument("-r", "--report", help="which platform to build, 'ios' or 'android'", action="store_true",
dest="report")
sub_build.add_argument("--verbose", help="print verbose logs", dest="verbose", action="store_true")
sub_build.add_argument("--variant", help="specify debug or release of the engine", dest="variant",
choices=["release", "debug"], default="debug")
sub_build.add_argument("--resolve", help="Resolve all external library dependencies", dest="resolve",
action="store_true")
sub_install = sub_parsers.add_parser("install", help="Install a project to a connected device")
sub_install.add_argument("project", help="what to install", nargs="?")
sub_install.add_argument("-f", "--force", help="force installation by uninstalling first", action='store_true',
dest="force")
sub_install.add_argument("-p", "--platform", help="which platform to install on, 'ios' or 'android'", nargs="?",
dest="platform")
sub_uninstall = sub_parsers.add_parser("uninstall", help="Uninstall the Defold project on a connected device")
sub_uninstall.add_argument("project", help="which app to uninstall", nargs="?")
sub_uninstall.add_argument("-p", "--platform", help="which platform to uninstall, 'ios' or 'android'", nargs="?",
dest="platform")
sub_parsers.add_parser("start")
sub_parsers.add_parser("listen")
sub_config = sub_parsers.add_parser("config")
sub_config.add_argument("key", help="key to update")
sub_config.add_argument("value", help="the value to assign to key")
sub_bob = sub_parsers.add_parser("bob", help="Update or set the version of bob that is used")
sub_bob.add_argument("-u", "--update", help="update bob", action='store_true', dest="update")
sub_bob.add_argument("-f", "--force", help="force download of bob", action='store_true', dest="force")
sub_bob.add_argument("--set", help="download a specific version of bob, takes version number and 'beta'",
dest="set")
sub_resolve = sub_parsers.add_parser("resolve")
sub_resolve.add_argument("project", help="source directory", nargs="?")
input_args = parser.parse_args()
return input_args
def run():
options = init()
project = None
try:
if options.command == "bob":
config = _load_config()
project = commands.bob(config, options)
else:
project = Project(options)
if options.command == "build":
commands.build(project)
elif options.command == "install":
commands.install(project)
elif options.command == "uninstall":
commands.uninstall(project)
elif options.command == "resolve":
commands.resolve(project)
elif options.command == "start":
commands.start(project)
elif options.command == "listen":
commands.listen(project)
elif options.command == "set":
commands.config_set(project, options)
else:
commands.print_help()
finally:
if project:
project.final()
def main():
try:
run()
except KeyboardInterrupt:
sys.exit()
except:
raise
if __name__ == '__main__':
main()
```
|
{
"source": "Jerakin/DefTree",
"score": 3
}
|
#### File: tests/profiling/profile_deftree.py
```python
import deftree
import os
import timeit
import csv
import datetime
import cProfile
root_path = os.path.dirname(__file__)
profiling_document = os.path.join(root_path, 'profile.defold')
csv_profile_data = os.path.join(root_path, 'profiling.csv')
def timing_parse():
print("Warning: This can take anything between 0 seconds to minutes")
times = 300
value = timeit.timeit(
stmt="deftree.parse('{}')".format(profiling_document),
setup="import deftree; import os", number=times)
print("Total time spent: {}, Number of times ran: {}".format(value, times))
return value/times
def store_timing_data():
if not os.path.exists(csv_profile_data):
with open(csv_profile_data, "w", newline='') as csvfile:
_writer = csv.writer(csvfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
_writer.writerow(['sep=,'])
_writer.writerow(["Date", "Version", "Average Time", "profile.defold modified"])
with open(csv_profile_data, "a", newline='') as csvfile:
now = datetime.datetime.now()
profile_doc = datetime.datetime.fromtimestamp(os.path.getmtime(profiling_document))
_writer = csv.writer(csvfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
_writer.writerow(
[now.strftime("%Y-%m-%d"), deftree.__version__, timing_parse(), profile_doc.strftime("%Y-%m-%d %H:%M")])
def profile_parse():
cProfile.run('for x in range(200): deftree.parse("{}")'.format(profiling_document))
if __name__ == '__main__':
    store_timing_data()
```
|
{
"source": "Jerakin/editor-script-check-dependencies-versions",
"score": 3
}
|
#### File: editor-script-check-dependencies/scripts/check_dependencies.py
```python
import os
import json
try:
from urllib.request import urlopen
from urllib.request import Request
from urllib.request import HTTPError
except ImportError:
from urllib2 import urlopen
from urllib2 import Request
from urllib2 import HTTPError
try:
from ConfigParser import SafeConfigParser as ConfigParser
except ImportError:
from configparser import ConfigParser
import re
from distutils.version import StrictVersion
import time
def MyStrictVersion(v):
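    # Strip any leading non-digit prefix (e.g. "v1.2.0" -> "1.2.0") so the
    # tag can be parsed by StrictVersion; fall back to "0.0" when empty.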
    m = re.search(r'^(\D|)+(.*)', v)
if m:
v = m.group(2)
v = v or "0.0"
return StrictVersion(v)
def get_dependencies(config_file):
config = ConfigParser()
config.read(config_file)
if config.has_option("project", "dependencies"):
return config.get('project', 'dependencies').split(",")
return []
def get_header():
config_file = os.path.join(os.getcwd(),".editor-script-settings", "editor-script-check-dependencies")
header = {"User-Agent": "editor-script-check-dependencies"}
if os.path.exists(config_file):
config = ConfigParser()
config.read(config_file)
if config.has_option("Authenticate", "TOKEN"):
header['Authorization'] = "token " + config.get('Authenticate', 'TOKEN')
return header
def have_releases(request):
"""Check if a repository is using releases
GET /repos/:owner/:repo/releases"""
response = urlopen(request)
if response.getcode() not in [200, 304]:
return False
response_content = response.read()
response_content.decode('utf-8')
json_response = json.loads(response_content)
if json_response:
return True
return False
def get_latest_version(request):
"""Get the latest version
GET /repos/:owner/:repo/releases/latest"""
response = urlopen(request)
if response.getcode() not in [200, 304]:
return
response_content = response.read()
response_content.decode('utf-8')
json_response = json.loads(response_content)
version_string = json_response["tag_name"]
try:
return MyStrictVersion(version_string)
except ValueError:
return MyStrictVersion("0.0")
def compare_versions(old, new, project, url):
"""Print our version information"""
if new > old:
print("Project '{}' is outdated, latest version is".format(project))
print(" {}/archive/{}.zip\n".format(url, new))
else:
if old == MyStrictVersion("0.0"):
print("Project '{}' does not have any versions.\n".format(project))
else:
print("Project '{}' is up to date.\n".format(project))
def get_latest_tags(request):
"""If a project is not using releases presume the latest tag is the latest release
GET /repos/:owner/:repo/tags"""
response = urlopen(request)
if response.getcode() not in [200, 304]:
return
response_content = response.read()
response_content.decode('utf-8')
json_response = json.loads(response_content)
tags = []
for p in json_response:
tags.append(p["name"])
try:
tags.sort(key=MyStrictVersion)
if tags:
return MyStrictVersion(tags[-1])
else:
return MyStrictVersion("0.0")
except ValueError:
return MyStrictVersion("0.0")
def check_rate_limit(header, dependencies):
"""Github rate limits the API calls, this checks if we are and prints some info
GET /rate_limit"""
request_url = "https://api.github.com/rate_limit"
request = Request(request_url, headers=header)
response = urlopen(request)
if response.getcode() not in [200, 304]:
return
response_content = response.read()
response_content.decode('utf-8')
json_response = json.loads(response_content)
limit = json_response["resources"]["core"]["limit"]
remaining = json_response["resources"]["core"]["remaining"]
reset = json_response["resources"]["core"]["reset"]
if len(dependencies) > remaining:
if "token" not in header:
print("You are rate limited to {} dependency checks, add a 'token' to increase this limit".format(limit))
print("You are rate limited until", time.ctime(reset))
if remaining == 0:
return True
def main():
game_project = os.path.join(os.getcwd(), "game.project")
header = get_header()
dependencies = get_dependencies(game_project)
if check_rate_limit(header, dependencies):
# No need to continue if we are rate-limited
return
for dependency in dependencies:
        url = re.match(r"(.*)/archive", dependency)
if not url:
continue
url = url.group(1)
project = url.split("/")[-1]
owner = url.split("/")[-2]
# Collect the version from the url
try:
current_version = MyStrictVersion(dependency.split("/")[-1].replace(".zip", ""))
except ValueError:
if "master.zip" in dependency:
print("Project '{}' is using master this is not recommended.".format(project))
else:
print("Project '{}' does not follow Semantic Versioning.".format(project))
current_version = MyStrictVersion("0.0")
# First check of there are any releases
releases = "https://api.github.com/repos/{}/{}/releases".format(owner, project)
request = Request(releases, headers=header)
try:
if have_releases(request):
latest = "https://api.github.com/repos/{}/{}/releases/latest".format(owner, project)
request = Request(latest, headers=header)
latest = get_latest_version(request)
else:
# Get the latest version from tags if there are no releases
releases = "https://api.github.com/repos/{}/{}/tags".format(owner, project)
request = Request(releases, headers=header)
latest = get_latest_tags(request)
compare_versions(current_version, latest, project, url)
if "Authorization" not in header:
                time.sleep(2)  # throttle unauthenticated requests, which GitHub rate limits
except HTTPError as e:
print("Project '{}' could not be checked, HTTPError {}.".format(project, e.code))
if __name__ == '__main__':
main()
```
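The version comparison relies on `distutils.version.StrictVersion` after stripping the tag prefix; the effect can be checked in isolation. A sketch that re-creates the parsing rather than importing the editor script itself (note that `distutils` is removed in Python 3.12, which the script already depends on):
```python
import re
from distutils.version import StrictVersion


def parse_tag(tag: str) -> StrictVersion:
    # Same prefix-stripping idea as MyStrictVersion above.
    m = re.search(r'^(\D|)+(.*)', tag)
    if m:
        tag = m.group(2)
    return StrictVersion(tag or "0.0")


assert parse_tag("v2.1.0") > parse_tag("2.0.9")
assert str(parse_tag("release-1.4.0")) == "1.4.0"
```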
|
{
"source": "Jerakin/FakemonCreator",
"score": 2
}
|
#### File: creator/child_views/item_tab.py
```python
import logging as log
from PyQt5 import QtWidgets, uic
from PyQt5.QtCore import Qt
from creator.utils import util
from creator.child_views import shared
from creator.child_views import list_view
import qtmodern.windows
import qtmodern.styles
class ItemTab(QtWidgets.QWidget, shared.Tab):
def __init__(self, data):
super(ItemTab, self).__init__()
uic.loadUi(util.RESOURCE_UI / 'ItemTab.ui', self)
self.data = data
self.item_list = util.JsonToList(util.DATA / "items.json")
self.child = None
self.list_items.setContextMenuPolicy(Qt.CustomContextMenu)
self.list_items.customContextMenuRequested.connect(self.context_menu)
self.list_items.itemDoubleClicked.connect(self.open_custom_item)
self.item_name.textEdited.connect(lambda x: self.setattr(self.data.item, "name", x))
self.item_entry.textChanged.connect(
lambda: self.setattr(self.data.item, "description", self.item_entry.toPlainText()))
def load_item_view(self):
self.clear_item_view()
self.item_name.setText(self.data.item.name)
self.item_entry.blockSignals(True)
self.item_entry.setText(self.data.item.description)
self.item_entry.blockSignals(False)
def clear_item_view(self):
self.item_name.setText("")
self.item_entry.blockSignals(True)
self.item_entry.setText("")
self.item_entry.blockSignals(False)
def context_menu(self, pos):
context = QtWidgets.QMenu()
delete_action = context.addAction("delete")
action = context.exec_(self.list_items.mapToGlobal(pos))
if action == delete_action:
self.delete_item(self.list_items.selectedItems()[0])
def _open_item(self, _item):
self.data.new_item()
self.data.item.load(_item)
self.load_item_view()
self.child = None
def open_item(self):
if self.data.item.edited:
response = self.save_and_continue()
if response == QtWidgets.QMessageBox.Cancel:
return
if self.child:
self.child.close()
self.child = list_view.ListView(util.JsonToList(util.DATA / "items.json"))
self.modern = qtmodern.windows.ModernWindow(self.child)
self.child.finish_function = self._open_item
self.modern.show()
def open_custom_item(self, widget_item):
name = widget_item.text()
if self.data.item.edited:
response = self.save_and_continue()
if response == QtWidgets.QMessageBox.Cancel:
return
self.data.new_item()
self.data.item.custom(self.data.container.data(), name)
self.load_item_view()
def new_item(self):
if self.data.item.edited:
if not self.save_and_continue():
return
self.data.new_item()
self.data.item.new()
self.update_list_signal.emit()
self.load_item_view()
def delete_item(self, widget_item):
item_name = widget_item.text()
button_reply = QtWidgets.QMessageBox.question(None, 'Delete',
"Would you like to delete {}".format(item_name),
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.Cancel,
QtWidgets.QMessageBox.Cancel)
if button_reply == QtWidgets.QMessageBox.Yes:
self.data.container.delete_entry("items.json", item_name)
self.list_items.takeItem(self.list_items.currentRow())
self.data._edited = True
if item_name == self.data.item.name:
self.data.item.new()
self.load_item_view()
self.update_list_signal.emit()
log.info("Deleted {}".format(item_name))
def update_custom_list(self):
data = self.data.container.data() if self.data.container else None
if not data or "items.json" not in data:
return
item_data = data["items.json"]
self.list_items.clear()
for _item, _ in item_data.items():
self.list_items.addItem(_item)
```
#### File: creator/child_views/list_view.py
```python
from PyQt5 import QtWidgets, uic, QtGui, QtCore
import creator.utils.util as util
class ListView(QtWidgets.QWidget):
def __init__(self, list_class):
super(ListView, self).__init__()
uic.loadUi(util.RESOURCE_UI / 'ListSelector.ui', self) # Load the .ui file
exit_shortcut = QtWidgets.QShortcut(QtGui.QKeySequence("Esc"), self)
exit_shortcut.activated.connect(self.close)
self.setAttribute(QtCore.Qt.WA_DeleteOnClose, True)
self.finish_function = None
# Skip the error entries
for entry in list_class:
if entry:
self.listWidget.addItem(entry)
self.pushButton.clicked.connect(self.open)
self.listWidget.itemDoubleClicked.connect(self.open)
def open(self):
if self.finish_function:
self.finish_function(self.listWidget.selectedItems()[0].text())
self.close()
```
#### File: creator/child_views/move_tab.py
```python
import logging as log
from PyQt5 import QtWidgets, uic, QtGui
from PyQt5.QtCore import Qt
from creator.data import fields
from creator.utils import util
from creator.child_views import shared
from creator.child_views import list_view
import qtmodern.windows
import qtmodern.styles
class MoveTab(QtWidgets.QWidget, shared.Tab):
def __init__(self, data):
super(MoveTab, self).__init__()
uic.loadUi(util.RESOURCE_UI / 'MoveTab.ui', self)
self.data = data
        self.move_list = util.move_list()
self.child = None
self.move_pp.setValidator(QtGui.QIntValidator())
self.die_at_1.setValidator(QtGui.QIntValidator())
self.die_at_5.setValidator(QtGui.QIntValidator())
self.die_at_10.setValidator(QtGui.QIntValidator())
self.die_at_17.setValidator(QtGui.QIntValidator())
self.move_power1.addItem("None")
self.move_power2.addItem("None")
self.move_power3.addItem("None")
self.move_type.addItem("None")
self.move_save.addItem("None")
self.die_type_1.addItem("")
self.die_type_5.addItem("")
self.die_type_10.addItem("")
self.die_type_17.addItem("")
self.move_save.addItems(fields.Attributes)
self.move_power1.addItems(fields.Attributes)
self.move_power2.addItems(fields.Attributes)
self.move_power3.addItems(fields.Attributes)
self.move_type.addItems(fields.Type)
self.die_type_1.addItems(fields.Dice)
self.die_type_5.addItems(fields.Dice)
self.die_type_10.addItems(fields.Dice)
self.die_type_17.addItems(fields.Dice)
self.list_moves.setContextMenuPolicy(Qt.CustomContextMenu)
self.list_moves.customContextMenuRequested.connect(self.move_context_menu)
self.delete_damage.clicked.connect(self.clear_damage)
self.list_moves.itemDoubleClicked.connect(self.open_custom_move)
self.move_name.textEdited.connect(lambda x: self.setattr(self.data.move, "name", x))
self.move_entry.textChanged.connect(
lambda: self.setattr(self.data.move, "description", self.move_entry.toPlainText()))
self.move_duration.textEdited.connect(lambda x: self.setattr(self.data.move, "duration", x))
self.move_duration.textEdited.connect(lambda x: self.setattr(self.data.move, "casting_time", x))
self.move_range.textEdited.connect(lambda x: self.setattr(self.data.move, "range", x))
self.move_pp.textEdited.connect(lambda x: self.setattr(self.data.move, "PP", x))
self.die_at_1.textEdited.connect(lambda x: self.data.move.set_damage_die_property("amount", "1", x))
self.die_at_5.textEdited.connect(lambda x: self.data.move.set_damage_die_property("amount", "5", x))
self.die_at_10.textEdited.connect(lambda x: self.data.move.set_damage_die_property("amount", "10", x))
self.die_at_17.textEdited.connect(lambda x: self.data.move.set_damage_die_property("amount", "17", x))
self.times_1.textEdited.connect(lambda x: self.data.move.set_damage_die_property("times", "1", x))
self.times_5.textEdited.connect(lambda x: self.data.move.set_damage_die_property("times", "5", x))
self.times_10.textEdited.connect(lambda x: self.data.move.set_damage_die_property("times", "10", x))
self.times_17.textEdited.connect(lambda x: self.data.move.set_damage_die_property("times", "17", x))
self.die_type_1.activated[str].connect(lambda x: self.data.move.set_damage_die_property("dice_max", "1", x))
self.die_type_5.activated[str].connect(lambda x: self.data.move.set_damage_die_property("dice_max", "5", x))
self.die_type_10.activated[str].connect(lambda x: self.data.move.set_damage_die_property("dice_max", "10", x))
self.die_type_17.activated[str].connect(lambda x: self.data.move.set_damage_die_property("dice_max", "17", x))
self.move_save.activated[str].connect(lambda x: self.setattr(self.data.move, "save", x))
self.move_power1.activated[str].connect(lambda x: self.setattr(self.data.move, "move_power1", x))
self.move_power2.activated[str].connect(lambda x: self.setattr(self.data.move, "move_power2", x))
self.move_power3.activated[str].connect(lambda x: self.setattr(self.data.move, "move_power3", x))
self.move_type.activated[str].connect(lambda x: self.setattr(self.data.move, "type", x))
self.move_1.stateChanged.connect(lambda x: self.data.move.set_damage_die_property("move", "1", x))
self.move_5.stateChanged.connect(lambda x: self.data.move.set_damage_die_property("move", "5", x))
self.move_10.stateChanged.connect(lambda x: self.data.move.set_damage_die_property("move", "10", x))
self.move_17.stateChanged.connect(lambda x: self.data.move.set_damage_die_property("move", "17", x))
self.level_1.stateChanged.connect(lambda x: self.data.move.set_damage_die_property("level", "1", x))
self.level_5.stateChanged.connect(lambda x: self.data.move.set_damage_die_property("level", "5", x))
self.level_10.stateChanged.connect(lambda x: self.data.move.set_damage_die_property("level", "10", x))
self.level_17.stateChanged.connect(lambda x: self.data.move.set_damage_die_property("level", "17", x))
def new_move(self):
if self.data.move.edited:
if not self.save_and_continue():
return
self.data.new_move()
self.load_move_view()
def delete_move(self, widget_item):
move_name = widget_item.text()
button_reply = QtWidgets.QMessageBox.question(None, 'Delete', "Would you like to delete {}".format(move_name),
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.Cancel,
QtWidgets.QMessageBox.Cancel)
if button_reply == QtWidgets.QMessageBox.Yes:
self.data.container.delete_entry("moves.json", move_name)
self.list_moves.takeItem(self.list_moves.currentRow())
self.data._edited = True
if move_name == self.data.move.name:
self.data.move.new()
self.load_move_view()
self.update_list_signal.emit()
log.info("Deleted {}".format(move_name))
def _open_move(self, _move):
self.data.move.load(_move)
self.load_move_view()
self.child = None
def open_move(self):
if self.data.move.edited:
response = self.save_and_continue()
if response == QtWidgets.QMessageBox.Cancel:
return
if self.child:
self.child.close()
self.child = list_view.ListView(util.move_list())
self.modern = qtmodern.windows.ModernWindow(self.child)
self.child.finish_function = self._open_move
self.modern.show()
def open_custom_move(self, widget_item):
name = widget_item.text()
if self.data.move.edited:
response = self.save_and_continue()
if response == QtWidgets.QMessageBox.Cancel:
return
self.data.move.custom(self.data.container.data(), name)
self.load_move_view()
def move_context_menu(self, pos):
context = QtWidgets.QMenu()
remove_fakemon = context.addAction("delete")
action = context.exec_(self.list_moves.mapToGlobal(pos))
if action == remove_fakemon:
self.delete_move(self.list_moves.selectedItems()[0])
def clear_damage(self):
self.data.move.delete_damage()
self.die_at_1.setText("")
self.die_at_5.setText("")
self.die_at_10.setText("")
self.die_at_17.setText("")
self.die_type_1.setCurrentText("")
self.die_type_5.setCurrentText("")
self.die_type_10.setCurrentText("")
self.die_type_17.setCurrentText("")
self.move_1.setChecked(False)
self.move_5.setChecked(False)
self.move_10.setChecked(False)
self.move_17.setChecked(False)
self.level_1.setChecked(False)
self.level_5.setChecked(False)
self.level_10.setChecked(False)
self.level_17.setChecked(False)
def load_move_view(self):
self.clear_move_view()
self.move_name.setText(self.data.move.name)
self.move_entry.blockSignals(True)
self.move_entry.setText(self.data.move.description)
self.move_entry.blockSignals(False)
self.move_duration.setText(self.data.move.duration)
self.move_casting_time.setText(self.data.move.casting_time)
self.move_range.setText(self.data.move.range)
self.move_pp.setText(self.data.move.PP)
self.die_at_1.setText(self.data.move.get_damage_die_property("amount", "1"))
self.die_at_5.setText(self.data.move.get_damage_die_property("amount", "5"))
self.die_at_10.setText(self.data.move.get_damage_die_property("amount", "10"))
self.die_at_17.setText(self.data.move.get_damage_die_property("amount", "17"))
self.times_1.setText(self.data.move.get_damage_die_property("times", "1"))
self.times_5.setText(self.data.move.get_damage_die_property("times", "5"))
self.times_10.setText(self.data.move.get_damage_die_property("times", "10"))
self.times_17.setText(self.data.move.get_damage_die_property("times", "17"))
self.die_type_1.setCurrentText(self.data.move.get_damage_die_property("dice_max", "1"))
self.die_type_5.setCurrentText(self.data.move.get_damage_die_property("dice_max", "5"))
self.die_type_10.setCurrentText(self.data.move.get_damage_die_property("dice_max", "10"))
self.die_type_17.setCurrentText(self.data.move.get_damage_die_property("dice_max", "17"))
self.move_save.setCurrentText(self.data.move.save)
        self.move_power1.setCurrentText(self.data.move.move_power1)
        self.move_power2.setCurrentText(self.data.move.move_power2)
        self.move_power3.setCurrentText(self.data.move.move_power3)
        self.move_type.setCurrentText(self.data.move.type)
self.move_1.setChecked(bool(self.data.move.get_damage_die_property("move", "1")))
self.move_5.setChecked(bool(self.data.move.get_damage_die_property("move", "5")))
self.move_10.setChecked(bool(self.data.move.get_damage_die_property("move", "10")))
self.move_17.setChecked(bool(self.data.move.get_damage_die_property("move", "17")))
self.level_1.setChecked(bool(self.data.move.get_damage_die_property("level", "1")))
self.level_5.setChecked(bool(self.data.move.get_damage_die_property("level", "5")))
self.level_10.setChecked(bool(self.data.move.get_damage_die_property("level", "10")))
self.level_17.setChecked(bool(self.data.move.get_damage_die_property("level", "17")))
def clear_move_view(self):
self.move_name.setText("")
self.move_entry.blockSignals(True)
self.move_entry.setText("")
self.move_entry.blockSignals(False)
self.move_duration.setText("")
self.move_casting_time.setText("")
self.move_range.setText("")
self.move_pp.setText("")
self.die_at_1.setText("")
self.die_at_5.setText("")
self.die_at_10.setText("")
self.die_at_17.setText("")
self.times_1.setText("")
self.times_5.setText("")
self.times_10.setText("")
self.times_17.setText("")
self.die_type_1.setCurrentText("")
self.die_type_5.setCurrentText("")
self.die_type_10.setCurrentText("")
self.die_type_17.setCurrentText("")
self.move_save.setCurrentText("None")
self.move_power1.setCurrentText("None")
self.move_power2.setCurrentText("None")
self.move_type.setCurrentText("None")
self.move_1.setChecked(False)
self.move_5.setChecked(False)
self.move_10.setChecked(False)
self.move_17.setChecked(False)
self.level_1.setChecked(False)
self.level_5.setChecked(False)
self.level_10.setChecked(False)
self.level_17.setChecked(False)
def update_custom_list(self):
data = self.data.container.data() if self.data.container else None
if not data:
return
moves_data = data["moves.json"]
self.list_moves.clear()
for _move, _ in moves_data.items():
self.list_moves.addItem(_move)
```
#### File: creator/data/item.py
```python
import json
import copy
from datetime import datetime
import creator.utils.util as util
_NEW_DATA = {
"Description": ""
}
class Item:
__initialized = False
def __init__(self):
self._name = None
self.data = None
self.edited = False
def __setattr__(self, key, value):
if self.__initialized:
self.__dict__["edited"] = True
super(Item, self).__setattr__(key, value)
def serialize(self):
if not self.name:
now = datetime.now()
self.name = now.strftime("%m%d%Y%H%M%S")
def custom(self, data, name):
self.data = data["items.json"][name]
self._name = name
self.__initialized = True
def load(self, name):
self.name = name
data_path = util.DATA / "items.json"
with data_path.open("r", encoding="utf-8") as f:
self.data = json.load(f)[name]
self.__initialized = True
def new(self):
self.data = copy.deepcopy(_NEW_DATA)
self._name = None
self.edited = False
self.__initialized = True
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def description(self):
return self.data["Effect"] if "Effect" in self.data else ""
@description.setter
def description(self, value):
self.data["Effect"] = value
```
#### File: res/ui/FakemonCreator.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
class MainWindow(QtWidgets.QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
self.setObjectName("MainWindow")
self.resize(1178, 933)
self.centralwidget = QtWidgets.QWidget(self)
self.centralwidget.setObjectName("centralwidget")
self.list_pokemon = QtWidgets.QListWidget(self.centralwidget)
self.list_pokemon.setGeometry(QtCore.QRect(10, 10, 256, 731))
self.list_pokemon.setObjectName("list_pokemon")
self.verticalLayoutWidget = QtWidgets.QWidget(self.centralwidget)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(340, 270, 160, 121))
self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.label = QtWidgets.QLabel(self.verticalLayoutWidget)
self.label.setObjectName("label")
self.verticalLayout.addWidget(self.label)
self.skills_list = QtWidgets.QListWidget(self.verticalLayoutWidget)
self.skills_list.setObjectName("skills_list")
self.verticalLayout.addWidget(self.skills_list)
self.add_skill = QtWidgets.QComboBox(self.verticalLayoutWidget)
self.add_skill.setObjectName("add_skill")
self.verticalLayout.addWidget(self.add_skill)
self.verticalLayoutWidget_6 = QtWidgets.QWidget(self.centralwidget)
self.verticalLayoutWidget_6.setGeometry(QtCore.QRect(330, 580, 160, 131))
self.verticalLayoutWidget_6.setObjectName("verticalLayoutWidget_6")
self.verticalLayout_6 = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_6)
self.verticalLayout_6.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.label_6 = QtWidgets.QLabel(self.verticalLayoutWidget_6)
self.label_6.setObjectName("label_6")
self.verticalLayout_6.addWidget(self.label_6)
self.senses_list = QtWidgets.QListWidget(self.verticalLayoutWidget_6)
self.senses_list.setObjectName("senses_list")
self.verticalLayout_6.addWidget(self.senses_list)
self.add_senses = QtWidgets.QComboBox(self.verticalLayoutWidget_6)
self.add_senses.setObjectName("add_senses")
self.verticalLayout_6.addWidget(self.add_senses)
self.verticalLayoutWidget_7 = QtWidgets.QWidget(self.centralwidget)
self.verticalLayoutWidget_7.setGeometry(QtCore.QRect(360, 410, 160, 168))
self.verticalLayoutWidget_7.setObjectName("verticalLayoutWidget_7")
self.verticalLayout_7 = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_7)
self.verticalLayout_7.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_7.setObjectName("verticalLayout_7")
self.label_7 = QtWidgets.QLabel(self.verticalLayoutWidget_7)
self.label_7.setObjectName("label_7")
self.verticalLayout_7.addWidget(self.label_7)
self.abilities_list = QtWidgets.QListWidget(self.verticalLayoutWidget_7)
self.abilities_list.setObjectName("abilities_list")
self.verticalLayout_7.addWidget(self.abilities_list)
self.add_ability = QtWidgets.QComboBox(self.verticalLayoutWidget_7)
self.add_ability.setObjectName("add_ability")
self.verticalLayout_7.addWidget(self.add_ability)
self.horizontalLayoutWidget = QtWidgets.QWidget(self.centralwidget)
self.horizontalLayoutWidget.setGeometry(QtCore.QRect(270, 760, 641, 91))
self.horizontalLayoutWidget.setObjectName("horizontalLayoutWidget")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setObjectName("horizontalLayout")
self.label_8 = QtWidgets.QLabel(self.horizontalLayoutWidget)
self.label_8.setObjectName("label_8")
self.horizontalLayout.addWidget(self.label_8)
self.verticalLayout_16 = QtWidgets.QVBoxLayout()
self.verticalLayout_16.setObjectName("verticalLayout_16")
self.evolve_into_list = QtWidgets.QListWidget(self.horizontalLayoutWidget)
self.evolve_into_list.setObjectName("evolve_into_list")
self.verticalLayout_16.addWidget(self.evolve_into_list)
self.add_evolution = QtWidgets.QComboBox(self.horizontalLayoutWidget)
self.add_evolution.setObjectName("add_evolution")
self.verticalLayout_16.addWidget(self.add_evolution)
self.horizontalLayout.addLayout(self.verticalLayout_16)
self.label_9 = QtWidgets.QLabel(self.horizontalLayoutWidget)
self.label_9.setObjectName("label_9")
self.horizontalLayout.addWidget(self.label_9)
self.evolve_level_gain = QtWidgets.QLineEdit(self.horizontalLayoutWidget)
self.evolve_level_gain.setObjectName("evolve_level_gain")
self.horizontalLayout.addWidget(self.evolve_level_gain)
self.label_11 = QtWidgets.QLabel(self.horizontalLayoutWidget)
self.label_11.setObjectName("label_11")
self.horizontalLayout.addWidget(self.label_11)
self.evolve_points_gain = QtWidgets.QLineEdit(self.horizontalLayoutWidget)
self.evolve_points_gain.setObjectName("evolve_points_gain")
self.horizontalLayout.addWidget(self.evolve_points_gain)
self.label_10 = QtWidgets.QLabel(self.horizontalLayoutWidget)
self.label_10.setObjectName("label_10")
self.horizontalLayout.addWidget(self.label_10)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.gridLayoutWidget = QtWidgets.QWidget(self.centralwidget)
self.gridLayoutWidget.setGeometry(QtCore.QRect(740, 160, 391, 530))
self.gridLayoutWidget.setObjectName("gridLayoutWidget")
self.gridLayout = QtWidgets.QGridLayout(self.gridLayoutWidget)
self.gridLayout.setContentsMargins(0, 0, 5, 5)
self.gridLayout.setObjectName("gridLayout")
self.verticalLayout_13 = QtWidgets.QVBoxLayout()
self.verticalLayout_13.setObjectName("verticalLayout_13")
self.label_17 = QtWidgets.QLabel(self.gridLayoutWidget)
self.label_17.setObjectName("label_17")
self.verticalLayout_13.addWidget(self.label_17)
self.moves_tm_list = QtWidgets.QListWidget(self.gridLayoutWidget)
self.moves_tm_list.setObjectName("moves_tm_list")
self.verticalLayout_13.addWidget(self.moves_tm_list)
self.add_tms = QtWidgets.QComboBox(self.gridLayoutWidget)
self.add_tms.setObjectName("add_tms")
self.verticalLayout_13.addWidget(self.add_tms)
self.gridLayout.addLayout(self.verticalLayout_13, 0, 1, 1, 1)
self.verticalLayout_10 = QtWidgets.QVBoxLayout()
self.verticalLayout_10.setObjectName("verticalLayout_10")
self.label_14 = QtWidgets.QLabel(self.gridLayoutWidget)
self.label_14.setObjectName("label_14")
self.verticalLayout_10.addWidget(self.label_14)
self.moves_10_list = QtWidgets.QListWidget(self.gridLayoutWidget)
self.moves_10_list.setObjectName("moves_10_list")
self.verticalLayout_10.addWidget(self.moves_10_list)
self.add_level_10_moves = QtWidgets.QComboBox(self.gridLayoutWidget)
self.add_level_10_moves.setObjectName("add_level_10_moves")
self.verticalLayout_10.addWidget(self.add_level_10_moves)
self.gridLayout.addLayout(self.verticalLayout_10, 1, 1, 1, 1)
self.verticalLayout_9 = QtWidgets.QVBoxLayout()
self.verticalLayout_9.setObjectName("verticalLayout_9")
self.label_13 = QtWidgets.QLabel(self.gridLayoutWidget)
self.label_13.setObjectName("label_13")
self.verticalLayout_9.addWidget(self.label_13)
self.moves_6_list = QtWidgets.QListWidget(self.gridLayoutWidget)
self.moves_6_list.setObjectName("moves_6_list")
self.verticalLayout_9.addWidget(self.moves_6_list)
self.add_level_6_moves = QtWidgets.QComboBox(self.gridLayoutWidget)
self.add_level_6_moves.setObjectName("add_level_6_moves")
self.verticalLayout_9.addWidget(self.add_level_6_moves)
self.gridLayout.addLayout(self.verticalLayout_9, 1, 0, 1, 1)
self.verticalLayout_8 = QtWidgets.QVBoxLayout()
self.verticalLayout_8.setObjectName("verticalLayout_8")
self.label_12 = QtWidgets.QLabel(self.gridLayoutWidget)
self.label_12.setObjectName("label_12")
self.verticalLayout_8.addWidget(self.label_12)
self.moves_starting_list = QtWidgets.QListWidget(self.gridLayoutWidget)
self.moves_starting_list.setObjectName("moves_starting_list")
self.verticalLayout_8.addWidget(self.moves_starting_list)
self.add_starting_moves = QtWidgets.QComboBox(self.gridLayoutWidget)
self.add_starting_moves.setObjectName("add_starting_moves")
self.verticalLayout_8.addWidget(self.add_starting_moves)
self.gridLayout.addLayout(self.verticalLayout_8, 0, 0, 1, 1)
self.verticalLayout_11 = QtWidgets.QVBoxLayout()
self.verticalLayout_11.setObjectName("verticalLayout_11")
self.label_15 = QtWidgets.QLabel(self.gridLayoutWidget)
self.label_15.setObjectName("label_15")
self.verticalLayout_11.addWidget(self.label_15)
self.moves_14_list = QtWidgets.QListWidget(self.gridLayoutWidget)
self.moves_14_list.setObjectName("moves_14_list")
self.verticalLayout_11.addWidget(self.moves_14_list)
self.add_level_14_moves = QtWidgets.QComboBox(self.gridLayoutWidget)
self.add_level_14_moves.setObjectName("add_level_14_moves")
self.verticalLayout_11.addWidget(self.add_level_14_moves)
self.gridLayout.addLayout(self.verticalLayout_11, 2, 0, 1, 1)
self.verticalLayout_12 = QtWidgets.QVBoxLayout()
self.verticalLayout_12.setObjectName("verticalLayout_12")
self.label_16 = QtWidgets.QLabel(self.gridLayoutWidget)
self.label_16.setObjectName("label_16")
self.verticalLayout_12.addWidget(self.label_16)
self.moves_18_list = QtWidgets.QListWidget(self.gridLayoutWidget)
self.moves_18_list.setObjectName("moves_18_list")
self.verticalLayout_12.addWidget(self.moves_18_list)
self.add_level_18_moves = QtWidgets.QComboBox(self.gridLayoutWidget)
self.add_level_18_moves.setObjectName("add_level_18_moves")
self.verticalLayout_12.addWidget(self.add_level_18_moves)
self.gridLayout.addLayout(self.verticalLayout_12, 2, 1, 1, 1)
self.verticalLayoutWidget_14 = QtWidgets.QWidget(self.centralwidget)
self.verticalLayoutWidget_14.setGeometry(QtCore.QRect(570, 270, 160, 475))
self.verticalLayoutWidget_14.setObjectName("verticalLayoutWidget_14")
self.verticalLayout_14 = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_14)
self.verticalLayout_14.setContentsMargins(0, 10, 0, 0)
self.verticalLayout_14.setObjectName("verticalLayout_14")
self.verticalLayout_3 = QtWidgets.QVBoxLayout()
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.label_3 = QtWidgets.QLabel(self.verticalLayoutWidget_14)
self.label_3.setObjectName("label_3")
self.verticalLayout_3.addWidget(self.label_3)
self.vulnerabilities_list = QtWidgets.QListWidget(self.verticalLayoutWidget_14)
self.vulnerabilities_list.setObjectName("vulnerabilities_list")
self.verticalLayout_3.addWidget(self.vulnerabilities_list)
self.add_vulnerabilities = QtWidgets.QComboBox(self.verticalLayoutWidget_14)
self.add_vulnerabilities.setObjectName("add_vulnerabilities")
self.verticalLayout_3.addWidget(self.add_vulnerabilities)
self.verticalLayout_14.addLayout(self.verticalLayout_3)
self.verticalLayout_4 = QtWidgets.QVBoxLayout()
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.label_4 = QtWidgets.QLabel(self.verticalLayoutWidget_14)
self.label_4.setObjectName("label_4")
self.verticalLayout_4.addWidget(self.label_4)
self.resistances_list = QtWidgets.QListWidget(self.verticalLayoutWidget_14)
self.resistances_list.setObjectName("resistances_list")
self.verticalLayout_4.addWidget(self.resistances_list)
self.add_resistances = QtWidgets.QComboBox(self.verticalLayoutWidget_14)
self.add_resistances.setObjectName("add_resistances")
self.verticalLayout_4.addWidget(self.add_resistances)
self.verticalLayout_14.addLayout(self.verticalLayout_4)
self.verticalLayout_5 = QtWidgets.QVBoxLayout()
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.label_5 = QtWidgets.QLabel(self.verticalLayoutWidget_14)
self.label_5.setObjectName("label_5")
self.verticalLayout_5.addWidget(self.label_5)
self.immunities_list = QtWidgets.QListWidget(self.verticalLayoutWidget_14)
self.immunities_list.setObjectName("immunities_list")
self.verticalLayout_5.addWidget(self.immunities_list)
self.add_immunities = QtWidgets.QComboBox(self.verticalLayoutWidget_14)
self.add_immunities.setObjectName("add_immunities")
self.verticalLayout_5.addWidget(self.add_immunities)
self.verticalLayout_14.addLayout(self.verticalLayout_5)
self.verticalLayoutWidget_15 = QtWidgets.QWidget(self.centralwidget)
self.verticalLayoutWidget_15.setGeometry(QtCore.QRect(310, 130, 310, 134))
self.verticalLayoutWidget_15.setObjectName("verticalLayoutWidget_15")
self.verticalLayout_15 = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_15)
self.verticalLayout_15.setContentsMargins(0, 0, 0, 5)
self.verticalLayout_15.setObjectName("verticalLayout_15")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.label_18 = QtWidgets.QLabel(self.verticalLayoutWidget_15)
self.label_18.setObjectName("label_18")
self.horizontalLayout_2.addWidget(self.label_18)
self.armor_class = QtWidgets.QLineEdit(self.verticalLayoutWidget_15)
self.armor_class.setObjectName("armor_class")
self.horizontalLayout_2.addWidget(self.armor_class)
self.verticalLayout_15.addLayout(self.horizontalLayout_2)
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.label_19 = QtWidgets.QLabel(self.verticalLayoutWidget_15)
self.label_19.setObjectName("label_19")
self.horizontalLayout_3.addWidget(self.label_19)
self.hit_points = QtWidgets.QLineEdit(self.verticalLayoutWidget_15)
self.hit_points.setObjectName("hit_points")
self.horizontalLayout_3.addWidget(self.hit_points)
self.verticalLayout_15.addLayout(self.horizontalLayout_3)
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.label_20 = QtWidgets.QLabel(self.verticalLayoutWidget_15)
self.label_20.setObjectName("label_20")
self.horizontalLayout_4.addWidget(self.label_20)
self.hit_dice = QtWidgets.QComboBox(self.verticalLayoutWidget_15)
self.hit_dice.setObjectName("hit_dice")
self.horizontalLayout_4.addWidget(self.hit_dice)
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_4.addItem(spacerItem1)
self.verticalLayout_15.addLayout(self.horizontalLayout_4)
self.horizontalLayout_10 = QtWidgets.QHBoxLayout()
self.horizontalLayout_10.setObjectName("horizontalLayout_10")
self.label_25 = QtWidgets.QLabel(self.verticalLayoutWidget_15)
self.label_25.setObjectName("label_25")
self.horizontalLayout_10.addWidget(self.label_25)
self.saving_throw1_pokemon = QtWidgets.QComboBox(self.verticalLayoutWidget_15)
self.saving_throw1_pokemon.setObjectName("saving_throw1_pokemon")
self.horizontalLayout_10.addWidget(self.saving_throw1_pokemon)
self.saving_throw2_pokemon = QtWidgets.QComboBox(self.verticalLayoutWidget_15)
self.saving_throw2_pokemon.setObjectName("saving_throw2_pokemon")
self.horizontalLayout_10.addWidget(self.saving_throw2_pokemon)
self.verticalLayout_15.addLayout(self.horizontalLayout_10)
self.gridLayoutWidget_2 = QtWidgets.QWidget(self.centralwidget)
self.gridLayoutWidget_2.setGeometry(QtCore.QRect(310, 30, 821, 91))
self.gridLayoutWidget_2.setObjectName("gridLayoutWidget_2")
self.gridLayout_2 = QtWidgets.QGridLayout(self.gridLayoutWidget_2)
self.gridLayout_2.setContentsMargins(0, 0, 0, 0)
self.gridLayout_2.setObjectName("gridLayout_2")
self.horizontalLayout_8 = QtWidgets.QHBoxLayout()
self.horizontalLayout_8.setObjectName("horizontalLayout_8")
self.label_24 = QtWidgets.QLabel(self.gridLayoutWidget_2)
font = QtGui.QFont()
font.setPointSize(16)
self.label_24.setFont(font)
self.label_24.setObjectName("label_24")
self.horizontalLayout_8.addWidget(self.label_24)
self.species = QtWidgets.QLineEdit(self.gridLayoutWidget_2)
font = QtGui.QFont()
font.setPointSize(16)
self.species.setFont(font)
self.species.setObjectName("species")
self.horizontalLayout_8.addWidget(self.species)
self.gridLayout_2.addLayout(self.horizontalLayout_8, 0, 0, 1, 1)
self.horizontalLayout_9 = QtWidgets.QHBoxLayout()
self.horizontalLayout_9.setObjectName("horizontalLayout_9")
self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.label_22 = QtWidgets.QLabel(self.gridLayoutWidget_2)
self.label_22.setObjectName("label_22")
self.horizontalLayout_6.addWidget(self.label_22)
self.type1_pokemon = QtWidgets.QComboBox(self.gridLayoutWidget_2)
self.type1_pokemon.setObjectName("type1_pokemon")
self.horizontalLayout_6.addWidget(self.type1_pokemon)
self.type2_pokemon = QtWidgets.QComboBox(self.gridLayoutWidget_2)
self.type2_pokemon.setObjectName("type2_pokemon")
self.horizontalLayout_6.addWidget(self.type2_pokemon)
self.horizontalLayout_9.addLayout(self.horizontalLayout_6)
self.horizontalLayout_7 = QtWidgets.QHBoxLayout()
self.horizontalLayout_7.setObjectName("horizontalLayout_7")
self.label_23 = QtWidgets.QLabel(self.gridLayoutWidget_2)
self.label_23.setObjectName("label_23")
self.horizontalLayout_7.addWidget(self.label_23)
self.sr_pokemon = QtWidgets.QComboBox(self.gridLayoutWidget_2)
self.sr_pokemon.setObjectName("sr_pokemon")
self.horizontalLayout_7.addWidget(self.sr_pokemon)
self.horizontalLayout_11 = QtWidgets.QHBoxLayout()
self.horizontalLayout_11.setObjectName("horizontalLayout_11")
self.label_26 = QtWidgets.QLabel(self.gridLayoutWidget_2)
self.label_26.setObjectName("label_26")
self.horizontalLayout_11.addWidget(self.label_26)
self.level = QtWidgets.QLineEdit(self.gridLayoutWidget_2)
self.level.setObjectName("level")
self.horizontalLayout_11.addWidget(self.level)
self.horizontalLayout_7.addLayout(self.horizontalLayout_11)
spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_7.addItem(spacerItem2)
self.horizontalLayout_9.addLayout(self.horizontalLayout_7)
self.gridLayout_2.addLayout(self.horizontalLayout_9, 1, 0, 1, 1)
self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.label_21 = QtWidgets.QLabel(self.gridLayoutWidget_2)
self.label_21.setObjectName("label_21")
self.horizontalLayout_5.addWidget(self.label_21)
self.index_number = QtWidgets.QLineEdit(self.gridLayoutWidget_2)
self.index_number.setObjectName("index_number")
self.horizontalLayout_5.addWidget(self.index_number)
self.gridLayout_2.addLayout(self.horizontalLayout_5, 0, 1, 1, 1)
self.gridLayout_2.setColumnStretch(0, 1)
self.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(self)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1178, 22))
self.menubar.setObjectName("menubar")
self.menuFile = QtWidgets.QMenu(self.menubar)
self.menuFile.setObjectName("menuFile")
self.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(self)
self.statusbar.setObjectName("statusbar")
self.setStatusBar(self.statusbar)
self.actionNew_Pokemon = QtWidgets.QAction(self)
self.actionNew_Pokemon.setObjectName("actionNew_Pokemon")
self.actionNew_Move = QtWidgets.QAction(self)
self.actionNew_Move.setObjectName("actionNew_Move")
self.actionNew_Ability = QtWidgets.QAction(self)
self.actionNew_Ability.setObjectName("actionNew_Ability")
self.actionExit = QtWidgets.QAction(self)
self.actionExit.setObjectName("actionExit")
self.actionOpen_Standard_Pokemon = QtWidgets.QAction(self)
self.actionOpen_Standard_Pokemon.setObjectName("actionOpen_Standard_Pokemon")
self.actionOpen_Move = QtWidgets.QAction(self)
self.actionOpen_Move.setObjectName("actionOpen_Move")
self.actionOpen = QtWidgets.QAction(self)
self.actionOpen.setObjectName("actionOpen")
self.menuFile.addAction(self.actionOpen)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionNew_Pokemon)
self.menuFile.addAction(self.actionNew_Move)
self.menuFile.addAction(self.actionNew_Ability)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionOpen_Standard_Pokemon)
self.menuFile.addAction(self.actionOpen_Move)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionExit)
self.menubar.addAction(self.menuFile.menuAction())
self.retranslateUi()
QtCore.QMetaObject.connectSlotsByName(self)
def retranslateUi(self):
_translate = QtCore.QCoreApplication.translate
self.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.label.setText(_translate("MainWindow", "Proficient Skills"))
self.label_6.setText(_translate("MainWindow", "Senses"))
self.label_7.setText(_translate("MainWindow", "Abilities"))
self.label_8.setText(_translate("MainWindow", "Evolve into"))
self.label_9.setText(_translate("MainWindow", "at level"))
self.label_11.setText(_translate("MainWindow", "gain"))
self.label_10.setText(_translate("MainWindow", "points to add to its ability scores"))
self.label_17.setText(_translate("MainWindow", "Learnables TMs"))
self.label_14.setText(_translate("MainWindow", "Level 10 Moves"))
self.label_13.setText(_translate("MainWindow", "Level 6 Moves"))
self.label_12.setText(_translate("MainWindow", "Starting Moves"))
self.label_15.setText(_translate("MainWindow", "Level 14 Moves"))
self.label_16.setText(_translate("MainWindow", "Level 18 Moves"))
self.label_3.setText(_translate("MainWindow", "Vulnerabilities"))
self.label_4.setText(_translate("MainWindow", "Resistances"))
self.label_5.setText(_translate("MainWindow", "Immunities"))
self.label_18.setText(_translate("MainWindow", "Armor Class"))
self.label_19.setText(_translate("MainWindow", "Hit Points"))
self.label_20.setText(_translate("MainWindow", "Hit Dice"))
self.label_25.setText(_translate("MainWindow", "Saving Throws"))
self.label_24.setText(_translate("MainWindow", "Species"))
self.label_22.setText(_translate("MainWindow", "Types"))
self.label_23.setText(_translate("MainWindow", "SR"))
self.label_26.setText(_translate("MainWindow", "Level"))
self.label_21.setText(_translate("MainWindow", "Index #"))
self.menuFile.setTitle(_translate("MainWindow", "File"))
self.actionNew_Pokemon.setText(_translate("MainWindow", "New Pokemon"))
self.actionNew_Move.setText(_translate("MainWindow", "New Move"))
self.actionNew_Ability.setText(_translate("MainWindow", "New Ability"))
self.actionExit.setText(_translate("MainWindow", "Close"))
self.actionOpen_Standard_Pokemon.setText(_translate("MainWindow", "Open Pokemon"))
self.actionOpen_Move.setText(_translate("MainWindow", "Open Move"))
self.actionOpen.setText(_translate("MainWindow", "Open..."))
```
|
{
"source": "jeraldlyh/HoloRPG",
"score": 2
}
|
#### File: api/entity/services.py
```python
from collections import OrderedDict
from datetime import datetime
from django.db.models.expressions import F
from django.db.models.query import QuerySet
from ..user.services import deduct_player_currency
from ..user.selectors import get_user_by_username
from .selectors import get_user_entities_by_username, get_user_entity_by_entityname
from .models import Entity, UserEntity
def create_entity(serializer_data: OrderedDict) -> None:
Entity.objects.create(**serializer_data)
def update_or_create_user_entity(serializer_data: OrderedDict) -> QuerySet:
"""
Creates a new user entity object if it does not exist for the user
Returns updated list of user entities to be rendered on frontend
"""
data = list(serializer_data.items())
user = data[2][1]
entity = data[1][1]
quantity = data[0][1]
try:
existing = get_user_entity_by_entityname(user.username, entity.name)
existing.quantity = F("quantity") + quantity
existing.save()
except UserEntity.DoesNotExist:
UserEntity.objects.create(**serializer_data)
cost = quantity * entity.cost
deduct_player_currency(user, cost)
return get_user_entities_by_username(user.username)
def reset_income_collected(username: str) -> None:
entities = get_user_entities_by_username(username)
for entity in entities:
entity.last_collected = datetime.now()
entity.save()
def claim_income(username: str) -> None:
user = get_user_by_username(username)
user.currency = F("currency") + user.get_income_accumulated
user.save()
reset_income_collected(user.username)
def deduct_user_entity(user_entity: UserEntity, quantity: int) -> None:
if user_entity.quantity - quantity <= 0:
user_entity.delete()
else:
user_entity.quantity = F("quantity") - quantity
user_entity.save()
```
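The entity services above rely on Django `F()` expressions so that quantity and currency changes are applied in SQL. A minimal sketch of that pattern, assuming a hypothetical model instance with an integer `quantity` field:
```python
# Hedged sketch of the F() update pattern (hypothetical model instance).
from django.db.models import F

def add_quantity(user_entity, amount: int) -> None:
    # The addition runs in the database, so concurrent updates to the same
    # row are not lost to a read-modify-write race.
    user_entity.quantity = F("quantity") + amount
    user_entity.save(update_fields=["quantity"])
    # Resolve the F() expression back into a concrete value on the instance.
    user_entity.refresh_from_db(fields=["quantity"])
```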
#### File: api/room/services.py
```python
from collections import OrderedDict
from .models import Room
def create_room(serializer_data: OrderedDict) -> None:
data = list(serializer_data.items())
Room.objects.create(**serializer_data)
```
#### File: api/room/views.py
```python
from rest_framework import viewsets, status
from rest_framework.response import Response
from .models import Room, Dungeon
from .services import create_room
from .serializers import RoomSerializer, DungeonSerializer
from .selectors import get_all_dungeons, get_all_rooms
class RoomViewSet(viewsets.ViewSet):
serializer_class = RoomSerializer
def create(self, request):
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
create_room(serializer.validated_data)
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response({"Bad Request": serializer.error_messages}, status=status.HTTP_400_BAD_REQUEST)
def list(self, request):
serializer = self.serializer_class(get_all_rooms(), many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
class DungeonViewSet(viewsets.ViewSet):
def list(self, request):
serializer = DungeonSerializer(get_all_dungeons(), many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
```
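The two ViewSets above would typically be exposed through a DRF router in the app's `urls.py`; the prefixes and basenames in this sketch are assumptions, not taken from the repo:
```python
# Hedged sketch: routing the ViewSets above (prefixes/basenames are assumed).
from rest_framework.routers import DefaultRouter
from .views import RoomViewSet, DungeonViewSet

router = DefaultRouter()
router.register(r"rooms", RoomViewSet, basename="room")
router.register(r"dungeons", DungeonViewSet, basename="dungeon")
urlpatterns = router.urls
```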
#### File: HoloRPG/discord/create_sql.py
```python
import sqlite3
def create_profile_table(dbPath):
database = sqlite3.connect(dbPath)
cursor = database.cursor()
cursor.execute("""
CREATE TABLE IF NOT EXISTS profile(
user_id INTEGER,
date_registered DATETIME,
main_class TEXT,
sub_class TEXT,
level INTEGER,
experience INTEGER,
currency INTEGER,
reputation INTEGER,
max_health INTEGER,
health INTEGER,
attack INTEGER,
defence INTEGER
)
""")
def create_dungeon_table(dbPath):
database = sqlite3.connect(dbPath)
cursor = database.cursor()
cursor.execute("""
CREATE TABLE IF NOT EXISTS dungeon(
user_id INTEGER,
status INTEGER,
level INTEGER,
max_level INTEGER
)
""")
```
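A companion sketch, not part of the repo, showing how a row could be written into the `profile` table defined above with parameterized SQL; the connection context manager commits on success and rolls back on error:
```python
# Hedged sketch: inserting into the profile table created above.
import datetime
import sqlite3

def insert_profile(db_path: str, user_id: int) -> None:
    with sqlite3.connect(db_path) as database:
        database.execute(
            "INSERT INTO profile VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
            (user_id, datetime.datetime.now().isoformat(), "None", "None",
             1, 0, 0, 0, 100, 100, 10, 10),
        )
```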
|
{
"source": "jeraldlyh/horizon",
"score": 2
}
|
#### File: horizon/cogs/kits.py
```python
import pymongo
import discord
import random
import datetime
import time
import pytz
import os
from discord.ext import commands
from cogs.utils.misc import level_up
from cogs.utils.checks import is_donator, has_registered, is_economy_channel
from cogs.utils.embed import (passembed, errorembed)
class Kits(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.client = pymongo.MongoClient(os.getenv("MONGO_DB"))
self.db = self.client.get_database('Users')
self.records = self.db.horizon_database
self.classDict = {
'Soldier':['Sash Sergeant', 'Shock Trooper', 'Commando', 'Special Forces', 'Bullet Storm'],
'Constructor':['BASE', 'Heavy BASE', 'MEGABASE', 'Riot Control', 'Warden'],
'Ninja':['Assassin', 'Deadly Blade', 'Dim Mak', 'Harvester', 'Shuriken Master'],
'Outlander':['Pathfinder', 'Reclaimer', 'Recon Scout', 'T.E.D.D Shot', 'Trailblazer']
}
def kitEmbed(self, ctx, amount, exp):
embed = discord.Embed(color=discord.Color.from_hsv(random.random(), 1, 1))
embed.set_author(name=f'{ctx.command.name.capitalize()} Kit', icon_url=ctx.author.avatar_url)
embed.add_field(name='Rewards', value=f'Wood: **+{round(amount)}**<:Wood:585780105696116736>\n Experience: **+{round(exp)}**<:BattlePass:585742444092456960>')
embed.set_footer(text='Type `.cd` to check your kit cooldowns')
return embed
@commands.command(aliases=['dl'])
@has_registered()
@is_economy_channel()
async def daily(self, ctx):
amount = 150
exp = 200
try:
userData = self.records.find({'userID':str(ctx.author.id)})
for x in userData:
timeData = str(x['Kits']['Daily'])
woodData = float(x['Currencies']['Wood'])
expData = float(x['Profile']['Experience'])
jobData = str(x['RPG']['Job'])
classData = str(x['RPG']['Class'])
# Converts date from database to compare
availableTime = datetime.datetime.strptime(timeData, '%Y-%m-%d %H:%M:%S.%f%z')
# Current Time
currentTime = datetime.datetime.now(tz=pytz.timezone('Asia/Singapore'))
# Current Time in seconds
if currentTime > availableTime:
# Use this format to update database
formatTime = (datetime.datetime.now(tz=pytz.timezone('Asia/Singapore')) + datetime.timedelta(days=1)).strftime('%Y-%m-%d %H:%M:%S.%f%z')
jobAdvancementBonus = self.classDict[classData].index(jobData) + 1 if jobData != 'None' else 1
kitAmount = amount*jobAdvancementBonus if classData else amount
kitExp = exp*jobAdvancementBonus if classData else exp
woodData += kitAmount
expData += kitExp
dataUpdate = {
'Kits.Daily':formatTime,
'Currencies.Wood':woodData,
'Profile.Experience':expData
}
update = self.records.update_one({'userID':str(ctx.author.id)}, {'$set':dataUpdate})
await level_up(ctx)
await ctx.send(ctx.author.mention)
embed = self.kitEmbed(ctx, kitAmount, kitExp)
return await ctx.send(embed=embed)
else:
eembed = errorembed(description=f'{ctx.author.mention} You are currently on cooldown. Type ``.cd`` to check your cooldowns.')
return await ctx.send(embed=eembed)
except Exception as e:
print(e)
# Use this format to update database
formatTime = (datetime.datetime.now(tz=pytz.timezone('Asia/Singapore')) + datetime.timedelta(days=1)).strftime('%Y-%m-%d %H:%M:%S.%f%z')
jobAdvancementBonus = self.classDict[classData].index(jobData) + 1 if jobData != 'None' else 1
kitAmount = amount*jobAdvancementBonus if classData else amount
kitExp = exp*jobAdvancementBonus if classData else exp
woodData += kitAmount
expData += kitExp
dataUpdate = {
'Kits.Daily':formatTime,
'Currencies.Wood':woodData,
'Profile.Experience':expData
}
update = self.records.update_one({'userID':str(ctx.author.id)}, {'$set':dataUpdate})
await level_up(ctx)
await ctx.send(ctx.author.mention)
embed = self.kitEmbed(ctx, kitAmount, kitExp)
return await ctx.send(embed=embed)
@commands.command(aliases=['wk'])
@has_registered()
@is_economy_channel()
async def weekly(self, ctx):
amount = 2000
exp = 3500
try:
userData = self.records.find({'userID':str(ctx.author.id)})
for x in userData:
timeData = str(x['Kits']['Weekly'])
woodData = float(x['Currencies']['Wood'])
expData = float(x['Profile']['Experience'])
jobData = str(x['RPG']['Job'])
classData = str(x['RPG']['Class'])
# Converts date from database to compare
availableTime = datetime.datetime.strptime(timeData, '%Y-%m-%d %H:%M:%S.%f%z')
# Current Time
currentTime = datetime.datetime.now(tz=pytz.timezone('Asia/Singapore'))
# Current Time in seconds
if currentTime > availableTime:
# Use this format to update database
formatTime = (datetime.datetime.now(tz=pytz.timezone('Asia/Singapore')) + datetime.timedelta(days=7)).strftime('%Y-%m-%d %H:%M:%S.%f%z')
jobAdvancementBonus = self.classDict[classData].index(jobData) + 1 if jobData != 'None' else 1
kitAmount = amount*jobAdvancementBonus if classData else amount
kitExp = exp*jobAdvancementBonus if classData else exp
woodData += kitAmount
expData += kitExp
dataUpdate = {
'Kits.Weekly':formatTime,
'Currencies.Wood':woodData,
'Profile.Experience':expData
}
update = self.records.update_one({'userID':str(ctx.author.id)}, {'$set':dataUpdate})
await level_up(ctx)
await ctx.send(ctx.author.mention)
embed = self.kitEmbed(ctx, kitAmount, kitExp)
return await ctx.send(embed=embed)
else:
eembed = errorembed(description=f'{ctx.author.mention} You are currently on cooldown. Type ``.cd`` to check your cooldowns.')
return await ctx.send(embed=eembed)
except:
# Use this format to update database
formatTime = (datetime.datetime.now(tz=pytz.timezone('Asia/Singapore')) + datetime.timedelta(days=7)).strftime('%Y-%m-%d %H:%M:%S.%f%z')
jobAdvancementBonus = self.classDict[classData].index(jobData) + 1 if jobData != 'None' else 1
kitAmount = amount*jobAdvancementBonus if classData else amount
kitExp = exp*jobAdvancementBonus if classData else exp
woodData += kitAmount
expData += kitExp
dataUpdate = {
'Kits.Weekly':formatTime,
'Currencies.Wood':woodData,
'Profile.Experience':expData
}
update = self.records.update_one({'userID':str(ctx.author.id)}, {'$set':dataUpdate})
await level_up(ctx)
await ctx.send(ctx.author.mention)
embed = self.kitEmbed(ctx, kitAmount, kitExp)
return await ctx.send(embed=embed)
@commands.command(aliases=['sp'])
@has_registered()
@is_economy_channel()
@commands.has_any_role('Titan Donator', 'Mystic Donator', 'Immortal Donator')
async def supporter(self, ctx):
amount = 350
exp = 500
try:
userData = self.records.find({'userID':str(ctx.author.id)})
for x in userData:
timeData = str(x['Kits']['Supporter'])
woodData = float(x['Currencies']['Wood'])
expData = float(x['Profile']['Experience'])
jobData = str(x['RPG']['Job'])
classData = str(x['RPG']['Class'])
# Converts date from database to compare
availableTime = datetime.datetime.strptime(timeData, '%Y-%m-%d %H:%M:%S.%f%z')
# Current Time
currentTime = datetime.datetime.now(tz=pytz.timezone('Asia/Singapore'))
# Current Time in seconds
if currentTime > availableTime:
# Use this format to update database
formatTime = (datetime.datetime.now(tz=pytz.timezone('Asia/Singapore')) + datetime.timedelta(days=1)).strftime('%Y-%m-%d %H:%M:%S.%f%z')
jobAdvancementBonus = self.classDict[classData].index(jobData) + 1 if jobData != 'None' else 1
kitAmount = amount*jobAdvancementBonus if classData else amount
kitExp = exp*jobAdvancementBonus if classData else exp
woodData += kitAmount
expData += kitExp
dataUpdate = {
'Kits.Supporter':formatTime,
'Currencies.Wood':woodData,
'Profile.Experience':expData
}
update = self.records.update_one({'userID':str(ctx.author.id)}, {'$set':dataUpdate})
await level_up(ctx)
await ctx.send(ctx.author.mention)
embed = self.kitEmbed(ctx, kitAmount, kitExp)
return await ctx.send(embed=embed)
else:
eembed = errorembed(description=f'{ctx.author.mention} You are currently on cooldown. Type ``.cd`` to check your cooldowns.')
return await ctx.send(embed=eembed)
except:
# Use this format to update database
formatTime = (datetime.datetime.now(tz=pytz.timezone('Asia/Singapore')) + datetime.timedelta(days=1)).strftime('%Y-%m-%d %H:%M:%S.%f%z')
jobAdvancementBonus = self.classDict[classData].index(jobData) + 1 if jobData != 'None' else 1
kitAmount = amount*jobAdvancementBonus if classData else amount
kitExp = exp*jobAdvancementBonus if classData else exp
woodData += kitAmount
expData += kitExp
dataUpdate = {
'Kits.Supporter':formatTime,
'Currencies.Wood':woodData,
'Profile.Experience':expData
}
update = self.records.update_one({'userID':str(ctx.author.id)}, {'$set':dataUpdate})
await level_up(ctx)
await ctx.send(ctx.author.mention)
embed = self.kitEmbed(ctx, kitAmount, kitExp)
return await ctx.send(embed=embed)
@supporter.error
async def supporter_error(self, ctx, error):
if isinstance(error, commands.MissingAnyRole):
supporterRole = discord.utils.get(ctx.message.guild.roles, name='Titan Donator').mention
eembed = errorembed(description=f'{ctx.author.mention} Want to claim this **Supporter** kit? You have to minimally be a {supporterRole}')
return await ctx.send(embed=eembed)
@commands.command(aliases=['nt'])
@has_registered()
@is_economy_channel()
@commands.has_any_role('Nitro Booster')
async def nitro(self, ctx):
amount = 250
exp = 400
try:
userData = self.records.find({'userID':str(ctx.author.id)})
for x in userData:
timeData = str(x['Kits']['Nitro'])
woodData = float(x['Currencies']['Wood'])
expData = float(x['Profile']['Experience'])
jobData = str(x['RPG']['Job'])
classData = str(x['RPG']['Class'])
# Converts date from database to compare
availableTime = datetime.datetime.strptime(timeData, '%Y-%m-%d %H:%M:%S.%f%z')
# Current Time
currentTime = datetime.datetime.now(tz=pytz.timezone('Asia/Singapore'))
# Current Time in seconds
if currentTime > availableTime:
# Use this format to update database
formatTime = (datetime.datetime.now(tz=pytz.timezone('Asia/Singapore')) + datetime.timedelta(days=1)).strftime('%Y-%m-%d %H:%M:%S.%f%z')
jobAdvancementBonus = self.classDict[classData].index(jobData) + 1 if jobData != 'None' else 1
kitAmount = amount*jobAdvancementBonus if classData else amount
kitExp = exp*jobAdvancementBonus if classData else exp
woodData += kitAmount
expData += kitExp
dataUpdate = {
'Kits.Nitro':formatTime,
'Currencies.Wood':woodData,
'Profile.Experience':expData
}
update = self.records.update_one({'userID':str(ctx.author.id)}, {'$set':dataUpdate})
await level_up(ctx)
await ctx.send(ctx.author.mention)
embed = self.kitEmbed(ctx, kitAmount, kitExp)
return await ctx.send(embed=embed)
else:
eembed = errorembed(description=f'{ctx.author.mention} You are currently on cooldown. Type ``.cd`` to check your cooldowns.')
return await ctx.send(embed=eembed)
except:
# Use this format to update database
formatTime = (datetime.datetime.now(tz=pytz.timezone('Asia/Singapore')) + datetime.timedelta(days=1)).strftime('%Y-%m-%d %H:%M:%S.%f%z')
jobAdvancementBonus = self.classDict[classData].index(jobData) + 1 if jobData != 'None' else 1
kitAmount = amount*jobAdvancementBonus if classData else amount
kitExp = exp*jobAdvancementBonus if classData else exp
woodData += kitAmount
expData += kitExp
dataUpdate = {
'Kits.Nitro':formatTime,
'Currencies.Wood':woodData,
'Profile.Experience':expData
}
update = self.records.update_one({'userID':str(ctx.author.id)}, {'$set':dataUpdate})
await level_up(ctx)
await ctx.send(ctx.author.mention)
embed = self.kitEmbed(ctx, kitAmount, kitExp)
return await ctx.send(embed=embed)
@nitro.error
async def nitro_error(self, ctx, error):
if isinstance(error, commands.MissingAnyRole):
nitroRole = discord.utils.get(ctx.message.guild.roles, name='Nitro Booster').mention
eembed = errorembed(description=f'{ctx.author.mention} Want to claim this **Nitro** kit? You have to be a {nitroRole}')
return await ctx.send(embed=eembed)
@commands.command(aliases=['v'])
@has_registered()
@is_economy_channel()
async def vote(self, ctx, user:discord.User):
if user == ctx.author:
eembed = errorembed(description=f"{ctx.author.mention} You can't upvote yourself. Good try though! <:PepeHugs:541252355518365718>")
return await ctx.send(embed=eembed)
# Checks if User is inside Database
try:
userList = [x['userID'] for x in self.records.find({})]
if str(ctx.author.id) not in userList:
eembed = errorembed(description=f'{ctx.author.mention} You are currently not registered yet. Kindly type ``.register`` to be registered.')
return await ctx.send(embed=eembed)
elif str(user.id) not in userList:
eembed = errorembed(description=f'{ctx.author.mention} {user.mention} has not registered yet.')
return await ctx.send(embed=eembed)
except:
pass
try:
userData = self.records.find({'userID':str(ctx.author.id)})
for x in userData:
voteData = str(x['Kits']['Votes'])
# Converts date from database to compare
availableTime = datetime.datetime.strptime(voteData, '%Y-%m-%d %H:%M:%S.%f%z')
# Current Time
currentTime = datetime.datetime.now(tz=pytz.timezone('Asia/Singapore'))
# Current Time in seconds
if currentTime > availableTime:
userData = self.records.find({'userID':str(user.id)})
for x in userData:
repData = int(x['Profile']['Rep'])
repData += 1
dataUpdate = {
'Profile.Rep':repData
}
update = self.records.update_one({'userID':str(user.id)}, {'$set':dataUpdate})
# Use this format to update database
formatTime = (datetime.datetime.now(tz=pytz.timezone('Asia/Singapore')) + datetime.timedelta(days=1)).strftime('%Y-%m-%d %H:%M:%S.%f%z')
dataUpdate = {
'Kits.Votes':formatTime
}
update = self.records.update_one({'userID':str(ctx.author.id)}, {'$set':dataUpdate})
pembed = passembed(description=f'{ctx.author.mention} You have successfully added reputation for {user.mention}.')
return await ctx.send(embed=pembed)
else:
eembed = errorembed(description=f'{ctx.author.mention} You are currently on cooldown. Type ``.cd`` to check your cooldowns.')
return await ctx.send(embed=eembed)
except Exception:
userData = self.records.find({'userID':str(user.id)})
for x in userData:
repData = int(x['Profile']['Rep'])
repData += 1
dataUpdate = {
'Profile.Rep':repData
}
update = self.records.update_one({'userID':str(user.id)}, {'$set':dataUpdate})
# Use this format to update database
formatTime = (datetime.datetime.now(tz=pytz.timezone('Asia/Singapore')) + datetime.timedelta(days=1)).strftime('%Y-%m-%d %H:%M:%S.%f%z')
dataUpdate = {
'Kits.Votes':formatTime
}
update = self.records.update_one({'userID':str(ctx.author.id)}, {'$set':dataUpdate})
pembed = passembed(description=f'{ctx.author.mention} You have successfully added reputation for {user.mention}.')
return await ctx.send(embed=pembed)
@vote.error
async def vote_error(self, ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
eembed = errorembed(description='Kindly indicate the User that you wish to upvote.')
return await ctx.send(embed=eembed)
@commands.command(aliases=['cd'])
@has_registered()
@is_economy_channel()
async def cooldown(self, ctx):
userData = self.records.find({'userID':str(ctx.author.id)})
for x in userData:
dailyData = str(x['Kits']['Daily'])
weeklyData = str(x['Kits']['Weekly'])
supporterData = str(x['Kits']['Supporter'])
nitroData = str(x['Kits']['Nitro'])
voteData = str(x['Kits']['Votes'])
# Daily Cooldown
try:
# Usable Time
timeFormat = datetime.datetime.strptime(dailyData, '%Y-%m-%d %H:%M:%S.%f%z')
timeInSeconds = time.mktime(timeFormat.timetuple())
# Current Time
timeNow = datetime.datetime.now(tz=pytz.timezone('Asia/Singapore'))
timeNowInSeconds = time.mktime(timeNow.timetuple())
# Before rounding off
cooldownHours = ((timeInSeconds - timeNowInSeconds)/60)/60
coolDownMins = float('.' + str(round(((timeInSeconds - timeNowInSeconds)/60)/60, 3)).split('.')[1])*60
if int(cooldownHours) < 0:
dailyCooldown = '• You have not claimed your **Daily** kit yet.'
else:
# After rounding off
coolDownMins = round(coolDownMins)
coolDownHours = str(cooldownHours).split('.')[0]
dailyCooldown = '• ' + str(coolDownHours) + 'H ' + str(coolDownMins) + 'M'
except Exception:
dailyCooldown = '• You have not claimed your **Daily** kit yet.'
# Weekly Cooldown
try:
# Usable Time
timeFormat = datetime.datetime.strptime(weeklyData, '%Y-%m-%d %H:%M:%S.%f%z')
timeInSeconds = time.mktime(timeFormat.timetuple())
# Current Time
timeNow = datetime.datetime.now(tz=pytz.timezone('Asia/Singapore'))
timeNowInSeconds = time.mktime(timeNow.timetuple())
# Before rounding off
coolDownHours = float('.' + str(round((((timeInSeconds - timeNowInSeconds)/60)/60)/24, 4)).split('.')[1])*24
coolDownDays = (((timeInSeconds - timeNowInSeconds)/60)/60)/24
if int(coolDownDays) < 0:
weeklyCooldown = '• You have not claimed your **Weekly** kit yet.'
else:
# After rounding off
coolDownHours = round(coolDownHours)
coolDownDays = str(coolDownDays).split('.')[0]
weeklyCooldown = '• ' + str(coolDownDays) + 'D ' + str(coolDownHours) + 'H'
except Exception:
weeklyCooldown = '• You have not claimed your **Weekly** kit yet.'
# Supporter Cooldown
try:
# Usable Time
timeFormat = datetime.datetime.strptime(supporterData, '%Y-%m-%d %H:%M:%S.%f%z')
timeInSeconds = time.mktime(timeFormat.timetuple())
# Current Time
timeNow = datetime.datetime.now(tz=pytz.timezone('Asia/Singapore'))
timeNowInSeconds = time.mktime(timeNow.timetuple())
# Before rounding off
coolDownMins = float('.' + str(round(((timeInSeconds - timeNowInSeconds)/60)/60, 3)).split('.')[1])*60
cooldownHours = ((timeInSeconds - timeNowInSeconds)/60)/60
if int(cooldownHours) < 0:
supporterCooldown = '• You have not claimed your **Supporter** kit yet.'
else:
# After rounding off
coolDownMins = round(coolDownMins)
coolDownHours = str(cooldownHours).split('.')[0]
supporterCooldown = '• ' + str(coolDownHours) + 'H ' + str(coolDownMins) + 'M'
except Exception:
supporterCooldown = '• You have not claimed your **Supporter** kit yet.'
# Nitro Cooldown
try:
# Usable Time
timeFormat = datetime.datetime.strptime(nitroData, '%Y-%m-%d %H:%M:%S.%f%z')
timeInSeconds = time.mktime(timeFormat.timetuple())
# Current Time
timeNow = datetime.datetime.now(tz=pytz.timezone('Asia/Singapore'))
timeNowInSeconds = time.mktime(timeNow.timetuple())
# Before rounding off
coolDownMins = float('.' + str(round(((timeInSeconds - timeNowInSeconds)/60)/60, 3)).split('.')[1])*60
cooldownHours = ((timeInSeconds - timeNowInSeconds)/60)/60
if int(cooldownHours) < 0:
nitroCooldown = '• You have not claimed your **Nitro** kit yet.'
else:
# After rounding off
coolDownMins = round(coolDownMins)
coolDownHours = str(cooldownHours).split('.')[0]
nitroCooldown = '• ' + str(coolDownHours) + 'H ' + str(coolDownMins) + 'M'
except Exception:
nitroCooldown = '• You have not claimed your **Nitro** kit yet.'
# Vote Cooldown
try:
# Usable Time
timeFormat = datetime.datetime.strptime(voteData, '%Y-%m-%d %H:%M:%S.%f%z')
timeInSeconds = time.mktime(timeFormat.timetuple())
# Current Time
timeNow = datetime.datetime.now(tz=pytz.timezone('Asia/Singapore'))
timeNowInSeconds = time.mktime(timeNow.timetuple())
# Before rounding off
coolDownMins = float('.' + str(round(((timeInSeconds - timeNowInSeconds)/60)/60, 3)).split('.')[1])*60
cooldownHours = ((timeInSeconds - timeNowInSeconds)/60)/60
if int(cooldownHours) < 0:
voteCooldown = '• You have not **voted** anyone today yet.'
else:
# After rounding off
coolDownMins = round(coolDownMins)
coolDownHours = str(cooldownHours).split('.')[0]
voteCooldown = '• ' + str(coolDownHours) + 'H ' + str(coolDownMins) + 'M'
except Exception:
voteCooldown = '• You have not **voted** anyone today yet.'
# Embed Cooldown Message
embed = discord.Embed(title='Kit Cooldowns', color=discord.Color.from_hsv(random.random(), 1, 1), timestamp=datetime.datetime.now(tz=pytz.timezone('Asia/Singapore')))
embed.set_author(name=ctx.author.name, icon_url=ctx.author.avatar_url)
embed.add_field(name='⏰ Daily', value=dailyCooldown)
embed.add_field(name='📅 Weekly', value=weeklyCooldown)
embed.add_field(name='💎 Supporter', value=supporterCooldown)
embed.add_field(name='⚡ Nitro', value=nitroCooldown)
embed.add_field(name='🌟 Votes', value=voteCooldown)
embed.set_footer(text=ctx.guild.name, icon_url=ctx.guild.icon_url)
await ctx.send(embed=embed)
# Adding the cog to main script
def setup(bot):
bot.add_cog(Kits(bot))
```
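Each kit command above recomputes the remaining cooldown by round-tripping through `time.mktime` and string splitting. A hedged alternative sketch that does the same arithmetic directly on timezone-aware datetimes (the stored timestamp format is the one used in the cog):
```python
# Hedged sketch: remaining cooldown computed with timezone-aware datetimes.
import datetime
import pytz

def remaining_cooldown(stored: str, tz: str = 'Asia/Singapore') -> str:
    available = datetime.datetime.strptime(stored, '%Y-%m-%d %H:%M:%S.%f%z')
    now = datetime.datetime.now(tz=pytz.timezone(tz))
    delta = available - now
    if delta.total_seconds() <= 0:
        return 'ready'
    hours, remainder = divmod(int(delta.total_seconds()), 3600)
    return f'{hours}H {remainder // 60}M'
```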
|
{
"source": "jeraldlyh/telegram-webscraper",
"score": 2
}
|
#### File: jeraldlyh/telegram-webscraper/telegram.py
```python
import json
import time
import telebot
import threading
import concurrent.futures
import datetime
import pytz
import os
from dotenv import load_dotenv
from telebot import types, apihelper
from modules.scraper import WebScraper
from modules.reuters import Reuters
from modules.seekingAlpha import SeekingAlpha
from modules.utils.misc import write_json, check_strength, beautify_emoji, add_keyword, remove_keyword, display_keywords
BOT_TOKEN = os.environ.get("BOT_TOKEN")
CHAT_ID = os.environ.get("CHAT_ID")
bot = telebot.TeleBot(token=BOT_TOKEN)
scraper = WebScraper()
seekingalpha = SeekingAlpha()
reuters = Reuters()
approvedList = ["ADMIN_IDS HERE"]
seleniumTimeout = False
@bot.message_handler(commands=['stock'], func=lambda message: message.chat.type == 'supergroup' and message.chat.id == int(CHAT_ID))
def send_random_stock(message):
randomStock = scraper.random_stock()
bot.send_message(message.chat.id, randomStock)
@bot.message_handler(commands =['analyse'], func=lambda message: message.chat.type == 'supergroup' and message.chat.id == int(CHAT_ID))
def analyse_stock(message):
global seleniumTimeout
if seleniumTimeout is True:
return bot.reply_to(message, '*Error* - Command is on cooldown', parse_mode='Markdown') # Checks if command is on cooldown
messageLength = len(message.text.split())
if messageLength != 2: # Ensure that correct parameters are entered
return bot.reply_to(message, 'This command only accepts _*one*_ parameter', parse_mode='MarkdownV2')
else:
seleniumTimeout = True
timeFrameList = ['1Min', '5Mins', '15Mins', '1Hour', '4Hours', '1Day', '1Week', '1Month']
markup = types.InlineKeyboardMarkup()
# Populate timeframe buttons
for x in range(0, len(timeFrameList), 2):
markup.add(types.InlineKeyboardButton(text=timeFrameList[x], callback_data=f'Timeframe - {timeFrameList[x]}'), types.InlineKeyboardButton(text=timeFrameList[x + 1], callback_data=f'Timeframe - {timeFrameList[x + 1]}'))
# Creates a cancel button
markup.add(types.InlineKeyboardButton(text='❌', callback_data='Delete'))
text = f'''
*Stock Symbol* - {message.text.split()[-1].upper()}
Select timeframe for analysis:
'''
bot.send_message(message.chat.id, text, parse_mode='Markdown', reply_markup=markup)
seleniumTimeout = False
@bot.message_handler(commands=['add'], func=lambda message: message.chat.type == 'supergroup' and message.from_user.id in approvedList and message.chat.id == int(CHAT_ID))
def add(message):
if message.from_user.id not in [x.user.id for x in bot.get_chat_administrators(CHAT_ID)]:
return bot.reply_to(message, 'You do not have access to this command', parse_mode='Markdown')
# messageLength = len(message.text.split())
# if messageLength != 2: # Ensure that correct parameters are entered
# return bot.reply_to(message, 'This command only accepts _*one*_ parameter', parse_mode='MarkdownV2')
keyword = message.text[5:]
add_keyword(keyword)
text = f'✅ *Keyword* : _{keyword}_ has been added from watchlist'
bot.reply_to(message, text, parse_mode='Markdown')
@bot.message_handler(commands=['remove'], func=lambda message: message.chat.type == 'supergroup' and message.from_user.id in approvedList and message.chat.id == int(CHAT_ID))
def remove(message):
if message.from_user.id not in [x.user.id for x in bot.get_chat_administrators(CHAT_ID)]:
return bot.reply_to(message, 'You do not have access to this command', parse_mode='Markdown')
# messageLength = len(message.text.split())
# if messageLength > 2: # Ensure that correct parameters are entered
# return bot.reply_to(message, 'This command only accepts _*one*_ parameter', parse_mode='MarkdownV2')
original = message.text[8::]
keyword = message.text.split()[1:]
updatedKeyword = '(.?)'.join(keyword)
try: # Error catch for keywords that does not exist
remove_keyword(updatedKeyword)
text = f'*Keyword* : _{original}_ has been removed from watchlist'
bot.send_message(message.chat.id, text, parse_mode='Markdown')
except:
text = f'*Keyword* : _{original}_ does not exist in watchlist'
bot.reply_to(message, text, parse_mode='Markdown')
@bot.message_handler(commands=['display'], func=lambda message: message.chat.type == 'supergroup' and message.from_user.id in approvedList and message.chat.id == int(CHAT_ID))
def display(message):
if message.from_user.id not in [x.user.id for x in bot.get_chat_administrators(CHAT_ID)]:
return bot.reply_to(message, 'You do not have access to this command', parse_mode='Markdown')
text = f'*Current Watchlist*: {display_keywords()}'
bot.send_message(message.chat.id, text, parse_mode='Markdown')
@bot.message_handler(commands =['news'], func=lambda message: message.chat.type == 'supergroup' and message.chat.id == int(CHAT_ID))
def news(message):
global seleniumTimeout
if seleniumTimeout is True:
return bot.reply_to(message, '*Error* - Command is on cooldown', parse_mode='Markdown') # Checks if command is on cooldown
messageLength = len(message.text.split(' '))
if messageLength != 3: # Ensure that correct parameters are entered
return bot.reply_to(message, 'Command usage: _*/news <market> <stockSymbol>*_', parse_mode='MarkdownV2')
elif message.text.split(' ')[1].lower() not in ['sg', 'us']: # Check if correct market is input
return bot.reply_to(message, 'Markets available: _*SG/US*_', parse_mode='MarkdownV2')
botMessage = bot.send_message(message.chat.id, text='_Loading_', parse_mode='Markdown')
symbol = message.text.split()[2]
data = {}
if message.text.split(' ')[1].lower() == 'us':
seleniumTimeout = True
data = scraper.retrieve_us_news(symbol)
seleniumTimeout = False
elif message.text.split(' ')[1].lower() == 'sg':
data = scraper.retrieve_sg_news(symbol)
if (data == 'Error'):
text = f'*Error* - {symbol} does not exist.'
return bot.send_message(message.chat.id, text, parse_mode='Markdown')
else:
text = ''
emoji = ['1️⃣', '2️⃣', '3️⃣', '4️⃣', '5️⃣']
index = 0
for key in data.keys():
for x in range(0, len(data[key]), 3):
text += f"""
{emoji[index]} {data[key][x]}
🔗: {data[key][x + 1]}
📅: _{key} {data[key][x + 2]}_
"""
index += 1
try:
bot.edit_message_text(text=text, chat_id=message.chat.id, message_id=botMessage.message_id, parse_mode='Markdown', disable_web_page_preview=True)
except apihelper.ApiTelegramException: # In case link contains double _ characters
bot.edit_message_text(text=text, chat_id=message.chat.id, message_id=botMessage.message_id, disable_web_page_preview=True)
except:
bot.edit_message_text(text='*Error* - Not able to retrieve data. Try again later.', chat_id=message.chat.id, message_id=message.message_id, parse_mode='Markdown')
@bot.callback_query_handler(func=lambda query: True)
def handle_query(query):
global seleniumTimeout
if (query.data.startswith('Timeframe')):
timeFrame = query.data
valueFromCallBack = timeFrame.split()[2]
bot.answer_callback_query(callback_query_id = query.id, show_alert = True)
symbol = query.message.text.split()[-5]
bot.edit_message_text(text='_Loading_', chat_id=query.message.chat.id, message_id=query.message.message_id, parse_mode='Markdown')
seleniumTimeout = True
data = scraper.analyse_stock(symbol, valueFromCallBack)
if (data == 'Error'): # Error retrieving data
return bot.edit_message_text(text='*Error* - Not able to retrieve data. Try again later.', chat_id=query.message.chat.id, message_id=query.message.message_id, parse_mode='Markdown')
text = f"""
📋 *Stock Name*: {data[0]}
💵 *Current Price*: {data[1]}
📊 *Price Change*: {data[2]}
📈 *P/E Ratio*: {data[3]}
⚖️ *Analysis* (Timeframe - {valueFromCallBack})
> *Relative Strength Index*: {beautify_emoji(data[5])} ({data[4]}%)
> *Moving Average*: {beautify_emoji(data[7])} ({check_strength(data[6])})
"""
try:
bot.edit_message_text(text=text, chat_id=query.message.chat.id, message_id=query.message.message_id, parse_mode='Markdown')
except:
bot.send_message(query.message.chat.id, text=text, parse_mode='Markdown')
seleniumTimeout = False
elif (query.data.startswith('Delete')):
bot.delete_message(chat_id=query.message.chat.id, message_id=query.message.message_id)
# Ensure all previous requests are erased
def clear_updates():
latestUpdates = bot.get_updates()
if latestUpdates:
latestUpdateID = latestUpdates[-1].update_id + 1
bot.get_updates(offset=latestUpdateID)
print('Erased all pending requests')
else:
print('No updates')
# Thread 1 function
def bot_polling():
try:
print('---WebScraper is Online---')
clear_updates()
bot.polling(none_stop=True)
except Exception as e:
write_json(e)
# Thread 2 function
def send_reuters_scraped_links():
while True:
reutersLinks = reuters.analyse_url()
if len(reuters.sentLinks) > 30: # Resets the memory log of links
reuters.sentLinks.clear()
if (reutersLinks):
for link in reutersLinks:
text = f'*Keyword Found* - {reutersLinks[link]}\n {link}'
bot.send_message(chat_id=CHAT_ID, text=text, disable_web_page_preview=True)
print('---Analysis of Links Completed---')
else:
print('---No Links Found---')
minute = 60
time.sleep(10*minute)
# Thread 3 function
def send_seekingalpha_scraped_links():
while True:
seekingalphaLinks = seekingalpha.analyse_url()
if len(seekingalpha.sentLinks) > 30: # Resets the memory log of links
seekingalpha.sentLinks.clear()
if (seekingalphaLinks):
for link in seekingalphaLinks:
text = f'*Keyword Found* - {seekingalphaLinks[link]}\n {link}'
bot.send_message(chat_id=CHAT_ID, text=text, disable_web_page_preview=True)
print('---Analysis of Links Completed---')
else:
print('---No Links Found---')
minute = 60
time.sleep(10*minute)
telegram = threading.Thread(target=bot_polling)
reutersNews = threading.Thread(target=send_reuters_scraped_links)
seekingalphaNews = threading.Thread(target=send_seekingalpha_scraped_links)
if __name__ == "__main__":
try:
dotenv_path = os.path.join(os.path.dirname(__file__), '.env')
load_dotenv(dotenv_path)
telegram.start()
reutersNews.start()
seekingalphaNews.start()
except Exception as e:
write_json(repr(e))
```
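`concurrent.futures` is imported above but the three workers are started with bare `threading.Thread` objects. A hedged alternative sketch that drives the same three entry points through a thread pool:
```python
# Hedged sketch: same three workers, submitted to a ThreadPoolExecutor.
from concurrent.futures import ThreadPoolExecutor

def run_workers() -> None:
    # The with-block only exits once every submitted worker returns.
    with ThreadPoolExecutor(max_workers=3) as pool:
        pool.submit(bot_polling)
        pool.submit(send_reuters_scraped_links)
        pool.submit(send_seekingalpha_scraped_links)
```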
|
{
"source": "Jeralph-Red/OOP-58001",
"score": 4
}
|
#### File: Jeralph-Red/OOP-58001/Lab Activity 6.py
```python
from tkinter import *
class SemGrade:
def __init__(self, win):
self.lbl1=Label(win, text='Prelim:')
self.lbl2=Label(win, text='Midterm:')
self.lbl3=Label(win, text='Final:')
self.lbl4=Label(win, text='Semestral Grade:')
self.t1=Entry(bd=3)
self.t2=Entry(bd=3)
self.t3=Entry(bd=3)
self.t4=Entry(bd=3)
self.btn1 = Button(win, text='Add')
self.b1 = Button(win, text='Compute for Semestral Grade', command=self.compute)
self.b1.place(x=100, y=150)
self.lbl1.place(x=70, y=50)
self.t1.place(x=180, y=50)
self.lbl2.place(x=70, y=80)
self.t2.place(x=180, y=80)
self.lbl3.place(x=70, y=110)
self.t3.place(x=180, y=110)
self.lbl4.place(x=70,y=190)
self.t4.place(x=180,y=190)
def compute(self):
self.t4.delete(0, 'end')
num1=int(self.t1.get())
num2=int(self.t2.get())
num3=int(self.t3.get())
result=(num1+num2+num3)/3
self.t4.insert(END, str(result))
window=Tk()
mywin=SemGrade(window)
window.title('Semestral Grade Calculator')
window.geometry("400x300+10+10")
window.mainloop()
```
#### File: Jeralph-Red/OOP-58001/Lab Activity 7.py
```python
from tkinter import *
def Window(source, side):
calc = Frame(source, borderwidth=4, bd=4, bg="powder blue")
calc.pack(side=side, expand=YES, fill=BOTH)
return calc
def button(source, side, text, command=None):
calc = Button(source, text=text, command=command)
calc.pack(side=side, expand=YES, fill=BOTH)
return calc
class simplecalculator(Frame):
def __init__(self):
Frame.__init__(self)
self.option_add('*Font', 'arial 20 bold')
self.pack(expand=YES, fill=BOTH)
self.master.title('Calculator')
display = StringVar()
Entry(self, relief=RIDGE, textvariable=display,
justify='right'
, bd=30, bg="powder blue").pack(side=TOP,
expand=YES, fill=BOTH)
for clearButton in (["C"]):
erase = Window(self, TOP)
for clear in clearButton:
button(erase, LEFT, clear, lambda
calc=display, q=clear: calc.set(''))
for NumberButton in ("789/", "456*", "123-", "0.+"):
FunctionNum = Window(self, TOP)
for integer in NumberButton:
button(FunctionNum, LEFT, integer, lambda
calc=display, q=integer: calc
.set(calc.get() + q))
EqualButton = Window(self, TOP)
for equals in "=":
if equals == '=':
equalbutton = button(EqualButton, LEFT, equals)
equalbutton.bind('<ButtonRelease-1>', lambda e, s=self,
calc=display: s.calc(calc), '+')
else:
equalbutton = button(EqualButton, LEFT, equals,
lambda calc=display, s=' %s ' % equals: calc.set
(calc.get() + s))
def calc(self, display):
try:
display.set(eval(display.get()))
except:
display.set("ERROR")
if __name__ == '__main__':
simplecalculator().mainloop()
```
|
{
"source": "JerameyATyler/sigR",
"score": 2
}
|
#### File: sigR/sigr/Anechoic.py
```python
from torch.utils.data import Dataset
class Anechoic(Dataset):
def __init__(self, root, ttv, download=False, transform=None, target_transform=None, columns=None,
output_path=None):
from pathlib import Path
import os
ttvs = ['train', 'test', 'validate']
assert ttv in ttvs, f'Acceptable values for ttv are {", ".join(ttvs)}'
self.ttv = ttv
self.transform = transform
self.target_transform = target_transform
self.root = Path(root).__str__()
self.data_path = (Path(self.root) / self.ttv).__str__()
self.label_path = f'{self.data_path}_recipe'
self.output_path = output_path
if download:
self.download()
else:
assert os.path.isdir(self.root), f'Root directory {self.root} must exist if download=False'
assert os.path.isdir(self.data_path), f'Data directory {self.data_path} must exist if download=False'
assert os.path.isdir(self.label_path), f'Label directory {self.label_path} must exist if download=False'
self.labels = self.set_labels(columns)
def download(self):
from pathlib import Path
import requests
import zipfile
import io
import shutil
import os
if not os.path.isdir(self.root):
os.mkdir(self.root)
_download_url = 'https://reflections.speakeasy.services'
print(f'Downloading dataset at {_download_url}/{self.ttv}.zip')
r = requests.get(f'{_download_url}/{self.ttv}.zip', stream=True)
z = zipfile.ZipFile(io.BytesIO(r.content))
print(f'Finished downloading')
if not os.path.isdir(self.data_path):
os.mkdir(self.data_path)
if not os.path.isdir(self.label_path):
os.mkdir(self.label_path)
print('Extracting dataset')
for f in z.namelist():
filename = Path(f).name
if not filename:
continue
source = z.open(f)
if filename.endswith('.zip'):
target = open((Path(self.root) / filename).__str__(), 'wb')
else:
target = open((Path(self.data_path) / filename).__str__(), 'wb')
print(f'\tExtracting file: {filename}')
with source, target:
shutil.copyfileobj(source, target)
assert os.path.isfile(f'{self.label_path}.zip'), f'{self.label_path}.zip missing'
z = zipfile.ZipFile(f'{self.label_path}.zip')
z.extractall(self.label_path)
def set_labels(self, columns):
from data_loader import read_recipe
if columns is not None:
            if not isinstance(columns, list):
columns = [columns]
if 'filepath' not in columns:
columns.append('filepath')
return read_recipe(self.label_path)[columns]
return read_recipe(self.label_path)
def __len__(self):
return self.labels.shape[0]
def __getitem__(self, item):
from pydub import AudioSegment
from pathlib import Path
from utils import audiosegment_to_array
labels = self.labels.iloc[item]
audio = AudioSegment.from_wav((Path(self.data_path) / f"{labels['filepath']}.wav").__str__())
if self.transform:
audio = self.transform(audio)
else:
audio = audiosegment_to_array(audio)
if self.target_transform:
labels = self.target_transform(labels)
return audio, labels
def play_sample(self, item):
from pathlib import Path
from pydub import AudioSegment
from utils import play_audio
from IPython.display import display
import os
filepath = f'{(Path(self.data_path) / self.labels.iloc[item]["filepath"]).__str__()}.wav'
assert os.path.isfile(filepath), f'{filepath} does not exist'
audio = AudioSegment.from_wav(filepath)
return display(play_audio(audio))
def get_ttv(root, download=False, transform=None, target_transform=None, columns=None, batch_size=60):
from torch.utils.data import DataLoader
train = DataLoader(
Anechoic(root, 'train', download=download, transform=transform, target_transform=target_transform,
columns=columns), batch_size=batch_size, shuffle=True)
test = DataLoader(Anechoic(root, 'test', download=download, transform=transform, target_transform=target_transform,
columns=columns), batch_size=batch_size, shuffle=False)
validate = DataLoader(
Anechoic(root, 'validate', download=download, transform=transform, target_transform=target_transform,
columns=columns), batch_size=batch_size, shuffle=True)
return train, test, validate
```
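A minimal usage sketch for the Anechoic dataset and the get_ttv helper above; the import path, root directory and batch size are assumptions, and download=True pulls the archives from the URL hard-coded in download().
```python
# Usage sketch only: the import path, root directory and batch size below are assumptions.
from Anechoic import Anechoic, get_ttv
# A single split; transform/target_transform keep their defaults.
train_set = Anechoic("data", "train", download=True)
audio, labels = train_set[0]  # audio array plus the matching recipe row
print(len(train_set), labels["filepath"])
# Or build the train/test/validate DataLoaders in one call.
train_loader, test_loader, validate_loader = get_ttv("data", download=True, batch_size=16)
```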
#### File: sigR/sigr/DataGenerator.py
```python
class DataGenerator:
def __init__(self, sample_count, output_directory, rng=None, fs=24000, verbose=False):
import os
import hashlib
from pathlib import Path
if rng is None:
from RNG import RNG
rng = RNG()
self.rng = rng
self.sample_count = sample_count
self.recipe = None
self.chunk_size = 50
self.fs = fs
self.verbose = verbose
path = Path(output_directory)
if not os.path.isdir(path):
os.mkdir(path)
path = path / 'reflections'
if not os.path.isdir(path):
os.mkdir(path)
self.output_directory = path.__str__()
s = f'{rng.seed}{rng.duration}{rng.delay_limits}{rng.time_limits}{rng.reflection_limits}{rng.zenith_limits}' \
f'{rng.azimuth_limits}{sample_count}{verbose}'
self.hash = str(int(hashlib.sha256(s.encode('utf-8')).hexdigest(), 16) % 10 ** 8)
def generate(self):
import pandas as pd
import dask.dataframe as dd
import dask
from pathlib import Path
import numpy as np
import os
from data_loader import read_recipe
print('Data generator started')
dfi = self.generate_ingredients_list()
filepath = Path(self.output_directory) / f'recipe_{self.hash}'
if not os.path.isdir(filepath):
sample_count = self.sample_count
chunk_size = self.chunk_size
batches = int(np.ceil(sample_count / chunk_size))
results = []
print('Generating recipe batches')
for i in range(batches):
if (i + 1) * chunk_size > sample_count:
chunk = sample_count % chunk_size
else:
chunk = chunk_size
result = dask.delayed(self.generate_recipe)(chunk)
results.append(result)
df = pd.concat(dask.compute(*results))
ddf = dd.from_pandas(df, chunksize=chunk_size)
print('Writing recipes')
ddf.to_parquet(filepath, engine='pyarrow')
print('Generating samples')
s = ddf.map_partitions(self.generate_samples, meta=ddf)
s.compute()
df = ddf.compute()
else:
df = read_recipe((Path(self.output_directory) / f'recipe_{self.hash}').__str__())
return dfi, df
def generate_ingredients_list(self):
import pandas as pd
from pathlib import Path
import os
print('Generating ingredients list')
filepath = Path(self.output_directory) / f'ingredients_{self.hash}.json'
rng = self.rng
df = pd.DataFrame(dict(
seed=rng.seed,
duration=rng.duration,
delay_limits=[rng.delay_limits],
time_limits=[rng.time_limits],
reflections_limits=[rng.reflection_limits],
zenith_limits=[rng.zenith_limits],
azimuth_limits=[rng.azimuth_limits],
sample_count=self.sample_count
))
if not os.path.isfile(filepath):
df.to_json(filepath, orient='records', lines=True)
return df
def generate_recipe(self, count):
import pandas as pd
import dask
print('Generating recipes')
lazy_results = []
for i in range(count):
lazy_result = dask.delayed(self.generate_sample_recipe)()
lazy_results.append(lazy_result)
df = pd.DataFrame(dask.compute(*lazy_results))
return df
def generate_sample_recipe(self):
from data_loader import list_anechoic_lengths
import hashlib
lengths = list_anechoic_lengths()
rng = self.rng
composer = rng.get_composer()
part_count = rng.get_part_count(composer=composer)
parts = rng.get_parts(composer=composer, part_count=part_count)
offset = rng.get_offset(lengths[composer])
duration = rng.duration
zenith = rng.get_zenith()
azimuth = rng.get_azimuth(zenith=zenith)
reverb_time = rng.get_time()
reverb_delay = rng.get_delay()
reverb_amplitude = rng.rng.uniform(0, 0.05)
reflection_count = rng.get_reflection_count()
reflection_zenith, reflection_azimuth, reflection_amplitude, reflection_delay = self.get_reflections(
reflection_count)
s = f'{part_count:02d}{"".join(parts)}{offset}{zenith}{azimuth}{reflection_count}' \
f'{"".join(str(x) for x in reflection_zenith)}{"".join(str(x) for x in reflection_azimuth)}' \
f'{"".join(str(x) for x in reflection_amplitude)}{"".join(str(x) for x in reflection_delay)}' \
f'{reverb_amplitude}{reverb_delay}{reverb_time}'
s = str(int(hashlib.sha256(s.encode('utf-8')).hexdigest(), 16) % 10 ** 8)
filepath = f'{composer}_{s}'
print(f'Generating recipe {filepath}\n')
return dict(
composer=composer,
part_count=part_count,
parts=parts,
zenith=zenith,
azimuth=azimuth,
offset=offset,
duration=duration,
reverb_time=reverb_time,
reverb_delay=reverb_delay,
reverb_amplitude=reverb_amplitude,
reflection_count=reflection_count,
reflection_amplitude=reflection_amplitude,
reflection_delay=reflection_delay,
reflection_zenith=reflection_zenith,
reflection_azimuth=reflection_azimuth,
filepath=filepath,
name=''
)
def generate_sample(self, recipe):
from audio_processing import mix_parts, apply_hrtf, mix_reflections, apply_reverberation, sum_signals, \
adjust_signal_to_noise
from pathlib import Path
from utils import generate_impulse
import os
from pydub import AudioSegment
impulse = generate_impulse(recipe['duration'])
print(f'Generating sample: {recipe["filepath"]}\n')
print(f'\tMixing parts: {recipe["filepath"]}')
filepath = Path(f"raw/{recipe['filepath']}_raw.wav")
if os.path.isfile(filepath):
signal = AudioSegment.from_wav(filepath)
else:
signal = mix_parts(recipe['parts'], recipe['offset'], recipe['duration'])
print(f'\tApplying HRTF: {recipe["filepath"]}')
filepath = Path(f'hrtf/{recipe["filepath"]}_hrtf.wav')
if os.path.isfile(filepath):
hrtf = AudioSegment.from_wav(filepath)
else:
hrtf = apply_hrtf(signal, recipe['zenith'], recipe['azimuth'])
impulse_hrtf = apply_hrtf(impulse, recipe['zenith'], recipe['azimuth'])
print(f'\tApplying reflections: {recipe["filepath"]}')
filepath = Path(f"reflections/{recipe['filepath']}_reflections.wav")
if os.path.isfile(filepath):
reflections = AudioSegment.from_wav(filepath)
else:
reflections = mix_reflections(hrtf, recipe['reflection_count'], recipe['reflection_amplitude'],
recipe['reflection_delay'], recipe['reflection_zenith'],
recipe['reflection_azimuth'])
impulse_reflections = mix_reflections(impulse_hrtf, recipe['reflection_count'],
recipe['reflection_amplitude'], recipe['reflection_delay'],
recipe['reflection_zenith'], recipe['reflection_azimuth'])
print(f'\tApplying reverberation: {recipe["filepath"]}')
filepath = Path(f"reverberation/{recipe['filepath']}_reverberation.wav")
if os.path.isfile(filepath):
reverberation = AudioSegment.from_wav(filepath)
else:
reverberation = apply_reverberation(hrtf, recipe['reverb_amplitude'], recipe['reverb_delay'],
recipe['reverb_time'])
impulse_reverberation = apply_reverberation(impulse_hrtf, recipe['reverb_amplitude'], recipe['reverb_delay'],
recipe['reverb_time'])
print(f'\tSumming signals: {recipe["filepath"]}')
filepath = Path(f"summation/{recipe['filepath']}_summation.wav")
if os.path.isfile(filepath):
summation = AudioSegment.from_wav(filepath)
else:
summation = sum_signals(reflections, reverberation)
impulse_summation = sum_signals(impulse_reflections, impulse_reverberation)
print(f'\tAdjusting signal-to-noise ratio: {recipe["filepath"]}')
filepath = Path(f"noise/{recipe['filepath']}.wav")
if os.path.isfile(filepath):
noise = AudioSegment.from_wav(filepath)
else:
noise = adjust_signal_to_noise(summation, -60)
impulse_noise = adjust_signal_to_noise(impulse_summation, -60)
print(f'\tTrimming sample: {recipe["filepath"]}')
filepath = Path(f"samples/{recipe['filepath']}.wav")
if os.path.isfile(filepath):
sample = AudioSegment.from_wav(filepath)
else:
sample = noise[:recipe['duration'] * 1000]
impulse_sample = impulse_noise[:recipe["duration"] * 1000]
self.write(sample, 'samples', f'{recipe["filepath"]}.wav')
self.write(impulse_sample, 'rir', f'{recipe["filepath"]}_rir.wav')
if self.verbose:
self.write(signal, 'raw', f'{recipe["filepath"]}_raw.wav')
self.write(hrtf, 'hrtf', f'{recipe["filepath"]}_hrtf.wav')
self.write(reflections, 'reflections', f'{recipe["filepath"]}_reflections.wav')
self.write(reverberation, 'reverberation', f'{recipe["filepath"]}_reverberation.wav')
self.write(summation, 'summation', f'{recipe["filepath"]}_summation.wav')
self.write(noise, 'noise', f'{recipe["filepath"]}_noise.wav')
def generate_samples(self, recipe):
return recipe.apply(self.generate_sample, axis=1)
def get_reflections(self, count):
rng = self.rng
amplitudes = [rng.get_amplitude() for _ in range(count)]
delays = [rng.get_delay() for _ in range(count)]
zeniths = [rng.get_zenith() for _ in range(count)]
azimuths = [rng.get_azimuth(zenith=zeniths[i]) for i in range(count)]
return zeniths, azimuths, amplitudes, delays
def write(self, file, directory, filename):
from pathlib import Path
import os
path = Path(self.output_directory) / directory
if not os.path.isdir(path):
os.mkdir(path)
path = path / filename
if not os.path.isfile(path):
print(f'\tWriting file: {filename}')
file_format = path.suffix.strip('.')
if file_format == 'wav':
if self.fs != file.frame_rate:
file = file.set_frame_rate(self.fs)
file.export(path, format=file_format)
if file_format == 'png':
file.savefig(path, format=file_format)
file.figure().clear()
file.close()
file.cla()
file.clf()
return path
```
#### File: sigR/sigr/RNG.py
```python
class RNG:
def __init__(self, seed='0xec0ec0', duration=10, delay_limits=(1, 60), time_limits=(1, 8), reflection_limits=(4, 8), zenith_limits=(-40, 90), azimuth_limits=(0, 360)):
import numpy as np
from data_loader import list_hrtf_data, list_anechoic_data, list_composers, get_hrtfs
self.seed = seed
self.duration = duration
self.delay_limits = delay_limits
self.time_limits = time_limits
self.reflection_limits = reflection_limits
self.zenith_limits = zenith_limits
self.azimuth_limits = azimuth_limits
self.rng = np.random.default_rng(int(self.seed, 0))
self.composers = list_composers()
self.anechoic_data = list_anechoic_data()
if zenith_limits is not None:
zmin, zmax = zenith_limits
else:
zmin, zmax = None, None
if azimuth_limits is not None:
amin, amax = azimuth_limits
else:
amin, amax = None, None
hrtf_data = list_hrtf_data()
zeniths, azimuths = get_hrtfs(amin=amin, amax=amax, zmin=zmin, zmax=zmax)
hrtfs = {z: {} for z in zeniths}
for z in zeniths:
for a in azimuths:
if a in hrtf_data[z].keys():
hrtfs[z][a] = hrtf_data[z][a]
self.hrtf_data = hrtfs
def get_composer(self):
return self.rng.choice(self.composers)
def get_part_count(self, composer):
parts = self.anechoic_data[composer]
part_limits = (2, len(parts))
return self.rng.integers(part_limits[0], part_limits[1])
def get_parts(self, composer=None, part_count=None):
if composer is None:
composer = self.get_composer()
if part_count is None:
part_count = self.get_part_count(composer)
return self.rng.choice(self.anechoic_data[composer], part_count, replace=False)
def get_zenith(self, azimuth=None):
zeniths = sorted(list(self.hrtf_data.keys()))
if azimuth is not None:
zeniths = [z for z in zeniths if azimuth in self.hrtf_data[z]]
return self.rng.choice(zeniths)
def get_azimuth(self, zenith=None):
zeniths = []
if zenith is not None:
            zeniths.append(zenith)
else:
zeniths = sorted(list(self.hrtf_data.keys()))
azimuths = set()
for z in zeniths:
for a in self.hrtf_data[z]:
azimuths.add(a)
return self.rng.choice(list(azimuths))
def get_delay(self):
return self.rng.integers(low=self.delay_limits[0], high=self.delay_limits[1] + 1)
def get_amplitude(self):
return self.rng.random()
def get_time(self):
return self.rng.integers(low=self.time_limits[0], high=self.time_limits[1] + 1)
def get_reflection_count(self):
return self.rng.integers(low=self.reflection_limits[0], high=self.reflection_limits[1] + 1)
def get_offset(self, length):
length = length - self.duration * 1000
return self.rng.integers(low=0, high=length)
```
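DataGenerator and RNG are meant to be used together: the generator takes a seeded RNG and writes recipes and rendered samples under its output directory. A rough sketch, assuming the anechoic and HRTF data consulted by data_loader is present locally; the sample count, output path and import paths are placeholders.
```python
# Rough sketch: sample_count, output_directory and the imports are placeholders, and the
# anechoic/HRTF data read by data_loader must already be available for generate() to run.
from RNG import RNG
from DataGenerator import DataGenerator
rng = RNG(seed="0xec0ec0", duration=10)
generator = DataGenerator(sample_count=100, output_directory="output", rng=rng, verbose=True)
ingredients, recipes = generator.generate()  # recipes and samples land under output/reflections/
print(recipes[["composer", "filepath"]].head())
```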
|
{
"source": "JeramyJeffereis/robin_stocks",
"score": 3
}
|
#### File: robin_stocks/robinhood/helper.py
```python
from functools import wraps
import requests
from robin_stocks.robinhood.globals import LOGGED_IN, OUTPUT, SESSION
def set_login_state(logged_in):
"""Sets the login state"""
global LOGGED_IN
LOGGED_IN = logged_in
def set_output(output):
"""Sets the global output stream"""
global OUTPUT
OUTPUT = output
def get_output():
"""Gets the current global output stream"""
global OUTPUT
return OUTPUT
def login_required(func):
"""A decorator for indicating which methods require the user to be logged
in."""
@wraps(func)
def login_wrapper(*args, **kwargs):
global LOGGED_IN
if not LOGGED_IN:
raise Exception('{} can only be called when logged in'.format(
func.__name__))
return(func(*args, **kwargs))
return(login_wrapper)
def convert_none_to_string(func):
"""A decorator for converting a None Type into a blank string"""
@wraps(func)
def string_wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if result:
return(result)
else:
return("")
return(string_wrapper)
def id_for_stock(symbol):
"""Takes a stock ticker and returns the instrument id associated with the stock.
:param symbol: The symbol to get the id for.
:type symbol: str
:returns: A string that represents the stocks instrument id.
"""
try:
symbol = symbol.upper().strip()
except AttributeError as message:
print(message, file=get_output())
return(None)
url = 'https://api.robinhood.com/instruments/'
payload = {'symbol': symbol}
data = request_get(url, 'indexzero', payload)
return(filter_data(data, 'id'))
def id_for_chain(symbol):
"""Takes a stock ticker and returns the chain id associated with a stocks option.
:param symbol: The symbol to get the id for.
:type symbol: str
:returns: A string that represents the stocks options chain id.
"""
try:
symbol = symbol.upper().strip()
except AttributeError as message:
print(message, file=get_output())
return(None)
url = 'https://api.robinhood.com/instruments/'
payload = {'symbol': symbol}
data = request_get(url, 'indexzero', payload)
if data:
return(data['tradable_chain_id'])
else:
return(data)
def id_for_group(symbol):
"""Takes a stock ticker and returns the id associated with the group.
:param symbol: The symbol to get the id for.
:type symbol: str
:returns: A string that represents the stocks group id.
"""
try:
symbol = symbol.upper().strip()
except AttributeError as message:
print(message, file=get_output())
return(None)
url = 'https://api.robinhood.com/options/chains/{0}/'.format(
id_for_chain(symbol))
data = request_get(url)
return(data['underlying_instruments'][0]['id'])
def id_for_option(symbol, expirationDate, strike, optionType):
"""Returns the id associated with a specific option order.
:param symbol: The symbol to get the id for.
:type symbol: str
    :param expirationDate: The expiration date as YYYY-MM-DD
    :type expirationDate: str
:param strike: The strike price.
:type strike: str
:param optionType: Either call or put.
:type optionType: str
:returns: A string that represents the stocks option id.
"""
symbol = symbol.upper()
chain_id = id_for_chain(symbol)
payload = {
'chain_id': chain_id,
'expiration_dates': expirationDate,
'strike_price': strike,
'type': optionType,
'state': 'active'
}
url = 'https://api.robinhood.com/options/instruments/'
data = request_get(url, 'pagination', payload)
listOfOptions = [item for item in data if item["expiration_date"] == expirationDate]
if (len(listOfOptions) == 0):
print('Getting the option ID failed. Perhaps the expiration date is wrong format, or the strike price is wrong.', file=get_output())
return(None)
return(listOfOptions[0]['id'])
def round_price(price):
"""Takes a price and rounds it to an appropriate decimal place that Robinhood will accept.
:param price: The input price to round.
:type price: float or int
:returns: The rounded price as a float.
"""
price = float(price)
if price <= 1e-2:
returnPrice = round(price, 6)
elif price < 1e0:
returnPrice = round(price, 4)
else:
returnPrice = round(price, 2)
return returnPrice
def filter_data(data, info):
"""Takes the data and extracts the value for the keyword that matches info.
:param data: The data returned by request_get.
:type data: dict or list
:param info: The keyword to filter from the data.
:type info: str
:returns: A list or string with the values that correspond to the info keyword.
"""
if (data == None):
return(data)
elif (data == [None]):
return([])
elif (type(data) == list):
if (len(data) == 0):
return([])
compareDict = data[0]
noneType = []
elif (type(data) == dict):
compareDict = data
noneType = None
if info is not None:
if info in compareDict and type(data) == list:
return([x[info] for x in data])
elif info in compareDict and type(data) == dict:
return(data[info])
else:
print(error_argument_not_key_in_dictionary(info), file=get_output())
return(noneType)
else:
return(data)
def inputs_to_set(inputSymbols):
"""Takes in the parameters passed to *args and puts them in a set and a list.
The set will make sure there are no duplicates, and then the list will keep
the original order of the input.
:param inputSymbols: A list, dict, or tuple of stock tickers.
:type inputSymbols: list or dict or tuple or str
:returns: A list of strings that have been capitalized and stripped of white space.
"""
symbols_list = []
symbols_set = set()
def add_symbol(symbol):
symbol = symbol.upper().strip()
if symbol not in symbols_set:
symbols_set.add(symbol)
symbols_list.append(symbol)
if type(inputSymbols) is str:
add_symbol(inputSymbols)
elif type(inputSymbols) is list or type(inputSymbols) is tuple or type(inputSymbols) is set:
inputSymbols = [comp for comp in inputSymbols if type(comp) is str]
for item in inputSymbols:
add_symbol(item)
return(symbols_list)
def request_document(url, payload=None):
"""Using a document url, makes a get request and returnes the session data.
:param url: The url to send a get request to.
:type url: str
    :returns: Returns the session.get() data as opposed to session.get().json() data.
"""
try:
res = SESSION.get(url, params=payload)
res.raise_for_status()
except requests.exceptions.HTTPError as message:
print(message, file=get_output())
return(None)
return(res)
def request_get(url, dataType='regular', payload=None, jsonify_data=True):
"""For a given url and payload, makes a get request and returns the data.
:param url: The url to send a get request to.
:type url: str
:param dataType: Determines how to filter the data. 'regular' returns the unfiltered data. \
'results' will return data['results']. 'pagination' will return data['results'] and append it with any \
data that is in data['next']. 'indexzero' will return data['results'][0].
:type dataType: Optional[str]
:param payload: Dictionary of parameters to pass to the url. Will append the requests url as url/?key1=value1&key2=value2.
:type payload: Optional[dict]
:param jsonify_data: If this is true, will return requests.post().json(), otherwise will return response from requests.post().
:type jsonify_data: bool
:returns: Returns the data from the get request. If jsonify_data=True and requests returns an http code other than <200> \
then either '[None]' or 'None' will be returned based on what the dataType parameter was set as.
"""
if (dataType == 'results' or dataType == 'pagination'):
data = [None]
else:
data = None
res = None
if jsonify_data:
try:
res = SESSION.get(url, params=payload)
res.raise_for_status()
data = res.json()
except (requests.exceptions.HTTPError, AttributeError) as message:
print(message, file=get_output())
return(data)
else:
res = SESSION.get(url, params=payload)
return(res)
# Only continue to filter data if jsonify_data=True, and Session.get returned status code <200>.
if (dataType == 'results'):
try:
data = data['results']
except KeyError as message:
print("{0} is not a key in the dictionary".format(message), file=get_output())
return([None])
elif (dataType == 'pagination'):
counter = 2
nextData = data
try:
data = data['results']
except KeyError as message:
print("{0} is not a key in the dictionary".format(message), file=get_output())
return([None])
if nextData['next']:
#print('Found Additional pages.', file=get_output())
while nextData['next']:
try:
res = SESSION.get(nextData['next'])
res.raise_for_status()
nextData = res.json()
except:
print('Additional pages exist but could not be loaded.', file=get_output())
return(data)
#print('Loading page '+str(counter)+' ...', file=get_output())
counter += 1
for item in nextData['results']:
data.append(item)
elif (dataType == 'indexzero'):
try:
data = data['results'][0]
except KeyError as message:
print("{0} is not a key in the dictionary".format(message), file=get_output())
return(None)
except IndexError as message:
return(None)
return(data)
def request_post(url, payload=None, timeout=16, json=False, jsonify_data=True):
"""For a given url and payload, makes a post request and returns the response. Allows for responses other than 200.
:param url: The url to send a post request to.
:type url: str
:param payload: Dictionary of parameters to pass to the url as url/?key1=value1&key2=value2.
:type payload: Optional[dict]
:param timeout: The time for the post to wait for a response. Should be slightly greater than multiples of 3.
:type timeout: Optional[int]
:param json: This will set the 'content-type' parameter of the session header to 'application/json'
:type json: bool
:param jsonify_data: If this is true, will return requests.post().json(), otherwise will return response from requests.post().
:type jsonify_data: bool
:returns: Returns the data from the post request.
"""
data = None
res = None
try:
if json:
update_session('Content-Type', 'application/json')
res = SESSION.post(url, json=payload, timeout=timeout)
update_session(
'Content-Type', 'application/x-www-form-urlencoded; charset=utf-8')
else:
res = SESSION.post(url, data=payload, timeout=timeout)
data = res.json()
except Exception as message:
print("Error in request_post: {0}".format(message), file=get_output())
# Either return response <200,401,etc.> or the data that is returned from requests.
if jsonify_data:
return(data)
else:
return(res)
def request_delete(url):
"""For a given url and payload, makes a delete request and returns the response.
:param url: The url to send a delete request to.
:type url: str
:returns: Returns the data from the delete request.
"""
try:
res = SESSION.delete(url)
res.raise_for_status()
data = res
except Exception as message:
data = None
print("Error in request_delete: {0}".format(message), file=get_output())
return(data)
def update_session(key, value):
"""Updates the session header used by the requests library.
:param key: The key value to update or add to session header.
:type key: str
:param value: The value that corresponds to the key.
:type value: str
:returns: None. Updates the session header with a value.
"""
SESSION.headers[key] = value
def error_argument_not_key_in_dictionary(keyword):
return('Error: The keyword "{0}" is not a key in the dictionary.'.format(keyword))
def error_ticker_does_not_exist(ticker):
return('Warning: "{0}" is not a valid stock ticker. It is being ignored'.format(ticker))
def error_must_be_nonzero(keyword):
return('Error: The input parameter "{0}" must be an integer larger than zero and non-negative'.format(keyword))
```
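A short sketch exercising the pure helpers defined above; inputs_to_set, round_price and filter_data never touch the API, so no login is needed. The import path follows the package layout shown in the file header.
```python
# Sketch: only the pure helpers are exercised here; no Robinhood session or login is involved.
from robin_stocks.robinhood.helper import inputs_to_set, round_price, filter_data
print(inputs_to_set(" aapl "))                  # ['AAPL']
print(inputs_to_set(["msft", "MSFT", "tsla"]))  # ['MSFT', 'TSLA'] - duplicates dropped, order kept
print(round_price(0.123456789))                 # 0.1235 (four decimals below one dollar)
print(round_price(123.456789))                  # 123.46
quotes = [{"symbol": "AAPL", "id": "1"}, {"symbol": "MSFT", "id": "2"}]
print(filter_data(quotes, "symbol"))            # ['AAPL', 'MSFT']
```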
|
{
"source": "jeras/Modern-Computer-Architecture-and-Organization-Second-Edition",
"score": 3
}
|
#### File: Answers to Exercises/src/Ex__2_load_dataset.py
```python
from tensorflow.keras import datasets
import matplotlib.pyplot as plt
def load_dataset():
(train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data()
# Normalize pixel values to the range 0-1
train_images = train_images / 255.0
test_images = test_images / 255.0
return train_images, train_labels, test_images, test_labels
def plot_samples(train_images, train_labels):
class_names = ['Airplane', 'Automobile', 'Bird', 'Cat', 'Deer',
'Dog', 'Frog', 'Horse', 'Ship', 'Truck']
plt.figure(figsize=(14,7))
for i in range(60):
plt.subplot(5,12,i + 1)
plt.xticks([])
plt.yticks([])
plt.imshow(train_images[i])
plt.xlabel(class_names[train_labels[i][0]])
plt.show()
if __name__ == '__main__':
train_images, train_labels, test_images, test_labels = load_dataset()
plot_samples(train_images, train_labels)
```
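A quick sanity check of load_dataset above; the printed shapes are the standard CIFAR-10 sizes, and the module name is assumed from the file name.
```python
# Sketch: confirm the normalized CIFAR-10 arrays (module name assumed from the file name).
from Ex__2_load_dataset import load_dataset
train_images, train_labels, test_images, test_labels = load_dataset()
print(train_images.shape, test_images.shape)   # (50000, 32, 32, 3) (10000, 32, 32, 3)
print(train_images.min(), train_images.max())  # pixel values scaled into the 0-1 range
```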
|
{
"source": "jerbarnes/norwegian_dialect",
"score": 2
}
|
#### File: jerbarnes/norwegian_dialect/status_script.py
```python
import os
import json
# Status script
# Checking the status of the annotations,
# both old and new.
# Samia and Petter annotations
with open(os.path.join("v1.1","data","dev.json"),"r",encoding="utf-8") as data:
dev_sp = json.load(data)
with open(os.path.join("v1.1","data","train.json"),"r",encoding="utf-8") as data:
train_sp = json.load(data)
with open(os.path.join("v1.1","data","test.json"),"r",encoding="utf-8") as data:
test_sp = json.load(data)
alle_sp = dev_sp + train_sp + test_sp
alle_sp_id = {}
for tweet in alle_sp:
alle_sp_id[tweet["sent_id"]] = tweet
# Sentence level, curated round 1 and 2
with open(os.path.join("gui_annotations",
"finished_anns","curated_annotations",
"round1_curated.json"),"r",encoding="utf-8") as data:
runde1 = json.load(data)
with open(os.path.join("gui_annotations",
"finished_anns","curated_annotations",
"round2_curated.json"),"r",encoding="utf-8") as data:
runde2 = json.load(data)
# Currently in progress sentence level
with open(os.path.join("gui_annotations","marie","m_final_round.json"),"r",encoding="utf-8") as data:
marie_inprogress = json.load(data)
with open(os.path.join("gui_annotations","alexandra","a_final_round.json"),"r",encoding="utf-8") as data:
alexandra_inprogress = json.load(data)
def get_curated_num(json_file):
# Get the number of curated sentences from the sentence
# level annotations.
uncorrected = 0
corrected = 0
for tweet in json_file:
if json_file[tweet]["corrected_category"] == "NONE":
uncorrected += 1
else:
corrected += 1
summen = uncorrected + corrected
assert summen == len(json_file)
print("Corrected:",corrected)
print("Uncorrected:",uncorrected)
print(corrected/(summen/100),"% corrected")
# Print the curation status for the in-progress annotations
get_curated_num(marie_inprogress)
get_curated_num(alexandra_inprogress)
# Check overlap
#finegrained
def get_overlapping(progress):
for tweet in progress:
sid = progress[tweet]["sent_id"]
if sid in alle_sp_id:
print(sid)
#get_overlapping(marie_inprogress)
#get_overlapping(alexandra_inprogress)
```
#### File: v1.1/experiments/bert_classifier.py
```python
from transformers import BertTokenizer, BertForSequenceClassification
from torch.utils.data import DataLoader
import json
import os
import torch
from transformers import AdamW
from tqdm import tqdm
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sn
from sklearn.metrics import f1_score, confusion_matrix, precision_score, recall_score
import argparse
font = {'serif': ['Times'],
'size': 16}
matplotlib.rc('font', **font)
def collate_fn(batch):
batch = sorted(batch, key=lambda item : item[0]["attention_mask"].numpy().sum(), reverse=True)
max_length = max([len(item[0]["input_ids"]) for item in batch])
#print(max_length)
new_input_ids = []
new_tt = []
new_att = []
new_labels = []
for info, label in batch:
self_length = len(info["input_ids"])
#print(self_length)
padding = torch.zeros(max_length - self_length)
new_input_ids.append(torch.cat((info["input_ids"], padding)).long())
new_tt.append(torch.cat((info["token_type_ids"], padding)).long())
new_att.append(torch.cat((info["attention_mask"], padding)).long())
new_labels.append(label)
new_batch = {"input_ids": torch.stack(new_input_ids),
"token_type_ids": torch.stack(new_tt),
"attention_mask": torch.stack(new_att)
}
new_labels = torch.tensor(new_labels).long()
return new_batch, new_labels
# create dataloader
def load_dataset(dataset_file, tokenizer):
label_map = {"bokmål": 0, "nynorsk": 1, "dialectal": 2, "mixed": 3}
final_data = []
with open(dataset_file) as o:
data = json.load(o)
texts = [t["text"] for t in data]
labels = [label_map[t["category"]] for t in data]
tokenized = tokenizer(texts, return_tensors="pt", add_special_tokens=False, padding=True)
for i in range(len(data)):
info = {"input_ids": tokenized["input_ids"][i],
"token_type_ids": tokenized["token_type_ids"][i],
"attention_mask": tokenized["attention_mask"][i],
}
final_data.append((info, labels[i]))
return final_data
def test_model(dataloader, model):
model.eval()
preds = []
gold = []
for info, label in tqdm(dataloader):
output = model(**info)
preds.extend(output.logits.detach().numpy().argmax(1))
gold.extend(label.tolist())
return gold, preds, f1_score(gold, preds, average="macro")
def train_model(trainloader, devloader, model, output_dir="saved_models", model_name="norbert", num_epochs=20):
os.makedirs(output_dir, exist_ok=True)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}]
optimizer = AdamW(optimizer_grouped_parameters, lr=1e-5)
#scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_train_steps)
losses = []
best_dev = 0
for e in range(num_epochs):
print("Epochs: {}".format(e+1))
model.train()
epoch_loss = 0
for info, label in tqdm(trainloader):
outputs = model(**info, labels=label)
loss = outputs.loss
loss.backward()
epoch_loss += float(loss.detach())
optimizer.step()
#scheduler.step()
model.zero_grad()
epoch_loss /= len(trainloader)
print("Loss: {0:.3f}".format(epoch_loss))
losses.append(epoch_loss)
# eval on dev
dg, dp, dev_f1 = test_model(devloader, model)
print("Dev F1: {0:.3f}".format(dev_f1))
if dev_f1 > best_dev:
print("New best model")
model.save_pretrained(os.path.join("saved_models", model_name))
best_dev = dev_f1
def get_errors(gold, pred, texts, idx2label):
for g, p, t in zip(gold, pred, texts):
if g != p:
print(t)
print("Gold: {0} - Pred: {1}".format(idx2label[g], idx2label[p]))
print()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--model", default="NbAiLab/nb-bert-base")
parser.add_argument("--train", action="store_true")
parser.add_argument("--test", action="store_true")
args = parser.parse_args()
name_map = {"ltgoslo/norbert": "norbert",
"NbAiLab/nb-bert-base": "nb-bert",
"bert-base-multilingual-cased": "mbert"
}
short_name = name_map[args.model]
print("importing data...")
tokenizer = BertTokenizer.from_pretrained(args.model)
train_data = load_dataset("../data/train.json",
tokenizer)
train_loader = DataLoader(train_data,
batch_size=32,
shuffle=True,
collate_fn=collate_fn)
dev_data = load_dataset("../data/dev.json",
tokenizer)
dev_loader = DataLoader(dev_data,
batch_size=8,
shuffle=False,
collate_fn=collate_fn)
test_data = load_dataset("../data/test.json",
tokenizer)
test_loader = DataLoader(test_data,
batch_size=8,
shuffle=False,
collate_fn=collate_fn)
print("importing {} model from {}...".format(short_name, args.model))
    if args.train:
        model = BertForSequenceClassification.from_pretrained(args.model, num_labels=4)
print("training model...")
train_model(train_loader, dev_loader, model, model_name=short_name)
print("evaluating model...")
model = BertForSequenceClassification.from_pretrained(os.path.join("saved_models", short_name))
idx2label = {0: 'bokmål', 1: 'nynorsk', 2: 'dialectal', 3: 'mixed'}
dev_texts = [" ".join(tokenizer.convert_ids_to_tokens(i[0]['input_ids'][i[0]["input_ids"].nonzero()].squeeze())) for i in dev_data]
dev_labels = [idx2label[i[1]] for i in dev_data]
test_texts = [" ".join(tokenizer.convert_ids_to_tokens(i[0]['input_ids'][i[0]["input_ids"].nonzero()].squeeze())) for i in test_data]
test_labels = [idx2label[i[1]] for i in test_data]
dev_gold, dev_pred, dev_f1 = test_model(dev_loader, model)
dev_prec = precision_score(dev_gold, dev_pred, average="macro")
dev_rec = recall_score(dev_gold, dev_pred, average="macro")
print("Dev Prec: {0:.3f}".format(dev_prec))
print("Dev Rec: {0:.3f}".format(dev_rec))
print("Dev F1: {0:.3f}".format(dev_f1))
#dev_pred = [idx2label[i] for i in dev_pred]
print()
print("Dev confusion matrix")
cm = confusion_matrix(dev_gold, dev_pred)
print(cm)
print()
print("-" * 40)
test_gold, test_pred, test_f1 = test_model(test_loader, model)
test_prec = precision_score(test_gold, test_pred, average="macro")
test_rec = recall_score(test_gold, test_pred, average="macro")
print("Test Prec: {0:.3f}".format(test_prec))
print("Test Rec: {0:.3f}".format(test_rec))
print("Test F1: {0:.3f}".format(test_f1))
#test_pred = [idx2label[i] for i in test_pred]
print()
print("Test confusion matrix")
cm = confusion_matrix(test_gold, test_pred)
print(cm)
print()
df = pd.DataFrame(cm, index=["BK", "NN", "DI", "MIX"], columns=["BK", "NN", "DI", "MIX"])
cmap = sn.color_palette("crest", as_cmap=True)
fig = sn.heatmap(df, annot=True, cmap=cmap, cbar=False)
plt.show()
get_errors(test_gold, test_pred, test_texts, idx2label)
```
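A small synthetic check of the collate_fn defined above: items of different lengths are padded to the longest one and re-ordered by attention-mask size. The fake tensors only mimic the shape of tokenizer output, and the module name is assumed.
```python
# Synthetic sketch of collate_fn: fake tensors stand in for tokenizer output (module name assumed).
import torch
from bert_classifier import collate_fn
def fake_item(length, label):
    return ({"input_ids": torch.ones(length),
             "token_type_ids": torch.zeros(length),
             "attention_mask": torch.ones(length)}, label)
batch, labels = collate_fn([fake_item(3, 0), fake_item(5, 1)])
print(batch["input_ids"].shape)  # torch.Size([2, 5]) - padded to the longest item
print(labels)                    # tensor([1, 0]) - the longest (most attended) item comes first
```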
#### File: word_level/experiments/labelling_functions.py
```python
from skweak.base import SpanAnnotator, CombinedAnnotator
from skweak.heuristics import FunctionAnnotator
from skweak.aggregation import HMM
import os
from spacy.tokens import Doc #type: ignore
from typing import Sequence, Tuple, Optional, Iterable
from collections import defaultdict
class NordialAnnotator(CombinedAnnotator):
"""Annotator of entities in documents, combining several sub-annotators (such as gazetteers,
spacy models etc.). To add all annotators currently implemented, call add_all(). """
def __init__(self, nlp, bokmal, nynorsk):
super(NordialAnnotator, self).__init__()
self.nlp = nlp
self.bokmal = bokmal
self.nynorsk = nynorsk
def open_dictionaries(self):
self.functional_dict = read_dic("lexicons/functional.txt")
self.marked_dict = read_dic("lexicons/marked.txt")
self.copula = read_dic("lexicons/copula.txt")
self.present_marker_deletion = read_dic("lexicons/present_marker_deletion.txt")
self.h_v = read_dic("lexicons/h_v.txt")
self.contraction = read_dic("lexicons/contraction.txt")
self.gender = read_dic("lexicons/gender.txt")
self.shortening = read_dic("lexicons/shortening.txt")
self.phonemic = read_dic("lexicons/phonemic_spelling.txt")
def add_all(self):
self.add_annotator(FunctionAnnotator("pron", dialect_pronoun))
self.add_annotator(FunctionAnnotator("pron_subj", pron_subj))
self.add_annotator(FunctionAnnotator("pron_obj", pron_obj))
self.add_annotator(FunctionAnnotator("adjective_declension", adj_dec))
self.add_annotator(FunctionAnnotator("nominal_declension", nom_dec))
self.add_annotator(FunctionAnnotator("conjugation", conjugation))
self.add_annotator(FunctionAnnotator("dem_pro", dem_pro))
# Lexicon-based labeling functions
self.add_annotator(LexiconAnnotator("present_marker_deletion",
self.present_marker_deletion))
self.add_annotator(LexiconAnnotator("h_v", self.h_v))
self.add_annotator(LexiconAnnotator("gender", self.gender))
self.add_annotator(LexiconAnnotator("shortening", self.shortening))
self.add_annotator(LexiconAnnotator("functional",
self.functional_dict))
self.add_annotator(LexiconAnnotator("marked",
self.marked_dict))
self.add_annotator(LexiconAnnotator("phonemic_spelling",
self.phonemic))
# specific labeling functions
self.add_annotator(VoicingAnnotator("voicing",
self.bokmal,
self.nynorsk))
self.add_annotator(ApocopeAnnotator("apocope",
self.nlp,
self.bokmal,
self.nynorsk))
self.add_annotator(VowelshiftAnnotator("vowel_shift",
self.bokmal,
self.nynorsk))
self.add_annotator(PalatalizationAnnotator("palatalization",
self.bokmal,
self.nynorsk))
self.add_annotator(CopulaAnnotator("copula",
self.copula))
self.add_annotator(ContractionAnnotator("contraction",
self.contraction))
####################################################################
# all dialectal forms of probouns
####################################################################
def dialect_pronoun(doc):
forms = ["æ", "æg", "jæ", "jæi", "je", "ej", "mæ", "dæ", "hu", "ho", "honn", "hænne", "dåkk", "døkk", "døkker", "økk", "dom", "dæi", "døm", "dømm", "dæm", "demm", "di", "æm", "æmm"]
i = 0
while i < len(doc):
tok = doc[i]
if tok.text.lower() in forms:
yield i, i+1, "pron"
i += 1
####################################################################
# pron-subj
####################################################################
def pron_subj(doc):
obj_pron = ["æ", "æg", "jæ", "jæi", "je", "ej" "i", "mæ", "meg", "dæ", "ham", "hu", "ho", "honn", "henne", "hænne", "oss", "dåkk", "døkk", "døkker", "økk", "dem", "dom", "dæi", "døm", "dømm", "dæm", "demm", "di", "æm", "æmm"]
i = 0
while i < len(doc):
tok = doc[i]
if tok.text.lower() in obj_pron and tok.dep_ == "nsubj":
yield i, i+1, "pron-subj"
i += 1
####################################################################
# pron-obj
####################################################################
def pron_obj(doc):
subj_pron = ["jeg", "eg", "æ", "æg", "jæ", "jæi", "ej", "je", "i", "mæ", "dæ", "hu", "ho", "honn", "hænne", "me", "dåkk", "døkk", "døkker", "økk", "dom", "dæi", "døm", "dømm", "dæm", "demm", "di", "æm", "æmm"]
i = 0
while i < len(doc):
tok = doc[i]
if tok.text.lower() in subj_pron and tok.dep_ == "obj":
yield i, i+1, "pron-obj"
i += 1
####################################################################
# copula
####################################################################
class CopulaAnnotator(SpanAnnotator):
def __init__(self, name, lexicon):
super(CopulaAnnotator, self).__init__(name)
self.lexicon = lexicon
#
def find_spans(self, doc):
i = 0
while i < len(doc):
tok = doc[i]
if tok.text.lower() in self.lexicon and tok.dep_ in ["xcomp", "cop"]:
yield i, i+1, "copula"
i += 1
####################################################################
# contraction
####################################################################
class ContractionAnnotator(SpanAnnotator):
def __init__(self, name, lexicon):
super(ContractionAnnotator, self).__init__(name)
self.lexicon = lexicon
self.exceptions = ["kanskje", "skje"]
def near_quote(self, token, prev_tok, next_tok):
quotes = ["'", '"']
if prev_tok in quotes or next_tok in quotes or "'" in token or '"' in token:
return True
return False
#
def find_spans(self, doc):
i = 0
while i < len(doc):
tok = doc[i]
if i > 0:
prev_tok = doc[i-1].text.lower()
else:
prev_tok = ""
if i < len(doc) - 1:
                next_tok = doc[i+1].text.lower()
else:
next_tok = ""
# create a flag to only yield a single label
flag = False
for contraction in self.lexicon:
if tok.text.lower().endswith(contraction) and tok.text.lower() not in self.exceptions and self.near_quote(tok.text.lower(), prev_tok, next_tok):
flag = True
if flag is True:
yield i, i+1, "contraction"
i += 1
####################################################################
# palatalization
####################################################################
class PalatalizationAnnotator(SpanAnnotator):
def __init__(self, name, bokmal, nynorsk):
super(PalatalizationAnnotator, self).__init__(name)
self.bokmal = bokmal
self.nynorsk = nynorsk
def depalatize(self, token):
new_token = token
palatals = {"in": "n", "nj": "n", "il": "l", "lj": "l"}
for palatal, unpalatal in palatals.items():
if palatal in token:
new_token = token.replace(palatal, unpalatal)
return new_token
#
def find_spans(self, doc):
i = 0
exceptions = ["til", "ein"]
while i < len(doc):
tok = doc[i]
text = tok.text.lower()
unpalatal = self.depalatize(text)
if unpalatal != text and text not in exceptions:
if unpalatal in self.bokmal or unpalatal in self.nynorsk:
yield i, i+1, "palatalization"
i += 1
####################################################################
# present_marker_deletion
####################################################################
def present_marker_deletion(doc):
forms = ["ska", "vi"]
i = 0
while i < len(doc):
tok = doc[i]
if tok.text in forms and tok.pos_ in ["AUX", "VERB"]:
yield i, i+1, "present_marker_deletion"
i += 1
def present_marker_deletion2(doc):
"""
TODO: finish this for other verbs (velge -> vel)
"""
    forms = ["vel"]  # placeholder taken from the TODO example (velge -> vel); extend as needed
    i = 0
while i < len(doc):
tok = doc[i]
if tok.text in forms and tok.pos_ in ["AUX", "VERB"]:
yield i, i+1, "present_marker_deletion"
i += 1
####################################################################
# apocope
####################################################################
class ApocopeAnnotator(SpanAnnotator):
def __init__(self, name, nlp, bokmal, nynorsk):
super(ApocopeAnnotator, self).__init__(name)
self.nlp = nlp
self.bokmal = bokmal
self.nynorsk = nynorsk
#
def find_spans(self, doc):
i = 0
exceptions = ["går"]
while i < len(doc):
tok = doc[i]
text = tok.text.lower()
form = tok.morph.get("VerbForm")
if len(form) > 0:
form = form[0]
#print(tok.text, ": ", form)
else:
form = "None"
if tok.pos_ in ["VERB"] and form != "Part" and tok.text not in exceptions and not text[-1] in ["e", "r"]:
new = tok.text.lower() + "e"
if new in self.bokmal or new in self.nynorsk:
new_pos = self.nlp(new)[0].pos_
#print(new, ": ", new_pos)
if new_pos == "VERB":
yield i, i+1, "apocope"
i += 1
####################################################################
# Labeling function for voicing of consonants between vowels or syllable final
####################################################################
class VoicingAnnotator(SpanAnnotator):
def __init__(self, name, bokmal, nynorsk):
super(VoicingAnnotator, self).__init__(name)
self.bokmal = bokmal
self.nynorsk = nynorsk
#
def devoice(self, word):
voiceable_consts = {"b": "p", "g": "k", "d": "t"}
vowels = ['a', 'e', 'i', 'o', 'u', 'æ', 'ø', 'y']
devoiced = ''
for i, char in enumerate(word.lower()):
if i == 0:
devoiced += char
elif i == len(word) - 1:
if char in voiceable_consts:
prev_char = word[i-1]
if prev_char in vowels:
devoiced += voiceable_consts[char]
else:
devoiced += char
else:
devoiced += char
elif char in voiceable_consts:
prev_char = word[i-1]
next_char = word[i+1]
if prev_char in vowels and next_char in vowels:
devoiced += voiceable_consts[char]
else:
devoiced += char
else:
devoiced += char
return devoiced
#
def find_spans(self, doc: Doc) -> Iterable[Tuple[int, int, str]]:
i = 0
exceptions = ["og", "lag", "med", "veg"]
while i < len(doc):
tok = doc[i]
if tok.text.lower() not in exceptions:
devoiced = self.devoice(tok.text)
if (devoiced != tok.text.lower()) and ((devoiced in self.bokmal) or (devoiced in self.nynorsk)):
yield i, i+1, "voicing"
i += 1
####################################################################
# vowel shift
####################################################################
class VowelshiftAnnotator(SpanAnnotator):
def __init__(self, name, bokmal, nynorsk):
super(VowelshiftAnnotator, self).__init__(name)
self.bokmal = bokmal
self.nynorsk = nynorsk
self.shifts = {"au": ["ø", "o"],
"jø": ["e"],
"øu": ["au"],
"æ": ["e"],
"jæ": ["e"],
"o": ["u"],
"ø": ["u", "o", "ei"],
"jo": ["y"],
"y": ["ø"],
"ei": ["e"],
"e": ["ei"],
"ju": ["y"],
"øu": ["au"],
"å": ["o"]
}
def apply_vowelshift(self, token):
shifted = []
for shift, shiftbacks in self.shifts.items():
if shift in token:
for shiftback in shiftbacks:
shifted.append(token.replace(shift, shiftback))
return shifted
#
def find_spans(self, doc):
# we do not include any word in the pronouns
pronouns = ["jeg", "eg", "æ", "æg", "jæ", "jæi", "ej", "je", "i", "mæ", "dæ", "hu", "ho", "honn", "hænne", "me", "dåkk", "døkk", "døkker", "økk", "dom", "dæi", "døm", "dømm", "dæm", "demm", "di", "æm", "æmm",
"æ", "æg", "jæ", "jæi", "je", "ej" "i", "mæ", "meg", "dæ", "ham",
"hu", "ho", "honn", "henne", "hænne", "oss", "dåkk", "døkk",
"døkker", "økk", "dem", "dom", "dæi", "døm", "dømm", "dæm", "demm",
"di", "æm", "æmm"]
i = 0
while i < len(doc):
tok = doc[i]
text = tok.text.lower()
# avoid very short common words
if len(text) > 4 and text not in pronouns:
shifted = self.apply_vowelshift(text)
for new in shifted:
if new in self.bokmal or new in self.nynorsk:
yield i, i+1, "vowel_shift"
i += 1
####################################################################
# lexical
####################################################################
class LexicalAnnotator(SpanAnnotator):
def __init__(self, name, bokmal, nynorsk):
super(LexicalAnnotator, self).__init__(name)
self.bokmal = bokmal
self.nynorsk = nynorsk
#
def find_spans(self, doc):
i = 0
while i < len(doc):
tok = doc[i].lemma_.lower()
if tok not in self.bokmal and tok not in self.nynorsk:
yield i, i+1, "lexical"
i += 1
####################################################################
# dem_pro
####################################################################
def dem_pro(doc):
i = 0
while i < len(doc):
tok = doc[i]
if tok.pos_ in ["PROPN"]:
if i-1 >= 0:
prev_tok = doc[i-1]
if prev_tok.text.lower() in ["han", "n", "hun", "hu", "ho", "a"]:
yield i-1, i+1, "dem_pro"
i += 1
####################################################################
# adjectival_declension
####################################################################
"""
ekje så møje større enn ein store hond
"""
def adj_dec(doc):
i = 0
while i < len(doc):
tok = doc[i]
if tok.pos_ in ["ADJ"] and tok.text.lower().endswith("e"):
if i + 1 < len(doc) and i-1 >= 0:
prev_tok = doc[i-1].text.lower()
next_tok = doc[i+1]
next_pos = next_tok.pos_
if prev_tok in ["en", "ein", "et", "eit"] and next_pos is "NOUN":
yield i, i+1, "adjective_declension"
i += 1
####################################################################
# nominal_declension
####################################################################
def nom_dec(doc):
i = 0
exceptions = ["ski"]
while i < len(doc):
tok = doc[i]
if tok.pos_ in ["NOUN"] and tok.text.lower().endswith("i") and tok.text.lower() not in exceptions:
yield i, i+1, "nominal_declension"
i += 1
####################################################################
# conjugation
####################################################################
def conjugation(doc):
i = 0
exceptions = ["vet", "kan", "skal", "vil", "finnes"]
while i < len(doc):
tok = doc[i]
if tok.pos_ in ["VERB"] and not tok.text.lower().endswith("r") and tok.text not in exceptions:
tense = tok.morph.get("Tense")
if len(tense) > 0:
tense = tense[0]
else:
tense = "None"
if tense != "Past":
if i + 1 < len(doc) and i-1 >= 0:
prev_tok = doc[i-1].text.lower()
prev_pos = doc[i-1].pos_
next_tok = doc[i+1]
next_pos = next_tok.pos_
if prev_tok in ["jeg", "eg", "je", "jæ", "jæi", "æ", "ej", "han", "hun", "den", "vi", "me", "de", "dere", "dokker", "dokk", "døkker", "døk", "dom"]:
if prev_pos not in ["AUX"] and next_pos not in ["AUX"]:
yield i, i+1, "conjugation"
i += 1
####################################################################
# functional
####################################################################
class FunctionalAnnotator(SpanAnnotator):
def __init__(self, name, functional):
super(FunctionalAnnotator, self).__init__(name)
self.functional = functional
#
def find_spans(self, doc: Doc) -> Iterable[Tuple[int, int, str]]:
i = 0
while i < len(doc):
tok = doc[i]
if tok.text.lower() in self.functional:
yield i, i+1, "functional"
i += 1
####################################################################
# phonemic_spelling
####################################################################
class LexiconAnnotator(SpanAnnotator):
def __init__(self, name, lexicon):
super(LexiconAnnotator, self).__init__(name)
self.lexicon = lexicon
self.name = name
#
def find_spans(self, doc: Doc) -> Iterable[Tuple[int, int, str]]:
i = 0
while i < len(doc):
tok = doc[i]
if tok.text.lower() in self.lexicon:
yield i, i+1, self.name
i += 1
def read_dic(dic):
vocab = set()
for line in open(dic):
# skip lines that are commented out
if not line.startswith("#"):
vocab.add(line.strip().split("/")[0].lower())
return vocab
if __name__ == "__main__":
import spacy
nlp = spacy.load("nb_core_news_sm")
labels = ["pron_subj",
"pron_obj",
"copulate",
"contraction",
"palatalization",
"present_marker_deletion",
"apocope",
"voicing",
"vowel_shift",
#"lexical",
#"dem_pro",
#"shortening",
#"gender",
#"marked",
"h_v",
#"adjectival_declension",
#"nominal_declension",
#"conjugation",
#"functional",
#"phonemic_spelling",
#"interjection"
]
texts = ['det var noe av det beste jeg her sett!',
'æ e så forbainna',
'det går vel tilbage til ei bog eg leste',
'dem har ikke noe å si',
'så godt har dem aldri spilt',
'jeg har ikke sett dem enda',
'eg sa de i går',
'eg vet ikkje ka som har skjedd',
'eg vet ikke hva som har skjedd',
"det ha'kke noe å si",
"ekje så møje større enn ein store hond",
"E kje så møye mindre enn ein liden hond heller",
"Sku ITTE ha skrivi dæ . Sogndal skårer . Litt unødvendig . https : //t.co/IzQKl9iqeb",
"Nei tore æ vil ikke snakk me dæ",
"Vess du kun e på treningssenteret før å se stygt på andre så kan du faktisk reis tel helvette vekk derfra.",
"åssen går det me deg?",
"Jaja , du trega kanskje atta du inkje gjer det au ; men det æ det , he du fysst gjort det så sede du i klemmå.Då kan du jeispa , du kjeme ingjen veg",
"Æ har møtt veggen . Den heite kjemi 2 og den e 3 lag mur og 8 lag stål og 10 lag vibranium : - )",
"Eg såg snurten av innhaldslista til boki <NAME> og <NAME> skriv til hundradårshøgtidi for Fyrebilsbibelen , og der er det ei underyverskrift « Dette med Mortensson » – ja , med hermeteikn . Og det segjer eg for sant og visst : Dette er pirresnutt som duger . https : //t.co/8L2NQnjTRr",
"Ein av to ganger denne sesongen eg satse på @ leoskirio har ein rævva kamp https : //t.co/cOvzoHEONk",
"Fyflate kor ej suge på å huske å gratulere folk på face ......",
"Sommerskirenn , jida greit nok det , men rulleski er juks ! Skarru gå på ski får ' u gå på ski ! Staka på asfalten !",
"""Etter en flott kveld med ho Kari på @hteater
blir d nedtur med mer danseshow på #tv2. Stoltenberg sr. funke hos @FredrikSkavlan"""
]
docs = list(nlp.pipe(texts))
bokmal = read_dic("dictionaries/bokmal.dic")
nynorsk = read_dic("dictionaries/nynorsk.dic")
annotator = NordialAnnotator(nlp,
bokmal,
nynorsk)
annotator.open_dictionaries()
annotator.add_all()
docs = list(annotator.pipe(docs))
hmm = HMM("hmm", labels)
#hmm.fit_and_aggregate(docs)
```
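A minimal sketch of running one heuristic on its own, outside the CombinedAnnotator; it needs the nb_core_news_sm spaCy model used in the __main__ block, and the sentence is one of the examples already listed there.
```python
# Minimal sketch: one labeling function applied directly to a spaCy doc (needs nb_core_news_sm).
import spacy
from labelling_functions import dialect_pronoun
nlp = spacy.load("nb_core_news_sm")
doc = nlp("æ e så forbainna")
for start, end, label in dialect_pronoun(doc):
    print(doc[start:end], label)  # expected to flag the dialect pronoun "æ" as "pron"
```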
|
{
"source": "jerbaroo/bridge-sim",
"score": 2
}
|
#### File: internal/make/crack_question.py
```python
import datetime
from copy import deepcopy
from typing import List
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from bridge_sim import crack, sim, traffic, plot, temperature
from bridge_sim.model import Config, Point, ResponseType, Vehicle
from bridge_sim.util import flatten, print_i, print_w
def plot_crack_detection(config: Config, crack_x: float, length: float, healthy: bool):
response_type = ResponseType.YTrans
og_config = config
if not healthy:
config = crack.transverse_crack(length=length, at_x=crack_x).crack(config)
ts, tr, ta = traffic.load_traffic(config, traffic.normal_traffic(config), time=60)
# Calculate positions of sensors.
support_xs = sorted(set((s.x for s in config.bridge.supports)))
print_i(f"Support xs = {support_xs}")
x_delta = 1
mid0 = ((support_xs[0] + support_xs[1]) / 2) + x_delta
mid1 = ((support_xs[-1] + support_xs[-2]) / 2) + x_delta
point_0, point_1 = Point(x=mid0, z=-8.4), Point(x=mid1, z=-8.4)
print(f"X positions = {mid0}, {mid1}")
##########################
# Testing the positions. #
##########################
# rs = sim.responses.load(
# config=config,
# response_type=response_type,
# point_loads=[PointLoad(x=mid0, z=-8.4, load=100), PointLoad(x=mid1, z=-8.4, load=100)],
# )
# plot.contour_responses(config, rs)
# plot.top_view_bridge(config.bridge, piers=True)
# plt.show()
# Collect responses at times that vehicles cross sensors.
vehicles: List[Vehicle] = [
v for v in flatten(ts.vehicles_per_lane, Vehicle) if v.lane == 0
]
print_i(f"Amount of vehicles = {len(vehicles)}")
responses_0, responses_1 = [], []
responses = sim.responses.to_traffic_array(
config=config,
traffic_array=ta,
response_type=response_type,
points=[point_0, point_1],
)
max_i = len(responses[0]) - 1
total_time = np.round(ts.final_time - ts.start_time, 6)
print_i(
f"Total time, sensor_f, responses.shape = {total_time}, {config.sensor_freq}, {responses.shape}"
)
for v in vehicles:
time_0, time_1 = v.time_at(mid0, config.bridge), v.time_at(mid1, config.bridge)
print_i(f"Times = {time_0}, {time_1}")
index_0 = int((time_0 - ts.start_time) // config.sensor_freq)
index_1 = int((time_1 - ts.start_time) // config.sensor_freq)
print_i(f"Indices = {index_0}, {index_1}")
if 0 <= index_0 <= max_i and 0 <= index_1 <= max_i:
responses_0.append(responses[0][index_0])
responses_1.append(responses[1][index_1])
print(responses_0[-1], responses_1[-1])
responses_0 = np.array(responses_0)
responses_1 = np.array(responses_1)
plt.plot(responses_0)
plt.plot(responses_1)
plt.savefig(og_config.get_image_path("classify/crack", f"delta-{healthy}.pdf"))
plt.close()
def plot_q5_crack_substructures(
config: Config, crack_x: float, length: float, use_max: bool = False
):
plt.style.use("seaborn-bright")
feature, feature_name = np.var, "Variance"
if use_max:
feature, feature_name = np.max, "Maximum"
def legend():
plt.legend(
facecolor="white",
loc="lower right",
framealpha=1,
fancybox=False,
borderaxespad=0,
)
og_config = deepcopy(config)
OFFSET = 0.35
SENSOR_DISTS = [2, 1.75, 1.5, 1.25, 1, 0.75, 0.5, 0.25, 0.1]
# SENSOR_DISTS = [1]
lane = 0
cmap = mpl.cm.get_cmap("RdBu")
response_types = [
ResponseType.StrainXXB,
ResponseType.StrainZZB,
ResponseType.YTrans,
]
TIME_OFFSET_STDS = [0, 1, 2]
# Iterate through the SENSOR_DIST parameter and collect difference matrices!
# For each sensor dist we collect under healthy (0) and cracked (0).
matrices = [
[[([], [], []) for _ in SENSOR_DISTS] for _ in TIME_OFFSET_STDS]
for _ in response_types
]
for tos_i, time_offset_std in enumerate(TIME_OFFSET_STDS):
for SD_i, SENSOR_DIST in enumerate(SENSOR_DISTS):
# None is also healthy but needs a different dataset.
for hc_i, is_healthy in enumerate([None, True, False]):
time = 60 * 10
if is_healthy is None:
time = 60 * 5
if is_healthy in [None, True]:
config = og_config
else:
config = crack.transverse_crack(length=length, at_x=crack_x).crack(
config
)
# config.bridge.data_id = config.bridge.data_id.replace(",0", "") # TODO: remove hack!
# TODO: Different traffic per run.
if False:
time += np.random.random(1)
# First calculate vehicles on the bridge.
ts, tr, ta = traffic.load_traffic(
config, traffic.normal_traffic(config), time=time
)
vehicles: List[Vehicle] = [
v for v in flatten(ts.vehicles_per_lane, Vehicle) if v.lane == lane
]
print_i(f"Amount of vehicles = {len(vehicles)}")
# Calculate positions of sensors.
x_centers = sorted(set(support.x for support in config.bridge.supports))
d = (config.bridge.supports[0].length / 2) + OFFSET
# Maximum and minimum x positions of sensors in each mid-span, respectively.
xs_0, xs_1 = [x_centers[0] + d], [x_centers[-2] + d]
xs_1_max = crack_x - OFFSET
xs_0_max = xs_0[0] + (xs_1_max - xs_1[0])
assert xs_1_max < crack_x
assert OFFSET > 0
while True:
new_x_0 = xs_0[-1] + SENSOR_DIST
if new_x_0 >= xs_0_max:
break
xs_0.append(new_x_0)
while True:
new_x_1 = xs_1[-1] + SENSOR_DIST
if new_x_1 >= xs_1_max:
break
xs_1.append(new_x_1)
z_min = config.bridge.lanes[lane].z_min
z_max = config.bridge.lanes[lane].z_max
NUM_Z = int((z_max - z_min) / SENSOR_DIST)
# These two 2d-arrays are the sensor points in each mid-span, respectively.
sensors_0 = np.array(
[
[Point(x=x, z=z) for z in np.linspace(z_min, z_max, NUM_Z)]
for x in xs_0
]
)
sensors_1 = np.array(
[
[Point(x=x, z=z) for z in np.linspace(z_min, z_max, NUM_Z)]
for x in xs_1
]
)
assert sensors_0.shape == sensors_1.shape
# Verify position of sensors.
plot.top_view_bridge(
config.bridge, lanes=True, edges=True, piers=True, units="m"
)
for p in flatten(sensors_0, Point) + flatten(sensors_1, Point):
plt.scatter([p.x], [p.z], c="r")
plt.title(f"Sensors for crack zone at X = {int(crack_x)} m")
plt.savefig(
config.get_image_path(
"classify/q5", f"sensor-positions-sensor-dist-{SENSOR_DIST}.pdf"
)
)
plt.close()
# Load 10 minutes of weather data.
weather = temperature.load("holly-springs-18")
weather["temp"] = temperature.resize(weather["temp"], year=2018)
start_date = "14/05/2018 14:00"
end_date = "14/05/2018 14:10"
for r_i, response_type in enumerate(response_types):
# Calculate responses to traffic for both sets of sensors.
responses_0 = sim.responses.to(
config=config,
traffic_array=ta,
response_type=response_type,
points=flatten(sensors_0, Point),
weather=weather,
start_date=start_date,
end_date=end_date,
with_creep=False,
) * (1e6 if response_type.is_strain() else 1e3)
responses_1 = sim.responses.to(
config=config,
traffic_array=ta,
response_type=response_type,
points=flatten(sensors_1, Point),
weather=weather,
start_date=start_date,
end_date=end_date,
with_creep=False,
) * (1e6 if response_type.is_strain() else 1e3)
def time_func(v_: Vehicle, x_: float, b_: "Bridge") -> float:
if time_offset_std == 0:
return v_.time_at(x_, b_)
new_time = v_.time_at(
time_offset_std * np.random.random() + x_, b_
)
print(f"Time is {new_time}, was {v_.time_at(x_, b_)}")
return new_time
# For each vehicle find times and responses for each sensor.
max_index = len(responses_0[0])
for v_i, v in enumerate(vehicles):
avoid = False
matrix_0 = np.zeros(sensors_0.shape)
matrix_1 = np.zeros(sensors_1.shape)
for x_i in range(len(sensors_0)):
for z_i, sensor in enumerate(sensors_0[x_i]):
time = time_func(v, sensor.x, config.bridge)
print_i(f"Time = {time}")
index = round(
(time - ts.start_time) / config.sensor_freq
)
result = (
responses_0[x_i * NUM_Z + z_i][index]
if 0 <= index < max_index
else np.nan
)
if np.isnan(result):
avoid = True
matrix_0[x_i][z_i] = result
for x_i in range(len(sensors_1)):
for z_i, sensor in enumerate(sensors_1[x_i]):
time = time_func(v, sensor.x, config.bridge)
print_i(f"Time = {time}")
index = round(
(time - ts.start_time) / config.sensor_freq
)
result = (
responses_1[x_i * NUM_Z + z_i][index]
if 0 <= index < max_index
else np.nan
)
if np.isnan(result):
avoid = True
matrix_1[x_i][z_i] = result
# Plot the results for this vehicle.
# vmin = min(np.amin(matrix_0), np.amin(matrix_1))
# vmax = max(np.amax(matrix_0), np.amax(matrix_1))
# vmin, vmax = min(vmin, -vmax), max(vmax, -vmin)
# norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
# xticks = np.arange(len(matrix_0), dtype=np.int)
# plt.portrait()
# plt.subplot(3, 1, 1)
# plt.imshow(matrix_0.T, cmap=cmap, norm=norm, interpolation="nearest", aspect="auto")
# plt.title(f"Healthy, Var = {np.var(matrix_0.T):.2f}")
# plt.xticks(xticks)
# plt.colorbar()
# plt.subplot(3, 1, 2)
# plt.imshow(matrix_1.T, cmap=cmap, norm=norm, interpolation="nearest", aspect="auto")
# plt.title(f"Cracked, Var = {np.var(matrix_1.T):.2f}")
# plt.xticks(xticks)
# plt.colorbar()
# plt.subplot(3, 1, 3)
mat_delta = matrix_0.T - matrix_1.T
# plt.imshow(mat_delta, cmap=cmap, norm=norm, interpolation="nearest", aspect="auto")
# plt.xticks(xticks)
# plt.title(f"Difference, Var = {np.var(mat_delta):.2f}")
# plt.colorbar()
# plt.suptitle(f"{response_type.name()}, {length} m crack zone at {crack_x} m")
# plt.tight_layout(rect=[0, 0.03, 1, 0.95])
# plt.savefig(config.get_image_path("classify/q5/mat/", f"vehicle={v_i}-sensor-dist={SENSOR_DIST}-healthy={is_healthy}-{tos_i}.pdf"))
# plt.close()
if not avoid:
matrices[r_i][tos_i][SD_i][hc_i].append(mat_delta)
plt.figure(figsize=(20, 16))
for tos_i, time_offset_std in enumerate(TIME_OFFSET_STDS):
# Each response type is a row of subplots.
for r_i, response_type in enumerate(response_types):
plt.subplot(
len(response_types),
len(TIME_OFFSET_STDS),
r_i * len(TIME_OFFSET_STDS) + tos_i + 1,
)
# Matrix collection has finished!
for SD_i, SENSOR_DIST in enumerate(SENSOR_DISTS):
ref_mats, healthy_mats, crack_mats = matrices[r_i][tos_i][SD_i]
ref_features = list(map(feature, ref_mats))
healthy_features = list(map(feature, healthy_mats))
cracked_features = list(map(feature, crack_mats))
min_feature, max_feature = min(ref_features), max(ref_features) * 1.5
print_i(
f"Sensor distance = {SENSOR_DIST}, feature = {feature_name}, min max ref = {min_feature}, {max_feature}"
)
print_i(
f"Sensor distance = {SENSOR_DIST}, feature = {feature_name}, min max healthy = {min(healthy_features)}, {max(healthy_features)}"
)
print_i(
f"Sensor distance = {SENSOR_DIST}, feature = {feature_name}, min max cracked = {min(cracked_features)}, {max(cracked_features)}"
)
fprs, tprs = [], []
for TH in np.linspace(min_feature, max_feature, 100):
fp = len([1 for m in healthy_mats if feature(m) > TH])
fpr = fp / len(healthy_mats)
tp = len([1 for m in crack_mats if feature(m) > TH])
tpr = tp / len(crack_mats)
fprs.append(fpr)
tprs.append(tpr)
plt.plot(fprs, tprs, label=f"d = {SENSOR_DIST}", lw=2)
plt.xlabel("FPR")
plt.ylabel("TPR")
if tos_i == len(TIME_OFFSET_STDS) - 1 and r_i == len(response_types) - 1:
legend()
plt.title(f"{response_type.name()} (±{time_offset_std} m)")
plt.suptitle(
f"Receiver operating characteristic curves for {length} m crack zone at {crack_x} m (feature is '{feature_name.lower()}')"
)
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
plt.savefig(
config.get_image_path("classify/q5", f"roc{use_max}-{feature_name}.pdf")
)
plt.close()
```
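The threshold sweep at the end of `plot_q5_crack_substructures` builds ROC curves by hand: for each candidate threshold it counts how many healthy difference matrices have a feature value above the threshold (false positives) and how many cracked ones do (true positives). A minimal standalone sketch of that step, using made-up feature values in place of the simulated matrices:
```python
import numpy as np

# Hypothetical per-matrix feature values (e.g. the variance of a difference
# matrix); in crack_question.py these come from simulated healthy/cracked runs.
healthy_features = np.array([0.8, 1.1, 0.9, 1.3, 1.0])
cracked_features = np.array([1.9, 2.4, 1.7, 2.8, 2.1])

fprs, tprs = [], []
for thresh in np.linspace(healthy_features.min(), cracked_features.max(), 50):
    fprs.append(np.mean(healthy_features > thresh))  # false positive rate
    tprs.append(np.mean(cracked_features > thresh))  # true positive rate

# Plotting tprs against fprs gives one ROC curve, as done per sensor
# distance in plot_q5_crack_substructures above.
```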
#### File: internal/make/creep.py
```python
import matplotlib.pyplot as plt
import numpy as np
from bridge_sim import creep, shrinkage, sim
from bridge_sim.model import Config, Point, RT, ResponseType, PierSettlement
from bridge_sim.util import convert_times
def plot_creep(config: Config, x: float, z: float, n: int = 100):
"""Plot creep over n years."""
days = np.arange(n * 365)
seconds = convert_times(f="day", t="second", times=days)
strain = creep.creep_coeff(config, shrinkage.CementClass.Normal, seconds, 51)
for s in strain:
if not np.isnan(s):
break
plt.plot(days / 365, strain, lw=3, c="r")
plt.ylabel("Creep coefficient")
plt.xlabel("Time (years)")
plt.title(f"Creep")
plt.tight_layout()
plt.savefig(config.get_image_path("verification/creep", "creep_coeff.pdf"))
plt.close()
plt.landscape()
point = Point(x=x, z=z)
install_day, start_day, end_day, signal_len = 37, 37, 100 * 365, 100
for r_i, response_type in enumerate([ResponseType.YTrans, ResponseType.StrainXXB]):
plt.subplot(1, 2, r_i + 1)
pier_settlement = PierSettlement(pier=5, settlement=1 / 1e3)
for i, (name, sw, ps, sh, c) in enumerate(
[
["self-weight", True, [], False, "black"],
[
"pier settlement",
False,
[(pier_settlement, pier_settlement)],
False,
"blue",
],
["shrinkage", False, [], True, "red"],
]
):
creep_responses = sim.responses.to_creep(
config=config,
points=[point],
responses_array=np.empty((1, signal_len)),
response_type=response_type,
install_day=install_day,
start_day=start_day,
end_day=end_day,
self_weight=sw,
pier_settlement=ps,
shrinkage=sh,
)[0]
xs = (
np.interp(
np.arange(len(creep_responses)),
[0, len(creep_responses) - 1],
[start_day, end_day],
)
/ 365
)
if response_type.is_strain():
plt.semilogy(xs, creep_responses * 1e6, label=name, lw=3, c=c)
else:
plt.plot(xs, creep_responses * 1e3, label=name, lw=3, c=c)
plt.ylabel(
"Microstrain XXB" if response_type.is_strain() else "Y translation (mm)"
)
plt.xlabel("Time (years)")
plt.legend()
plt.suptitle(f"Responses to creep at X = {point.x} m, Z = {point.z} m")
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
plt.savefig(config.get_image_path("verification/creep", "creep-responses.pdf"))
plt.close()
```
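A usage sketch for `plot_creep`, assuming a `Config` instance for the bridge model is already available (its construction is project-specific and not shown in this file); X = 51 m, Z = -8.4 m matches the sensor position used elsewhere in this repository:
```python
# Sketch only: `config` is assumed to be an existing bridge_sim Config instance.
plot_creep(config, x=51, z=-8.4, n=100)  # saves plots under verification/creep/
```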
#### File: internal/make/ps_question.py
```python
import os
from copy import deepcopy
import matplotlib.pyplot as plt
import numpy as np
from bridge_sim import model, sim, temperature, traffic, plot, util
from bridge_sim.model import Config, Point, Bridge
from bridge_sim.plot.util import equal_lims
from bridge_sim.sim.responses import without
from bridge_sim.util import print_i, print_w
from bridge_sim.internal.plot import axis_cmap_r
def plot_year_effects(config: Config, x: float, z: float, num_years: int):
"""Plot all effects over a single year and 100 years at a point."""
install_day = 37
year = 2018
weather = temperature.load("holly-springs-18")
_0, _1, traffic_array = traffic.load_traffic(
config, traffic.normal_traffic(config), 60 * 10
)
(
ll_responses,
ps_responses,
temp_responses,
shrinkage_responses,
creep_responses,
) = np.repeat(None, 5)
start_day, end_day = None, None
def set_responses(n):
nonlocal weather, start_day, end_day
weather["temp"] = temperature.resize(weather["temp"], year=year)
weather = temperature.repeat(config, "holly-springs-18", weather, n)
start_date, end_date = (
weather["datetime"].iloc[0].strftime(temperature.f_string),
weather["datetime"].iloc[-1].strftime(temperature.f_string),
)
start_day, end_day = install_day, 365 * n
nonlocal ll_responses, ps_responses, temp_responses, shrinkage_responses, creep_responses
(
ll_responses,
ps_responses,
temp_responses,
shrinkage_responses,
creep_responses,
) = sim.responses.to(
config=config,
points=[model.Point(x=x, z=z)],
traffic_array=traffic_array,
response_type=model.RT.YTrans,
with_creep=True,
weather=weather,
start_date=start_date,
end_date=end_date,
install_day=install_day,
start_day=start_day,
end_day=end_day,
ret_all=True,
)
# from sklearn.decomposition import FastICA, PCA
# ica = FastICA(n_components=3)
# try_ = ica.fit_transform((ll_responses + temp_responses + creep_responses + shrinkage_responses).T)
# plt.plot(try_)
# plt.show()
plt.landscape()
lw = 2
def legend():
leg = plt.legend(
facecolor="white",
loc="upper right",
framealpha=1,
fancybox=False,
borderaxespad=0,
)
for legobj in leg.legendHandles:
legobj.set_linewidth(lw)
plt.subplot(1, 2, 1)
set_responses(1)
xax = np.interp(
np.arange(len(traffic_array)), [0, len(traffic_array) - 1], [start_day, end_day]
)
plt.plot(xax, ll_responses[0] * 1e3, c="green", label="traffic", lw=lw)
plt.plot(xax, temp_responses[0] * 1e3, c="red", label="temperature")
plt.plot(xax, shrinkage_responses[0] * 1e3, c="blue", label="shrinkage", lw=lw)
plt.plot(xax, creep_responses[0] * 1e3, c="black", label="creep", lw=lw)
legend()
plt.ylabel("Y translation (mm)")
plt.xlabel("Time (days)")
plt.subplot(1, 2, 2)
end_day = 365 * num_years
set_responses(num_years)
xax = (
np.interp(
np.arange(len(traffic_array)),
[0, len(traffic_array) - 1],
[start_day, end_day],
)
/ 365
)
plt.plot(xax, ll_responses[0] * 1e3, c="green", label="traffic", lw=lw)
plt.plot(xax, temp_responses[0] * 1e3, c="red", label="temperature")
plt.plot(xax, shrinkage_responses[0] * 1e3, c="blue", label="shrinkage", lw=lw)
plt.plot(xax, creep_responses[0] * 1e3, c="black", label="creep", lw=lw)
legend()
plt.ylabel("Y translation (mm)")
plt.xlabel("Time (years)")
equal_lims("y", 1, 2)
plt.suptitle(f"Y translation at X = {x} m, Z = {z} m")
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
plt.savefig(config.get_image_path("classify/ps", f"year-effect-{x}-{z}.png"))
def plot_sensor_placement(config: Config, num_years: int):
all_points = [
model.Point(x=x, z=z)
for x in np.linspace(config.bridge.x_min, config.bridge.x_max, 300)
for z in np.linspace(config.bridge.z_min, config.bridge.z_max, 100)
]
response_type = model.ResponseType.YTrans
install_day = 37
year = 2018
weather = temperature.load("holly-springs-18")
config.sensor_freq = 1
_0, _1, traffic_array = traffic.load_traffic(
config, traffic.normal_traffic(config), 10
)
weather["temp"] = temperature.resize(weather["temp"], year=year)
weather = temperature.repeat(config, "holly-springs-18", weather, num_years)
start_date, end_date = (
weather["datetime"].iloc[0].strftime(temperature.f_string),
weather["datetime"].iloc[-1].strftime(temperature.f_string),
)
start_day, end_day = install_day, 365 * num_years
for pier in [9]:
pier_centre = model.Point(
x=config.bridge.supports[pier].x, z=config.bridge.supports[pier].z,
)
points = [p for p in all_points if pier_centre.distance(p) < 7]
ps = model.PierSettlement(pier=pier, settlement=5 / 1e3)
(
_0,
_1,
temp_responses,
shrinkage_responses,
creep_responses,
) = sim.responses.to(
config=config,
points=points,
traffic_array=traffic_array,
response_type=response_type,
with_creep=True,
weather=weather,
start_date=start_date,
end_date=end_date,
install_day=install_day,
start_day=start_day,
end_day=end_day,
ret_all=True,
)
ps_responses = sim.responses.to_pier_settlement(
config=config,
points=points,
responses_array=_0,
response_type=response_type,
pier_settlement=[(ps, ps)],
).T[-1]
ps_responses += sim.responses.to_creep(
config=config,
points=points,
responses_array=_0,
response_type=response_type,
pier_settlement=[(ps, ps)],
install_pier_settlement=[ps],
install_day=install_day,
start_day=start_day,
end_day=end_day,
).T[-1]
long_term_responses = (
temp_responses.T[-1] + shrinkage_responses.T[-1] + creep_responses.T[-1]
)
############
# Plotting #
############
plt.landscape()
plt.subplot(3, 1, 1)
responses = sim.model.Responses(
response_type=response_type,
responses=list(zip(abs(long_term_responses) * 1e3, points)),
)
plot.contour_responses(config, responses, levels=30, interp=(200, 60))
plot.top_view_bridge(config.bridge, piers=True)
plt.subplot(3, 1, 2)
responses = sim.model.Responses(
response_type=response_type,
responses=list(zip(abs(ps_responses) * 1e3, points)),
)
plot.contour_responses(config, responses, levels=30, interp=(200, 60))
plot.top_view_bridge(config.bridge, piers=True)
plt.subplot(3, 1, 3)
responses = sim.model.Responses(
response_type=response_type,
responses=list(
zip((abs(ps_responses) - abs(long_term_responses)) * 1e3, points)
),
)
plot.contour_responses(config, responses, levels=30, interp=(200, 60))
plot.top_view_bridge(config.bridge, piers=True)
plt.savefig(config.get_image_path("classify/ps", "placement.pdf"))
def plot_removal(config: Config, x: float, z: float):
response_type = model.RT.YTrans
weather = temperature.load("holly-springs-18")
weather["temp"] = temperature.resize(weather["temp"], year=2018)
start_date, end_date = (
weather["datetime"].iloc[0].strftime(temperature.f_string),
weather["datetime"].iloc[-1].strftime(temperature.f_string),
)
install_day = 37
start_day, end_day = install_day, install_day + 365
_0, _1, traffic_array = traffic.load_traffic(
config, traffic.normal_traffic(config), time=60
)
responses = (
sim.responses.to(
config=config,
points=[model.Point(x=x, z=z)],
traffic_array=traffic_array,
response_type=response_type,
with_creep=True,
weather=weather,
start_date=start_date,
end_date=end_date,
install_day=install_day,
start_day=start_day,
end_day=end_day,
# ret_all=True,
)[0]
* 1e3
)
def legend():
return plt.legend(
facecolor="white",
loc="upper right",
framealpha=1,
fancybox=False,
borderaxespad=0,
)
plt.landscape()
plt.subplot(2, 2, 1)
xax = np.interp(
np.arange(len(weather)), [0, len(weather) - 1], [start_day, end_day]
)
plt.plot(xax, weather["temp"], c="red")
plt.ylabel("Temperature °C")
plt.xlabel("Days since T_0")
plt.title("Temperature in 2018")
plt.subplot(2, 2, 2)
xax = np.interp(
np.arange(len(responses)), [0, len(responses) - 1], [start_day, end_day]
)
plt.plot(xax, responses)
plt.ylabel("Y translation (mm)")
plt.xlabel("Days since T_0")
plt.title("Y translation in 2018")
plt.subplot(2, 2, 3)
num_samples = 365 * 24
temps = util.apply(weather["temp"], np.arange(num_samples))
rs = util.apply(responses, np.arange(num_samples))
lr, _ = temperature.regress_and_errors(temps, rs)
lr_x = np.linspace(min(temps), max(temps), 100)
y = lr.predict(lr_x.reshape((-1, 1)))
plt.plot(lr_x, y, lw=2, c="red", label="linear fit")
plt.scatter(temps, rs, s=2, alpha=0.5, label="hourly samples")
leg = legend()
leg.legendHandles[1]._sizes = [30]
plt.ylabel("Y translation (mm)")
plt.xlabel("Temperature °C")
plt.title("Linear model from 2018 data")
#############
# 2019 data #
#############
weather_2019 = temperature.load("holly-springs")
weather_2019["temp"] = temperature.resize(weather_2019["temp"], year=2019)
start_date, end_date = (
weather_2019["datetime"].iloc[0].strftime(temperature.f_string),
weather_2019["datetime"].iloc[-1].strftime(temperature.f_string),
)
start_day, end_day = install_day + 365, install_day + (2 * 365)
responses_2019 = (
sim.responses.to(
config=config,
points=[model.Point(x=x, z=z)],
traffic_array=traffic_array,
response_type=response_type,
with_creep=True,
weather=weather_2019,
start_date=start_date,
end_date=end_date,
install_day=install_day,
start_day=start_day,
end_day=end_day,
)[0]
* 1e3
)
plt.subplot(2, 2, 4)
xax_responses = np.interp(
np.arange(len(responses_2019)),
[0, len(responses_2019) - 1],
[start_day, end_day],
)
plt.plot(xax_responses, responses_2019, label="2019 responses")
temps_2019 = util.apply(weather_2019["temp"], xax_responses)
y = lr.predict(temps_2019.reshape((-1, 1)))
plt.plot(xax_responses, y, label="prediction")
plt.ylabel("Y translation (mm)")
plt.xlabel("Days since T_0")
plt.title("Y translation in 2019")
for legobj in legend().legendHandles:
legobj.set_linewidth(2.0)
plt.suptitle(f"Predicting long-term effect at X = {x} m, Z = {z} m")
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
plt.savefig(config.get_image_path("classify/ps", "regress.pdf"))
def plot_removal_2(config: Config, x: float, z: float):
response_type = model.RT.YTrans
weather_2018 = temperature.load("holly-springs-18")
weather_2018["temp"] = temperature.resize(weather_2018["temp"], year=2018)
start_date, end_date = (
weather_2018["datetime"].iloc[0].strftime(temperature.f_string),
weather_2018["datetime"].iloc[-1].strftime(temperature.f_string),
)
install_day = 37
start_day, end_day = install_day, install_day + 365
_0, _1, traffic_array = traffic.load_traffic(
config, traffic.normal_traffic(config), time=60
)
responses_2018 = (
sim.responses.to(
config=config,
points=[model.Point(x=x, z=z)],
traffic_array=traffic_array,
response_type=response_type,
with_creep=True,
weather=weather_2018,
start_date=start_date,
end_date=end_date,
install_day=install_day,
start_day=start_day,
end_day=end_day,
# ret_all=True,
)[0]
* 1e3
)
num_samples = 365 * 24
temps = util.apply(weather_2018["temp"], np.arange(num_samples))
rs = util.apply(responses_2018, np.arange(num_samples))
lr, err = temperature.regress_and_errors(temps, rs)
def legend():
plt.legend(
facecolor="white",
loc="lower left",
framealpha=1,
fancybox=False,
borderaxespad=0,
labelspacing=0.02,
)
##############################
# Iterate through each year. #
##############################
plt.landscape()
weather_2019 = temperature.load("holly-springs")
weather_2019["temp"] = temperature.resize(weather_2019["temp"], year=2019)
start_date, end_date = (
weather_2019["datetime"].iloc[0].strftime(temperature.f_string),
weather_2019["datetime"].iloc[-1].strftime(temperature.f_string),
)
for y_i, year in enumerate([2019, 2024, 2039]):
plt.subplot(3, 1, y_i + 1)
start_day = install_day + ((year - 2018) * 365)
end_day = start_day + 365
responses_2019 = (
sim.responses.to(
config=config,
points=[model.Point(x=x, z=z)],
traffic_array=traffic_array,
response_type=response_type,
with_creep=True,
weather=weather_2019,
start_date=start_date,
end_date=end_date,
install_day=install_day,
start_day=start_day,
end_day=end_day,
)[0]
* 1e3
)
# Plot actual values.
xax = np.interp(
np.arange(len(responses_2019)), [0, len(responses_2019) - 1], [0, 364]
)
plt.plot(xax, responses_2019, label="responses in year", lw=2)
# Daily prediction.
xax_responses = np.arange(365)
temps_2019 = util.apply(weather_2019["temp"], xax_responses)
y_daily = lr.predict(temps_2019.reshape((-1, 1)))
y_2_week = [
np.mean(y_daily[max(0, i - 14) : min(i + 14, len(y_daily))])
for i in range(len(y_daily))
]
for percentile, alpha in [(100, 20), (75, 40), (50, 60), (25, 100)]:
err = np.percentile(err, percentile)
p = percentile / 100
plt.fill_between(
xax_responses,
y_2_week + (err * p),
y_2_week - (err * p),
color="orange",
alpha=alpha / 100,
label=f"{percentile}% of regression error",
)
plt.plot(xax_responses, y_daily, color="black", lw=2, label="daily prediction")
plt.plot(
xax_responses, y_2_week, color="red", lw=2, label="2 week sliding window"
)
plt.ylabel("Y. trans (mm)")
plt.title(f"Year {year}")
if y_i == 0:
legend()
if y_i == 2:
plt.xlabel("Days in year")
else:
plt.tick_params("x", bottom=False, labelbottom=False)
equal_lims("y", 3, 1)
plt.suptitle(f"Predicting long-term effects at X = {x} m, Z = {z} m")
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
plt.savefig(config.get_image_path("classify/ps", "regress-2.pdf"))
def plot_removal_3(config: Config, x: float, z: float):
# First calculate the linear model.
response_type = model.RT.YTrans
weather_2018 = temperature.load("holly-springs-18")
weather_2018["temp"] = temperature.resize(weather_2018["temp"], year=2018)
start_date, end_date = (
weather_2018["datetime"].iloc[0].strftime(temperature.f_string),
weather_2018["datetime"].iloc[-1].strftime(temperature.f_string),
)
install_day = 37
start_day, end_day = install_day, install_day + 365
_0, _1, traffic_array = traffic.load_traffic(
config, traffic.normal_traffic(config), time=60
)
responses_2018 = (
sim.responses.to(
config=config,
points=[model.Point(x=x, z=z)],
traffic_array=traffic_array,
response_type=response_type,
with_creep=True,
weather=weather_2018,
start_date=start_date,
end_date=end_date,
install_day=install_day,
start_day=start_day,
end_day=end_day,
)[0]
* 1e3
)
num_samples = 365 * 24
temps = util.apply(weather_2018["temp"], np.arange(num_samples))
rs = util.apply(responses_2018, np.arange(num_samples))
lr, _ = temperature.regress_and_errors(temps, rs)
# Calculate long-term weather.
NUM_YEARS = 5
PIER = 5
long_weather = deepcopy(weather_2018)
long_weather["temp"] = temperature.resize(long_weather["temp"], year=2019)
print_i(f"Repeating {NUM_YEARS} of weather data")
long_weather = temperature.repeat(
config, "holly-springs-18", long_weather, NUM_YEARS
)
print_i(f"Repeated {NUM_YEARS} of weather data")
start_date, end_date = (
long_weather["datetime"].iloc[0].strftime(temperature.f_string),
long_weather["datetime"].iloc[-1].strftime(temperature.f_string),
)
start_day = install_day + 365
end_day = start_day + 365 * NUM_YEARS
MAX_PS = 20
THRESHES = np.arange(0, MAX_PS, 1)
acc_mat = np.zeros((MAX_PS, len(THRESHES)))
fp_mat = np.zeros(acc_mat.shape)
fn_mat = np.zeros(acc_mat.shape)
tp_mat = np.zeros(acc_mat.shape)
tn_mat = np.zeros(acc_mat.shape)
for p_i, ps in enumerate(range(MAX_PS)):
print_i(f"Using pier settlement = {ps} mm")
long_responses = sim.responses.to(
config=config,
points=[model.Point(x=x, z=z)],
traffic_array=traffic_array,
response_type=response_type,
with_creep=True,
pier_settlement=[
(
model.PierSettlement(pier=PIER, settlement=0.00001),
model.PierSettlement(pier=PIER, settlement=ps / 1e3),
)
],
install_pier_settlement=[],
weather=long_weather,
start_date=start_date,
end_date=end_date,
install_day=install_day,
start_day=start_day,
end_day=end_day,
ret_all=False,
ignore_pier_creep=True,
)
healthy_responses = sim.responses.to(
config=config,
points=[model.Point(x=x, z=z)],
traffic_array=traffic_array,
response_type=response_type,
with_creep=True,
pier_settlement=[],
install_pier_settlement=None,
weather=long_weather,
start_date=start_date,
end_date=end_date,
install_day=install_day,
start_day=start_day,
end_day=end_day,
ret_all=False,
ignore_pier_creep=True,
)
plt.plot(healthy_responses[0] * 1e3, label="healthy")
plt.plot(long_responses[0] * 1e3, label="pier settlement")
plt.legend()
plt.savefig(config.get_image_path("hello", f"q3-{p_i}.png"))
plt.close()
for t_i, thresh in enumerate(THRESHES):
thresh *= -1
print(thresh)
print(max(healthy_responses[0]))
print(min(healthy_responses[0]))
print(max(long_responses[0]))
print(min(long_responses[0]))
fp = len([x for x in healthy_responses[0] * 1e3 if x <= thresh])
tp = len([x for x in long_responses[0] * 1e3 if x <= thresh])
tn = len([x for x in healthy_responses[0] * 1e3 if x > thresh])
fn = len([x for x in long_responses[0] * 1e3 if x > thresh])
acc_mat[p_i][t_i] = (tp + tn) / (tp + tn + fp + fn)
fp_mat[p_i][t_i] = fp
tp_mat[p_i][t_i] = tp
fn_mat[p_i][t_i] = fn
tn_mat[p_i][t_i] = tn
##################
# Save matrices. #
##################
plt.imshow(acc_mat, cmap=axis_cmap_r)
plt.savefig(config.get_image_path("hello", f"mat.png"))
plt.close()
plt.imshow(fp_mat, cmap=axis_cmap_r)
plt.savefig(config.get_image_path("hello", f"mat-fp.png"))
plt.close()
plt.imshow(fn_mat, cmap=axis_cmap_r)
plt.savefig(config.get_image_path("hello", f"mat-fn.png"))
plt.close()
plt.imshow(tp_mat, cmap=axis_cmap_r)
plt.savefig(config.get_image_path("hello", f"mat-tp.png"))
plt.close()
plt.imshow(tn_mat, cmap=axis_cmap_r)
plt.savefig(config.get_image_path("hello", f"mat-tn.png"))
plt.close()
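# support_with_points: attach to each support a sensor Point offset by delta_x
# beyond the support (away from midspan) and a reference to its longitudinally
# symmetric "opposite" support; raises ValueError if no opposite is found.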
def support_with_points(bridge: Bridge, delta_x: float):
for support in bridge.supports:
if support.x < bridge.length / 2:
s_x = support.x - ((support.length / 2) + delta_x)
else:
s_x = support.x + ((support.length / 2) + delta_x)
support.point = Point(x=s_x, z=support.z)
for support_2 in bridge.supports:
if support_2.z == support.z and np.isclose(
support_2.x, bridge.length - support.x
):
support.opposite_support = support_2
print_w(f"Support sensor at X = {support.point.x}, Z = {support.point.z}")
if not hasattr(support, "opposite_support"):
raise ValueError("No opposite support")
return bridge.supports
def plot_min_diff(config: Config, num_years: int, delta_x: float = 0.5):
plt.landscape()
log_path = config.get_image_path("classify/q1", "min-thresh.txt")
if os.path.exists(log_path):
os.remove(log_path)
install_day = 37
start_day, end_day = install_day, 365 * num_years
year = 2018
weather = temperature.load("holly-springs-18")
_0, _1, traffic_array = traffic.load_traffic(
config, traffic.normal_traffic(config), 60 * 10
)
weather["temp"] = temperature.resize(weather["temp"], year=year)
# weather = temperature.repeat(config, "holly-springs-18", weather, num_years)
start_date, end_date = (
weather["datetime"].iloc[0].strftime(temperature.f_string),
weather["datetime"].iloc[-1].strftime(temperature.f_string),
)
# For each support load the responses to traffic and assign to "Support".
for s_i, support in enumerate(support_with_points(config.bridge, delta_x=delta_x)):
support.responses = (
sim.responses.to_traffic_array(
config=config,
points=[support.point],
traffic_array=traffic_array,
response_type=model.RT.YTrans,
# with_creep=True,
# weather=weather,
# start_date=start_date,
# end_date=end_date,
# install_day=install_day,
# start_day=start_day,
# end_day=end_day,
)[0]
* 1e3
)
# Determine max difference for each sensor pair.
for s_i, support in enumerate(config.bridge.supports):
min1, max1 = min(support.responses), max(support.responses)
min2, max2 = (
min(support.opposite_support.responses),
max(support.opposite_support.responses),
)
delta_1, delta_2 = abs(min1 - max2), abs(min2 - max1)
# max_delta = max(abs(support.responses - support.opposite_support.responses))
support.max_delta = max(delta_1, delta_2)
to_write = f"Max delta {support.max_delta} for support {s_i}, sensor at X = {support.point.x}, Z = {support.point.z}"
with open(log_path, "a") as f:
f.write(to_write)
# Bridge supports.
plot.top_view_bridge(config.bridge, lanes=True, piers=True, units="m")
for s_i, support in enumerate(config.bridge.supports):
if s_i % 4 == 0:
support.max_delta = max(
support.max_delta, config.bridge.supports[s_i + 3].max_delta
)
elif s_i % 4 == 1:
support.max_delta = max(
support.max_delta, config.bridge.supports[s_i + 1].max_delta
)
elif s_i % 4 == 2:
support.max_delta = max(
support.max_delta, config.bridge.supports[s_i - 1].max_delta
)
elif s_i % 4 == 3:
support.max_delta = max(
support.max_delta, config.bridge.supports[s_i - 3].max_delta
)
plt.scatter([support.point.x], [support.point.z], c="red")
plt.annotate(
f"{np.around(support.max_delta, 2)} mm",
xy=(support.point.x - 3, support.point.z + 2),
color="b",
size="large",
)
plt.title("Maximum difference between symmetric sensors")
plt.tight_layout()
plt.savefig(config.get_image_path("classify/q1", "min-thresh.pdf"))
def plot_contour_q2(config: Config, num_years: int, delta_x: float = 0.5):
# Select points: over the deck and the sensors!
points = [
Point(x=x, z=z)
for x in np.linspace(config.bridge.x_min, config.bridge.x_max, 100)
for z in np.linspace(config.bridge.z_min, config.bridge.z_max, 30)
]
sensor_points = [
s.point for s in support_with_points(config.bridge, delta_x=delta_x)
]
points += sensor_points
install_day = 37
start_day, end_day = install_day, 365 * num_years
year = 2018
weather = temperature.load("holly-springs-18")
# Traffic contributes little to the response, and we take the maximum over 4
# sensors, so a short traffic signal is sufficient.
_0, _1, traffic_array = traffic.load_traffic(
config, traffic.normal_traffic(config), 10
)
weather["temp"] = temperature.resize(weather["temp"], year=year)
# weather = temperature.repeat(config, "holly-springs-18", weather, num_years)
start_date, end_date = (
weather["datetime"].iloc[0].strftime(temperature.f_string),
weather["datetime"].iloc[-1].strftime(temperature.f_string),
)
# Generate the data!
responses = (
sim.responses.to(
config=config,
points=points,
traffic_array=traffic_array,
response_type=model.RT.YTrans,
with_creep=True,
weather=weather,
start_date=start_date,
end_date=end_date,
install_day=install_day,
start_day=start_day,
end_day=end_day,
)
* 1e3
)
# Convert to Responses, determining maximum response per point.
max_responses = [min(rs) for rs in responses]
sensor_responses = max_responses[-len(sensor_points) :]
responses = sim.model.Responses(
response_type=model.RT.YTrans,
responses=[(r, p) for r, p in zip(max_responses, points)],
units="mm",
).without(without.edges(config, 2))
# Adjust maximum responses per sensor so they are symmetric!
for s_i, support in enumerate(support_with_points(config.bridge, delta_x=delta_x)):
support.max_response = sensor_responses[s_i]
for support in support_with_points(config.bridge, delta_x=delta_x):
support.max_response = min(
support.max_response, support.opposite_support.max_response
)
for s_i, support in enumerate(support_with_points(config.bridge, delta_x=delta_x)):
if s_i % 4 == 0:
support.max_response = max(
support.max_response, config.bridge.supports[s_i + 3].max_response
)
elif s_i % 4 == 1:
support.max_response = max(
support.max_response, config.bridge.supports[s_i + 1].max_response
)
elif s_i % 4 == 2:
support.max_response = max(
support.max_response, config.bridge.supports[s_i - 1].max_response
)
elif s_i % 4 == 3:
support.max_response = max(
support.max_response, config.bridge.supports[s_i - 3].max_response
)
plt.landscape()
plot.contour_responses(config, responses, interp=(200, 60), levels=20)
plot.top_view_bridge(config.bridge, lanes=True, piers=True, units="m")
for s_i, support in enumerate(support_with_points(config.bridge, delta_x=delta_x)):
plt.scatter([support.point.x], [support.point.z], c="black")
plt.annotate(
f"{np.around(support.max_response, 2)}",
xy=(support.point.x - 3, support.point.z + 2),
color="black",
size="large",
)
plt.title(
f"Minimum Y translation over {num_years} years \n from traffic, temperature, shrinkage & creep"
)
plt.tight_layout()
plt.savefig(config.get_image_path("classify/q2", "q2-contour.pdf"))
plt.close()
def plot_min_ps_1(config: Config, num_years: int, delta_x: float = 0.5):
THRESH = 2 # Pier settlement from question 1.
plt.landscape()
log_path = config.get_image_path("classify/q1b", "min-ps.txt")
if os.path.exists(log_path): # Start with fresh logfile.
os.remove(log_path)
install_day = 37
start_day, end_day = install_day, 365 * num_years
year = 2018
weather = temperature.load("holly-springs-18")
_0, _1, traffic_array = traffic.load_traffic(
config, traffic.normal_traffic(config), 60 * 10
)
weather["temp"] = temperature.resize(weather["temp"], year=year)
# weather = temperature.repeat(config, "holly-springs-18", weather, num_years)
start_date, end_date = (
weather["datetime"].iloc[0].strftime(temperature.f_string),
weather["datetime"].iloc[-1].strftime(temperature.f_string),
)
# For each support..
for s_i, support in enumerate(support_with_points(config.bridge, delta_x=delta_x)):
# ..increase pier settlement until threshold triggered.
for settlement in np.arange(0, 10, 0.1):
responses = (
sim.responses.to(
config=config,
points=[support.point, support.opposite_support.point],
traffic_array=traffic_array,
response_type=model.RT.YTrans,
with_creep=True,
weather=weather,
start_date=start_date,
end_date=end_date,
install_day=install_day,
start_day=start_day,
end_day=end_day,
pier_settlement=[
(
model.PierSettlement(pier=s_i, settlement=0),
model.PierSettlement(pier=s_i, settlement=settlement / 1e3),
)
],
skip_weather_interp=True,
)
* 1e3
)
delta = max(abs(responses[0] - responses[1]))
to_write = f"Max delta {delta} for settlement {settlement} mm for support {s_i}, sensor at X = {support.point.x}, Z = {support.point.z}"
print_w(to_write)
# Because of "abs", "delta" will be positive.
if delta > THRESH:
break
# Write the minimum settlement value for this support to a file.
with open(log_path, "a") as f:
f.write(to_write)
# Annotate the support with the minimum settlement value.
plt.scatter([support.point.x], [support.point.z], c="red")
plt.annotate(
f"{np.around(settlement, 2)} mm",
xy=(support.point.x - 3, support.point.z + 2),
color="b",
size="large",
)
# Plot the results.
plot.top_view_bridge(config.bridge, lanes=True, piers=True, units="m")
plt.title("Minimum pier settlement detected (Question 1B)")
plt.tight_layout()
plt.savefig(config.get_image_path("classify/q1b", "q1b-min-ps.pdf"))
plt.close()
def plot_min_ps_2(config: Config, num_years: int, delta_x: float = 0.5):
THRESH = 6 # Pier settlement from question 1.
plt.landscape()
log_path = config.get_image_path("classify/q2b", "2b-min-ps.txt")
if os.path.exists(log_path): # Start with fresh logfile.
os.remove(log_path)
install_day = 37
start_day, end_day = install_day, 365 * num_years
year = 2018
weather = temperature.load("holly-springs-18")
_0, _1, traffic_array = traffic.load_traffic(
config, traffic.normal_traffic(config), 60 * 10
)
weather["temp"] = temperature.resize(weather["temp"], year=year)
# weather = temperature.repeat(config, "holly-springs-18", weather, num_years)
start_date, end_date = (
weather["datetime"].iloc[0].strftime(temperature.f_string),
weather["datetime"].iloc[-1].strftime(temperature.f_string),
)
for s_i, support in enumerate(support_with_points(config.bridge, delta_x=delta_x)):
# Increase pier settlement until threshold triggered.
for settlement in np.arange(0, 10, 0.1):
responses = (
sim.responses.to(
config=config,
points=[support.point],
traffic_array=traffic_array,
response_type=model.RT.YTrans,
with_creep=True,
weather=weather,
start_date=start_date,
end_date=end_date,
install_day=install_day,
start_day=start_day,
end_day=end_day,
pier_settlement=[
(
model.PierSettlement(pier=s_i, settlement=0),
model.PierSettlement(pier=s_i, settlement=settlement / 1e3),
)
],
skip_weather_interp=True,
)
* 1e3
)
# Determine the minimum response for this level of settlement.
max_r = min(responses[0])
to_write = f"Min {max_r} for settlement {settlement} mm for support {s_i}, sensor at X = {support.point.x}, Z = {support.point.z}"
print_w(to_write)
if max_r < -THRESH:
break
# Write the minimum response and settlement for this support to a file.
with open(log_path, "a") as f:
f.write(to_write)
plt.scatter([support.point.x], [support.point.z], c="red")
plt.annotate(
f"{np.around(settlement, 2)} mm",
xy=(support.point.x - 3, support.point.z + 2),
color="b",
size="large",
)
plot.top_view_bridge(config.bridge, lanes=True, piers=True, units="m")
plt.title("Minimum pier settlement detected (Question 2B)")
plt.tight_layout()
plt.savefig(config.get_image_path("classify/q2b", "q2b-min-ps.pdf"))
```
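The `plot_removal*` functions above all rely on the same regression step: sample hourly temperature and response, fit a linear temperature-to-response model, and use it to predict the temperature-driven baseline. A minimal sketch of that step with scikit-learn, assuming `temps` and `responses` are equal-length 1-D arrays of hourly samples (`temperature.regress_and_errors` in this library presumably wraps something similar):
```python
import numpy as np
from sklearn.linear_model import LinearRegression

# Hypothetical hourly samples; in ps_question.py these come from util.apply
# over the simulated weather and response time series.
temps = np.array([5.0, 8.0, 12.0, 18.0, 25.0, 30.0])       # °C
responses = np.array([-1.2, -1.0, -0.8, -0.5, -0.1, 0.2])  # mm

lr = LinearRegression().fit(temps.reshape(-1, 1), responses)
predicted = lr.predict(temps.reshape(-1, 1))
residuals = responses - predicted  # analogous to the returned regression errors

print(lr.coef_[0], lr.intercept_)
```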
#### File: internal/make/temperature.py
```python
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
from bridge_sim import temperature
from bridge_sim.model import Config, Point, ResponseType
from bridge_sim.sim.model import Responses
from bridge_sim.util import plot_hours, print_i, safe_str
from bridge_sim.plot import contour_responses, top_view_bridge
from bridge_sim.plot.util import equal_lims
def temp_contour_plot(c: Config, temp_bottom: float, temp_top: float):
"""Contour plot of responses for a temperature profile."""
# Points on the deck to collect fem.
deck_points = [
Point(x=x, y=0, z=z)
for x in np.linspace(
c.bridge.x_min, c.bridge.x_max, num=int(c.bridge.length * 2)
)
for z in np.linspace(
c.bridge.z_min, c.bridge.z_max, num=int(c.bridge.width * 2)
)
]
def plot_response_type(response_type: ResponseType):
# Temperature effect.
temp_effect = temperature.effect(
config=c,
response_type=response_type,
points=deck_points,
temps_bt=([temp_bottom], [temp_top]),
).T[0]
print_i(f"temp shape = {temp_effect.shape}")
responses = Responses(
response_type=response_type,
responses=[
(temp_effect[p_ind], deck_points[p_ind])
for p_ind in range(len(deck_points))
],
).without_nan_inf()
if response_type.is_strain():
responses = responses.map(lambda r: r * 1e6)
else:
responses.units = "mm"
responses = responses.map(lambda r: r * 1e3)
top_view_bridge(c.bridge, abutments=True, piers=True, units="m")
contour_responses(config=c, responses=responses)
plt.title(
"Microstrain XXB" if response_type.is_strain() else response_type.name()
)
plt.landscape()
plt.subplot(2, 1, 1)
plot_response_type(ResponseType.YTrans)
plt.subplot(2, 1, 2)
plot_response_type(ResponseType.StrainXXB)
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
plt.suptitle(
f"T_REF, T_bot, T_top = {c.bridge.ref_temp_c} °C, {temp_bottom} °C, {temp_top} °C"
)
plt.savefig(
c.get_image_path(
"thesis/temperature", safe_str(f"contour-{temp_bottom}-{temp_top}") + ".pdf"
)
)
plt.close()
def temp_profile_plot(c: Config, fname: str):
"""Plot the temperature profile throughout the bridge deck."""
x, z = 21, -8.4
# Load weather data.
weather = temperature.load(name=fname)
weather["temp"] = temperature.resize(list(weather["temp"]), year=2019)
# Convert to minutely.
from_ = datetime.fromisoformat(f"2019-01-01T00:00")
to = datetime.fromisoformat(f"2019-12-31T23:59")
temp_year = temperature.from_to_mins(weather, from_, to)
# Temperature profile.
temps_year_bottom, temps_year_top = temperature.temp_profile(
temps=temp_year["temp"], solar=temp_year["solar"],
)
# Calculate responses.
uniform_year_y, linear_year_y, effect_year_y = temperature.effect(
config=c,
response_type=ResponseType.YTrans,
points=[Point(x=x, y=0, z=z)],
weather=temp_year,
d=True,
)
effect_year_s = temperature.effect(
config=c,
response_type=ResponseType.StrainXXB,
points=[Point(x=x, y=0, z=z)],
weather=temp_year,
)
def legend_lw(leg):
for legobj in leg.legendHandles:
legobj.set_linewidth(3.0)
plt.portrait()
plt.subplot(3, 2, 1)
plt.plot(temp_year["datetime"], temps_year_top, label="Top of deck", c="tab:red")
plt.plot(temp_year["datetime"], temp_year["temp"], label="Air", c="tab:blue")
plt.plot(
temp_year["datetime"], temps_year_bottom, label="Bottom of deck", c="tab:orange"
)
plt.ylabel("Temperature °C")
legend_lw(plt.legend(loc="lower right"))
plt.title("Annual temperature")
plt.subplot(3, 2, 5)
plt.plot(temp_year["datetime"], linear_year_y, label="Linear", c="tab:blue")
plt.plot(temp_year["datetime"], uniform_year_y, label="Uniform", c="tab:orange")
plt.ylabel("Temperature °C")
legend_lw(plt.legend(loc="lower right"))
plt.title("Annual gradient")
plt.subplot(3, 2, 3)
plt.scatter(temp_year["datetime"], temp_year["solar"], c="tab:red", s=1)
plt.ylabel("Solar radiation (W/m²)")
plt.title("Annual solar radiation")
from_ = datetime.fromisoformat(f"2019-07-01T00:00")
to = datetime.fromisoformat(f"2019-07-02T23:59")
temp_month = temperature.from_to_mins(df=temp_year, from_=from_, to=to)
# Temperature profile.
temps_month_bottom, temps_month_top = temperature.temp_profile(
temps=temp_month["temp"], solar=temp_month["solar"],
)
uniform_month_y, linear_month_y, effect_month_y = temperature.effect(
config=c,
response_type=ResponseType.YTrans,
points=[Point(x=x, y=0, z=z)],
weather=temp_month,
d=True,
)
plt.subplot(3, 2, 2)
plt.plot(
temp_month["datetime"], temps_month_top, label="Top of deck", c="tab:red", lw=3
)
plt.plot(
temp_month["datetime"], temp_month["temp"], label="Air", c="tab:blue", lw=3
)
plt.plot(
temp_month["datetime"],
temps_month_bottom,
label="Top of deck",
c="tab:orange",
lw=3,
)
legend_lw(plt.legend(loc="lower right"))
plt.title("Two day temperature")
plt.subplot(3, 2, 6)
plt.plot(temp_month["datetime"], linear_month_y, label="Linear", c="tab:blue", lw=3)
plt.plot(
temp_month["datetime"], uniform_month_y, label="Uniform", c="tab:orange", lw=3
)
legend_lw(plt.legend(loc="lower right"))
plt.title("Two day gradient")
plt.subplot(3, 2, 4)
plt.scatter(temp_year["datetime"], temp_year["solar"], c="tab:red", s=1)
plt.title("Two day solar radiation")
for ps in [(1, 2), (3, 4), (5, 6)]:
plt.subplot(3, 2, ps[1])
plt.gca().set_yticklabels([])
equal_lims("y", 3, 2, ps)
plt.gcf().autofmt_xdate()
plt.tight_layout()
plt.savefig(c.get_image_path("thesis/temperature", "profile.pdf"))
plt.close()
def temperature_effect(config: Config, fname: str):
weather = temperature.load(name=fname)
weather["temp"] = temperature.resize(weather["temp"], year=2019)
print_i(f"Min/max temp = {min(weather['temp'])}, {max(weather['temp'])}")
print_i(f"Min/max solar = {min(weather['solar'])}, {max(weather['solar'])}")
# Plot the temperature.
plt.portrait()
plt.subplot(4, 1, 1)
plt.scatter(weather["datetime"], weather["temp"], c="b", s=1)
plt.ylabel("Temperature (°C)")
plt.gcf().autofmt_xdate()
plt.title(f"Temperature from {str(fname[0]).upper()}{fname[1:]}")
# Plot the temperature in May.
plt.subplot(4, 1, 2)
weather_may = temperature.from_to_mins(
weather,
from_=datetime.strptime("01/05/19 00:00", "%d/%m/%y %H:%M"),
to=datetime.strptime("31/05/19 23:59", "%d/%m/%y %H:%M"),
)
plot_hours(weather_may)
plt.scatter(weather_may["datetime"], weather_may["temp"], c="b", s=1)
plt.ylabel("Temperature (°C)")
plt.gcf().autofmt_xdate()
plt.title(f"Temperature in May")
# Plot the solar radiation.
plt.subplot(4, 1, 3)
plt.scatter(weather["datetime"], weather["solar"], c="r", s=1)
plt.ylabel("Solar radiation")
plt.gcf().autofmt_xdate()
plt.title(f"Solar radiation from {str(fname[0]).upper()}{fname[1:]}")
# Plot the effect at two points.
plt.subplot(4, 1, 4)
effect = temperature.effect(
config=config,
response_type=ResponseType.StrainXXB,
points=[Point(x=51)],
weather=weather,
)[0]
plt.scatter(weather["datetime"], effect * 1e6, c="g", s=1)
plt.ylabel("Microstrain XXB")
plt.gcf().autofmt_xdate()
plt.title("Strain at X = 51 in May")
print_i(f"Effect shape = {effect.shape}")
# Save.
plt.tight_layout()
plt.savefig(config.get_image_path("verification/temperature", f"{fname}.png"))
plt.close()
```
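A usage sketch for the plotting helpers above, again assuming an existing `Config` instance; `"holly-springs"` is one of the weather datasets referenced throughout this repository:
```python
# Sketch only: `c` is assumed to be an existing bridge_sim Config instance.
temp_contour_plot(c, temp_bottom=21, temp_top=30)
temp_profile_plot(c, fname="holly-springs")
temperature_effect(config=c, fname="holly-springs")
```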
#### File: internal/make/traffic.py
```python
import matplotlib.pyplot as plt
from bridge_sim import temperature
from bridge_sim.model import Config, Point, ResponseType, PierSettlement
from bridge_sim.sim.responses import to_traffic_array
from bridge_sim.traffic import normal_traffic
from bridge_sim.plot.animate import animate_traffic as at
from bridge_sim.plot.animate import animate_traffic_array as ata
from bridge_sim.plot.animate import animate_responses as ar
def animate_traffic(config: Config):
time = 10
config.sensor_freq = 1 / 10
traffic_scenario = normal_traffic(config=config)
traffic_sequence = traffic_scenario.traffic_sequence(config, time)
traffic = traffic_sequence.traffic()
at(
config=config,
traffic_sequence=traffic_sequence,
traffic=traffic,
save=config.get_image_path("verification/animate", "traffic.mp4"),
)
traffic_array = traffic_sequence.traffic_array()
ata(
config=config,
traffic_sequence=traffic_sequence,
traffic_array=traffic_array,
save=config.get_image_path("verification/animate", "traffic_array.mp4"),
)
def animate_responses(config: Config):
time = 60
config.sensor_freq = 1 / 10 # 10 samples per second.
traffic_scenario = normal_traffic(config=config)
traffic_sequence = traffic_scenario.traffic_sequence(config, time)
weather = temperature.load("holly-springs")
weather["temp"] = temperature.resize(weather["temp"], year=2019)
ps = PierSettlement(4, 1.2 / 1e3)
ar(
config=config,
traffic_sequence=traffic_sequence,
response_type=ResponseType.YTrans,
units="mm",
save=config.get_image_path("verification/animate", "traffic-responses.mp4"),
with_creep=True,
pier_settlement=[(ps, ps)],
weather=weather,
start_date="01/05/2019 00:00",
end_date="02/05/2019 00:00",
install_day=37,
start_day=366 * 10,
end_day=366 * 10 + 1,
)
def plot_responses(config: Config):
max_time = 10
traffic_scenario = normal_traffic(config=config, lam=5, min_d=2)
traffic_sequence = traffic_scenario.traffic_sequence(config, max_time)
traffic_array = traffic_sequence.traffic_array()
responses = to_traffic_array(
config, traffic_array, ResponseType.YTrans, points=[Point(x=51, z=-8.4)],
)
plt.plot(responses[0])
plt.show()
```
#### File: internal/make/validate.py
```python
import os
import matplotlib.pyplot as plt
import numpy as np
from bridge_sim import sim, plot, temperature
from bridge_sim.crack import transverse_crack
from bridge_sim.internal.validate import _displa_sensor_xz, _strain_sensor_xz
from bridge_sim.model import Config, Point, ResponseType, PierSettlement
from bridge_sim.plot.util import equal_lims
from bridge_sim.sim.model import Responses
from bridge_sim.sim.responses import to_traffic_array, without
from bridge_sim.traffic import TrafficSequence
from bridge_sim.util import print_i, project_dir
from bridge_sim.vehicles import truck1
def truck_1_time_series(c: Config):
"""Time series of 3 sensors to Truck 1's movement."""
side_s = 7
side = int(side_s * (1 / c.sensor_freq))
assert truck1.x_at(time=0, bridge=c.bridge) == 0
# Get times and loads for Truck 1.
end_time = truck1.time_left_bridge(c.bridge)
traffic_array = (
TrafficSequence(
config=c,
vehicles_per_lane=[[truck1], []],
warmed_up_at=0,
final_time=end_time,
).traffic_array()
/ 1e6
)
def legend():
for line in plt.legend().get_lines():
line.set_linewidth(4)
# Find points of each sensor.
displa_labels = ["U13", "U26", "U29"]
displa_points = [
Point(x=sensor_x, y=0, z=sensor_z)
for sensor_x, sensor_z in [
_displa_sensor_xz(displa_label) for displa_label in displa_labels
]
]
strain_labels = ["T1", "T10", "T11"]
strain_points = [
Point(x=sensor_x, y=0, z=sensor_z)
for sensor_x, sensor_z in [
_strain_sensor_xz(strain_label) for strain_label in strain_labels
]
]
for strain_point in strain_points:
print(f"Strain point = {strain_point}")
for displa_point in displa_points:
print(f"Displa point = {displa_point}")
################
# Vert. trans. #
################
plt.portrait()
# Ensure points and truck are on the same lane.
assert all(p.z < 0 for p in displa_points)
# Results from simulation.
responses_truck1 = to_traffic_array(
config=c,
traffic_array=traffic_array,
response_type=ResponseType.YTrans,
points=displa_points,
)
for s_i, sensor_responses in enumerate(responses_truck1):
plt.subplot(len(displa_points), 1, s_i + 1)
# Find the center of the plot, minimum point in the data.
data_center = 10
for i in range(len(sensor_responses)):
if sensor_responses[i] < sensor_responses[data_center]:
data_center = i
left, right = (
max(0, data_center - side),
min(len(sensor_responses), data_center + side),
)
plot_data = np.array(sensor_responses[left:right]) * 1e3
x = np.arange(len(plot_data)) / 700
if data_center - side < 0:
x += abs(data_center - side) / 700
plt.plot(x, plot_data, c="tab:blue", label="Simulation")
# Results from experiment.
center = 13500
plot_offsets = [-1350, -825, 0]
for s_i, displa_label in enumerate(displa_labels):
plt.subplot(len(displa_points), 1, s_i + 1)
with open(
os.path.join(
project_dir(), f"data/validation/experiment/D1a-{displa_label}.txt"
)
) as f:
data = list(map(float, f.readlines()))
print_i(f"Total Y translation data length = {len(data)}")
new_center = center + plot_offsets[s_i]
plot_data = data[new_center - side : new_center + side]
x = np.arange(len(plot_data)) / 700
plt.plot(x, plot_data, c="tab:red", label="Experiment")
point = displa_points[s_i]
plt.scatter(
[0],
[0],
label=f"{displa_labels[s_i]}: X = {np.around(point.x, 3)} m, Z = {np.around(point.z, 3)} m",
alpha=0,
)
# Labels/titles.
legend()
plt.ylabel(f"{ResponseType.YTrans.name()} (mm)")
plt.suptitle(
"Y translation from Truck 1 on bridge 705\nstatic simulation vs. dynamic test"
)
if s_i < len(displa_labels) - 1:
plt.tick_params(axis="x", bottom=False, labelbottom=False)
else:
plt.xlabel("Time (s)")
plt.tight_layout(rect=[0, 0.03, 1, 0.93])
plt.savefig(c.get_image_path("validation/dynamic", "y-trans.pdf"))
plt.close()
##########
# Strain #
##########
plt.portrait()
# Results from simulation.
responses_truck1 = to_traffic_array(
config=c,
traffic_array=traffic_array,
response_type=ResponseType.StrainXXB,
points=strain_points,
)
for s_i, sensor_responses in enumerate(responses_truck1):
plt.subplot(len(strain_points), 1, s_i + 1)
data_center = 0
for i in range(len(sensor_responses)):
if sensor_responses[i] > sensor_responses[data_center]:
data_center = i
plt.plot(
np.array(sensor_responses[data_center - side : data_center + side]) * 1e6,
c="tab:blue",
label="Simulation",
)
# Results from experiment.
center = 13000
plot_offsets = [-370, -290, -100]
for s_i, strain_label in enumerate(strain_labels):
plt.subplot(len(strain_points), 1, s_i + 1)
with open(
os.path.join(
project_dir(), f"data/validation/experiment/D1a-{strain_label}.txt"
)
) as f:
data = list(map(float, f.readlines()))
print_i(f"Total strain data length = {len(data)}")
new_center = center + plot_offsets[s_i]
plt.plot(
data[new_center - side : new_center + side], c="tab:red", label="Experiment"
)
point = strain_points[s_i]
plt.scatter(
[0],
[0],
label=f"{strain_labels[s_i]}: X = {np.around(point.x, 3)} m, Z = {np.around(point.z, 3)} m",
alpha=0,
)
# Labels/titles.
plt.suptitle(
"Microstrain XXB from Truck 1 on bridge 705\nstatic simulation vs. dynamic test"
)
legend()
plt.ylabel("Microstrain XXB")
if s_i < len(strain_labels) - 1:
plt.tick_params(axis="x", bottom=False, labelbottom=False)
else:
plt.xlabel("Time (s)")
# set_labels(ResponseType.StrainXXB.name(), "Time")
plt.tight_layout(rect=[0, 0.03, 1, 0.93])
plt.savefig(c.get_image_path("validation/dynamic", "strain.pdf"))
plt.close()
def stress_strength_plot(config: Config, top: bool):
"""Plot the difference of tensile strength and stress under load."""
plt.portrait()
response_type = ResponseType.StrainXXT if top else ResponseType.StrainXXB
settlement_mm = 3
temp_bottom, temp_top = 21, 30
deck_points = [
Point(x=x, y=0, z=z)
for x in np.linspace(config.bridge.x_min, config.bridge.x_max, 200)
for z in np.linspace(config.bridge.z_min, config.bridge.z_max, 60)
]
# Pier settlement.
plt.subplot(3, 1, 1)
responses = sim.responses.load(
config=config,
response_type=response_type,
pier_settlement=[PierSettlement(pier=9, settlement=settlement_mm / 1e3)],
).to_stress(config.bridge)
responses.units = "N/mm²"
plot.top_view_bridge(bridge=config.bridge, abutments=True, piers=True, units="m")
plot.contour_responses(config, responses=responses, decimals=2, interp=(200, 60))
plt.legend(loc="upper right", borderaxespad=0)
plt.title(f"{settlement_mm} mm pier settlement")
print("Calculated stress from pier settlement")
# Temperature effect.
plt.subplot(3, 1, 2)
print(f"deck_points.shape = {np.array(deck_points).shape}")
temp_effect = temperature.effect(
config=config,
response_type=response_type,
points=deck_points,
temps_bt=([temp_bottom], [temp_top]),
).T[0]
print(f"temp_effect.shape = {np.array(temp_effect).shape}")
responses = (
Responses(
response_type=response_type, responses=list(zip(temp_effect, deck_points))
)
.without_nan_inf()
.without(without.edges(c=config, radius=2))
.to_stress(config.bridge)
)
responses.units = "N/mm²"
plot.top_view_bridge(config.bridge, abutments=True, piers=True, units="m")
plot.contour_responses(config, responses=responses, decimals=2, interp=(200, 60))
plt.legend(loc="upper right", borderaxespad=0)
plt.title(f"T_bot, T_top = {temp_bottom}°C, {temp_top}°C")
# plt.title(f"{top_str} stress\nbottom, top = {temp_bottom}, {temp_top}")
print("Calculated stress from temperature")
# Cracked concrete.
plt.subplot(3, 1, 3)
time = truck1.time_at(x=53, bridge=config.bridge)
truck1.load = 400 * 1e3
assert truck1.total_load() == 400 * 1e3
cracked_config = transverse_crack().crack(config)
responses = sim.responses.load(
config=cracked_config,
response_type=response_type,
point_loads=truck1.wheel_track_loads(config, [time])[0],
).to_stress(config.bridge)
responses.units = "N/mm²"
plot.top_view_bridge(bridge=config.bridge, abutments=True, piers=True, units="m")
plot.contour_responses(
config=config, responses=responses, decimals=2, interp=(200, 60)
)
truck1.load /= 1e3 # Display correct units.
plot.top_view_vehicles(
config, vehicles=[truck1], time=time, wheels=True, label_wheels=True
)
plt.legend(loc="upper right", borderaxespad=0)
plt.title(f"{int(truck1.load)} kN truck over 0.5 m crack zone")
plt.suptitle(f"Stress {response_type.ss_direction()} for 3 scenarios")
equal_lims("x", 3, 1)
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
plt.savefig(
config.get_image_path(
"validation", f"stress-strength-{response_type.value}.pdf"
)
)
plt.close()
```
#### File: internal/plot/vehicles.py
```python
import bridge_sim.util
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
from bridge_sim.model import Config
from bridge_sim.vehicles.sample import (
sample_vehicle,
axle_array_and_count,
load_vehicle_data,
)
from bridge_sim.util import print_i
# Print debug information for this file.
D: bool = False
def plot_dist(c: Config):
"""Original A16 data, showing outliers, and downsampled final data."""
# Print information on original data.
a16 = load_vehicle_data("data/a16-data/original-a16.csv")
print_i(f"A16 columns = {a16.columns}")
print_i(f"Original A16 data has {len(a16)} rows")
min_length = np.min(a16["length"])
print_i(f"Minimum length = {min_length / 100} m")
min_weight = np.min(a16["total_weight"])
print_i(f"Minimum weight = {min_weight} kN")
# Get and remove outliers.
outliers = a16[(np.abs(stats.zscore(a16[["total_weight", "length"]])) >= 2)]
num_outliers = len(a16) - len(outliers)
print_i(
f"Removed {len(outliers)} ({len(outliers) / len(a16):.4f}) outliers (by weight & length) from A16 data"
)
a16 = a16.drop(outliers.index)
# Sample to 10% of original size.
a16 = a16.sample(n=int(len(a16) * 0.1))
print_i(f"Downsampled A16 data has {len(a16)} rows")
# Construct passenger vehicles.
n, min_kn = len(a16), 5
weights = np.random.gumbel(loc=12.53, scale=10, size=n)
weights = [w for w in weights if w >= min_kn]
axles = list(
map(int, np.around(np.interp(weights, [min(weights), max(weights)], [2, 4]), 0))
)
add_min_length = 2.4 * 100
add_max_length = min_length * 1.2
lengths = np.interp(
weights, [min(weights), max(weights)], [add_min_length, add_max_length]
)
rand = np.random.gumbel(loc=1.5, scale=4, size=len(lengths))
lengths = np.multiply(lengths, rand)
weights = np.multiply(weights, np.random.gumbel(1, 1, len(weights)))
add_weight = np.interp(
lengths, [add_min_length, add_max_length], [1, min_weight * 1.5]
)
weights += add_weight
# Add passenger vehicles to DataFrame.
records = []
for length, weight, axle in zip(lengths, weights, axles):
# A little filter function, to make results look a bit better.
if (
add_min_length <= length <= 9.7 * 100
and weight >= 7
and (length > 5 * 100 or weight < 100)
):
records.append(
{
"length": length,
"total_weight": weight,
"weight_per_axle": str([weight / axle] * axle),
"axle_distance": str([length / (axle - 1)] * (axle - 1)),
}
)
a16 = a16.append(records, ignore_index=True)
a16.index.name = "number"
a16.to_csv("data/a16-data/a16.csv")
print_i("Wrote updated A16 data to disk")
ws, ls = a16["total_weight"], a16["length"]
print_i(f"Weight: min = {min(ws)}, max = {max(ws)}")
print_i(f"Length: min = {min(ls)}, max = {max(ls)}")
# Plot.
def plot_pdf():
xs = list(map(lambda x: x[0], c.vehicle_pdf))
xs[-1] = min(xs[-1], plt.xlim()[1])
ps = list(map(lambda x: x[1], c.vehicle_pdf))
total_x = xs[-1] - xs[0]
rel_heights = []
for x0, x1, p in zip(xs[:-1], xs[1:], ps):
l = (x1 - x0) / total_x
h = p / l
rel_heights.append(h)
for x0, x1, h in zip(xs[:-1], xs[1:], rel_heights):
h = (h / max(rel_heights)) * plt.ylim()[1]
plt.gca().add_patch(
patches.Rectangle(
(x0, 0),
x1 - x0,
h,
facecolor="none",
edgecolor="red",
lw=1,
label=f"Area ∝ probability" if x1 == xs[-1] else None,
)
)
plt.legend()
n = 10000
c.vehicle_data = load_vehicle_data(c.vehicle_data_path)
vehicles = [sample_vehicle(c) for _ in range(n)]
kns = list(map(lambda v: v.total_kn(), vehicles))
num_axles = bridge_sim.util.apply(lambda s: len(axle_array_and_count(s)))
plt.landscape()
plt.subplot(3, 1, 1)
plt.scatter(a16["length"] / 100, a16["total_weight"], s=1)
plot_pdf()
plt.ylabel("Load intensity (kN)")
plt.xlabel("Length (m)")
plt.title("Load intensity per vehicles")
plt.xlim(0, plt.xlim()[1])
plt.subplot(3, 1, 2)
plt.scatter(a16["length"] / 100, num_axles, s=1)
plt.xlim(0, plt.xlim()[1])
plt.ylabel("Number of axles")
plt.xlabel("Length (m)")
plt.title("Number of axles per vehicles")
plt.subplot(3, 1, 3)
plt.hist(kns)
plt.ylabel("Number of vehicles")
plt.xlabel("Load intensity")
plt.title(f"Load intensity distribution of {n} sampled vehicles")
plt.tight_layout()
plt.savefig(c.get_image_path("vehicles", "vehicles-db.png"))
plt.savefig(c.get_image_path("vehicles", "vehicles-db.pdf"))
plt.close()
```
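A usage sketch for `plot_dist`, assuming an already-constructed `Config` whose `vehicle_data_path` points at the A16 CSV used above; the import path simply mirrors the file heading and is an assumption, not a documented API.

```python
from bridge_sim.model import Config
from internal.plot.vehicles import plot_dist  # Path mirrors the file heading above (assumption).

def make_vehicle_plots(config: Config) -> None:
    # config.vehicle_data_path is assumed to point at the A16 CSV referenced above.
    plot_dist(config)  # Saves vehicles-db.png / vehicles-db.pdf via config.get_image_path.
```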
#### File: bridge_sim/model/__init__.py
```python
import os
from enum import Enum
from itertools import chain
from timeit import default_timer as timer
from typing import List, Union, Tuple, Optional, Callable
import numpy as np
from matplotlib import cm as cm, colors as colors, pyplot as plt
from scipy.interpolate import interp1d
from bridge_sim.util import (
safe_str,
round_m,
flatten,
print_i,
print_w,
print_s,
get_dir,
)
DIST_DECIMALS = 6
class PierSettlement:
def __init__(self, pier: int, settlement: float):
"""A vertical translation applied to a pier in simulation.
Args:
pier: index of a pier on a bridge.
settlement: amount of pier settlement to apply.
"""
self.pier = pier
self.settlement = settlement
def id_str(self):
return safe_str(f"{np.around(self.settlement, 3)}-{self.pier}")
class Point:
def __init__(self, x: float = 0, y: float = 0, z: float = 0):
"""A point described by three positions: (X, Y, Z).
Args:
x: X position of the point.
y: Y position of the point.
z: Z position of the point.
"""
self.x: float = np.around(x, DIST_DECIMALS)
self.y: float = np.around(y, DIST_DECIMALS)
self.z: float = np.around(z, DIST_DECIMALS)
def distance(self, point: "Point"):
"""Distance from this Point to the given Point.
Args:
point: other Point to compute the distance to.
"""
return np.around(
np.sqrt(
((self.x - point.x) ** 2)
+ ((self.y - point.y) ** 2)
+ ((self.z - point.z) ** 2)
),
DIST_DECIMALS,
)
def __str__(self):
return f"({self.x}, {self.y}, {self.z})"
class PointLoad:
def __init__(self, x: float, z: float, load: float, units: Optional[str] = None):
"""A point load applied in simulation.
Args:
x: X position of the point-load.
z: Z position of the point-load.
load: intensity of the point-load.
"""
self.x = x
self.z = z
self.load = load
self.units = units
def __repr__(self):
"""Human readable representation of this point-load."""
return f"X = {self.x}, Z = {self.z}, load = {self.load}"
def id_str(self):
"""String uniquely representing this point-load."""
return safe_str(
f"({np.around(self.x, DIST_DECIMALS)}, {np.around(self.z, DIST_DECIMALS)}, {np.around(self.load, DIST_DECIMALS)})"
)
def point(self) -> Point:
"""The 'Point' part of this point-load."""
return Point(x=self.x, y=0, z=self.z)
class ResponseType(Enum):
"""A simulation response type."""
XTrans = "xtrans"
YTrans = "ytrans"
ZTrans = "ztrans"
StressXXB = "stressxxb"
StressXXT = "stressxxt"
StressZZB = "stresszzb"
StrainXXB = "strainxxb"
StrainXXT = "strainxxt"
StrainZZB = "strainzzb"
@staticmethod
def all() -> List["ResponseType"]:
"""A list of all response types."""
return [rt for rt in ResponseType]
def is_stress(self):
"""Is this response type a stress type?"""
return self in [
ResponseType.StressXXB,
ResponseType.StressXXT,
ResponseType.StressZZB,
]
def is_strain(self):
"""Is this response type a strain type?"""
return self in [
ResponseType.StrainXXB,
ResponseType.StrainXXT,
ResponseType.StrainZZB,
]
def ss_direction(self) -> str:
"""A stress or strain identifier e.g. XXB (if applicable)."""
if self.is_stress() or self.is_strain():
return self.name()[-3:]
raise ValueError("Not stress or strain")
def name(self) -> str:
"""Human readable name for this response type."""
return {
ResponseType.XTrans: "X translation",
ResponseType.YTrans: "Y translation",
ResponseType.ZTrans: "Z translation",
ResponseType.StressXXB: "Stress XXB",
ResponseType.StressXXT: "Stress XXT",
ResponseType.StressZZB: "Stress ZZB",
ResponseType.StrainXXB: "Strain XXB",
ResponseType.StrainXXT: "Strain XXT",
ResponseType.StrainZZB: "Strain ZZB",
}[self]
def to_stress(self):
"""The corresponding stress type for this strain type.
Raises a ValueError if this is not a strain response type.
"""
if self == ResponseType.StrainXXB:
return ResponseType.StressXXB
if self == ResponseType.StrainXXT:
return ResponseType.StressXXT
if self == ResponseType.StrainZZB:
return ResponseType.StressZZB
raise ValueError("Responses must be a strain type")
# Shorthand for ResponseType.
RT = ResponseType
class Config:
def __init__(
self,
bridge: Callable[[], "Bridge"],
sim_runner: "FEMRunner",
vehicle_data_path: str,
vehicle_pdf: List[Tuple[float, float]],
vehicle_pdf_col: str,
generated_data: str = "generated-data",
shorten_paths: bool = False,
il_num_loads: int = 600,
):
"""Simulation configuration object.
Combines a Bridge and FEMRunner among other configuration.
Args:
bridge: function that returns a bridge.
sim_runner: function that returns a simulation runner.
vehicle_data_path: path of the vehicles CSV file.
vehicle_pdf:
percentage of vehicles below a maximum value for that column.
Example: [(2.4, 0.5), (5.6, 94.5), (16, 5)]
Here 0.5% of vehicles are 2.4 m or less in length, 94.5% are greater than
2.4 m and less than 5.6 m, and the remaining 5% are less than 16 m.
This applies if 'vehicle_pdf_col' is "length".
vehicle_pdf_col: column name of vehicle_data to group by.
generated_data: path to directory where to save generated files.
shorten_paths: shorten simulation paths (to avoid OS limits).
"""
# Core.
self._bridge = bridge
self.bridge = self._bridge()
self.sim_runner = sim_runner
# OpenSees
self.os_model_template_path: str = "model-template.tcl"
self.os_3d_model_template_path: str = "model-template-3d.tcl"
# Simulation performance.
self.parallel = 1
self.shorten_paths = shorten_paths
self.resp_matrices = dict()
# Unit loads.
self.il_num_loads = il_num_loads
self.il_unit_load: float = 1000000
self.unit_pier_settlement: float = 1
self.unit_axial_delta_temp_c: int = 1
self.unit_moment_delta_temp_c: int = 1
self.cte = 12e-6
self.self_weight_asphalt: bool = True
# Responses & events.
self.sensor_freq: float = 1 / 100
self.event_time_s: float = 2 # Seconds.
# Vehicles.
self.perturb_stddev: float = 0.1
self.axle_width: float = 2.5
self.vehicle_pdf = vehicle_pdf
self.vehicle_pdf_col = vehicle_pdf_col
start = timer()
self.vehicle_data_path = vehicle_data_path
# Necessary to prevent a circular import.
from bridge_sim.vehicles.sample import load_vehicle_data
self.vehicle_data = load_vehicle_data(vehicle_data_path)
print_i(
f"Loaded vehicles data from {vehicle_data_path} in"
+ f" {timer() - start:.2f}s"
)
# Ensure vehicles probability density sums to 1.
pdf_sum = sum(map(lambda f: f[1], self.vehicle_pdf))
if int(pdf_sum) != 100:
pre_pdf_sum = pdf_sum
for i in range(len(self.vehicle_pdf)):
self.vehicle_pdf[i] = (
self.vehicle_pdf[i][0],
self.vehicle_pdf[i][1] / pdf_sum,
)
pdf_sum = sum(map(lambda f: f[1], self.vehicle_pdf))
print_w(f"Vehicle PDF sums to {pre_pdf_sum}, adjusted to sum to 1")
# Root directories for generated data.
self._root_generated_data_dir = generated_data
self.root_generated_data_dir = lambda: get_dir(self._root_generated_data_dir)
if self._root_generated_data_dir[-1] in "/\\":
raise ValueError("generated_data must not end in path separator")
self.root_generated_images_dir = lambda: get_dir(
os.path.join(self.root_generated_data_dir() + "-images")
)
# Bridge-specific directories for generated data.
def generated_data_dir(self):
"""Path to directory where data is saved."""
return get_dir(
os.path.join(self.root_generated_data_dir(), self.bridge.id_str(),)
)
def generated_images_dir(self):
"""Path to directory where images are saved."""
return get_dir(
os.path.join(self.root_generated_images_dir(), self.bridge.id_str(),)
)
# Bridge-specific but accuracy-independent directories.
def generated_data_dir_no_acc(self):
"""Like 'generated_data_dir' but doesn't use 'Bridge.msl' or 'Bridge.data_id'."""
return get_dir(
os.path.join(
self.root_generated_data_dir(),
self.bridge.id_str(msl=False, data_id=False),
)
)
def generated_images_dir_no_acc(self):
"""Like 'generated_images_dir' but doesn't use 'Bridge.msl' or 'Bridge.data_id'."""
return get_dir(
os.path.join(
self.root_generated_images_dir(),
self.bridge.id_str(msl=False, data_id=False),
)
)
def get_path_in(self, in_: str, dirname: str, filename: str):
"""Filepath in a directory in a directory (created if necessary).
TODO: Use safe_str here.
"""
dirpath = os.path.join(in_, dirname)
if not os.path.exists(dirpath):
os.makedirs(dirpath)
return os.path.join(dirpath, filename)
def get_data_path(
self, dirname: str, filename: str, bridge: bool = True, acc: bool = True
):
"""Get a bridge-specific image path in a named directory."""
dir_path = self.generated_data_dir()
if not bridge:
dir_path = self.root_generated_data_dir()
elif not acc:
dir_path = self.generated_data_dir_no_acc()
return self.get_path_in(dir_path, dirname, filename)
def get_image_path(
self, dirname: str, filename: str, bridge: bool = True, acc: bool = True
):
"""Get a bridge-specific image path in a named directory."""
dir_path = self.generated_images_dir()
if not bridge:
dir_path = self.root_generated_images_dir()
elif not acc:
dir_path = self.generated_images_dir_no_acc()
return self.get_path_in(dir_path, dirname, filename)
class Dimensions(Enum):
D3 = "D3"
def name(self) -> str:
"""Human readable name for dimensions."""
return {Dimensions.D3: "3D",}[self]
class Support:
"""A support of the bridge deck, when 3D modeling.
SIDE_VIEW:
<------------x----------->
<---length-->
|------------------|-----------|----------------------| ↑ h
\ / | e
\ / | i
\ / | g
\ / | h
\ / ↓ t
TOP_VIEW:
|-----------------------------------------------------| ↑+
|-----------------------------------------------------| |
|-----------------------------------------------------| |
|-----------------------------------------------------| |
|-----------------------------------------------------| 0
|------------------|-----------|----------------------| |
|------------------|-----------|----------------------| | z = -2
|------------------|-----------|----------------------| |
|-----------------------------------------------------| ↓-
FRONT_VIEW:
<---width-top---->
|----------------|
\ /
\ /
\ /
\ /
\______/
<------>
width-bottom
Args:
x: X position of center of the support in meters.
z: Z position of center of the support in meters.
length: length of the support in meters.
height: height of the support in meters.
width_top: width of the top of the support in meters.
width_bottom: width of the bottom of the support in meters.
materials: deck materials, either a list or function from X position.
"""
def __init__(
self,
x: float,
z: float,
length: float,
height: float,
width_top: float,
width_bottom: float,
materials: Union[List["MaterialSupport"], Callable[[float], "MaterialSupport"]],
fix_x_translation: bool,
fix_z_translation: bool,
fix_y_translation: bool = True,
fix_x_rotation: bool = False,
fix_z_rotation: bool = False,
fix_y_rotation: bool = False,
):
self.x = x
self.z = z
self.length = length
self.height = height
self.width_top = width_top
self.width_bottom = width_bottom
self.fix_x_translation = fix_x_translation
self.fix_y_translation = fix_y_translation
self.fix_z_translation = fix_z_translation
self.fix_x_rotation = fix_x_rotation
self.fix_y_rotation = fix_y_rotation
self.fix_z_rotation = fix_z_rotation
self._sections = materials
# Must be callable or a list.
if not callable(self._sections):
assert isinstance(self._sections, list)
assert all(isinstance(s, MaterialSupport) for s in self._sections)
if self.width_top < self.width_bottom:
raise ValueError("Support: top width must be >= bottom width")
def x_min_max_top(self) -> Tuple[float, float]:
"""The min and max x positions for the top of this pier."""
half_length = self.length / 2
return round_m(self.x - half_length), round_m(self.x + half_length)
def y_min_max(self) -> Tuple[float, float]:
"""The min and max y positions for this pier."""
return round_m(-self.height), 0
def z_min_max_top(self) -> Tuple[float, float]:
"""The min and max z positions for the top of this pier."""
half_top = self.width_top / 2
return round_m(self.z - half_top), round_m(self.z + half_top)
def z_min_max_bottom(self) -> Tuple[float, float]:
"""The min and max z positions for the bottom of this pier."""
half_bottom = self.width_bottom / 2
return round_m(self.z - half_bottom), round_m(self.z + half_bottom)
class Asphalt:
def __init__(self, thickness: float, density: float):
"""Asphalt on a lane of a bridge."""
self.thickness = thickness
self.density = density
class Lane:
"""A traffic lane spanning the length of a bridge.
Args:
z0: Z position of one edge of the lane.
z1: Z position of the other edge of the lane.
ltr: traffic moves in left to right direction?
asphalt: thickness and density of the asphalt.
Attrs:
z_min, lower Z position of the lane.
z_max, greater Z position of the lane.
width, width of the lane.
"""
def __init__(
self,
z0: float,
z1: float,
ltr: bool,
asphalt: Optional[Asphalt] = Asphalt(thickness=0.1, density=2.4),
):
self.z_min: float = round_m(min(z0, z1))
self.z_max: float = round_m(max(z0, z1))
self.ltr: bool = ltr
self.width = round_m(self.z_max - self.z_min)
self.z_center = round_m(self.z_min + (self.width / 2))
self.asphalt = asphalt
def wheel_track_zs(self, config: Config):
"""Z positions of this lane's wheel track on a bridge."""
half_axle = config.axle_width / 2
return [self.z_center - half_axle, self.z_center + half_axle]
class Material:
"""An abstract class for material properties.
Args:
density: density of the material.
thickness: thickness of the material.
youngs: Young's modulus of the material.
youngs_x: Young's modulus in x direction.
poissons: Poisson's ratio of the material.
start_x_frac: start of the material as a fraction of X position.
start_z_frac: start of the material as a fraction of Z position.
end_x_frac: end of the section as a fraction of X position.
end_z_frac: end of the section as a fraction of Z position.
"""
def __init__(
self,
thickness: float,
youngs: float,
poissons: float,
start_x_frac: float = 0,
start_z_frac: float = 0,
end_x_frac: float = 1,
end_z_frac: float = 1,
density: float = 0,
youngs_x: Optional[float] = None,
):
self.density = density
self.thickness = thickness
self.youngs = youngs
self._youngs_x = youngs_x
self.poissons = poissons
self.start_x_frac = start_x_frac
self.start_z_frac = start_z_frac
self.end_x_frac = end_x_frac
self.end_z_frac = end_z_frac
def youngs_x(self):
if self._youngs_x is not None:
return self._youngs_x
return self.youngs
def contains(self, bridge: "Bridge", x: float, z: float) -> bool:
"""Does this material contain the given point?"""
x_frac, z_frac = bridge.x_frac(x), bridge.z_frac(z)
return (
(self.start_x_frac < x_frac or np.isclose(self.start_x_frac, x_frac))
and (self.end_x_frac > x_frac or np.isclose(self.end_x_frac, x_frac))
and (self.start_z_frac < z_frac or np.isclose(self.start_z_frac, z_frac))
and (self.end_z_frac > z_frac or np.isclose(self.end_z_frac, z_frac))
)
def mat_id_str(self):
"""Representation of this section by material properties."""
return f"{self.density}-{self.thickness}-{self.youngs}-{self.poissons}"
def y_min_max(self) -> Tuple[float, float]:
"""The min and max values in y for this section."""
return -self.thickness, 0
def prop_str(self):
"""Textual representation of material properties."""
return (
"Material"
+ f"\n starts at (x_frac, z_frac) ="
+ f" ({round_m(self.start_x_frac)}, {round_m(self.start_z_frac)})"
+ f"\n ends at (x_frac, z_frac) ="
+ f" ({round_m(self.end_x_frac)}, {round_m(self.end_z_frac)})"
+ f"\n density = {self.density} kg/m"
+ f"\n thickness = {self.thickness} m"
+ f"\n youngs = {self.youngs} MPa"
+ f"\n poissons = {self.poissons}"
)
MaterialDeck = Material
class MaterialSupport(Material):
"""Like Material but intended for describing piers.
Args:
density: density of the material.
thickness: thickness of the material.
youngs: Young's modulus of the material.
poissons: Poisson's ratio of the material.
start_frac_len: start of the section as a fraction of pier length.
"""
def __init__(
self,
density: float,
thickness: float,
youngs: float,
poissons: float,
start_frac_len: float,
):
super().__init__(
density=density,
thickness=thickness,
youngs=youngs,
poissons=poissons,
start_x_frac=None,
start_z_frac=None,
end_x_frac=None,
end_z_frac=None,
)
self.start_frac_len = start_frac_len
def prop_str(self):
"""Textual representation of material properties."""
return (
"Material"
+ f"\n starts at {round_m(self.start_frac_len)}"
+ f"\n density = {self.density} kg/m"
+ f"\n thickness = {self.thickness} m"
+ f"\n youngs = {self.youngs} MPa"
+ f"\n poissons = {self.poissons}"
)
class Bridge:
def __init__(
self,
name: str,
length: float,
width: float,
supports: List[Support],
materials: List["MaterialDeck"],
lanes: List[Lane],
msl: float,
data_id: str = "healthy",
single_sections: Optional[Tuple[Material, Material]] = None,
):
"""A bridge's geometry, material properties and boundary conditions.
Args:
name: name of this bridge.
length: length of this bridge.
width: width of this bridge.
supports: a list of Support.
materials: a list of bridge deck Material.
lanes: a list of Lane for traffic to drive on.
msl: maximum shell length.
data_id: additional identifier for saving/loading data.
single_sections: tuple of one deck and one material for supports.
"""
# Given arguments.
self.name = name
self.msl = float(msl)
self.data_id = data_id
self.length = length
self.width = width
self.supports = supports
self.sections = materials
self.lanes = lanes
self.dimensions = Dimensions.D3
self.ref_temp_c = 17
self._next_section_id = 1
# Mesh.
self.base_mesh_deck_max_x = msl
self.base_mesh_deck_max_z = msl
self.base_mesh_pier_max_long = msl
# Attach single section option for asserts and printing info.
self.single_sections = single_sections
if self.single_sections is not None:
self.name += "-single-sections"
self.sections = [self.single_sections[0]] # Set deck section.
for pier in self.supports: # Set pier sections.
pier.sections = [self.single_sections[1]]
self.additional_xs = []
# Derived attributes.
#
# NOTE: The functions y_min_max and z_min_max calculate the min and max
# values of the bridge in y and z directions respectively, based on the
# supports and sections. For a 3D bridge neither supports nor sections
# contain information on the min or max values in z direction.
self.x_min, self.x_max = 0, length
self.y_min, self.y_max = self.y_min_max()
self.z_min, self.z_max = -width / 2, width / 2
self.x_center = (self.x_min + self.x_max) / 2
self.y_center = (self.y_min + self.y_max) / 2
self.z_center = (self.z_min + self.z_max) / 2
self.height = self.y_max - self.y_min
# All sections belonging to this bridge.
self._sections_dict = dict()
# Assert the bridge is fine and print info.
self._assert_bridge()
def _get_section(self, section: Material) -> Material:
"""An equivalent section if exists, else the given one."""
def with_id(s: Material) -> Material:
s.id = self._next_section_id
self._next_section_id += 1
return s
section_prop_str = section.prop_str()
if section_prop_str in self._sections_dict:
return with_id(self._sections_dict[section_prop_str])
self._sections_dict[section_prop_str] = section
return with_id(self._sections_dict[section_prop_str])
def deck_section_at(self, x: float, z: float) -> Material:
"""Return the deck section at given position."""
if callable(self.sections):
raise NotImplementedError()
if len(self.sections) == 1:
return self._get_section(self.sections[0])
for section in self.sections:
if section.contains(bridge=self, x=x, z=z):
return self._get_section(section)
raise ValueError(f"No section for x, z = {x}, {z}")
def pier_section_at_len(self, p_i: int, section_frac_len: float) -> Material:
"""Return the section at a fraction of a pier's length"""
assert 0 <= section_frac_len <= 1
pier = self.supports[p_i]
if callable(pier._sections):
return self._get_section(pier._sections(section_frac_len))
if len(pier._sections) == 1:
return self._get_section(pier._sections[0])
raise ValueError(f"Pier {p_i} sections are not a function")
def print_info(self, c: "Config", pier_fix_info: bool = False):
"""Print summary information about this bridge.
Args:
pier_fix_info: print information on each pier's fixed nodes.
"""
print_s(f"Bridge dimensions:")
print_s(f" x = ({self.x_min}, {self.x_max})")
print_s(f" y = ({self.y_min}, {self.y_max})")
print_s(f" z = ({self.z_min}, {self.z_max})")
print_s(f"Bridge lanes:")
wheel_tracks = self.wheel_track_zs(c)
for l, lane in enumerate(self.lanes):
print_s(f" lane {l}: {lane.z_min} <= z <= {lane.z_max}")
print_s(f" lane {l}: center at z = {lane.z_center}")
track_0 = wheel_tracks[l * 2]
track_1 = wheel_tracks[l * 2 + 1]
print_s(f" lane {l}: wheel tracks at z = {track_0}, {track_1}")
if self.single_sections:
print_s("One section for the deck, one for piers:")
print_s(f"Deck:")
list(map(print_s, str(self.sections[0]).split("\n")))
print_s(f"Piers:")
list(map(print_s, str(self.supports[0].sections[0]).split("\n")))
if pier_fix_info:
for p, pier in enumerate(self.supports):
print_s(f"Pier {p} fixed:")
print_s(f" x-trans {pier.fix_x_translation}")
print_s(f" y-trans {pier.fix_y_translation}")
print_s(f" z-trans {pier.fix_z_translation}")
print_s(f" x-rot {pier.fix_x_rotation}")
print_s(f" y-rot {pier.fix_y_rotation}")
print_s(f" z-rot {pier.fix_z_rotation}")
def id_str(self, msl: bool = True, data_id: bool = True):
"""Name with accuracy information.
Args:
msl: bool, include msl in identifier.
data_id: bool, include data_id in identifier.
"""
acc_str = f"-{self.msl}" if msl else ""
data_id_str = f"-{self.data_id}" if data_id else ""
return safe_str(f"{self.name}{acc_str}{data_id_str}")
def closest_lane(self, z: float):
"""Index of the lane closest to the point."""
result = None
lane_dist = np.inf
for lane_ind, lane in enumerate(self.lanes):
this_dist = abs(lane.z_center - z)
if this_dist < lane_dist:
result = lane_ind
lane_dist = this_dist
return result
def axle_track_zs(self):
"""Z positions of axle track-centers on the bridge."""
return sorted(lane.z_center for lane in self.lanes)
def wheel_track_zs(self, c: "Config"):
"""Z positions of wheel track on the bridge."""
return sorted(
chain.from_iterable(lane.wheel_track_zs(c) for lane in self.lanes)
)
def wheel_track_xs(self, c: "Config"):
"""Unit load x positions for wheel tracks on this bridge."""
return round_m(np.linspace(c.bridge.x_min, c.bridge.x_max, c.il_num_loads))
def y_min_max(self):
"""The min and max values in y direction from supports and sections."""
return self._min_max(lambda s: s.y_min_max())
def z_min_max(self):
"""The min and max values in z direction from supports and sections."""
return self._min_max(lambda s: s.z_min_max())
def x_axis(self) -> List[float]:
"""Position of supports in meters along the bridge's x-axis."""
return np.interp([f.x_frac for f in self.supports], [0, 1], [0, self.length])
def x_axis_equi(self, n) -> List[float]:
"""n equidistant values along the bridge's x-axis, in meters."""
return np.interp(np.linspace(0, 1, n), [0, 1], [0, self.length])
def x_frac(self, x: float):
return float(
interp1d([self.x_min, self.x_max], [0, 1], fill_value="extrapolate")(x)
)
def x(self, x_frac: float):
return float(
interp1d([0, 1], [self.x_min, self.x_max], fill_value="extrapolate")(x_frac)
)
def y_frac(self, y: float):
assert self.y_min <= y <= self.y_max
return np.interp(y, [self.y_min, self.y_max], [0, 1])
def y(self, y_frac: float):
assert 0 <= y_frac <= 1
return np.interp(y_frac, [0, 1], [self.y_min, self.y_max])
def z_frac(self, z: float):
assert self.z_min <= z <= self.z_max
return np.interp(z, [self.z_min, self.z_max], [0, 1])
def z(self, z_frac: float):
assert 0 <= z_frac <= 1
return np.interp(z_frac, [0, 1], [self.z_min, self.z_max])
def _min_max(
self,
f: Callable[
[Union[Support, Material]], Tuple[Optional[float], Optional[float]]
],
) -> Tuple[float, float]:
"""The min and max values in a direction from supports and sections."""
z_min, z_max = None, None
def set_z_min(z: float):
nonlocal z_min
if z is None:
return
z_min = z if z_min is None or z < z_min else z_min
def set_z_max(z: float):
nonlocal z_max
if z is None:
return
z_max = z if z_max is None or z > z_max else z_max
for section in self.sections:
s_z_min, s_z_max = f(section)
set_z_min(s_z_min)
set_z_max(s_z_max)
for support in self.supports:
s_z_min, s_z_max = f(support)
set_z_min(s_z_min)
set_z_max(s_z_max)
return z_min, z_max
def _assert_bridge(self):
"""Assert this bridge makes sense."""
# Single section only in 3D.
if self.single_sections:
if self.dimensions != Dimensions.D3:
raise ValueError("Bridge.single_section only supported in 3D")
assert self.single_sections[0].start_x_frac == 0
assert self.single_sections[0].start_z_frac == 0
assert self.single_sections[1].start_x_frac == 0
assert self.single_sections[1].start_z_frac == 0
assert self.single_sections[1].start_frac_len == 0
assert len(self.sections) == 1
for pier in self.supports:
assert len(pier.sections) == 1
# Bridge boundaries should be correct in orientation.
assert self.x_min < self.x_max
assert self.y_min < self.y_max
assert self.z_min < self.z_max
# Derived dimensions should make sense.
assert self.length == self.x_max - self.x_min
assert self.width == self.z_max - self.z_min
# Base mesh must be of a minimum size.
assert self.base_mesh_deck_max_x <= self.length
if self.dimensions == Dimensions.D3:
assert self.base_mesh_deck_max_z <= self.width
# for pier in self.supports:
# TODO: Improve this assert, piers are not vertical.
# assert self.base_mesh_pier_max_long <= pier.height
self._assert_3d()
def _assert_3d(self):
# All sections are Material.
for section in self.sections:
if not isinstance(section, Material):
raise ValueError("3D bridge must use Material sections")
# First section must start at 0.
if self.sections[0].start_x_frac != 0:
raise ValueError("First section of 3D bridge must start at 0")
# Section must be in order.
last_start_x_frac = self.sections[0].start_x_frac
for section in self.sections[1:]:
if section.start_x_frac < last_start_x_frac:
raise ValueError("Sections not in order of start_x_frac")
last_start_x_frac = section.start_x_frac
# Lanes must be in range.
for i, lane in enumerate(self.lanes):
if lane.z_min < self.z_min:
raise ValueError(
f"Lane {i} lower position {lane.z_min} less than bridge"
+ f" {self.z_min}"
)
if lane.z_min > self.z_max:
raise ValueError(
f"Lane {i} lower position {lane.z_min} greater than bridge"
+ f" {self.z_max}"
)
if lane.z_max < self.z_min:
raise ValueError(
f"Lane {i} upper position {lane.z_max} less than bridge"
+ f" {self.z_min}"
)
if lane.z_max > self.z_max:
raise ValueError(
f"Lane {i} upper position {lane.z_max} greater than bridge"
+ f" {self.z_max}"
)
# Supports must be in range.
for i, support in enumerate(self.supports):
support_z_min, support_z_max = support.z_min_max_top()
if support_z_min < self.z_min:
raise ValueError(
f"Support {i} lower position {support_z_min} less than"
+ f" bridge {self.z_min}"
)
if support_z_min > self.z_max:
raise ValueError(
f"Support {i} lower position {support_z_min} greater than"
+ f" bridge {self.z_max}"
)
if support_z_max < self.z_min:
raise ValueError(
f"Support {i} upper position {support_z_max} less than"
+ f" bridge {self.z_min}"
)
if support_z_max > self.z_max:
raise ValueError(
f"Support {i} upper position {support_z_max} greater than"
+ f" bridge {self.z_max}"
)
class Vehicle:
def __init__(
self,
load: Union[float, List[float]],
axle_distances: List[float],
axle_width: float,
kmph: float,
lane: int = 0,
init_x: float = 0,
):
"""A vehicles, load intensities, position and speed.
Args:
load: either a scalar (total load of this vehicle), or a list (load
per axle).
axle_distances: distance between axles in meters.
axle_width: width of the vehicle's axles in meters.
kmph: speed of this vehicle.
lane: index of a lane on a bridge.
init_x: distance from lane beginning at time 0.
"""
self.load = load
self.axle_distances = axle_distances
self.axle_width = axle_width
self.length = sum(self.axle_distances)
self.num_axles = len(self.axle_distances) + 1
self.num_wheels = self.num_axles * 2
self.kmph = kmph
self.mps = self.kmph / 3.6 # Meters per second.
self.lane = lane
self.init_x = init_x
if self.init_x >= 1:
raise ValueError("Already left bridge at time t = 0")
if self._is_load_per_axle() and not len(self.load) == self.num_axles:
raise ValueError("Number of loads and axle distances don't match")
def _is_load_per_axle(self) -> bool:
"""Is there a load per axle, or a total load?"""
return isinstance(self.load, list)
def total_load(self) -> float:
"""Total load of this vehicle."""
if self._is_load_per_axle():
return sum(self.load)
return self.load
def load_per_axle(self) -> List[float]:
"""Load for each axle."""
if self._is_load_per_axle():
return self.load
result = [(self.load / self.num_axles) for _ in range(self.num_axles)]
print(result[0])
assert isinstance(result[0], float)
return result
def _cmap_norm(self, all_vehicles: List["Vehicle"], cmap, cmin=0, cmax=1):
"""A colormap and norm for coloring vehicles.
Args:
all_vehicles: to compute the maximum and minimum of all vehicles.
cmap: Matplotlib colormap for the colours to use.
cmin: the minimum colour value.
cmax: the maximum colour value.
Returns: a tuple of Matplotlib colormap and norm.
"""
from bridge_sim.plot.util import truncate_colormap
cmap = truncate_colormap(cmap, cmin, cmax)
total_kns = [v.total_load() for v in all_vehicles] + [self.total_load()]
norm = colors.Normalize(vmin=min(total_kns), vmax=max(total_kns))
return cmap, norm
def color(
self, all_vehicles: List["Vehicle"] = [], cmap=cm.get_cmap("YlGn")
) -> float:
"""Colour of this vehicle, compared to other vehicles if given."""
cmap, norm = self._cmap_norm(all_vehicles, cmap=cmap)
if len(all_vehicles) == 0:
return cmap(0.5)
return cmap(norm(self.total_load()))
def wheel_tracks_zs(self, config: Config) -> Tuple[float, float]:
"""Positions of the vehicles's wheels in Z direction."""
return config.bridge.lanes[self.lane].wheel_track_zs(config)
def xs_at(self, times: List[float], bridge: Bridge) -> List[List[float]]:
"""X position on bridge for each axle in meters at given times.
Args:
times: times when to compute positions.
bridge: the bridge on which the vehicle moves.
Returns: a NumPy array of shape len(times) x self.num_axles.
"""
# Initial positions of axles.
lane = bridge.lanes[self.lane]
xs = [bridge.x_min if lane.ltr else bridge.x_max]
xs[0] += self.init_x if lane.ltr else (-self.init_x)
for ad in self.axle_distances:
xs.append(xs[-1] - ad if lane.ltr else xs[-1] + ad)
# Difference at each point in time.
deltas = np.array(times) * self.mps
if not lane.ltr: # If right to left, decreasing X position.
deltas *= -1
assert len(deltas.shape) == 1
assert len(deltas) == len(times)
# Make result.
result = np.ndarray((len(times), self.num_axles))
assert len(result.shape) == 2
for t, d in enumerate(deltas):
result[t] = xs + d
return result
def x_at(self, time: float, bridge: Bridge) -> float:
"""X position of front axle on bridge at a time, in meters."""
return self.xs_at(times=[time], bridge=bridge)[0][0]
def on_bridge(self, time: float, bridge: Bridge) -> bool:
"""Is the vehicle on a bridge at a given time?"""
xs = sorted(self.xs_at(times=[time], bridge=bridge)[0])
x_min, x_max = xs[0], xs[-1]
assert x_min < x_max
return (bridge.x_min <= x_min <= bridge.x_max) or (
bridge.x_min <= x_max <= bridge.x_max
)
def passed_bridge(self, time: float, bridge: Bridge) -> bool:
"""Has the current vehicle travelled over a bridge?"""
rear_x = self.xs_at(times=[time], bridge=bridge)[0][-1]
lane = bridge.lanes[self.lane]
return rear_x > bridge.x_max if lane.ltr else rear_x < bridge.x_min
def time_at(self, x: float, bridge: Bridge) -> float:
"""Time when the front axle is at an X position."""
if bridge.lanes[self.lane].ltr:
return (x - bridge.x_min - self.init_x) / self.mps
return ((-x) + bridge.x_max - self.init_x) / self.mps
def time_entering_bridge(self, bridge: Bridge) -> float:
"""Time the front axle is at lane beginning."""
if bridge.lanes[self.lane].ltr:
return self.time_at(x=bridge.x_min, bridge=bridge)
return self.time_at(x=bridge.x_max, bridge=bridge)
def time_entered_bridge(self, bridge: Bridge) -> float:
"""Time the rear axle is at lane beginning."""
if bridge.lanes[self.lane].ltr:
return self.time_at(x=bridge.x_min + self.length, bridge=bridge)
return self.time_at(x=bridge.x_max - self.length, bridge=bridge)
def time_leaving_bridge(self, bridge: Bridge) -> float:
"""Time the front axle is at lane end."""
if bridge.lanes[self.lane].ltr:
return self.time_at(x=bridge.x_max, bridge=bridge)
return self.time_at(x=bridge.x_min, bridge=bridge)
def time_left_bridge(self, bridge: Bridge) -> float:
"""Time the rear axle is at lane end."""
if bridge.lanes[self.lane].ltr:
return self.time_at(x=bridge.x_max + self.length, bridge=bridge)
return self.time_at(x=bridge.x_min - self.length, bridge=bridge)
def _axle_track_weights(self, axle_x: float, wheel_track_xs: List[float]):
"""Indices and weights for some X position.
NOTE: Before using this function you should check if wheel_x is an X
position on the bridge.
"""
unit_load_x_ind = np.searchsorted(wheel_track_xs, axle_x)
unit_load_x = lambda: wheel_track_xs[unit_load_x_ind]
# If greater then subtract one index.
if unit_load_x() > axle_x:
unit_load_x_ind -= 1
assert unit_load_x() <= axle_x
# If the unit load is an exact match just return it..
if np.isclose(axle_x, unit_load_x()):
return (unit_load_x_ind, 1), (None, 0)
# ..otherwise, return a combination of two unit loads. In this case the
# unit load's position is less than the wheel.
unit_load_x_lo = unit_load_x()
unit_load_x_hi = wheel_track_xs[unit_load_x_ind + 1]
assert unit_load_x_hi > axle_x
dist_lo = abs(unit_load_x_lo - axle_x)
dist_hi = abs(unit_load_x_hi - axle_x)
dist = dist_lo + dist_hi
return (unit_load_x_ind, dist_hi / dist), (unit_load_x_ind + 1, dist_lo / dist)
def _axle_track_indices(
self, config: Config, times: List[float]
) -> List[List[Tuple[int, float]]]:
"""Axle track indices and load intensities over time.
NOTE: Each index is in [0, uls * lanes - 1].
"""
# Reject nested per-wheel loads; only a scalar or a per-axle list is supported.
try:
self.load[0][0]
except (TypeError, IndexError):
pass
else:
raise ValueError("Load per wheel not supported!")
xs = self.xs_at(times=times, bridge=config.bridge) # Times x X axles.
wheel_track_xs = config.bridge.wheel_track_xs(config)
lane_offset = self.lane * config.il_num_loads
for t, time in enumerate(times):
result = []
for x, kn in zip(xs[t], self.load_per_axle()):
if config.bridge.x_min <= x <= config.bridge.x_max:
(lo, weight_lo), (hi, weight_hi) = self._axle_track_weights(
axle_x=x, wheel_track_xs=wheel_track_xs,
)
result.append(
(
(lo + lane_offset, weight_lo * kn),
(None if hi is None else hi + lane_offset, weight_hi * kn),
)
)
yield result
def wheel_track_loads(
self, config: Config, times: List[float],
) -> List[List[PointLoad]]:
"""Point loads "bucketed" onto axle tracks.
Returns: a list of PointLoad at every time step.
"""
from bridge_sim.sim.run import ulm_point_loads
u_point_loads = ulm_point_loads(config)
result = []
# One iteration per time step; each entry of 'loads' corresponds to one axle.
for loads in self._axle_track_indices(config=config, times=times):
time_results = []
for (lo, load_lo), (hi, load_hi) in loads:
# These point loads will have unit loads!
# The loads need to be overwritten with half axle loads!
pl_lo0, pl_lo1 = u_point_loads[lo]
pl_lo0.load = load_lo / 2
pl_lo1.load = load_lo / 2
time_results.append(pl_lo0)
time_results.append(pl_lo1)
if hi is not None:
pl_hi0, pl_hi1 = u_point_loads[hi]
pl_hi0.load = load_hi / 2
pl_hi1.load = load_hi / 2
time_results.append(pl_hi0)
time_results.append(pl_hi1)
result.append(time_results)
return result
def point_load_pw(
self, config: Config, time: float, list: bool = False,
) -> Union[List[Tuple[PointLoad, PointLoad]], List[PointLoad]]:
"""A tuple of point load per axle, one point load per wheel."""
z0, z1 = self.wheel_tracks_zs(config=config)
assert z0 < z1
load_per_axle = self.load_per_axle()
result = []
# For each axle create two loads.
for x_i, x in enumerate(self.xs_at(times=[time], bridge=config.bridge)[0]):
if config.bridge.x_min <= x <= config.bridge.x_max:
wheel_load = load_per_axle[x_i] / 2
result.append(
(
PointLoad(x=x, z=z0, load=wheel_load),
PointLoad(x=x, z=z1, load=wheel_load),
)
)
if list:
return flatten(result, PointLoad)
return result
def _times_on_bridge(
self, config: Config, sorted_times: List[float]
) -> Tuple[List[float], List[float]]:
"""Of the given sorted times only those when on the bridge."""
entering_time = self.time_entering_bridge(config.bridge)
left_time = self.time_left_bridge(config.bridge)
first_index = np.searchsorted(sorted_times, entering_time)
if not self.on_bridge(time=sorted_times[first_index], bridge=config.bridge):
return [], []
last_index = np.searchsorted(sorted_times, left_time)
if last_index == len(sorted_times) or not self.on_bridge(
time=sorted_times[last_index], bridge=config.bridge
):
last_index -= 1
return (
np.arange(first_index, last_index + 1),
np.array(sorted_times)[first_index : last_index + 1],
)
def plot_wheels(self, c: Config, time: float, label=None, **kwargs):
"""Plot each wheel as a single black dot."""
wheel_loads = self.point_load_pw(config=c, time=time, list=True)
for i, load in enumerate(wheel_loads):
plt.scatter(
[load.x],
[load.z],
facecolors="none",
edgecolors="black",
label=None if i > 0 else label,
**kwargs,
)
```
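A short usage sketch of the classes defined in this module (the values are illustrative; the import path follows the file's location):

```python
from bridge_sim.model import Point, PointLoad, ResponseType, Vehicle

p = Point(x=10, y=0, z=-2)
q = Point(x=13, y=0, z=2)
print(p.distance(q))                          # 5.0 (rounded to 6 decimals).

pl = PointLoad(x=5, z=0, load=100, units="kN")
print(pl.point())                             # (5, 0, 0)

print(ResponseType.StrainXXB.to_stress())     # ResponseType.StressXXB
print(ResponseType.StressZZB.ss_direction())  # "ZZB"

truck = Vehicle(load=[50, 50, 100], axle_distances=[3, 4], axle_width=2.5, kmph=20)
print(truck.total_load())                     # 200
print(truck.load_per_axle())                  # [50, 50, 100]
print(truck.length)                           # 7
```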
#### File: sim/build/deck.py
```python
import math
from typing import List, NewType, Tuple
import numpy as np
from bridge_sim.model import Bridge
from bridge_sim.sim.model import (
BuildContext,
DeckNodes,
DeckShellNodes,
DeckShells,
Node,
)
from bridge_sim.util import assert_sorted, flatten, print_i, print_w, round_m
# A list of x positions, and a list of z positions.
DeckGrid = NewType("DeckPositions", Tuple[List[float], List[float]])
def get_deck_section_grid(bridge: Bridge) -> DeckGrid:
"""Grid where material properties change on the deck."""
if callable(bridge.sections):
print_w(
"Not adding additional nodes to bridge deck because material "
" properties are given as a potentially continuous function"
)
return [], []
xs, zs = set(), set()
for section in bridge.sections:
xs.add(round_m(bridge.x(section.start_x_frac)))
xs.add(round_m(bridge.x(section.end_x_frac)))
zs.add(round_m(bridge.z(section.start_z_frac)))
zs.add(round_m(bridge.z(section.end_z_frac)))
return sorted(xs), sorted(zs)
def get_deck_xs(bridge: Bridge, ctx: BuildContext) -> List[float]:
"""X positions of nodes on the bridge deck.
First the required X positions 'RX' are determined, positions of loads and
abutments etc.. After that a number of X positions are calculated between
each pair of adjacent X positions 'RX_i' and 'RX_j', such that the maximum
distance between X positions does not exceed 'bridge.base_mesh_deck_max_x'.
"""
all_xs = set()
# From piers.
for pier in bridge.supports:
for x in pier.x_min_max_top():
all_xs.add(round_m(x))
# Bridge ends.
all_xs.add(round_m(bridge.x_min))
all_xs.add(round_m(bridge.x_max))
# From loads.
for point in ctx.add_loads:
all_xs.add(round_m(point.x))
# From material properties.
for x in get_deck_section_grid(bridge)[0]:
all_xs.add(round_m(x))
# Additional nodes requested by the Bridge.
for x in bridge.additional_xs:
all_xs.add(round_m(x))
all_xs = sorted(all_xs)
print_i(f"Required node X positions on deck (from all sources) =\n {all_xs}")
deck_xs = set()
for i in range(len(all_xs) - 1):
x0, x1 = all_xs[i], all_xs[i + 1]
num = math.ceil((x1 - x0) / bridge.base_mesh_deck_max_x) + 1
for x in np.linspace(x0, x1, num=num):
deck_xs.add(round_m(x))
return sorted(deck_xs)
def get_deck_zs(bridge: Bridge, ctx: BuildContext) -> List[float]:
"""Z positions of nodes on the bridge deck.
First the required Z positions 'RZ' are determined, positions of loads and
abutments etc.. After that a number of Z positions are calculated between
each pair of adjacent Z positions 'RZ_i' and 'RZ_j', such that the maximum
distance between Z positions does not exceed 'bridge.base_mesh_deck_max_z'.
"""
all_zs = set() # Important Z positions.
# From piers.
for pier in bridge.supports:
for z in pier.z_min_max_top():
all_zs.add(round_m(z))
# Bridge abutments.
all_zs.add(round_m(bridge.z_min))
all_zs.add(round_m(bridge.z_max))
# From loads.
for point in ctx.add_loads:
all_zs.add(round_m(point.z))
# From material properties.
for z in get_deck_section_grid(bridge)[1]:
all_zs.add(round_m(z))
all_zs = sorted(all_zs)
print_i(f"Required node Z positions on deck (from all sources) =\n {all_zs}")
deck_zs = set()
for i in range(len(all_zs) - 1):
z0, z1 = all_zs[i], all_zs[i + 1]
num = math.ceil((z1 - z0) / bridge.base_mesh_deck_max_z) + 1
for z in np.linspace(z0, z1, num=num):
deck_zs.add(round_m(z))
return sorted(deck_zs)
def get_deck_grid(bridge: Bridge, ctx: BuildContext) -> DeckGrid:
return get_deck_xs(bridge=bridge, ctx=ctx), get_deck_zs(bridge=bridge, ctx=ctx)
def get_base_deck_nodes(bridge: Bridge, ctx: BuildContext) -> DeckNodes:
"""Deck nodes without refinement."""
deck_grid = get_deck_grid(bridge=bridge, ctx=ctx)
nodes = []
for z in deck_grid[1]:
nodes.append([])
for x in deck_grid[0]:
nodes[-1].append(ctx.get_node(x=x, y=0, z=z, deck=True))
return nodes
def get_deck_nodes(bridge: Bridge, ctx: BuildContext) -> DeckShellNodes:
"""Deck nodes with refinement."""
deck_nodes = get_base_deck_nodes(bridge=bridge, ctx=ctx)
assert_sorted([nodes[0].z for nodes in deck_nodes])
assert_sorted([len(nodes) for nodes in deck_nodes]) # All should be equal.
assert_sorted([node.x for node in deck_nodes[0]])
# Convert to 'DeckShellNodes'.
deck_shell_nodes = []
for z_i in range(len(deck_nodes) - 1):
for x_i in range(len(deck_nodes[0]) - 1):
node_i = deck_nodes[z_i][x_i]
node_j = deck_nodes[z_i][x_i + 1]
node_k = deck_nodes[z_i + 1][x_i + 1]
node_l = deck_nodes[z_i + 1][x_i]
deck_shell_nodes.append((node_i, node_j, node_k, node_l))
if len(ctx.refinement_radii) > 0:
raise NotImplementedError("Refinement not implemented!")
return deck_shell_nodes
def get_deck_shells(
bridge: Bridge, deck_shell_nodes: DeckShellNodes, ctx: BuildContext
) -> DeckShells:
shells = []
for node_i, node_j, node_k, node_l in deck_shell_nodes:
center_x = round_m(node_i.x + (node_i.distance_n(node_j) / 2))
center_z = round_m(node_i.z + (node_i.distance_n(node_l) / 2))
section = bridge.deck_section_at(x=center_x, z=center_z)
shells.append(
ctx.get_shell(
ni_id=node_i.n_id,
nj_id=node_j.n_id,
nk_id=node_k.n_id,
nl_id=node_l.n_id,
pier=False,
section=section,
)
)
return shells
```
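The node-spacing rule used by `get_deck_xs` and `get_deck_zs` can be checked in isolation; a small sketch with made-up positions and mesh size:

```python
import math
import numpy as np

x0, x1 = 0.0, 10.0          # Two adjacent required X positions (illustrative).
base_mesh_deck_max_x = 3.0  # Maximum allowed distance between deck nodes (illustrative).

num = math.ceil((x1 - x0) / base_mesh_deck_max_x) + 1
xs = np.linspace(x0, x1, num=num)
print(xs)  # [ 0.   2.5  5.   7.5 10. ], spacing 2.5 <= 3.
```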
#### File: sim/build/__init__.py
```python
from collections import defaultdict
from typing import List, Optional, Tuple
from bridge_sim.sim.build.deck import get_deck_nodes, get_deck_shells
from bridge_sim.sim.build.piers import get_pier_nodes, get_pier_shells
from bridge_sim.sim.model import (
BridgeNodes,
BridgeShells,
BuildContext,
DeckNodes,
DeckShellNodes,
Node,
Shell,
)
from bridge_sim.model import Bridge
from bridge_sim.util import flatten
def get_bridge_nodes(bridge: Bridge, ctx: Optional[BuildContext] = None) -> BridgeNodes:
if ctx is None:
ctx = BuildContext([])
return (
get_deck_nodes(bridge=bridge, ctx=ctx),
get_pier_nodes(bridge=bridge, ctx=ctx),
)
def get_bridge_shells(
bridge: Bridge, ctx: Optional[BuildContext] = None, ret_nodes: bool = False
) -> BridgeShells:
if ctx is None:
ctx = BuildContext([])
bridge_nodes = get_bridge_nodes(bridge=bridge, ctx=ctx)
bridge_shells = (
get_deck_shells(bridge=bridge, deck_shell_nodes=bridge_nodes[0], ctx=ctx),
get_pier_shells(bridge=bridge, pier_nodes=bridge_nodes[1], ctx=ctx),
)
if ret_nodes:
return bridge_shells, bridge_nodes
return bridge_shells
def get_bridge_shells_and_nodes(
bridge: Bridge, ctx: Optional[BuildContext] = None
) -> Tuple[BridgeShells, BridgeNodes]:
return get_bridge_shells(bridge=bridge, ctx=ctx, ret_nodes=True)
def to_deck_nodes(deck_shell_nodes: DeckShellNodes) -> DeckNodes:
"""Convert 'DeckShellNodes' to 'DeckNodes'."""
# A dict of z position to x position to Node.
deck_nodes_dict = defaultdict(dict)
for node in set(flatten(deck_shell_nodes, Node)):
deck_nodes_dict[node.z][node.x] = node
# Iterate through sorted z and x positions.
deck_nodes = []
for z in sorted(deck_nodes_dict.keys()):
deck_nodes.append([])
for x in sorted(deck_nodes_dict[z].keys()):
deck_nodes[-1].append(deck_nodes_dict[z][x])
return deck_nodes
def det_nodes(iterable) -> List[Node]:
"""Nodes in a deterministic ordering."""
nodes = set(flatten(iterable, Node))
return sorted(nodes, key=lambda n: n.n_id)
def det_nodes_id_str(ctx: BuildContext) -> str:
nodes = det_nodes(ctx.nodes_by_id.values())
return " ".join(map(lambda n: str(n.n_id), nodes))
def det_shells(iterable) -> List[Shell]:
"""Shells in a deterministic ordering."""
shells = set(flatten(iterable, Shell))
return sorted(shells, key=lambda s: s.e_id)
def det_shells_id_str(ctx: BuildContext) -> str:
shells = det_shells(ctx.shells_by_id.values())
return " ".join(map(lambda s: str(s.e_id), shells))
```
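A minimal sketch of how these builders compose, assuming a `Bridge` instance is constructed elsewhere (the function name is illustrative):

```python
from bridge_sim.model import Bridge
from bridge_sim.sim.build import (
    get_bridge_shells_and_nodes,
    det_nodes,
    det_shells,
    to_deck_nodes,
)
from bridge_sim.sim.model import BuildContext

def summarize_mesh(bridge: Bridge) -> None:
    """Build the full mesh for a bridge and print a few summary counts."""
    ctx = BuildContext([])  # No additional load positions to mesh around.
    shells, nodes = get_bridge_shells_and_nodes(bridge, ctx=ctx)
    deck_shells, pier_shells = shells
    deck_shell_nodes, pier_nodes = nodes
    deck_nodes = to_deck_nodes(deck_shell_nodes)  # Grid of deck nodes, by Z then X.
    print(len(det_nodes(deck_shell_nodes)), "deck nodes")
    print(len(det_shells(deck_shells)), "deck shells")
    print(len(deck_nodes), "rows of deck nodes in Z direction")
```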
#### File: build/d3/__init__.py
```python
import os
from collections import OrderedDict, defaultdict
from itertools import chain
from typing import List, Optional, Tuple
import numpy as np
from bridge_sim.model import PierSettlement, PointLoad, Config, Material
from bridge_sim.sim.model import (
BuildContext,
DeckNodes,
DeckShells,
Node,
PierNodes,
PierShells,
SimParams,
)
from bridge_sim.sim.build import (
det_nodes_id_str,
det_shells_id_str,
det_shells,
get_bridge_shells_and_nodes,
to_deck_nodes,
)
from bridge_sim.sim.run.opensees.build.d3.self_weight import opensees_self_weight_loads
from bridge_sim.sim.run.opensees.build.d3.thermal import (
opensees_thermal_axial_deck_loads,
opensees_thermal_moment_deck_loads,
)
from bridge_sim.sim.run.opensees.build.d3.util import comment
from bridge_sim.util import flatten, print_d, print_i, print_w, round_m
# Print debug information for this file.
# D: str = "fem.run.opensees.build.d3"
D: bool = False
##### Begin nodes #####
def opensees_support_nodes(
c: Config, deck_nodes: DeckNodes, all_support_nodes: PierNodes,
) -> str:
"""Opensees node commands for the supports (ignoring deck).
By 'ignoring deck' we mean that nodes that belong to both supports and the
deck will not be returned by this function but instead by
'opensees_deck_nodes'.
Args:
c: Config, global configuration object.
deck_nodes: DeckNodes, to check for already added support nodes.
all_support_nodes: AllSupportNodes, all support nodes to generate
commands for.
"""
# We want to avoid generating commands for support nodes that also belong to
# the deck, thus we create a set for fast indexing to allow this check.
deck_nodes = set(chain.from_iterable(deck_nodes))
nodes = OrderedDict()
# For each support.
for s_nodes in all_support_nodes:
# For each wall of the support (there are two).
for w_nodes in s_nodes:
# For each ~vertical line of nodes for a z position at top of wall.
for y_nodes in w_nodes:
# For each node in the ~vertical line.
for y, node in enumerate(y_nodes):
# Insert the node, if not part of the deck nodes.
if node not in deck_nodes:
# A dictionary is used in case the node was already added,
# e.g. a bottom node shared by both walls.
nodes[node] = None
return comment(
"support nodes",
"\n".join(map(lambda n: n.command_3d(), nodes.keys())),
units="node nodeTag x y z",
)
def opensees_deck_nodes(c: Config, deck_nodes: DeckNodes) -> str:
"""OpenSees node commands for a bridge deck.
The nodes are created based on given positions of deck nodes.
Args:
c: Config, global configuration object.
"""
node_strings = []
node_strings += list(
map(lambda node: node.command_3d(), list(chain.from_iterable(deck_nodes)),)
)
return comment("deck nodes", "\n".join(node_strings), units="node nodeTag x y z")
##### End nodes #####
##### Begin fixed nodes #####
class FixNode:
"""A command to fix a node in some degrees of freedom (dof).
Args:
node: Node, the node with dof to fix specified.
comment_: Optional[str], an optional comment for the command.
"""
def __init__(
self,
node: Node,
fix_x_translation: bool,
fix_y_translation: bool,
fix_z_translation: bool,
fix_x_rotation: bool,
fix_y_rotation: bool,
fix_z_rotation: bool,
comment: Optional[str] = None,
):
self.node = node
self.fix_x_translation = fix_x_translation
self.fix_y_translation = fix_y_translation
self.fix_z_translation = fix_z_translation
self.fix_x_rotation = fix_x_rotation
self.fix_y_rotation = fix_y_rotation
self.fix_z_rotation = fix_z_rotation
self.comment = comment
def command_3d(self):
"""The command in string format for a TCL file."""
# TODO: Update comment to include support ID.
comment_ = "" if self.comment is None else f"; # {self.comment}"
return (
f"fix {self.node.n_id}"
+ f" {int(self.fix_x_translation)}"
+ f" {int(self.fix_y_translation)}"
+ f" {int(self.fix_z_translation)}"
+ f" {int(self.fix_x_rotation)}"
+ f" {int(self.fix_y_rotation)}"
+ f" {int(self.fix_z_rotation)}"
+ f"{comment_}"
)
def opensees_fixed_abutment_nodes(
c: Config, sim_params: SimParams, deck_nodes: DeckNodes
) -> str:
"""OpenSees fix commands for fixed nodes on the abument.
Fixed for translation but not for rotation.
"""
thermal = (sim_params.axial_delta_temp is not None) or (
sim_params.moment_delta_temp is not None
)
fixed_nodes: List[FixNode] = []
for i_x, x_nodes in enumerate(deck_nodes):
assert len(x_nodes) >= 2
for node in [x_nodes[0], x_nodes[-1]]:
fixed_nodes.append(
FixNode(
node=node,
fix_x_translation=False,
fix_y_translation=True,
fix_z_translation=True,
# fix_z_translation=(not thermal) or (i_x == (len(deck_nodes) // 2)),
fix_x_rotation=False,
fix_y_rotation=False,
fix_z_rotation=False,
)
)
return comment(
"fixed deck nodes",
"\n".join(map(lambda f: f.command_3d(), fixed_nodes)),
units="fix nodeTag x y z rx ry rz",
)
def opensees_fixed_pier_nodes(
c: Config,
sim_params: SimParams,
all_support_nodes: PierNodes,
pier_disp: List[PierSettlement],
) -> str:
"""OpenSees fix commands for fixed support nodes."""
# First, for thermal loading, we determine the piers at each longitudinal
# (x) position, so for each x position we can then determine which piers
# will be fixed in transverse (z) translation.
pier_positions = defaultdict(set)
for p_i, _ in enumerate(all_support_nodes):
pier = c.bridge.supports[p_i]
pier_positions[round_m(pier.x)].add(round_m(pier.z))
pier_positions = {
pier_x: sorted(pier_zs) for pier_x, pier_zs in pier_positions.items()
}
fixed_nodes: List[FixNode] = []
# Iterate through each pier. Note that p_nodes is a tuple of nodes for each
# pier wall. And each wall is a 2-d array of nodes.
for p_i, p_nodes in enumerate(all_support_nodes):
pier = c.bridge.supports[p_i]
# If pier displacement for this pier then select the bottom central node
# for the integrator command, and attach it to the pier.
free_y_trans = False
for ps in pier_disp:
if p_i == ps.pier:
free_y_trans = True
pier = c.bridge.supports[ps.pier]
pier.disp_node = p_nodes[0][len(p_nodes[0]) // 2][-1]
if len(p_nodes[0]) % 2 == 0:
print_w("Pier settlement:")
print_w(" no central node (even number of nodes)")
# For each ~vertical line of nodes for a z position at top of wall.
for y_i, y_nodes in enumerate(p_nodes[0]):
# We will fix the bottom node.
node = y_nodes[-1]
fixed_nodes.append(
FixNode(
node=node,
fix_x_translation=pier.fix_x_translation,
fix_y_translation=False if free_y_trans else pier.fix_y_translation,
# fix_z_translation=fix_pier_z_translation(pier),
fix_z_translation=True,
fix_x_rotation=pier.fix_x_rotation,
fix_y_rotation=pier.fix_y_rotation,
fix_z_rotation=pier.fix_z_rotation,
comment=f"pier {p_i} y {y_i}",
)
)
return comment(
"fixed support nodes",
"\n".join(map(lambda f: f.command_3d(), fixed_nodes)),
units="fix nodeTag x y z rx ry rz",
)
##### End fixed nodes #####
##### Begin sections #####
def opensees_section(section: Material):
"""OpenSees ElasticMembranePlateSection command for a Material."""
# TODO: Implicit information, assumption that if young's modulus in x
# direction is modified that cracking is desired (poisson's set to 0).
CRACK_Z = not np.isclose(section.youngs_x(), section.youngs)
# New orthotropic method.
return (
f"nDMaterial ElasticOrthotropic {section.id}"
f" {section.youngs_x() * 1E6} {section.youngs * 1E6} {section.youngs * 1E6}"
f" {0 if CRACK_Z else section.poissons} {section.poissons} {section.poissons}"
f" {(section.youngs * 1E6) / (2 * (1 + section.poissons))}"
f" {(section.youngs * 1E6) / (2 * (1 + section.poissons))}"
f" {(section.youngs * 1E6) / (2 * (1 + section.poissons))}"
f" {section.density * 1E-3}"
f"\nsection PlateFiber {section.id} {section.id} {section.thickness}"
)
# Old isotropic method.
raise ValueError("Not using orthotropic method")
return (
f"section ElasticMembranePlateSection {section.id}"
+ f" {section.youngs * 1E6} {section.poissons} {section.thickness}"
+ f" {section.density * 1E-3}"
)
def opensees_deck_sections(c: Config):
"""Sections used in the bridge deck."""
return comment(
"deck sections",
"\n".join([opensees_section(section) for section in c.bridge.sections]),
units=(
"section ElasticMembranePlateSection secTag youngs_modulus"
+ " poisson_ratio depth mass_density"
),
)
def opensees_pier_sections(c: Config, all_pier_elements: PierShells):
"""Sections used in the bridge's piers."""
pier_shells = det_shells(all_pier_elements)
# Some pier's may refer to the same section so we create a set to avoid
# rendering duplicate section definitions into the .tcl file.
pier_sections = set([pier_shell.section for pier_shell in pier_shells])
return comment(
"pier sections",
"\n".join([opensees_section(section) for section in pier_sections]),
units=(
"section ElasticMembranePlateSection secTag youngs_modulus"
+ " poisson_ratio depth mass_density"
),
)
##### End sections #####
##### Begin shell elements #####
def opensees_deck_elements(c: Config, deck_elements: DeckShells) -> str:
"""OpenSees element commands for a bridge deck."""
deck_shells = det_shells(deck_elements)
return comment(
"deck shell elements",
"\n".join(map(lambda e: e.command_3d(), deck_shells)),
units="element ShellMITC4 eleTag iNode jNode kNode lNode secTag",
)
def opensees_pier_elements(c: Config, all_pier_elements: PierShells) -> str:
"""OpenSees element commands for a bridge's piers."""
pier_shells = det_shells(all_pier_elements)
return comment(
"pier shell elements",
"\n".join(map(lambda e: e.command_3d(), pier_shells)),
units="element ShellMITC4 eleTag iNode jNode kNode lNode secTag",
)
# End shell elements #
# Begin loads #
def opensees_load(
c: Config, pload: PointLoad, deck_nodes: DeckNodes,
):
"""An OpenSees load command."""
assert deck_nodes[0][0].y == 0
assert deck_nodes[-1][-1].y == 0
best_node = sorted(
chain.from_iterable(deck_nodes),
key=lambda node: node.distance(x=pload.x, y=0, z=pload.z),
)[0]
assert np.isclose(best_node.y, 0)
print(f"before assert load.x = {pload.x}")
print(f"best_node_x = {best_node.x}")
assert np.isclose(best_node.x, pload.x)
assert np.isclose(best_node.z, pload.z)
return f"load {best_node.n_id} 0 {pload.load} 0 0 0 0"
def opensees_loads(
c: Config,
ploads: List[PointLoad],
deck_nodes: DeckNodes,
pier_disp: List[PierSettlement],
):
"""OpenSees load commands for a .tcl file."""
# In case of pier displacement, apply a load at the pier's central bottom node;
# the load intensity doesn't matter here, only the position does.
if len(pier_disp) > 0:
load_str = ""
for ps in pier_disp:
node = c.bridge.supports[ps.pier].disp_node
load_str += f"\nload {node.n_id} 0 {ps.settlement * 1000} 0 0 0 0"
# Otherwise find the deck nodes which best suit given point loads.
else:
load_str = "\n".join(
opensees_load(c=c, pload=pload, deck_nodes=deck_nodes) for pload in ploads
)
return comment("loads", load_str, units="load nodeTag N_x N_y N_z N_rx N_ry N_rz")
##### End loads #####
##### Begin recorders #####
def opensees_translation_recorders(
c: Config, fem_params: SimParams, os_runner: "OSRunner", ctx: BuildContext
) -> str:
"""OpenSees recorder commands for translation."""
# A list of tuples of ResponseType and OpenSees direction index, for
# translation response types, if requested in fem_params.response_types.
translation_response_types = []
# X translation.
x_path = os_runner.x_translation_path(c, fem_params)
translation_response_types.append((x_path, 1))
print_i(f"OpenSees: saving x translation at {x_path}")
# Y translation.
y_path = os_runner.y_translation_path(c, fem_params)
translation_response_types.append((y_path, 2))
print_i(f"OpenSees: saving y translation at {y_path}")
# Z translation.
z_path = os_runner.z_translation_path(c, fem_params)
translation_response_types.append((z_path, 3))
print_i(f"OpenSees: saving z translation at {z_path}")
# Append a recorder string for each response type (recording nodes).
recorder_strs = []
node_str = det_nodes_id_str(ctx)
for response_path, direction in translation_response_types:
print_d(D, f"Adding response path to build: {response_path}")
recorder_strs.append(
f"recorder Node -file {response_path} -node {node_str} -dof"
+ f" {direction} disp"
)
return comment(
"translation recorders",
"\n".join(recorder_strs),
units="recorder Node -file path -node nodeTags -dof direction disp",
)
def opensees_strain_recorders(
c: Config, sim_params: SimParams, os_runner: "OSRunner", ctx: BuildContext
):
"""OpenSees recorder commands for translation."""
return "\n".join(
f"recorder Element"
f" -file {os_runner.strain_path(config=c, sim_params=sim_params, point=point)}"
f" -ele {det_shells_id_str(ctx)} material {str(point)} deformation"
for point in [1, 2, 3, 4]
)
def opensees_forces(
config: Config, sim_params: SimParams, os_runner: "OSRunner", ctx: BuildContext
):
return (
f"recorder Element"
f" -file {os_runner.forces_path(config=config, sim_params=sim_params)}"
f" -ele {det_shells_id_str(ctx)} forces"
)
def opensees_stress_variables(
c: Config, sim_params: SimParams, os_runner: "OSRunner", ctx: BuildContext
) -> Tuple[str, str]:
"""OpenSees stress recorder variables.
These replace <<ELEM_IDS>> and <<FORCES_OUT_FILE>> in the TCL file.
"""
return (
det_shells_id_str(ctx),
os_runner.stress_path(config=c, sim_params=sim_params),
)
def opensees_integrator(c: Config, pier_disp: List[PierSettlement]):
"""The integrator command to use based on FEMParams."""
if len(pier_disp) > 0:
node = c.bridge.supports[pier_disp[0].pier].disp_node
if len(pier_disp) > 1:
print_w(f"Using pier {pier_disp[0].pier} for DisplacementControl")
return (
f"integrator DisplacementControl {node.n_id} 2"
+ f" {pier_disp[0].settlement}"
)
return "integrator LoadControl 1"
def opensees_algorithm(pier_disp: List[PierSettlement]):
"""The algorithm command to use based on FEMParams."""
if len(pier_disp) > 0:
return "algorithm Linear"
return "algorithm Newton"
def opensees_test(pier_disp: List[PierSettlement]):
"""The test command to use based on FEMParams."""
if len(pier_disp) > 0:
return ""
return "test NormDispIncr 1.0e-12 1000"
##### End recorders #####
def build_model_3d(c: Config, expt_params: List[SimParams], os_runner: "OSRunner"):
"""Build OpenSees 3D model files.
TODO: ExptParams -> SimParams.
"""
# Read in the template model file.
dir_path = os.path.dirname(os.path.realpath(__file__))
template_path = os.path.normpath(
os.path.join(dir_path, "../../../../../../", c.os_3d_model_template_path)
)
with open(template_path) as f:
in_tcl = f.read()
# Build a model file for each simulation.
for sim_params in expt_params:
# Setup the 'BuildContext' for this simulation.
sim_ctx = sim_params.build_ctx()
# Determine nodes and shells.
bridge_shells, bridge_nodes = get_bridge_shells_and_nodes(
bridge=c.bridge, ctx=sim_ctx
)
deck_shells, pier_shells = bridge_shells
deck_shell_nodes, pier_nodes = bridge_nodes
deck_nodes = to_deck_nodes(deck_shell_nodes)
# Attaching nodes and shells to the 'SimParams'. This allows the convert
# process to build a deterministic list of nodes and shells. They should
# be deleted again at that point.
sim_params.bridge_shells = bridge_shells
sim_params.bridge_nodes = bridge_nodes
# Build the 3D model file by replacements in the template model file.
out_tcl = (
in_tcl.replace(
"<<DECK_NODES>>", opensees_deck_nodes(c=c, deck_nodes=deck_nodes),
)
.replace(
"<<SUPPORT_NODES>>",
opensees_support_nodes(
c=c, deck_nodes=deck_nodes, all_support_nodes=pier_nodes,
),
)
.replace(
"<<FIX_DECK>>",
opensees_fixed_abutment_nodes(
c=c, sim_params=sim_params, deck_nodes=deck_nodes
),
)
.replace(
"<<FIX_SUPPORTS>>",
opensees_fixed_pier_nodes(
c=c,
sim_params=sim_params,
all_support_nodes=pier_nodes,
pier_disp=sim_params.pier_settlement,
),
)
.replace(
"<<LOAD>>",
opensees_loads(
c=c,
ploads=sim_params.ploads,
deck_nodes=deck_nodes,
pier_disp=sim_params.pier_settlement,
),
)
.replace(
"<<THERMAL_AXIAL_LOAD_DECK>>",
opensees_thermal_axial_deck_loads(
c=c, sim_params=sim_params, deck_elements=deck_shells, ctx=sim_ctx,
),
)
.replace(
"<<THERMAL_MOMENT_LOAD_DECK>>",
opensees_thermal_moment_deck_loads(
c=c, sim_params=sim_params, deck_elements=deck_shells, ctx=sim_ctx,
),
)
.replace(
"<<SELF_WEIGHT>>",
opensees_self_weight_loads(c, sim_params, deck_shells),
)
.replace("<<SUPPORTS>>", "")
.replace("<<DECK_SECTIONS>>", opensees_deck_sections(c=c))
.replace(
"<<TRANS_RECORDERS>>",
opensees_translation_recorders(
c=c, fem_params=sim_params, os_runner=os_runner, ctx=sim_ctx
),
)
.replace(
"<<FORCES>>",
opensees_forces(
config=c, sim_params=sim_params, os_runner=os_runner, ctx=sim_ctx
),
)
.replace(
"<<DECK_ELEMENTS>>",
opensees_deck_elements(c=c, deck_elements=deck_shells),
)
.replace(
"<<PIER_ELEMENTS>>",
opensees_pier_elements(c=c, all_pier_elements=pier_shells),
)
.replace(
"<<PIER_SECTIONS>>",
opensees_pier_sections(c=c, all_pier_elements=pier_shells),
)
.replace(
"<<INTEGRATOR>>",
opensees_integrator(c=c, pier_disp=sim_params.pier_settlement),
)
.replace("<<ALGORITHM>>", opensees_algorithm(sim_params.pier_settlement))
.replace("<<TEST>>", opensees_test(sim_params.pier_settlement))
)
elem_ids, forces_out_file = opensees_stress_variables(
c=c, sim_params=sim_params, os_runner=os_runner, ctx=sim_ctx
)
out_tcl = out_tcl.replace("<<ELEM_IDS>>", elem_ids).replace(
"<<FORCES_OUT_FILE>>", forces_out_file
)
out_tcl = out_tcl.replace(
"<<STRAIN_RECORDERS>>",
opensees_strain_recorders(
c=c, sim_params=sim_params, os_runner=os_runner, ctx=sim_ctx
),
)
# Write the generated model file.
model_path = os_runner.sim_model_path(
config=c, sim_params=sim_params, ext="tcl"
)
with open(model_path, "w") as f:
f.write(out_tcl)
num_nodes = len(set(flatten(bridge_nodes, Node)))
print_i(f"OpenSees: saved 3D model ({num_nodes} nodes) file to {model_path}")
return expt_params
```
#### File: build/d3/self_weight.py
```python
from collections import defaultdict
from typing import Dict
import numpy as np
import scipy.constants as constants
from bridge_sim.model import Config
from bridge_sim.sim.build import det_shells
from bridge_sim.sim.model import SimParams, DeckShells
def opensees_self_weight_loads(
config: Config, sim_params: SimParams, deck_shells: DeckShells
):
"""Loads for the self weight, if in the simulation parameters."""
if not sim_params.self_weight:
return ""
def to_tcl(n_id: int, load_intensity: float):
"""Return an empty string or a load string."""
if np.isclose(load_intensity, 0):
return ""
return f"\nload {n_id} 0 {load_intensity} 0 0 0 0"
yloads_by_nid: Dict[int, float] = defaultdict(lambda: 0)
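# Lump each shell's self weight equally onto its four corner nodes:
# weight = mass * g, divided by the 4 nodes; the 1e3 factor is a unit
# conversion consistent with the force units used by the rest of the model.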
for shell in det_shells(deck_shells):
node_newtons = shell.mass(config) * constants.g / 4 * 1e3
for node in shell.nodes():
yloads_by_nid[node.n_id] += node_newtons
load_str = "".join([to_tcl(n_id, li) for n_id, li in yloads_by_nid.items()])
from bridge_sim.sim.run.opensees.build.d3 import comment
return comment(
"thermal loads", load_str, units="load nodeTag N_x N_y N_z N_rx N_ry N_rz",
)
```
#### File: build/d3/thermal.py
```python
from collections import defaultdict
from enum import Enum
from typing import Dict
import numpy as np
from bridge_sim.model import Config
from bridge_sim.sim.build import det_shells
from bridge_sim.sim.model import BuildContext, DeckShells, Node, SimParams
from bridge_sim.sim.run.opensees.build.d3.util import comment
from bridge_sim.util import print_d
# Print debug information for this file.
D: str = "fem.run.opensees.build.d3.thermal"
D: bool = False  # Redefined to False: disables the print_d debug output below.
def opensees_thermal_axial_deck_loads(
c: Config, sim_params: SimParams, deck_elements: DeckShells, ctx: BuildContext
):
"""Thermal axial loads for deck shells, if in the simulation parameters."""
if sim_params.axial_delta_temp is None:
return ""
class LoadDirection(Enum):
"""Direction a thermal load is applied to a shell."""
XPOS = 1
XNEG = 2
ZPOS = 3
ZNEG = 4
def assert_load_direction(node_0: Node, node_1: Node, direction: LoadDirection):
"""Assert the load direction is perpendicular to the nodes."""
if direction in [LoadDirection.XPOS, LoadDirection.XNEG]:
assert node_0.x == node_1.x
elif direction in [LoadDirection.ZPOS, LoadDirection.ZNEG]:
assert node_0.z == node_1.z
else:
raise ValueError(f"Unknown thermal load direction {direction}")
class ThermalLoad:
"""Total thermal load to be applied to a node."""
def __init__(self):
self.x = 0
self.z = 0
def add_load(self, magnitude: float, direction: LoadDirection):
"""Add a load in a given direction."""
if direction == LoadDirection.XPOS:
self.x += magnitude
elif direction == LoadDirection.XNEG:
self.x -= magnitude
elif direction == LoadDirection.ZPOS:
self.z += magnitude
elif direction == LoadDirection.ZNEG:
self.z -= magnitude
else:
raise ValueError(f"Unknown thermal load direction {direction}")
def to_tcl(self, n_id: int):
"""Return a string with 0, 1, or 2 OpenSees load commands."""
if np.isclose(self.x, 0) and np.isclose(self.z, 0):
return ""
return (
f"\nload {n_id} {np.around(self.x, 3)} 0 {np.around(self.z, 3)} 0 0 0"
)
thermal_loads_by_nid: Dict[int, ThermalLoad] = defaultdict(ThermalLoad)
for shell in det_shells(deck_elements):
print_d(D, shell)
print_d(D, np.array(deck_elements).shape)
print_d(D, "")
print_d(D, f"cte = {c.cte}")
print_d(D, f"d_temp = {sim_params.axial_delta_temp}")
shell_thermal_strain = c.cte * sim_params.axial_delta_temp
shell_youngs_si = shell.section.youngs * 1e6
shell_thermal_stress = shell_youngs_si * shell_thermal_strain
print_d(D, f"shell youngs SI = {shell_youngs_si}")
print_d(D, f"thermal stress = {shell_thermal_stress}")
# For each cross section consider the pair of nodes at the corners.
for n_id_0, n_id_1, load_direction in [
(shell.ni_id, shell.nj_id, LoadDirection.ZPOS),
(shell.nj_id, shell.nk_id, LoadDirection.XNEG),
(shell.nk_id, shell.nl_id, LoadDirection.ZNEG),
(shell.nl_id, shell.ni_id, LoadDirection.XPOS),
]:
print_d(D, f"node ids = {n_id_0}, {n_id_1}")
node_0, node_1 = ctx.nodes_by_id[n_id_0], ctx.nodes_by_id[n_id_1]
assert_load_direction(
node_0=node_0, node_1=node_1, direction=load_direction
)
node_distance = node_0.distance_n(node_1)
assert node_distance > 0
print_d(D, f"node distance = {node_distance}")
cross_section_area = shell.section.thickness * node_distance
print_d(D, f"cross section area = {cross_section_area}")
cross_section_thermal_force_n = shell_thermal_stress * cross_section_area
print_d(D, f"cross section thermal force = {cross_section_thermal_force_n}")
nodal_thermal_force_n = cross_section_thermal_force_n / 2
assert np.isclose(
cross_section_thermal_force_n, (cross_section_thermal_force_n / 2) * 2
)
print_d(
D,
f"Before applying force node_0: x = {thermal_loads_by_nid[n_id_0].x} z = {thermal_loads_by_nid[n_id_0].z}",
)
print_d(
D,
f"Before applying force node_1: x = {thermal_loads_by_nid[n_id_1].x} z = {thermal_loads_by_nid[n_id_1].z}",
)
for n_id in [n_id_0, n_id_1]:
thermal_loads_by_nid[n_id].add_load(
magnitude=nodal_thermal_force_n, direction=load_direction
)
print_d(
D,
f"After applying force node_0: x = {thermal_loads_by_nid[n_id_0].x} z = {thermal_loads_by_nid[n_id_0].z}",
)
print_d(
D,
f"After applying force node_1: x = {thermal_loads_by_nid[n_id_1].x} z = {thermal_loads_by_nid[n_id_1].z}",
)
thermal_load_str = "".join(
[load.to_tcl(n_id) for n_id, load in thermal_loads_by_nid.items()]
)
return comment(
"thermal loads",
thermal_load_str,
units="load nodeTag N_x N_y N_z N_rx N_ry N_rz",
)
def opensees_thermal_moment_deck_loads(
c: Config, sim_params: SimParams, deck_elements: DeckShells, ctx: BuildContext,
):
"""Thermal moment loads for deck shells, if in the simulation parameters."""
if sim_params.moment_delta_temp is None:
return ""
class LoadDirection(Enum):
"""Direction a thermal load is applied to a shell."""
XPOS = 1
XNEG = 2
ZPOS = 3
ZNEG = 4
def assert_load_direction(node_0: Node, node_1: Node, direction: LoadDirection):
"""Assert the load direction is perpendicular to the nodes."""
# TODO: Remove return.
return
if direction in [LoadDirection.XPOS, LoadDirection.XNEG]:
assert node_0.z == node_1.z
elif direction in [LoadDirection.ZPOS, LoadDirection.ZNEG]:
assert node_0.x == node_1.x
else:
raise ValueError(f"Unknown thermal load direction {direction}")
class ThermalLoad:
"""Total thermal load to be applied to a node."""
def __init__(self):
self.x = 0
self.z = 0
def add_load(self, magnitude: float, direction: LoadDirection):
"""Add a load in a given direction."""
if direction == LoadDirection.XPOS:
self.x += magnitude
elif direction == LoadDirection.XNEG:
self.x -= magnitude
elif direction == LoadDirection.ZPOS:
self.z += magnitude
elif direction == LoadDirection.ZNEG:
self.z -= magnitude
else:
raise ValueError(f"Unknown thermal load direction {direction}")
def to_tcl(self, n_id: int):
"""Return a string with 0, 1, or 2 OpenSees load commands."""
if np.isclose(self.x, 0) and np.isclose(self.z, 0):
return ""
return (
f"\nload {n_id} 0 0 0 {np.around(self.x, 3)} 0 {np.around(self.z, 3)}"
)
thermal_loads_by_nid: Dict[int, ThermalLoad] = defaultdict(ThermalLoad)
for shell in det_shells(deck_elements):
print_d(D, shell)
print_d(D, np.array(deck_elements).shape)
print_d(D, "")
print_d(D, f"cte = {c.cte}")
print_d(D, f"d_temp = {sim_params.moment_delta_temp}")
shell_strain_top = c.cte * (sim_params.moment_delta_temp / 2)
print_d(D, f"strain_top = {shell_strain_top}")
shell_youngs_si = shell.section.youngs * 1e6
shell_stress_top = shell_youngs_si * shell_strain_top
print_d(D, f"shell youngs SI = {shell_youngs_si}")
print_d(D, f"stress_top = {shell_stress_top}")
# For each cross section consider the pair of nodes at the corners.
for n_id_0, n_id_1, load_direction in [
(shell.ni_id, shell.nj_id, LoadDirection.XPOS),
(shell.nj_id, shell.nk_id, LoadDirection.ZPOS),
(shell.nk_id, shell.nl_id, LoadDirection.XNEG),
(shell.nl_id, shell.ni_id, LoadDirection.ZNEG),
]:
print_d(D, f"node ids = {n_id_0}, {n_id_1}")
node_0, node_1 = ctx.nodes_by_id[n_id_0], ctx.nodes_by_id[n_id_1]
assert_load_direction(
node_0=node_0, node_1=node_1, direction=load_direction
)
node_distance = node_0.distance_n(node_1)
print_d(D, f"node distance = {node_distance}")
print_d(D, f"section thickness = {shell.section.thickness}")
force_top_n = (
shell_stress_top
* (shell.section.thickness / 2)
* (1 / 2)
* node_distance
)
moment_top_nm = force_top_n * (2 / 3) * (shell.section.thickness / 2)
print_d(D, f"force top n = {force_top_n}")
print_d(D, f"moment nm = {moment_top_nm}")
print_d(
D,
f"Before applying moment: node_0 = {thermal_loads_by_nid[n_id_0].x}, {thermal_loads_by_nid[n_id_0].z}",
)
print_d(
D,
f"Before applying moment: node_1 = {thermal_loads_by_nid[n_id_1].x}, {thermal_loads_by_nid[n_id_1].z}",
)
# The moment per node from the top half is moment_top_nm / 2. The bottom
# half contributes an equal moment_bottom_nm / 2 (moment_bottom_nm equals
# moment_top_nm), so the total applied per node is simply moment_top_nm.
for n_id in [n_id_0, n_id_1]:
thermal_loads_by_nid[n_id].add_load(
magnitude=moment_top_nm, direction=load_direction
)
print_d(
D,
f"After applying moment: node_0 = {thermal_loads_by_nid[n_id_0].x}, {thermal_loads_by_nid[n_id_0].z}",
)
print_d(
D,
f"After applying moment: node_1 = {thermal_loads_by_nid[n_id_1].x}, {thermal_loads_by_nid[n_id_1].z}",
)
thermal_load_str = "".join(
[load.to_tcl(n_id) for n_id, load in thermal_loads_by_nid.items()]
)
return comment(
"thermal loads",
thermal_load_str,
units="load nodeTag N_x N_y N_z N_rx N_ry N_rz",
)
```
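The axial load arithmetic above reduces to a few operations per shell edge. Below is a minimal standalone sketch of that arithmetic with illustrative material values; the numbers are assumptions chosen for demonstration, not bridge_sim defaults.
```python
# Illustrative values only; not taken from bridge_sim's configuration.
cte = 1.2e-5          # coefficient of thermal expansion [1/degC]
delta_temp = 10       # uniform temperature increase [degC]
youngs_mpa = 40000    # Young's modulus [MPa]
thickness = 0.75      # shell thickness [m]
edge_length = 0.5     # distance between the two corner nodes [m]

strain = cte * delta_temp                      # 1.2e-4
stress = youngs_mpa * 1e6 * strain             # 4.8e6 N/m^2
edge_force = stress * thickness * edge_length  # 1.8e6 N over the cross section
per_node = edge_force / 2                      # 9.0e5 N applied to each corner node
print(strain, stress, edge_force, per_node)
```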
#### File: opensees/parse/d3.py
```python
from collections import defaultdict
from timeit import default_timer as timer
from typing import List
import numpy as np
from bridge_sim.model import Config, ResponseType, RT
from bridge_sim.sim.model import SimParams
from bridge_sim.sim.run import Parsed
from bridge_sim.sim.run.opensees.parse.common import opensees_to_numpy
from bridge_sim.util import print_i
def parse_translation_responses_3d(
results_dict,
fem_params: SimParams,
sim_ind: int,
responses_path: str,
response_type: ResponseType,
):
"""Parse translation fem from a 3D OpenSees simulation."""
print(f"response_type = {response_type}")
if response_type not in [RT.XTrans, RT.YTrans, RT.ZTrans]:
raise ValueError("Must be translation response type")
start = timer()
translation_responses = opensees_to_numpy(responses_path)
translation_responses *= -1
print_i(
f"OpenSees: Parsed {response_type.name()} responses in"
+ f" {timer() - start:.2f}s"
)
results_dict[sim_ind][response_type] = translation_responses
def parse_stress_strain_responses_3d(
results_dict, sim_params: SimParams, sim_ind: int, response_paths: List[str],
):
"""Parse stress or strain fem from a 3D OpenSees simulation."""
lines = []
for response_path in response_paths:
with open(response_path) as f:
new_lines = f.read()
if new_lines.endswith("\n"):
new_lines = new_lines[:-1]
new_lines = list(map(float, new_lines.split()))
sections = len(new_lines) // 8
if len(new_lines) % 8 != 0:
raise ValueError("Unexpected length of parsed strains")
per_element_lines = np.array_split(new_lines, sections)
lines.append(per_element_lines)
lines = np.array(lines)
print(lines.shape)
# Save all strain responses under this one key.
results_dict[sim_ind][ResponseType.StrainXXB] = lines
def parse_responses_3d(
c: Config, expt_params: List[SimParams], os_runner: "OSRunner"
) -> Parsed:
"""Parse fem from a 3D OpenSees simulation."""
# A dictionary of simulation index to ResponseType to parsed fem.
results_dict = defaultdict(dict)
for sim_ind, fem_params in enumerate(expt_params):
print(f"Parsing, sim_ind = {sim_ind}")
# Parse x translation fem if necessary.
parse_translation_responses_3d(
results_dict=results_dict,
fem_params=fem_params,
sim_ind=sim_ind,
responses_path=os_runner.x_translation_path(c, fem_params),
response_type=ResponseType.XTrans,
)
# Parse y translation fem if necessary.
parse_translation_responses_3d(
results_dict=results_dict,
fem_params=fem_params,
sim_ind=sim_ind,
responses_path=os_runner.y_translation_path(c, fem_params),
response_type=ResponseType.YTrans,
)
# Parse z translation fem if necessary.
parse_translation_responses_3d(
results_dict=results_dict,
fem_params=fem_params,
sim_ind=sim_ind,
responses_path=os_runner.z_translation_path(c, fem_params),
response_type=ResponseType.ZTrans,
)
# Parse strain fem if necessary.
parse_stress_strain_responses_3d(
results_dict=results_dict,
sim_params=fem_params,
sim_ind=sim_ind,
response_paths=[
os_runner.strain_path(c, fem_params, i) for i in [1, 2, 3, 4]
],
)
return results_dict
```
#### File: jerbaroo/bridge-sim/example.py
```python
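########################################
# Example 1: responses to a point load #
########################################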
import matplotlib.pyplot as plt
from bridge_sim import bridges, configs, model, plot, sim
config = configs.opensees_default(bridges.bridge_narrow)
point_loads = [model.PointLoad(x=5, z=0, load=100)]
responses = sim.responses.load(config, model.RT.YTrans, point_loads)
plot.contour_responses(config, responses, point_loads)
plot.top_view_bridge(config.bridge, piers=True)
plt.tight_layout()
plt.show()
############################################
# Example 2: responses to a static vehicle #
############################################
import matplotlib.pyplot as plt
from bridge_sim import bridges, configs, model, plot, sim
config = configs.opensees_default(bridges.bridge_narrow, shorten_paths=True)
point_loads = model.Vehicle(
# Load intensity of each axle.
load=[5000, 4000, 4000, 5000, 7000],
# Distance between each pair of axles.
axle_distances=[2, 2, 2, 1],
# Width of each axle, distance between point loads.
axle_width=2.5,
# Speed of the vehicle.
kmph=20,
).point_load_pw(config=config, time=3.5, list=True)
responses = sim.responses.load(config, model.RT.YTrans, point_loads)
plot.contour_responses(config, responses, point_loads)
plot.top_view_bridge(config.bridge, piers=True)
plt.tight_layout()
plt.show()
###########################################
# Example 3: responses to pier settlement #
###########################################
import matplotlib.pyplot as plt
from bridge_sim import bridges, configs, sim, model, plot
config = configs.opensees_default(bridges.bridge_wide)
responses = sim.responses.load(
config,
model.RT.YTrans,
pier_settlement=[model.PierSettlement(0, 1.2)]
)
plot.contour_responses(config, responses)
plot.top_view_bridge(config.bridge, piers=True)
plt.tight_layout()
plt.show()
################################################
# Example 4: plotting different response types #
################################################
import matplotlib.pyplot as plt
from bridge_sim import bridges, configs, model, plot, sim
config = configs.opensees_default(bridges.bridge_wide)
plt.figure(figsize=(12, 8))
for subplot, response_type in enumerate([
model.RT.YTrans, model.RT.ZTrans,
model.RT.StrainXXB, model.RT.StrainZZB,
]):
responses = sim.responses.load(
config,
response_type,
pier_settlement=[model.PierSettlement(0, 1.2)],
)
plt.subplot(2, 2, subplot + 1)
plot.contour_responses(config, responses, interp=(200, 60))
plot.top_view_bridge(config.bridge, piers=True)
plt.title(response_type.name())
plt.tight_layout()
plt.show()
#######################################
# Example 5: creating a custom bridge #
#######################################
import matplotlib.pyplot as plt
from bridge_sim import bridges, configs, model, plot, sim
from bridge_sim.bridges import Bridge, Lane, MaterialDeck, MaterialSupport, Support
def new_bridge():
return Bridge(
name="square", # Name used to identify saved/loaded data.
msl=0.5, # Maximum shell length.
length=10, # Length of this bridge.
width=10, # Width of this bridge.
supports=[
Support(
x=5, # X position of center of the support.
z=0, # Z position of center of the support.
length=2, # Length between support columns (X direction).
height=2, # Height from top to bottom of support.
width_top=2, # Width of support column at top (Z direction).
width_bottom=1, # Width of support column at bottom (Z direction).
materials=[ # List of materials for the support columns.
MaterialSupport(
density=0.7,
thickness=0.7,
youngs=40000,
poissons=0.2,
start_frac_len=0,
)
],
fix_z_translation=True,
fix_x_translation=True,
)
],
# List of materials for the bridge deck.
materials=[MaterialDeck(thickness=0.7, youngs=40000, poissons=0.2,)],
# List of lanes where traffic can drive on the bridge.
lanes=[Lane(-1, 1, True)],
)
config = configs.opensees_default(new_bridge)
point_loads = [model.PointLoad(x=8, z=0, load=100)]
responses = sim.responses.load(config, model.RT.YTrans, point_loads)
plot.contour_responses(config, responses, point_loads)
plot.top_view_bridge(config.bridge, piers=True, lanes=True)
plt.tight_layout()
plt.show()
##########################################################
# Example 6: simple animation of traffic over bridge 705 #
##########################################################
from bridge_sim import bridges, configs, plot, traffic
config = configs.opensees_default(bridges.bridge_705(0.5))
time = 10
config.sensor_freq = 1 / 10
traffic_scenario = traffic.normal_traffic(config)
traffic_sequence = traffic_scenario.traffic_sequence(config, time)
traffic = traffic_sequence.traffic()
plot.animate.animate_traffic(
config=config,
traffic_sequence=traffic_sequence,
traffic=traffic,
save="animation.mp4"
)
################################################
# Example 7: animation of responses to traffic #
################################################
# NOPYTEST
from bridge_sim import bridges, configs, model, plot, temperature, traffic
config = configs.opensees_default(bridges.bridge_705(10), il_num_loads=100)
time = 10
config.sensor_freq = 1 / 10
traffic_scenario = traffic.normal_traffic(config)
traffic_sequence = traffic_scenario.traffic_sequence(config, time)
weather = temperature.load("holly-springs-19")
weather["temp"] = temperature.resize(weather["temp"], tmin=-5, tmax=35)
plot.animate.animate_responses(
config=config,
traffic_sequence=traffic_sequence,
response_type=model.ResponseType.YTrans,
units="mm",
save="traffic-responses.mp4",
pier_settlement=[
(model.PierSettlement(4, 1.2), model.PierSettlement(4, 2))],
weather=weather,
start_date="01/05/2019 00:00",
end_date="01/05/2019 23:59",
install_day=30,
start_day=365,
end_day=366,
with_creep=True,
)
#################################################
# Example 8: contour plot of temperature effect #
#################################################
import matplotlib.pyplot as plt
import numpy as np
from bridge_sim import bridges, configs, model, sim, plot, temperature
config = configs.opensees_default(bridges.bridge_705(msl=10))
bridge = config.bridge
response_type = model.RT.StrainXXB
points = [
model.Point(x=x, y=0, z=z)
for x in np.linspace(bridge.x_min, bridge.x_max, num=int(bridge.length * 2))
for z in np.linspace(bridge.z_min, bridge.z_max, num=int(bridge.width * 2))
]
temp_effect = temperature.effect(
config=config, response_type=response_type, points=points, temps_bt=[[20], [22]]
).T[0] # Only considering a single temperature profile.
responses = sim.model.Responses( # Converting to "Responses" for plotting.
response_type=response_type,
responses=[(temp_effect[p], points[p]) for p in range(len(points))],
).without_nan_inf()
plot.contour_responses(config, responses)
plot.top_view_bridge(config.bridge, piers=True)
plt.tight_layout()
plt.show()
###################################################
# Example 9: time series, traffic and temperature #
###################################################
# NOPYTEST
import matplotlib.pyplot as plt
from bridge_sim import bridges, configs, model, sim, temperature, traffic
config = configs.opensees_default(bridges.bridge_705(10), il_num_loads=100)
points = [model.Point(x=10), model.Point(x=20)]
response_type = model.RT.YTrans
# First generate some traffic data.
traffic_sequence = traffic.normal_traffic(config).traffic_sequence(config, 10)
traffic_array = traffic_sequence.traffic_array()
responses_to_traffic = sim.responses.to_traffic_array(
config=config,
traffic_array=traffic_array,
response_type=response_type,
points=points,
)
# And responses to temperature.
weather = temperature.load("holly-springs-19")
weather["temp"] = temperature.resize(weather["temp"], tmin=-5, tmax=31)
temp_responses = sim.responses.to_temperature(
config=config,
points=points,
responses_array=responses_to_traffic,
response_type=response_type,
weather=weather,
start_date="01/05/2019 00:00",
end_date="02/05/2019 00:00",
)
plt.plot((responses_to_traffic + temp_responses).T)
plt.show()
```
#### File: bridge-sim/tests/test_traffic.py
```python
import numpy as np
from bridge_sim.configs import test_config
from bridge_sim.model import Vehicle
from bridge_sim.sim.run import ulm_xzs
from bridge_sim.traffic import normal_traffic, load_traffic
from bridge_sim.util import flatten
config = test_config(msl=10)[0]
def test_load_traffic():
time = 10
traffic_scenario = normal_traffic(config=config)
ts1, t1, ta1 = load_traffic(
config=config,
traffic_scenario=traffic_scenario,
time=time,
)
ts2, t2, ta2 = load_traffic(
config=config,
traffic_scenario=traffic_scenario,
time=time,
)
assert (ta1 == ta2).all()
def test_traffic_scenario():
time = 10
traffic_scenario = normal_traffic(config=config)
traffic_sequence = traffic_scenario.traffic_sequence(config, time)
warmed_up = max(
vs[0].time_left_bridge(config.bridge)
for vs in traffic_sequence.vehicles_per_lane
)
assert traffic_sequence.start_time == warmed_up
assert traffic_sequence.final_time == warmed_up + time
def test_traffic_and_traffic_array():
time = 10
traffic_scenario = normal_traffic(config=config)
traffic_sequence = traffic_scenario.traffic_sequence(config, time)
traffic = traffic_sequence.traffic()
traffic_array = traffic_sequence.traffic_array()
assert len(traffic_sequence.times) == 1 + time / config.sensor_freq
assert len(traffic) == len(traffic_sequence.times)
assert len(traffic_array) == len(traffic_sequence.times)
xzs = ulm_xzs(config)
wheel_track_xs = config.bridge.wheel_track_xs(config)
for t, time in enumerate(traffic_sequence.times):
traffic_vehicles = flatten(traffic[t], Vehicle)
# Assert the amount of load is equal in both cases.
traffic_load = 0
for v in traffic_vehicles:
for point_load in v.point_load_pw(config, time, list=True):
traffic_load += point_load.load
assert np.isclose(traffic_load, sum(traffic_array[t]))
# Assert the position of loads is equal in both cases.
for v in traffic_vehicles:
for point_load in v.point_load_pw(config, time, list=True):
(lo, weight_lo), (hi, weight_hi) = v._axle_track_weights(
point_load.x, wheel_track_xs
)
lo = lo + v.lane * config.il_num_loads
if hi is None:
x = xzs[lo][0]
assert np.isclose(x, point_load.x)
else:
hi = hi + v.lane * config.il_num_loads
x0, z0 = xzs[lo]
x1, z1 = xzs[hi]
assert x0 < point_load.x
assert x1 > point_load.x
if point_load.z < 0:
assert z0 < 0
assert z1 < 0
else:
assert z0 > 0
assert z1 > 0
```
#### File: bridge-sim/tests/test_vehicle.py
```python
from copy import deepcopy
import pytest
import numpy as np
from bridge_sim.configs import test_config
from bridge_sim.model import Vehicle, PointLoad
from bridge_sim.util import flatten
from bridge_sim.vehicles import truck1
config = test_config(msl=10)[0]
def test_constructor():
with pytest.raises(ValueError):
Vehicle(load=[100], axle_distances=[1], axle_width=2.5, kmph=20)
def test_is_load_per_axle():
v = Vehicle(load=100, axle_distances=[1], axle_width=2.5, kmph=20)
assert not v._is_load_per_axle()
v = Vehicle(load=[25, 25], axle_distances=[1], axle_width=2.5, kmph=20)
assert v._is_load_per_axle()
def test_total_load():
v = Vehicle(load=100, axle_distances=[1], axle_width=2.5, kmph=20)
assert v.total_load() == 100
v = Vehicle(load=[25, 25], axle_distances=[1], axle_width=2.5, kmph=20)
assert v.total_load() == 50
def test_load_per_axle():
v = Vehicle(load=100, axle_distances=[1], axle_width=2.5, kmph=20)
assert len(v.load_per_axle()) == 2
assert sum(v.load_per_axle()) == 100
v = Vehicle(load=[25, 25], axle_distances=[1], axle_width=2.5, kmph=20)
assert len(v.load_per_axle()) == 2
assert sum(v.load_per_axle()) == 50
def test_wheel_track_zs():
v = Vehicle(load=100, axle_distances=[1], axle_width=2.5, kmph=20)
assert v.wheel_tracks_zs(config) == [-9.65, -7.15]
assert v.wheel_tracks_zs(config) != [-9.60, -7.15]
# Second lane.
v = Vehicle(load=100, axle_distances=[1], axle_width=2.5, kmph=20, lane=1)
assert v.wheel_tracks_zs(config) == [+7.15, +9.65]
def test_xs_at():
# Lane 0, time 0.
v0 = Vehicle(load=100, axle_distances=[1, 1.5], axle_width=2.5, kmph=20)
xs = v0.xs_at(times=[0], bridge=config.bridge)[0]
assert xs[0] == 0
assert xs[1] == -1
assert xs[2] == -2.5
# Lane 1, time 0.
v1 = Vehicle(load=100, axle_distances=[1, 1.5], axle_width=2.5, kmph=20, lane=1)
xs = v1.xs_at(times=[0], bridge=config.bridge)[0]
assert xs[0] == config.bridge.x_max
assert xs[1] == config.bridge.x_max + 1
assert xs[2] == config.bridge.x_max + 2.5
# Lane 0, times 1 and 8.
xs = v0.xs_at(times=[1, 8], bridge=config.bridge)
front = 5.5555555556
assert np.isclose(xs[0][0], front)
assert np.isclose(xs[0][1], front - 1)
assert np.isclose(xs[0][2], front - 2.5)
front = 44.4444444444
assert np.isclose(xs[1][0], front)
assert np.isclose(xs[1][1], front - 1)
assert np.isclose(xs[1][2], front - 2.5)
# Lane 1, times 1 and 8.
xs = v1.xs_at(times=[1, 8], bridge=config.bridge)
front = config.bridge.x_max - 5.5555555556
assert np.isclose(xs[0][0], front)
assert np.isclose(xs[0][1], front + 1)
assert np.isclose(xs[0][2], front + 2.5)
front = config.bridge.x_max - 44.4444444444
assert np.isclose(xs[1][0], front)
assert np.isclose(xs[1][1], front + 1)
assert np.isclose(xs[1][2], front + 2.5)
# Lane 0, times 0 and 3, starts behind.
v = Vehicle(load=100, axle_distances=[1, 1.5], axle_width=2.5, kmph=20, init_x=-20)
xs = v.xs_at(times=[0, 3], bridge=config.bridge)
front = -20
assert xs[0][0] == front
assert xs[0][1] == front - 1
assert xs[0][2] == front - 2.5
front = -3.3333333333
assert np.isclose(xs[1][0], front)
assert np.isclose(xs[1][1], front - 1)
assert np.isclose(xs[1][2], front - 2.5)
# Lane 1, times 0 and 3, starts behind.
v = Vehicle(load=100, axle_distances=[1, 1.5], axle_width=2.5, kmph=20, lane=1, init_x=-20)
xs = v.xs_at(times=[0, 3], bridge=config.bridge)
front = config.bridge.x_max + 20
assert xs[0][0] == front
assert xs[0][1] == front + 1
assert xs[0][2] == front + 2.5
front = config.bridge.x_max + 3.3333333333
assert np.isclose(xs[1][0], front)
assert np.isclose(xs[1][1], front + 1)
assert np.isclose(xs[1][2], front + 2.5)
def test_x_at():
v = Vehicle(load=100, axle_distances=[1], axle_width=2.5, kmph=20)
assert np.isclose(v.x_at(time=1, bridge=config.bridge), 5.5555555556)
def test_on_bridge():
for lane in [0, 1]:
# Negative time.
assert not Vehicle(load=100, axle_distances=[1], axle_width=2.5, kmph=20, lane=lane).on_bridge(
time=-0.0000001, bridge=config.bridge,
)
# Negative init_x.
assert not Vehicle(load=100, axle_distances=[1], axle_width=2.5, kmph=20, init_x=-0.0000001, lane=lane).on_bridge(
time=0, bridge=config.bridge,
)
# Time 0.
assert Vehicle(load=100, axle_distances=[1], axle_width=2.5, kmph=20, lane=lane).on_bridge(
time=0, bridge=config.bridge,
)
# Time 1, init_x ~= - kmph / 3.6.
assert Vehicle(load=100, axle_distances=[1], axle_width=2.5, kmph=20, init_x=-5.5555555555, lane=lane).on_bridge(
time=1, bridge=config.bridge,
)
assert not Vehicle(load=100, axle_distances=[1], axle_width=2.5, kmph=20, init_x=-5.5555555556, lane=lane).on_bridge(
time=1, bridge=config.bridge,
)
# Time ~= (bridge length + 1) / kmph / 3.6.
assert Vehicle(load=100, axle_distances=[1], axle_width=2.5, kmph=20, lane=lane).on_bridge(
time=18.675, bridge=config.bridge,
)
assert not Vehicle(load=100, axle_distances=[1], axle_width=2.5, kmph=20, lane=lane).on_bridge(
time=18.675001, bridge=config.bridge,
)
def test_passed_bridge():
for lane in [0, 1]:
# Time 0.
assert not Vehicle(load=100, axle_distances=[1], axle_width=2.5, kmph=20, lane=lane).passed_bridge(
time=0, bridge=config.bridge,
)
# Negative time.
assert not Vehicle(load=100, axle_distances=[1], axle_width=2.5, kmph=20, lane=lane).passed_bridge(
time=-1, bridge=config.bridge,
)
# Negative init x.
assert not Vehicle(load=100, axle_distances=[1], axle_width=2.5, kmph=20, lane=lane, init_x=-1).passed_bridge(
time=0, bridge=config.bridge,
)
# Front axle at x_max.
assert not Vehicle(load=100, axle_distances=[1], axle_width=2.5, kmph=20, lane=lane).passed_bridge(
time=18.495, bridge=config.bridge,
)
# Rear axle at x_max.
assert not Vehicle(load=100, axle_distances=[1], axle_width=2.5, kmph=20, lane=lane).passed_bridge(
time=18.675, bridge=config.bridge,
)
# Rear axle passed x_max.
assert Vehicle(load=100, axle_distances=[1], axle_width=2.5, kmph=20, lane=lane).passed_bridge(
time=18.675001, bridge=config.bridge,
)
def test_time_at():
# Lane 0 half way.
halfway_time = 9.2475
assert halfway_time == Vehicle(load=100, axle_distances=[1], axle_width=2.5, kmph=20, lane=0).time_at(
x=102.75 / 2, bridge=config.bridge,
)
# Lane 1 half way.
assert halfway_time == Vehicle(load=100, axle_distances=[1], axle_width=2.5, kmph=20, lane=1).time_at(
x=102.75 / 2, bridge=config.bridge,
)
# Lane 0 full way.
fullway_time = 18.495
assert fullway_time == Vehicle(load=100, axle_distances=[1], axle_width=2.5, kmph=20, lane=0).time_at(
x=102.75, bridge=config.bridge,
)
# Lane 1 full way.
assert fullway_time == Vehicle(load=100, axle_distances=[1], axle_width=2.5, kmph=20, lane=1).time_at(
x=0, bridge=config.bridge,
)
# Lane 0 passed bridge.
passed_time = 19.035
assert passed_time == Vehicle(load=100, axle_distances=[1], axle_width=2.5, kmph=20, lane=0, init_x=-2).time_at(
x=103.75, bridge=config.bridge,
)
# Lane 1 passed bridge.
assert passed_time == Vehicle(load=100, axle_distances=[1], axle_width=2.5, kmph=20, lane=1, init_x=-2).time_at(
x=-1, bridge=config.bridge,
)
def test_time_entering_bridge():
# Lane 0, init x 0.
assert 0 == Vehicle(load=100, axle_distances=[1], axle_width=2.5, kmph=20, lane=0).time_entering_bridge(config.bridge)
# Lane 1, init x 0.
assert 0 == Vehicle(load=100, axle_distances=[1], axle_width=2.5, kmph=20, lane=1).time_entering_bridge(config.bridge)
# Lane 0, init x negative.
assert np.isclose(
0.18,
Vehicle(load=100, axle_distances=[1], axle_width=2.5, kmph=20, lane=0, init_x=-1).time_entering_bridge(config.bridge)
)
# Lane 1, init x negative.
assert np.isclose(
0.18,
Vehicle(load=100, axle_distances=[1], axle_width=2.5, kmph=20, lane=1, init_x=-1).time_entering_bridge(config.bridge)
)
def test_time_entered_bridge():
# Lane 0, init x 0.
assert 0.18 == Vehicle(load=100, axle_distances=[1], axle_width=2.5, kmph=20, lane=0).time_entered_bridge(config.bridge)
# Lane 1, init x 0.
assert 0.18 == Vehicle(load=100, axle_distances=[1], axle_width=2.5, kmph=20, lane=1).time_entered_bridge(config.bridge)
# Lane 0, init x negative.
assert np.isclose(
0.18 * 2,
Vehicle(load=100, axle_distances=[1], axle_width=2.5, kmph=20, lane=0, init_x=-1).time_entered_bridge(config.bridge)
)
# Lane 1, init x negative.
assert np.isclose(
0.18 * 2,
Vehicle(load=100, axle_distances=[1], axle_width=2.5, kmph=20, lane=1, init_x=-1).time_entered_bridge(config.bridge)
)
def test_time_leaving_bridge():
# Lane 0, init x 0.
assert 18.495 == Vehicle(load=100, axle_distances=[1], axle_width=2.5, kmph=20, lane=0).time_leaving_bridge(config.bridge)
# Lane 1, init x 0.
assert 18.495 == Vehicle(load=100, axle_distances=[1], axle_width=2.5, kmph=20, lane=1).time_leaving_bridge(config.bridge)
# Lane 0, init x negative.
assert np.isclose(
18.495 + 0.18,
Vehicle(load=100, axle_distances=[1], axle_width=2.5, kmph=20, lane=0, init_x=-1).time_leaving_bridge(config.bridge)
)
# Lane 1, init x negative.
assert np.isclose(
18.495 + 0.18,
Vehicle(load=100, axle_distances=[1], axle_width=2.5, kmph=20, lane=1, init_x=-1).time_leaving_bridge(config.bridge)
)
def test_time_left_bridge():
# Lane 0, init x 0.
assert 18.495 + 0.18 == Vehicle(load=100, axle_distances=[1], axle_width=2.5, kmph=20, lane=0).time_left_bridge(
config.bridge)
# Lane 1, init x 0.
assert 18.495 + 0.18 == Vehicle(load=100, axle_distances=[1], axle_width=2.5, kmph=20, lane=1).time_left_bridge(
config.bridge)
# Lane 0, init x negative.
assert np.isclose(
18.495 + 2 * 0.18,
Vehicle(load=100, axle_distances=[1], axle_width=2.5, kmph=20, lane=0, init_x=-1).time_left_bridge(
config.bridge)
)
# Lane 1, init x negative.
assert np.isclose(
18.495 + 2 * 0.18,
Vehicle(load=100, axle_distances=[1], axle_width=2.5, kmph=20, lane=1, init_x=-1).time_left_bridge(
config.bridge)
)
def test__axle_track_weights():
for lane in [0, 1]:
v = Vehicle(load=100, axle_distances=[1], axle_width=2.5, kmph=20, lane=lane)
xs = config.bridge.wheel_track_xs(config)
# Wheel x = 0.
(lo, weight_lo), (hi, weight_hi) = v._axle_track_weights(0, xs)
assert lo == 0
assert weight_lo == 1
assert hi is None
assert weight_hi == 0
# Wheel x = 0.000001.
(lo, weight_lo), (hi, weight_hi) = v._axle_track_weights(0.000001, xs)
assert lo == 0
assert weight_lo == 1 - (0.000001 / xs[1])
assert hi == 1
assert weight_hi == 0.000001 / xs[1]
# Wheel x = halfway first bucket.
halfway_x = xs[1] / 2
(lo, weight_lo), (hi, weight_hi) = v._axle_track_weights(halfway_x, xs)
assert lo == 0
assert weight_lo == 0.5
assert hi == 1
assert weight_hi == 0.5
# Wheel x = first bucket.
(lo, weight_lo), (hi, weight_hi) = v._axle_track_weights(xs[1], xs)
assert lo == 1
assert weight_lo == 1
assert hi is None
assert weight_hi == 0
# Wheel x = last bucket.
assert xs[-1] == 102.75
(lo, weight_lo), (hi, weight_hi) = v._axle_track_weights(xs[-1], xs)
assert lo == 599
assert weight_lo == 1
assert hi is None
assert weight_hi == 0
def test_axle_track_indices():
# Mostly tested by function above. So just one small test per lane.
# Lane 0, first bucket.
v = Vehicle(load=100, axle_distances=[1], axle_width=2.5, kmph=20, lane=0)
indices = list(v._axle_track_indices(config, times=[0]))
assert len(indices) == 1
assert len(indices[0]) == 1
(lo, load_lo), (hi, load_hi) = indices[0][0]
assert lo == 0
assert load_lo == 50 # Two axles (50 on one of them).
assert hi is None
assert load_hi == 0
# Lane 0, halfway through first bucket.
(lo, load_lo), (hi, load_hi) = list(v._axle_track_indices(config, times=[0.0001]))[0][0]
assert lo == 0
assert hi == 1
assert load_lo + load_hi == 50
assert load_lo < 50
# Lane 1.
v = Vehicle(load=[25, 40], axle_distances=[1], axle_width=2.5, kmph=20, lane=1)
indices = list(v._axle_track_indices(config, times=[0]))
assert len(indices) == 1
assert len(indices[0]) == 1
(lo, load_lo), (hi, load_hi) = indices[0][0]
assert lo == 1199
assert load_lo == 25 # 25 on the front axle.
assert hi is None
assert load_hi == 0
def test__times_on_bridge():
v = Vehicle(load=100, axle_distances=[1], axle_width=2.5, kmph=20, lane=0)
sorted_times = np.arange(-1, 100, config.sensor_freq)
v_times_indices, v_times = v._times_on_bridge(config, sorted_times)
assert len(v_times_indices) == len(v_times)
for v_times_index, v_time in zip(v_times_indices, v_times):
assert v_time == sorted_times[v_times_index]
assert v.on_bridge(time=sorted_times[v_times_indices[0]], bridge=config.bridge)
assert v.on_bridge(time=sorted_times[v_times_indices[-1]], bridge=config.bridge)
if v_times_indices[0] > 0:
assert not v.on_bridge(time=sorted_times[v_times_indices[0] - 1], bridge=config.bridge)
if v_times_indices[-1] < len(sorted_times) - 1:
assert not v.on_bridge(time=sorted_times[v_times_indices[-1] + 1], bridge=config.bridge)
def test_to_point_load_pw():
entering_time = truck1.time_entering_bridge(bridge=config.bridge)
entered_time = truck1.time_entered_bridge(bridge=config.bridge)
leaving_time = truck1.time_leaving_bridge(bridge=config.bridge)
left_time = truck1.time_left_bridge(bridge=config.bridge)
wagen1_top_lane = deepcopy(truck1)
wagen1_top_lane.lane = 1
assert truck1.lane != wagen1_top_lane.lane
assert truck1.init_x == 0
# As Truck 1 enters the bridge.
wagen1_times = np.linspace(entering_time, entered_time - 0.001, 100)
for time in wagen1_times:
loads = truck1.point_load_pw(config=config, time=time)
flat_loads = flatten(loads, PointLoad)
total_kn = sum(map(lambda l: l.load, flat_loads))
assert total_kn < truck1.total_load()
# As Truck 1 is fully on the bridge.
wagen1_times = np.linspace(entered_time, leaving_time, 100)
for time in wagen1_times:
loads = truck1.point_load_pw(config=config, time=time)
flat_loads = flatten(loads, PointLoad)
total_kn = sum(map(lambda l: l.load, flat_loads))
assert total_kn == truck1.total_load()
# As Truck 1 is leaving the bridge.
wagen1_times = np.linspace(leaving_time + 0.001, left_time, 100)
for time in wagen1_times:
loads = truck1.point_load_pw(config=config, time=time)
flat_loads = flatten(loads, PointLoad)
total_kn = sum(map(lambda l: l.load, flat_loads))
assert total_kn < truck1.total_load()
```
|
{
"source": "jerber/FireORM",
"score": 3
}
|
#### File: fireorm/utils/make_update_obj.py
```python
from fireorm import DELETE_FIELD
def make_update_obj_rec(original, new, current_path, update_d):
# first get all the deletions of fields
del_fields = original.keys() - new.keys()
for field in del_fields:
update_d['.'.join([*current_path, field])] = DELETE_FIELD
# now get all the object and value changes
for field, new_value in new.items():
if not field in original:
update_d['.'.join([*current_path, field])] = new_value
continue
original_value = original[field]
if new_value == original_value:
continue
# this is the case where they are different...
if type(new_value) != type(original_value) or type(new_value) != dict:
update_d['.'.join([*current_path, field])] = new_value
continue
# now you know this is a dict and it's different
make_update_obj_rec(original_value, new_value, [*current_path, field], update_d)
def make_update_obj(original, new):
update_d = {}
make_update_obj_rec(original=original, new=new, current_path=[], update_d=update_d)
return update_d
```
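A minimal usage sketch for the helper above, assuming the fireorm package is installed (DELETE_FIELD is its Firestore deletion sentinel) and that the import path follows the file layout shown:
```python
from fireorm import DELETE_FIELD
from fireorm.utils.make_update_obj import make_update_obj  # path assumed from the layout above

original = {'name': 'Jim', 'age': 30, 'address': {'city': 'Scranton', 'zip': '18503'}}
new = {'name': 'Jim', 'role': 'sales', 'address': {'city': 'Scranton'}}

update = make_update_obj(original, new)
# Nested changes are flattened into dotted Firestore field paths:
# {
#     'age': DELETE_FIELD,          # removed at the top level
#     'address.zip': DELETE_FIELD,  # removed inside a nested dict
#     'role': 'sales',              # newly added field
# }
print(update)
```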
#### File: tests/Models/test_model-old.py
```python
import time
import os
import fireorm
from fireorm.Models import Model, DateModel
from fireorm.Fields import *
class Pet(DateModel):
type = TextField(required=True, default='Dog')
age = NumberField()
class Teacher(DateModel):
name = TextField(required=True)
age = NumberField(required=True)
pet = NestedModel(Pet, default=Pet())
class Meta:
collection_name = 'teacher-testing'
fields_to_print = ['createdAt']
class Manager(Model):
name = TextField(required=True)
age = NumberField(required=True)
company = TextField(required=True, default='Dunder Mifflin')
startedWorkingAt = DateField()
def test_teacher_model():
t = Teacher()
t.age = 20
t.name = 'char'
batch = fireorm.batch()
t.save(batch=batch)
batch.commit()
print(t)
def test_queries():
ts = Teacher.collection.order_by('createdAt', 'asc').limit(2).stream()
print(ts)
def test_user_model():
class Salesman(Model):
name = TextField()
company = TextField()
s = Salesman()
s.name = 'Jim'
s.save()
s = Salesman.collection.get(s.id)
print(s.name) # Jim
def test_student_model():
class Student(Model):
name = TextField()
school = TextField(required=True, default='UPenn')
class Meta:
collection_name = 'students'
fields_to_print = ['name']
s = Student(name='<NAME>')
s.save() # creates a new document in the "students" collection
print(s) #
class ExchangeStudent(Student):
originalCountry = TextField(required=True)
class Meta:
collection_name = 'exchangeStudents'
fields_to_print = None
e = ExchangeStudent(originalCountry='UK')
print(e.school) # UPenn
e.save()
print(e)
def manager_example():
m = Manager(name='<NAME>') # you can pass in fields or set them later
m.age = 45
m.save() # Success! New doc in collection "manager" as: { name: <NAME>, age: 45, company: Dunder Mifflin }
m = Manager()
m.name = '<NAME>'
m.save() # Exception since age is required but not given
def queries_example():
managers = Manager.collection.where('name', '==', '<NAME>').limit(1).stream()
print(managers)
manager = Manager.collection.get('Z8S75KU2n7QQnIm2cExy')
print(manager)
if __name__ == '__main__':
# test_teacher_model()
# print(fireorm.db.conn.__dict__)
# test_queries()
# test_student_model()
queries_example()
# manager_example()
```
|
{
"source": "Jerberus/ISYlib-python",
"score": 3
}
|
#### File: ISYlib-python/ISY/IsyNodeClass.py
```python
from __future__ import print_function
__author__ = '<NAME> <<EMAIL>>'
__copyright__ = "Copyright (C) 2017 <NAME>"
__license__ = "BSD"
import hashlib
from .IsyUtilClass import IsySubClass, val2bool
#from .IsyExceptionClass import *
import ISY.IsyExceptionClass as IsyE
# from IsyClass import *
# from IsyNodeClass import *
# from IsyProgramClass import *
# from IsyVarClass import *
__all__ = ['IsyNode', 'IsyNodeFolder', 'IsyScene']
# library_using_super
class _IsyNodeBase(IsySubClass):
def __init__(self, isy, ndict):
super(_IsyNodeBase, self).__init__(isy, ndict)
self._dimable = self._is_dimable()
#_objtype = (0, "unknown")
_objtype = "unknown"
def on(self, val=255):
""" Send On command to a node
args:
optional value for on level
"""
self._on(val, "DON")
def faston(self, val=255):
""" Send Fast On command to a node
args:
optional value for on level
"""
self._on(val, "DFON")
def _on(self, val, cmd):
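# Validate the requested level, optimistically update the cached ST
# property (value and formatted text) so local reads reflect the new
# state immediately, then send the REST command to the ISY controller.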
if not str(val).isdigit():
raise IsyE.IsyTypeError("On Command : Bad Value : node=%s val=%s" %
(self._mydict["address"], str(val)))
if "property" in self._mydict:
if "ST" in self._mydict["property"]:
self._mydict["property"]["ST"]["value"] = str(val)
if self._dimable:
self._mydict["property"]["ST"]["formatted"] = "{:.0%}".format(val/255)
else:
self._mydict["property"]["ST"]["formatted"] = "On"
self.isy._node_send(self._mydict["address"], "cmd", cmd, val)
def off(self):
""" Send Off command to a node
args: None
"""
self._off("DOF")
def fastoff(self):
""" Send Fast Off command to a node
args: None
"""
self._off("DFOF")
def _off(self, cmd="DOF"):
self.isy._node_send(self._mydict["address"], "cmd", cmd)
if "property" in self._mydict:
# self._mydict["property"]["time"] = 0
if "ST" in self._mydict["property"]:
self._mydict["property"]["ST"]["value"] = str(0)
self._mydict["property"]["ST"]["formatted"] = "Off"
def beep(self):
self.isy._node_send(self._mydict["address"], "cmd", "BEEP")
def get_spoken(self):
""" get notes property 'spoken' """
return self._get_prop("spoken")
spoken = property(get_spoken)
def get_path(self):
return self.isy._node_get_path(self._mydict['address'], self._objtype)
path = property(get_path)
def members_list(self):
pass
def member_iter(self, flag=0):
return self.members_list()
def member_list(self):
if 'members' in self._mydict:
# print("mydict['members'] : ", type(self._mydict['members']) )
if isinstance(self._mydict['members'], dict):
return list(self._mydict['members'].keys())
# if type(self._mydict['members']) == 'list':
return self._mydict['members'][:]
return [ ]
def _is_dimable(self):
if 'type' in self._mydict:
a = self._mydict["type"].split('.')
if a[0] == "1":
return True
return False
def is_dimable(self):
return(self._dimable)
dimable = property(is_dimable)
def get_callback(self):
return self.isy.callback_get(self._mydict["address"])
def set_callback(self, func, *args):
if func is None:
return self.isy.callback_del(self._mydict["address"])
else:
return self.isy.callback_set(self._mydict["address"], func, args)
callback = property(get_callback, set_callback)
def is_member(self, obj):
if "members" in self._mydict:
if isinstance(obj, str):
return obj in self._mydict["members"]
elif isinstance(obj, _IsyNodeBase):
return obj._get_prop("address") in self._mydict["members"]
return False
def member_add(self, node, flag=0):
r = self.isy.soapcomm("SetParent",
node=node._get_prop("address"), nodeType=node.nodeType(),
parent=self._mydict["address"], parentType=self.nodeType())
def _rename(self, cmd, newname):
if self.debug & 0x01:
print("rename : ", self.__class__.__name__, " : ", newname)
#if not isinstance(newname, str) or len(newname) == 0:
# print("newname : ", newname)
# raise IsyE.IsyTypeError("rename : name value not str")
r = self.isy.soapcomm(cmd,
id=self._mydict["address"], name=newname )
return r
# check if scene _contains_ node
def __contains__(self, other):
return self.is_member(other)
# check if obj _contains_ attib
# def __contains__(self, other):
# if isinstance(other, str):
# return other in self._getlist
# else:
# return False
# class MemberDicte(dict):
#
# def __getitem__(self, key):
# val = dict.__getitem__(self, key)
# print('GET', key)
# return val
#
# def __setitem__(self, key, val):
# print('SET', key, val)
# dict.__setitem__(self, key, val)
#
# def __delitem__(self, key):
# print('DEL', key)
# dict.__delitem__(self, key)
#
# def __repr__(self):
# dictrepr = dict.__repr__(self)
# return '%s(%s)' % (type(self).__name__, dictrepr)
#
# def get(self, key, default_val):
# print('GET', key, default_val)
# dict.get(self, key, default_val)
#
# def update(self, *args, **kwargs):
# print('update', args, kwargs)
# for k, v in dict(*args, **kwargs).iteritems():
# self[k] = v
#
# convers a node Id to a int
# eg: "9 4A 5F 2" => 00001001010010100101111100000010 => 155868930
#
def node_id_to_int(h):
a = h.split(' ')
return ( int(a[0], 16) << 24 ) | ( int(a[1], 16) << 16 ) | \
( int(a[2], 16) << 8 ) | int(a[3], 16)
# def rate
# def onlevel
class IsyNode(_IsyNodeBase):
""" Node Class for ISY
Attributes:
status / ST
ramprate / RR
onlevel / OL
Readonly Attributes:
address
formatted
enabled
pnode
type
name
ELK_ID
flag
funtions:
get_rr:
set_rr:
Bugs: Results are undefined for Node class objects that
represent a deleted node
"""
_getlist = ['address', 'enabled', 'formatted', 'family',
'ELK_ID',
'parent', 'parent-type',
'name', 'pnode', 'flag', 'wattage',
'isLoad', 'location', 'description', 'spoken',
'dimable',
'OL', 'RR', 'ST', 'type']
_setlist = ['RR', 'OL', 'status', 'ramprate', 'onlevel', 'enable', 'wattage']
_propalias = {'status': 'ST', 'value': 'ST', 'val': 'ST',
'id': 'address', 'addr': 'address',
'ramprate': 'RR', 'onlevel': 'OL',
"node-flag": "flag"}
#_boollist = [ "enabled" ]
def __init__(self, isy, ndict):
# self._objtype = (1, "node")
self._objtype = "node"
self._nodeprops = None
super(self.__class__, self).__init__(isy, ndict)
# self._dimable = self._is_dimable()
# if not self.isy.eventupdates:
# #update only nodes
# if "node-flag" in self._mydict:
# self.update()
# print("addr", self._mydict["address"], type(self._mydict["address"]))
self._hash = hashlib.sha256(self._mydict["address"].encode('utf-8'))
if self.debug & 0x01:
print("Init Node : \"" + self._mydict["address"] + \
"\" : \"" + self._mydict["name"] + "\"")
# self.isy._printdict(self.__dict__)
# Special case from BaseClass due to ST/RR/OL props
def _get_prop(self, prop):
# print("IN get_prop ", prop)
if prop == "formatted":
prop = "ST"
value = "formatted"
else:
value = "value"
if prop in self._propalias:
prop = self._propalias[prop]
if not prop in self._getlist:
# if prop in ['parent', 'parent-type']:
# return None
raise IsyE.IsyPropertyError("no property Attribute {!s}".format(prop))
# check if we have a property
if prop in ['isLoad', 'location', 'description', 'spoken']:
if self._nodeprops is None:
self._nodenotes = self.isy.node_get_notes(self._mydict["address"])
if self._nodenotes is None:
return None
if prop in self._nodenotes:
return self._nodenotes[prop]
else:
# return None
return ""
if prop in ['ST', 'OL', 'RR']:
# Scene's do not have property values
if "property" in self._mydict and prop in self._mydict["property"]:
# print(self._mydict["property"])
# print("prop value", prop, value)
return self._mydict["property"][prop][value]
else:
return None
# if self._mydict["property"]["time"] == 0:
# self.update()
# elif self.isy.cachetime:
# if time.gmtime() < (self.cachetime + self._mydict["property"]["time"]):
# self.update()
else:
# if prop in self._mydict:
# if prop in self._boollist:
# return(val2bool(self._mydict[prop]))
# else:
# return self._mydict[prop]
# else:
# return None
return super(self.__class__, self)._get_prop(prop)
def _set_prop(self, prop, new_value):
""" generic property set """
# print("IN set_prop ", prop, new_value)
if self.debug & 0x04:
print("_set_prop ", prop, " : ", new_value)
if prop in self._propalias:
prop = self._propalias[prop]
if not prop in self._setlist:
if prop == "ST":
self.on(new_value)
return
else:
raise IsyE.IsyPropertyError("_set_prop : " \
"Invalid property Attribute " + prop)
if prop == 'enable':
self._mydict[prop] = bool(new_value)
self.isy.node_enable(self._mydict["address"], bool(new_value))
elif prop in ['OL', 'RR']:
if not str(new_value).isdigit():
raise IsyE.IsyTypeError("Set Property : Bad Value : node=%s prop=%s val=%s" %
(self._mydict["address"], prop, str(new_value)))
self.isy._node_send(self._mydict["address"], "set", prop, str(new_value))
# self._mydict["property"]["time"] = 0
if prop in self._mydict["property"]:
# if isinstance(new_value, (int, float)) : # already checked with isdigit
self._mydict["property"][prop]["value"] = new_value
# we need to tie this to some action
elif prop in self._mydict:
# self._mydict[prop] = new_value
pass
else:
#print("_set_prop AttributeError")
raise AttributeError("no Attribute " + prop)
def _gettype(self):
""" Type of Node (readonly) """
return "node"
# enable node
def get_enable(self):
""" get enable/disable status a node """
return self._get_prop("enable")
def set_enable(self, new_bool):
""" Set enable status a node
args:
enable bool
"""
return self._set_prop("enable", new_bool)
enable = property(get_enable, set_enable, None, "enable/disable a node")
def get_wattage(self):
""" get wattage """
return self._get_prop("wattage")
def set_wattage(self, watts):
""" set wattage property """
return self.isy.node_set_powerinfo(self._mydict["address"], wattage=watts)
wattage = property(get_wattage, set_wattage)
# ramprate property
# obj method for getting/setting a Node's value
# sets how fast a light fades on.
def get_rr(self):
""" Get/Set RampRate property of Node """
return self._get_prop("RR")
def set_rr(self, new_value):
""" Get/Set RampRate property of Node """
return self._set_prop("RR", new_value)
ramprate = property(get_rr, set_rr)
# On Level property
# obj method for getting/setting a Node's value
# where in most cases the value is how bright the light is
# when turned on
def get_ol(self):
""" Get/Set On Level property of Node """
return self._get_prop("OL")
def set_ol(self, new_value):
""" Get/Set On Level property of Node """
return self._set_prop("OL", new_value)
onlevel = property(get_ol, set_ol)
# def get_fm(self):
# """ property On Level Value of Node """
# return self._get_prop("formatted")
# formatted = property(get_fm)
# status property
# obj method for getting/setting a Node's value
# where in most cases the value is how bright the light is
def get_status(self):
""" Get/Set Status property of Node """
return self._get_prop("ST")
def set_status(self, new_value):
""" Get/Set Status property of Node """
return self.on(new_value)
status = property(get_status, set_status)
def dim(self):
"""
decrease brightness of a device by ~3%
"""
self.isy._node_send(self._mydict["address"], "cmd", "DIM")
def brighten(self):
"""
increase brightness of a device by ~3%
"""
self.isy._node_send(self._mydict["address"], "cmd", "BRT")
#
# readonly to node attribute
#
def rename(self, newname):
return self._rename("RenameNode", newname)
#
#
#
def update(self):
""" force object to manualy update it's propertys """
xurl = "/rest/nodes/" + self._mydict["address"]
if self.debug & 0x01:
print("_updatenode pre _getXML")
_nodestat = self.isy._getXMLetree(xurl)
# del self._mydict["property"]["ST"]
for prop in _nodestat.iter('property'):
tprop = dict()
for k, v in list(prop.items()):
tprop[k] = v
if "id" in tprop:
self._mydict["property"][tprop["id"]] = tprop
# self._mydict["property"]["time"] = time.gmtime()
# experimental
def __bool__(self):
#print("__nonzero__ call", self._mydict["property"]["ST"]["value"], \)
# " :: ", int(self._mydict["property"]["ST"]["value"])
return(bool(int(self._mydict["property"]["ST"]["value"])) > 0)
# use the node address as the hash value
def __hash__(self):
return hash(self._hash.digest())
# def __str__(self):
# print("__str__ call")
# return("my str : " + self._mydict["name"])
def __float__(self):
# print("__float__ call")
return float(int(self._mydict["property"]["ST"]["value"]) / float(255))
class IsyScene(_IsyNodeBase):
""" Node Group Class for ISY
writeonly attributes:
status
readonly attributes:
address
name
flag
deviceGroup
parent
parent-type
ELK_ID
"""
_getlist = ['address', 'name', "ELK_ID", "deviceGroup",
'flag', 'parent', 'parent-type']
_setlist = []
_propalias = {'id': 'address', 'addr': 'address',
"group-flag": "flag"}
def __init__(self, *args):
#self._objtype = (2, "scene")
self._objtype = "scene"
super(self.__class__, self).__init__(*args)
# status property
# obj method for getting/setting a Scene's value
# where in most cases the value is how bright the light is
def set_status(self, new_value):
""" set status value of Scene """
return self._set_prop("ST", new_value)
status = property(None, set_status)
def _getmembers(self):
""" List members of a scene or group """
if "members" in self._mydict:
return self._mydict["members"].keys()
else:
return None
members = property(_getmembers)
def member_list(self):
return self._getmembers()
def is_member(self, obj):
if "members" in self._mydict:
if isinstance(obj, str):
return obj in self._mydict["members"]
elif isinstance(obj, _IsyNodeBase):
return obj._get_prop("address") in self._mydict["members"]
return False
def rename(self, newname):
""" rename node/scene/folder """
return self._rename("RenameGroup", newname)
def member_del(self, node):
r = self.isy.scene_del_node(
self._mydict["address"],
node)
# r = self.isy.soapcomm("RemoveFromGroup",
# node=node._get_prop("address"),
# group=self._mydict["address"])
return r
def member_add_controler(self, node, flag=16):
""" Add Node to scene/group as Responder """
return self.member_add(node, flag)
def member_add_responder(self, node, flag=32):
""" Add Node to scene/group Controller """
return self.member_add(node, flag)
def member_add(self, node, flag=16):
""" Add Node to scene/group """
r = self.isy.scene_add_node(
self._mydict["address"],
node,
flag=flag)
# r = self.isy.soapcomm("MoveNode",
# node=node._get_prop("address"),
# group=self._mydict["address"],
# flag=16)
return r
def member_iter(self, flag=0):
""" iter though members
Folders iter though their contents (nodes/scenes/folders)
Scene iter though their members (nodes)
Nodes iter though sub-nodes (nodes)
"""
if "members" in self._mydict:
for k in list(self._mydict["members"].keys()):
if flag and not(flag & self._mydict["members"][k]):
continue
else:
yield k
def __iter__(self):
return self.member_iter()
# check if scene _contains_ node
def __contains__(self, other):
return self.is_member(other)
class IsyNodeFolder(_IsyNodeBase):
""" Node Folder Class for ISY
readonly attributes:
address
name
flag
"""
_getlist = ['address', 'name', 'flag']
_setlist = []
_propalias = {'id': 'address', 'addr': 'address', "folder-flag": "flag"}
def __init__(self, *args):
#self._objtype = (3, "folder")
self._objtype = "folder"
super(self.__class__, self).__init__(*args)
def member_add(self, node, flag=0):
""" add Node/Scene or Folder to Folder Obj
Args:
node = address, name or Node/Scene/Folder Obj
sets Parent for node/scene/folder to current Obj Folder
calls SOAP SetParent()
"""
r = self.isy.soapcomm("SetParent",
node=node._get_prop("address"), nodeType=node.nodeType(),
parent=self._mydict["address"], parentType=self.nodeType())
return r
def member_del(self, node):
""" del Node/Scene or Folder to Folder Obj
Args:
node = address, name or Node/Scene/Folder Obj
del node/scene/folder to current Obj Folder
(and moves to base folder)
calls SOAP SetParent()
"""
r = self.isy.soapcomm("SetParent",
node=node._get_prop("address"), nodeType=node.nodeType())
return r
def rename(self, newname):
""" renames current Obj Folder
args:
name = new folder name
calls SOAP RenameFolder()
"""
return self._rename("RenameFolder", newname)
def __iter__(self):
return self.member_iter()
def __contains__(self, other):
pass
#
# Do nothing
# (syntax check)
#
if __name__ == "__main__":
import __main__
print(__main__.__file__)
print("syntax ok")
exit(0)
```
|
{
"source": "jerbly/azuremlutils",
"score": 2
}
|
#### File: azuremlutils/tests/test_callback.py
```python
from azuremlutils import AzureRunLogCallback
class FakeRecorder:
metric_names = ["a", "epoch", "time", "mydict"]
log = [1, 2, 3, {"b": 4, "c": 5}]
class FakeLearner:
recorder = FakeRecorder()
class FakeRunContext:
output = []
def log(self, name, value):
self.output.append(f"{name}={value}")
def test_callback():
fake_run_context = FakeRunContext()
cb = AzureRunLogCallback(fake_run_context)
cb.learn = FakeLearner()
cb.after_epoch()
assert fake_run_context.output == ["a=1", "mydict_b=4", "mydict_c=5"]
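# The expected output above documents the callback's apparent behaviour: "epoch" and "time"
# metrics are skipped, scalar metrics are logged as name=value, and dict-valued metrics are
# flattened into underscore-joined keys such as "mydict_b".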
```
|
{
"source": "jerbly/icevision",
"score": 2
}
|
#### File: ultralytics/yolov5/show_results.py
```python
__all__ = ["show_results", "interp"]
from icevision.imports import *
from icevision.utils import *
from icevision.core import *
from icevision.data import *
from icevision.models.base_show_results import base_show_results
from icevision.models.ultralytics.yolov5.dataloaders import (
build_infer_batch,
valid_dl,
infer_dl,
)
from icevision.models.ultralytics.yolov5.prediction import (
predict,
predict_dl,
)
from icevision.models.interpretation import Interpretation
from icevision.models.interpretation import _move_to_device
from icevision.core.record_components import LossesRecordComponent
from yolov5.utils.loss import ComputeLoss
def show_results(
model: nn.Module,
dataset: Dataset,
detection_threshold: float = 0.5,
num_samples: int = 6,
ncols: int = 3,
denormalize_fn: Optional[callable] = denormalize_imagenet,
show: bool = True,
device: Optional[torch.device] = None,
) -> None:
return base_show_results(
predict_fn=predict,
model=model,
dataset=dataset,
num_samples=num_samples,
ncols=ncols,
denormalize_fn=denormalize_fn,
show=show,
detection_threshold=detection_threshold,
device=device,
)
def loop_yolo(dl, model, losses_stats, device):
samples_plus_losses = []
compute_loss = ComputeLoss(model)
with torch.no_grad():
for (x, y), sample in pbar(dl):
torch.manual_seed(0)
x, y = _move_to_device(x, y, device)
preds = model(x)
loss = compute_loss(preds, y)[0]
loss = {
"loss_yolo": float(loss.cpu().numpy()),
"loss_total": float(loss.cpu().numpy()),
}
for l in losses_stats.keys():
losses_stats[l].append(loss[l])
loss_comp = LossesRecordComponent()
loss_comp.set_losses(loss)
sample[0].add_component(loss_comp)
sample[0].set_img(tensor_to_image(x[0]))
samples_plus_losses.append(sample[0])
return samples_plus_losses, losses_stats
_LOSSES_DICT = {
"loss_yolo": [],
"loss_total": [],
}
interp = Interpretation(
losses_dict=_LOSSES_DICT,
valid_dl=valid_dl,
infer_dl=infer_dl,
predict_dl=predict_dl,
)
interp._loop = loop_yolo
```
|
{
"source": "jerbob/python-challenges",
"score": 4
}
|
#### File: easy/length_filter/__init__.py
```python
__author__ = "<NAME>"
"""
Write a function that takes a list of strings and a minimum length number, and returns only the strings that are
longer than the provided number.
Your function must be named "longer_than".
"""
def longer_than():
return
```
|
{
"source": "jerbob/site",
"score": 2
}
|
#### File: main/error_handlers/http_404.py
```python
from werkzeug.exceptions import NotFound
from pysite.base_route import ErrorView
class Error404View(ErrorView):
name = "error_404"
error_code = 404
def get(self, error: NotFound):
return "replace me with a template, 404 not found", 404
```
|
{
"source": "JerBouma/OpenBBTerminal",
"score": 2
}
|
#### File: cryptocurrency/due_diligence/messari_view.py
```python
__docformat__ = "numpy"
# pylint: disable=C0201
import logging
import os
from typing import List, Optional
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import ticker
from matplotlib import dates as mdates
from openbb_terminal.config_terminal import theme
from openbb_terminal import feature_flags as obbff
from openbb_terminal.cryptocurrency import cryptocurrency_helpers
from openbb_terminal.decorators import check_api_key
from openbb_terminal import config_plot as cfgPlot
from openbb_terminal.cryptocurrency.due_diligence.messari_model import (
get_available_timeseries,
get_fundraising,
get_governance,
get_investors,
get_links,
get_marketcap_dominance,
get_messari_timeseries,
get_project_product_info,
get_roadmap,
get_team,
get_tokenomics,
)
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
lambda_long_number_format,
plot_autoscale,
print_rich_table,
is_valid_axes_count,
)
from openbb_terminal.rich_config import console
from openbb_terminal.cryptocurrency.dataframe_helpers import prettify_paragraph
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
@check_api_key(["API_MESSARI_KEY"])
def display_messari_timeseries_list(
limit: int = 10,
query: str = "",
only_free: bool = True,
export: str = "",
) -> None:
"""Display messari timeseries list
[Source: https://messari.io/]
Parameters
----------
limit : int
number to show
query : str
Query to search across all messari timeseries
only_free : bool
Display only timeseries available for free
export : str
Export dataframe data to csv,json,xlsx file
"""
df = get_available_timeseries()
if not df.empty:
if only_free:
df = df.drop(df[df["Requires Paid Key"]].index)
if query:
mask = np.column_stack(
[
df[col].str.contains(query, na=False, regex=False, case=False)
for col in ["Title", "Description"]
]
)
df = df.loc[mask.any(axis=1)]
if df.empty:
console.print(f"\nNo timeseries found with query {query}\n")
else:
print_rich_table(
df.head(limit),
index_name="ID",
headers=list(df.columns),
show_index=True,
title="Messari Timeseries",
)
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"mt",
df,
)
else:
console.print("\nUnable to retrieve data from Messari.\n")
@log_start_end(log=logger)
@check_api_key(["API_MESSARI_KEY"])
def display_messari_timeseries(
coin: str,
timeseries_id: str,
start: str,
end: str,
interval: str,
export: str = "",
external_axes: Optional[List[plt.Axes]] = None,
) -> None:
"""Display messari timeseries
[Source: https://messari.io/]
Parameters
----------
coin : str
Crypto symbol to check market cap dominance
start : int
Initial date like string (e.g., 2021-10-01)
end : int
End date like string (e.g., 2021-10-01)
interval : str
Interval frequency (e.g., 1d)
export : str
Export dataframe data to csv,json,xlsx file
external_axes : Optional[List[plt.Axes]], optional
External axes (1 axis is expected in the list), by default None
"""
df, title = get_messari_timeseries(
coin=coin, timeseries_id=timeseries_id, start=start, end=end, interval=interval
)
if not df.empty:
# This plot has 1 axis
if not external_axes:
_, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfgPlot.PLOT_DPI)
elif is_valid_axes_count(external_axes, 1):
(ax,) = external_axes
else:
return
ax.get_yaxis().set_major_formatter(
ticker.FuncFormatter(lambda x, _: lambda_long_number_format(x))
)
ax.plot(df.index, df[df.columns[0]])
ax.set_title(f"{coin}'s {title}")
ax.set_ylabel(title)
ax.set_xlim(df.index[0], df.index[-1])
theme.style_primary_axis(ax)
if not external_axes:
theme.visualize_output()
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"mt",
df,
)
@log_start_end(log=logger)
@check_api_key(["API_MESSARI_KEY"])
def display_marketcap_dominance(
coin: str,
start: str,
end: str,
interval: str,
export: str = "",
external_axes: Optional[List[plt.Axes]] = None,
) -> None:
"""Display market dominance of a coin over time
[Source: https://messari.io/]
Parameters
----------
coin : str
Crypto symbol to check market cap dominance
start : int
Initial date like string (e.g., 2021-10-01)
end : int
End date like string (e.g., 2021-10-01)
interval : str
Interval frequency (e.g., 1d)
export : str
Export dataframe data to csv,json,xlsx file
external_axes : Optional[List[plt.Axes]], optional
External axes (1 axis is expected in the list), by default None
"""
df = get_marketcap_dominance(coin=coin, start=start, end=end, interval=interval)
if not df.empty:
# This plot has 1 axis
if not external_axes:
_, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfgPlot.PLOT_DPI)
elif is_valid_axes_count(external_axes, 1):
(ax,) = external_axes
else:
return
ax.plot(df.index, df["marketcap_dominance"])
ax.set_title(f"{coin}'s Market Cap Dominance over time")
ax.set_ylabel(f"{coin} Percentage share")
ax.set_xlim(df.index[0], df.index[-1])
theme.style_primary_axis(ax)
if not external_axes:
theme.visualize_output()
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"mcapdom",
df,
)
@log_start_end(log=logger)
@check_api_key(["API_MESSARI_KEY"])
def display_links(coin: str, export: str = "") -> None:
"""Display coin links
[Source: https://messari.io/]
Parameters
----------
coin : str
Crypto symbol to check links
export : str
Export dataframe data to csv,json,xlsx file
external_axes : Optional[List[plt.Axes]], optional
External axes (1 axis is expected in the list), by default None
"""
df = get_links(coin)
if not df.empty:
print_rich_table(
df,
headers=list(df.columns),
show_index=False,
title=f"{coin} Links",
)
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"links",
df,
)
else:
console.print("\nUnable to retrieve data from Messari.\n")
@log_start_end(log=logger)
@check_api_key(["API_MESSARI_KEY"])
def display_roadmap(
coin: str,
descend: bool,
limit: int,
export: str = "",
external_axes: Optional[List[plt.Axes]] = None,
) -> None:
"""Display coin roadmap
[Source: https://messari.io/]
Parameters
----------
coin : str
Crypto symbol to check roadmap
descend: bool
reverse order
limit : int
number to show
export : str
Export dataframe data to csv,json,xlsx file
external_axes : Optional[List[plt.Axes]], optional
External axes (1 axis is expected in the list), by default None
"""
df = get_roadmap(coin)
if not df.empty:
df["Date"] = df["Date"].dt.date
show_df = df
show_df = show_df.sort_values(by="Date", ascending=descend)
show_df.fillna("Unknown", inplace=True)
print_rich_table(
show_df.head(limit),
headers=list(show_df.columns),
show_index=False,
title=f"{coin} Roadmap",
)
df_prices, _ = cryptocurrency_helpers.load_yf_data(
symbol=coin,
currency="USD",
days=4380,
interval="1d",
)
if not df_prices.empty:
if not external_axes:
_, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfgPlot.PLOT_DPI)
elif is_valid_axes_count(external_axes, 1):
(ax,) = external_axes
else:
return
roadmap_dates = np.array(
pd.to_datetime(df["Date"], format="%Y-%m-%d", errors="coerce")
)
df_copy = df
df_copy["Date"] = pd.to_datetime(
df_copy["Date"], format="%Y-%m-%d", errors="coerce"
)
df_copy = df_copy[df_copy["Date"].notnull()]
titles = list(df_copy[df_copy["Date"] > df_prices.index[0]]["Title"])
roadmap_dates = mdates.date2num(roadmap_dates)
counter = 0
max_price = df_prices["Close"].max()
for x in roadmap_dates:
if x > mdates.date2num(df_prices.index[0]):
ax.text(
x,
max_price * 0.7,
titles[counter],
rotation=-90,
verticalalignment="center",
size=6,
)
counter += 1
ax.vlines(
x=roadmap_dates,
color="orange",
ymin=0,
ymax=max_price,
)
ax.plot(df_prices.index, df_prices["Close"].values)
ax.get_yaxis().set_major_formatter(
ticker.FuncFormatter(lambda x, _: lambda_long_number_format(x))
)
ax.set_title(f"{coin.upper()} Price and Roadmap")
ax.set_ylabel("Price [$]")
ax.set_xlim(df_prices.index[0], df_prices.index[-1])
theme.style_primary_axis(ax)
if not external_axes:
theme.visualize_output()
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"rm",
df,
)
else:
console.print("\nUnable to retrieve data from Messari.\n")
@log_start_end(log=logger)
@check_api_key(["API_MESSARI_KEY"])
def display_tokenomics(
coin: str,
coingecko_symbol: str,
export: str = "",
external_axes: Optional[List[plt.Axes]] = None,
) -> None:
"""Display coin tokenomics
[Source: https://messari.io/]
Parameters
----------
coin : str
Crypto symbol to check tokenomics
coingecko_symbol : str
Coingecko crypto symbol to check tokenomics
export : str
Export dataframe data to csv,json,xlsx file
external_axes : Optional[List[plt.Axes]], optional
External axes (2 axes are expected in the list), by default None
"""
df, circ_df = get_tokenomics(coin, coingecko_symbol)
if not df.empty and not circ_df.empty:
df = df.applymap(lambda x: lambda_long_number_format(x, 2))
print_rich_table(
df,
headers=list(df.columns),
show_index=False,
title=f"{coin} Tokenomics",
)
if not external_axes:
_, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfgPlot.PLOT_DPI)
ax2 = ax.twinx()
elif is_valid_axes_count(external_axes, 2):
(ax, ax2) = external_axes
else:
return
df_prices, _ = cryptocurrency_helpers.load_yf_data(
symbol=coin,
currency="USD",
days=4380,
interval="1d",
)
merged_df = pd.concat([circ_df, df_prices], axis=1)
color_palette = theme.get_colors()
ax.plot(
merged_df.index,
merged_df["values"],
color=color_palette[0],
label="Circ Supply",
)
ax.plot(np.nan, label="Price", color=color_palette[1])
if not df_prices.empty:
ax2.plot(merged_df.index, merged_df["Close"], color=color_palette[1])
ax2.get_yaxis().set_major_formatter(
ticker.FuncFormatter(lambda x, _: lambda_long_number_format(x))
)
ax2.set_ylabel(f"{coin} price [$]")
theme.style_twin_axis(ax2)
ax2.yaxis.set_label_position("right")
ax.legend()
ax.get_yaxis().set_major_formatter(
ticker.FuncFormatter(lambda x, _: lambda_long_number_format(x))
)
ax.set_title(f"{coin} circulating supply over time")
ax.set_ylabel("Number of tokens")
ax.set_xlim(merged_df.index[0], merged_df.index[-1])
theme.style_primary_axis(ax)
ax.yaxis.set_label_position("left")
ax.legend()
if not external_axes:
theme.visualize_output()
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"tk",
df,
)
else:
console.print("\nUnable to retrieve data from Messari.\n")
@log_start_end(log=logger)
@check_api_key(["API_MESSARI_KEY"])
def display_project_info(
coin: str,
export: str = "",
) -> None:
"""Display project info
[Source: https://messari.io/]
Parameters
----------
coin : str
Crypto symbol to check project info
export : str
Export dataframe data to csv,json,xlsx file
"""
(df_info, df_repos, df_audits, df_vulns) = get_project_product_info(coin)
if not df_info.empty:
print_rich_table(
df_info,
headers=list(df_info.columns),
show_index=False,
title=f"{coin} General Info",
)
else:
console.print("\nGeneral info not found\n")
if not df_repos.empty:
print_rich_table(
df_repos,
headers=list(df_repos.columns),
show_index=False,
title=f"{coin} Public Repositories",
)
else:
console.print("\nPublic repositories not found\n")
if not df_audits.empty:
print_rich_table(
df_audits,
headers=list(df_audits.columns),
show_index=False,
title=f"{coin} Audits",
)
else:
console.print("\nAudits not found\n")
if not df_vulns.empty:
print_rich_table(
df_vulns,
headers=list(df_vulns.columns),
show_index=False,
title=f"{coin} Vulnerabilities",
)
else:
console.print("\nVulnerabilities not found\n")
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"pi",
df_info,
)
@log_start_end(log=logger)
@check_api_key(["API_MESSARI_KEY"])
def display_investors(
coin: str,
export: str = "",
) -> None:
"""Display coin investors
[Source: https://messari.io/]
Parameters
----------
coin : str
Crypto symbol to check coin investors
export : str
Export dataframe data to csv,json,xlsx file
"""
(df_individuals, df_organizations) = get_investors(coin)
if not df_individuals.empty or not df_organizations.empty:
if not df_individuals.empty:
print_rich_table(
df_individuals,
headers=list(df_individuals.columns),
show_index=False,
title=f"{coin} Investors - Individuals",
)
else:
console.print("\nIndividual investors not found\n")
if not df_organizations.empty:
print_rich_table(
df_organizations,
headers=list(df_organizations.columns),
show_index=False,
title=f"{coin} Investors - Organizations",
)
else:
console.print("\nInvestors - Organizations not found\n")
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"inv",
df_individuals,
)
else:
console.print("\nInvestors not found\n")
@log_start_end(log=logger)
@check_api_key(["API_MESSARI_KEY"])
def display_team(
coin: str,
export: str = "",
) -> None:
"""Display coin team
[Source: https://messari.io/]
Parameters
----------
coin : str
Crypto symbol to check coin team
export : str
Export dataframe data to csv,json,xlsx file
"""
(df_individuals, df_organizations) = get_team(coin)
if not df_individuals.empty or not df_organizations.empty:
if not df_individuals.empty:
print_rich_table(
df_individuals,
headers=list(df_individuals.columns),
show_index=False,
title=f"{coin} Team - Individuals",
)
else:
console.print("\nIndividual team members not found\n")
if not df_organizations.empty:
print_rich_table(
df_organizations,
headers=list(df_organizations.columns),
show_index=False,
title=f"{coin} Team - Organizations",
)
else:
console.print("\nTeam organizations not found\n")
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"team",
df_individuals,
)
else:
console.print("\nTeam not found\n")
@log_start_end(log=logger)
@check_api_key(["API_MESSARI_KEY"])
def display_governance(
coin: str,
export: str = "",
) -> None:
"""Display coin governance
[Source: https://messari.io/]
Parameters
----------
coin : str
Crypto symbol to check coin governance
export : str
Export dataframe data to csv,json,xlsx file
"""
(summary, df) = get_governance(coin)
if summary:
summary = prettify_paragraph(summary)
console.print(summary, "\n")
if not df.empty:
print_rich_table(
df,
headers=list(df.columns),
show_index=False,
title=f"{coin} Governance details",
)
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"gov",
df,
)
else:
console.print(f"\n{coin} governance details not found\n")
@log_start_end(log=logger)
@check_api_key(["API_MESSARI_KEY"])
def display_fundraising(
coin: str,
export: str = "",
) -> None:
"""Display coin fundraising
[Source: https://messari.io/]
Parameters
----------
coin : str
Crypto symbol to check coin fundraising
export : str
Export dataframe data to csv,json,xlsx file
"""
(summary, df_sales_rounds, df_treasury_accs, df_details) = get_fundraising(coin)
if summary:
summary = prettify_paragraph(summary)
console.print(summary, "\n")
if not df_sales_rounds.empty:
df_sales_rounds = df_sales_rounds.applymap(
lambda x: lambda_long_number_format(x, 2)
)
print_rich_table(
df_sales_rounds,
headers=list(df_sales_rounds.columns),
show_index=False,
title=f"{coin} Sales Rounds",
)
else:
console.print("\nSales rounds not found\n")
if not df_treasury_accs.empty:
print_rich_table(
df_treasury_accs,
headers=list(df_treasury_accs.columns),
show_index=False,
title=f"{coin} Treasury Accounts",
)
else:
console.print("\nTreasury accounts not found\n")
if not df_details.empty:
values = []
labels = []
investors = df_details.loc[df_details["Metric"] == "Investors [%]"][
"Value"
].item()
founders = df_details.loc[df_details["Metric"] == "Organization/Founders [%]"][
"Value"
].item()
airdrops = (
df_details.loc[df_details["Metric"] == "Rewards/Airdrops [%]"][
"Value"
].item(),
)
if isinstance(investors, (int, float)) and investors > 0:
values.append(investors)
labels.append("Investors")
if isinstance(founders, (int, float)) and founders > 0:
values.append(founders)
labels.append("Organization/Founders")
if isinstance(airdrops[0], (int, float)) and airdrops[0] > 0:
values.append(airdrops[0])
labels.append("Rewards/Airdrops")
if len(values) > 0 and sum(values) > 0:
_, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfgPlot.PLOT_DPI)
ax.pie(
[s / 100 for s in values],
normalize=False,
labels=labels,
wedgeprops={"linewidth": 0.5, "edgecolor": "white"},
labeldistance=1.05,
autopct="%1.0f%%",
startangle=90,
colors=theme.get_colors()[1:4],
)
ax.set_title(f"{coin} Fundraising Distribution")
if obbff.USE_ION:
plt.ion()
plt.show()
df_details.fillna("-", inplace=True)
print_rich_table(
df_details,
headers=list(df_details.columns),
show_index=False,
title=f"{coin} Fundraising Details",
)
else:
console.print("\nFundraising details not found\n")
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"fr",
df_details,
)
```
#### File: cryptocurrency/tools/tools_controller.py
```python
__docformat__ = "numpy"
# pylint: disable=C0302
import argparse
import logging
from typing import List
from prompt_toolkit.completion import NestedCompleter
from openbb_terminal import feature_flags as obbff
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
EXPORT_ONLY_RAW_DATA_ALLOWED,
check_non_negative_float,
check_percentage_range,
check_positive,
check_positive_float,
)
from openbb_terminal.cryptocurrency.tools import tools_view
from openbb_terminal.menu import session
from openbb_terminal.parent_classes import BaseController
from openbb_terminal.rich_config import console, MenuText
logger = logging.getLogger(__name__)
class ToolsController(BaseController):
"""Tools Controller class"""
CHOICES_COMMANDS = ["aprtoapy", "il"]
PATH = "/crypto/tools/"
def __init__(self, queue: List[str] = None):
"""Constructor"""
super().__init__(queue)
if session and obbff.USE_PROMPT_TOOLKIT:
choices: dict = {c: {} for c in self.controller_choices}
choices["support"] = self.SUPPORT_CHOICES
self.completer = NestedCompleter.from_nested_dict(choices)
def print_help(self):
"""Print help"""
mt = MenuText("crypto/tools/")
mt.add_cmd("aprtoapy")
mt.add_cmd("il")
console.print(text=mt.menu_text, menu="Cryptocurrency - Tools")
@log_start_end(log=logger)
def call_il(self, other_args: List[str]):
"""Process il command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="il",
description="""Tool to calculate Impermanent Loss in a custom liquidity pool.
Users can provide percentage increases for two tokens (and their weight in the liquidity pool)
and verify the impermanent loss that can occur.""",
)
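# Background note (not used by the parser itself): for a 50/50 pool, impermanent loss versus
# simply holding is commonly estimated as 2*sqrt(r)/(1+r) - 1, where r is the ratio of the two
# tokens' price changes; the actual calculation here is delegated to tools_view.display_il.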
parser.add_argument(
"-pcA",
"--priceChangeA",
dest="priceChangeA",
type=check_non_negative_float,
help="Token A price change in percentage",
default=0,
)
parser.add_argument(
"-pcB",
"--priceChangeB",
dest="priceChangeB",
type=check_non_negative_float,
help="Token B price change in percentage",
default=100,
)
parser.add_argument(
"-p",
"--proportion",
dest="proportion",
type=check_percentage_range,
help="""Pool proportion. E.g., 50 means that pool contains 50%% of token A and 50%% of token B,
30 means that pool contains 30%% of token A and 70%% of token B""",
default=50,
)
parser.add_argument(
"-v",
"--value",
dest="value",
type=check_positive_float,
help="Initial amount of dollars that user provides to liquidity pool",
default=1000,
)
parser.add_argument(
"-n",
"--narrative",
dest="narrative",
action="store_true",
help="Flag to show narrative instead of dataframe",
default=False,
)
if other_args and not other_args[0][0] == "-":
other_args.insert(0, "-pcA")
ns_parser = self.parse_known_args_and_warn(
parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
)
if ns_parser:
tools_view.display_il(
price_changeA=ns_parser.priceChangeA,
price_changeB=ns_parser.priceChangeB,
proportion=ns_parser.proportion,
initial_pool_value=ns_parser.value,
narrative=ns_parser.narrative,
export=ns_parser.export,
)
@log_start_end(log=logger)
def call_aprtoapy(self, other_args: List[str]):
"""Process aprtoapy command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="aprtoapy",
description="""
Tool to calculate APY from APR value.
Compounding periods, i.e., the number of times compounded per year,
can be defined with the -c argument.
""",
)
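# Background note (assumed formula; the conversion itself happens in tools_view.display_apy):
# APY = (1 + APR/n)**n - 1 for n compounding periods, so the defaults (APR=100%, n=12) give
# roughly (1 + 1/12)**12 - 1 ≈ 1.61, i.e. about 161% APY.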
parser.add_argument(
"--apr",
dest="apr",
type=check_positive_float,
help="APR value in percentage to convert",
default=100,
)
parser.add_argument(
"-c",
"--compounding",
dest="compounding",
type=check_positive,
help="Number of compounded periods in a year. 12 means compounding monthly",
default=12,
)
parser.add_argument(
"-n",
"--narrative",
dest="narrative",
action="store_true",
help="Flag to show narrative instead of dataframe",
default=False,
)
if other_args and not other_args[0][0] == "-":
other_args.insert(0, "--apr")
ns_parser = self.parse_known_args_and_warn(
parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
)
if ns_parser:
tools_view.display_apy(
apr=ns_parser.apr,
compounding_times=ns_parser.compounding,
narrative=ns_parser.narrative,
export=ns_parser.export,
)
```
#### File: stocks/fundamental_analysis/dcf_model.py
```python
__docformat__ = "numpy"
import logging
import os
from io import BytesIO
from pathlib import Path
from typing import Any, Dict, List, Tuple, Union
from urllib.request import urlopen
from zipfile import ZipFile
import financedatabase as fd
import pandas as pd
import requests
import yfinance as yf
from bs4 import BeautifulSoup
from openpyxl import worksheet
from sklearn.linear_model import LinearRegression
from openbb_terminal.decorators import log_start_end
from openbb_terminal.stocks.fundamental_analysis import dcf_static
from openbb_terminal.helper_funcs import compose_export_path
logger = logging.getLogger(__name__)
CURRENCIES = [
"ALL",
"AFN",
"ARS",
"AWG",
"AUD",
"AZN",
"BSD",
"BBD",
"BYN",
"BZD",
"BMD",
"BOB",
"BAM",
"BWP",
"BGN",
"BRL",
"BND",
"KHR",
"CAD",
"KYD",
"CLP",
"CNY",
"COP",
"CRC",
"HRK",
"CUP",
"CZK",
"DKK",
"DOP",
"XCD",
"EGP",
"SVC",
"EUR",
"FKP",
"FJD",
"GHS",
"GIP",
"GTQ",
"GGP",
"GYD",
"HNL",
"HKD",
"HUF",
"ISK",
"INR",
"IDR",
"IRR",
"IMP",
"ILS",
"JMD",
"JPY",
"JEP",
"KZT",
"KPW",
"KRW",
"KGS",
"LAK",
"LBP",
"LRD",
"MKD",
"MYR",
"MUR",
"MXN",
"MNT",
"MNT",
"MZN",
"NAD",
"NPR",
"ANG",
"NZD",
"NIO",
"NGN",
"NOK",
"OMR",
"PKR",
"PAB",
"PYG",
"PEN",
"PHP",
"PLN",
"QAR",
"RON",
"RUB",
"SHP",
"SAR",
"RSD",
"SCR",
"SGD",
"SBD",
"SOS",
"KRW",
"ZAR",
"LKR",
"SEK",
"CHF",
"SRD",
"SYP",
"TWD",
"THB",
"TTD",
"TRY",
"TVD",
"UAH",
"AED",
"GBP",
"USD",
"UYU",
"UZS",
"VEF",
"VND",
"YER",
"ZWD",
]
@log_start_end(log=logger)
def string_float(string: str) -> float:
"""Convert a string to a float
Parameters
----------
string : str
String to be converted
Returns
-------
number : float
The input string converted to a float (commas and dashes removed; empty strings return 0)
"""
if string.strip().replace(",", "").replace("-", "") == "":
return 0
return float(string.strip().replace(",", "").replace("-", ""))
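# Example: string_float(" 1,234 ") -> 1234.0 and string_float("") -> 0; note that "-" is
# stripped as well, so any minus sign is discarded.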
def insert_row(
name: str, index: str, df: pd.DataFrame, row_v: List[str]
) -> pd.DataFrame:
"""Allows a row to be added given an index and name
Parameters
----------
name : str
Name to be added to df
index : str
The row the new item will go after
df : pd.DataFrame
The dataframe to be modified
row_v : List[str]
The items to be added to the row
Returns
-------
new_df : pd.DataFrame
The new dataframe
"""
pd.options.mode.chained_assignment = None
if name not in df.index:
row_number = df.index.get_loc(index) + 1
df1 = df[0:row_number]
df2 = df[row_number:]
df1.loc[name] = row_v
df_result = pd.concat([df1, df2])
return df_result
return df
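# Hypothetical usage: insert_row("Gross Profit", "Revenue", df, ["0"] * df.shape[1]) places a
# new "Gross Profit" row directly after the existing "Revenue" row, and returns df unchanged
# if a "Gross Profit" row already exists.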
@log_start_end(log=logger)
def set_cell(
ws: worksheet,
cell: str,
text: Union[int, str, float] = None,
font: str = None,
border: str = None,
fill: str = None,
alignment: str = None,
num_form: str = None,
):
"""Set the value for a cell
Parameters
----------
ws : worksheet
The worksheet to be modified
cell : str
The cell that will be modified
text : Union[int, str, float]
The new value of the cell
font : str
The type of font
border : str
The type of border
fill : str
The type of fill
alignment : str
The type of alignment
num_form : str
The format for numbers
"""
if text:
ws[cell] = text
if font:
ws[cell].font = font
if border:
ws[cell].border = border
if fill:
ws[cell].fill = fill
if alignment:
ws[cell].alignment = alignment
if num_form:
ws[cell].number_format = num_form
@log_start_end(log=logger)
def get_fama_raw() -> pd.DataFrame:
"""Get Fama French data
Returns
-------
df : pd.DataFrame
Fama French data
"""
with urlopen( # nosec
"https://mba.tuck.dartmouth.edu/pages/faculty/ken.french/ftp/F-F_Research_Data_Factors_CSV.zip"
) as url:
# Download Zipfile and create pandas DataFrame
with ZipFile(BytesIO(url.read())) as zipfile:
with zipfile.open("F-F_Research_Data_Factors.CSV") as zip_open:
df = pd.read_csv(
zip_open,
header=0,
names=["Date", "MKT-RF", "SMB", "HML", "RF"],
skiprows=3,
)
df = df[df["Date"].apply(lambda x: len(str(x).strip()) == 6)]
df["Date"] = df["Date"].astype(str) + "01"
df["Date"] = pd.to_datetime(df["Date"], format="%Y%m%d")
df["MKT-RF"] = pd.to_numeric(df["MKT-RF"], downcast="float")
df["SMB"] = pd.to_numeric(df["SMB"], downcast="float")
df["HML"] = pd.to_numeric(df["HML"], downcast="float")
df["RF"] = pd.to_numeric(df["RF"], downcast="float")
df["MKT-RF"] = df["MKT-RF"] / 100
df["SMB"] = df["SMB"] / 100
df["HML"] = df["HML"] / 100
df["RF"] = df["RF"] / 100
df = df.set_index("Date")
return df
@log_start_end(log=logger)
def get_historical_5(ticker: str) -> pd.DataFrame:
"""Get 5 year monthly historical performance for a ticker with dividends filtered
Parameters
----------
ticker : str
The ticker to be analyzed
Returns
-------
df : pd.DataFrame
Historical data
"""
tick = yf.Ticker(ticker)
df = tick.history(period="5y", interval="1mo")
df = df[df.index.to_series().apply(lambda x: x.day == 1)]
df = df.drop(["Dividends", "Stock Splits"], axis=1)
df = df.dropna()
return df
@log_start_end(log=logger)
def get_fama_coe(ticker: str) -> float:
"""Use Fama and French to get the cost of equity for a company
Parameters
----------
ticker : str
The ticker to be analyzed
Returns
-------
coef : float
The stock's annualized cost of equity estimated from the Fama-French three-factor model
"""
df_f = get_fama_raw()
df_h = get_historical_5(ticker)
df = df_h.join(df_f)
df = df.dropna()
df["Monthly Return"] = df["Close"].pct_change()
df["Excess Monthly Return"] = df["Monthly Return"] - df["RF"]
df = df.dropna()
x = df[["MKT-RF", "SMB", "HML"]]
y = df["Excess Monthly Return"]
model = LinearRegression().fit(x, y)
coefs = model.coef_
return (
df["RF"].mean()
+ coefs[0] * df["MKT-RF"].mean()
+ coefs[1] * df["SMB"].mean()
+ coefs[2] * df["HML"].mean()
) * 12
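# The value returned above is the mean risk-free rate plus the fitted loadings times the mean
# factor premia (MKT-RF, SMB, HML), i.e. the three-factor expected monthly return, annualized
# by multiplying by 12.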
@log_start_end(log=logger)
def others_in_sector(
ticker: str, sector: str, industry: str, no_filter: bool = False
) -> List[str]:
"""Get other stocks in a ticker's sector
Parameters
----------
ticker : str
The ticker to be excluded
sector : str
The sector to pull from
industry : str
The industry to pull from
no_filter : bool
True means that we do not filter based on market cap
Returns
-------
tickers : List[str]
List of tickers in the same sector
"""
industry = industry.replace("—", " - ")
industry = industry.replace("/", " ")
similars = fd.select_equities(sector=sector, industry=industry)
# This filters similars to match market cap and removes ticker analyzed
if ticker in similars:
market_cap = similars[ticker]["market_cap"]
similars.pop(ticker, None)
if not no_filter:
similars = {
k: v for (k, v) in similars.items() if v["market_cap"] == market_cap
}
similars = list(similars)
return similars
def create_dataframe(ticker: str, statement: str, period: str = "annual"):
"""
Creates a df financial statement for a given ticker
Parameters
----------
ticker : str
The ticker to create a dataframe for
statement : str
The financial statement dataframe to create
period : str
Whether to look at annual, quarterly, or trailing
Returns
-------
statement : pd.DataFrame
The financial statement requested
rounding : int
The amount of rounding to use
statement_currency: str
The currency the financial statements are reported in
"""
if statement not in ["BS", "CF", "IS"]:
raise ValueError("statement variable must be 'BS','CF', or 'IS'")
if period not in ["annual", "quarterly", "trailing"]:
raise ValueError(
"statement variable must be 'annual','quarterly', or 'trailing'"
)
per_url = f"{period}/" if period != "annual" else ""
URL = f"https://stockanalysis.com/stocks/{ticker}/financials/"
URL += dcf_static.statement_url[statement] + per_url
ignores = dcf_static.statement_ignore[statement]
r = requests.get(URL, headers=dcf_static.headers)
if "404 - Page Not Found" in r.text:
return pd.DataFrame(), None, None
try:
df = pd.read_html(r.text)[0]
except ValueError:
return pd.DataFrame(), None, None
soup = BeautifulSoup(r.content, "html.parser")
phrase = soup.find("div", attrs={"class": "info-long svelte-f7kao3"})
phrase = phrase.get_text().lower() if phrase else ""
if "thousand" in phrase:
rounding = 1_000
elif "millions" in phrase:
rounding = 1_000_000
elif "billions" in phrase:
rounding = 1_000_000_000
else:
return pd.DataFrame(), None, None
statement_currency = None
for currency in CURRENCIES:
if currency.lower() in phrase:
statement_currency = currency
break
df = df.set_index("Year")
df = df.loc[:, ~(df == "Upgrade").any()]
for ignore in ignores:
if ignore in df.index:
df = df.drop([ignore])
df = df[df.columns[::-1]]
if statement == "IS":
vals = ["Revenue", dcf_static.gaap_is]
elif statement == "BS":
vals = ["Cash & Equivalents", dcf_static.gaap_bs]
elif statement == "CF":
vals = ["Net Income", dcf_static.gaap_cf]
if vals[0] in df.index:
blank_list = ["0" for _ in df.loc[vals[0]].to_list()]
else:
return pd.DataFrame(), None, None
for i, _ in enumerate(vals[1][1:]):
df = insert_row(vals[1][i + 1], vals[1][i], df, blank_list)
return df, rounding, statement_currency
@log_start_end(log=logger)
def get_similar_dfs(ticker: str, info: Dict[str, Any], n: int, no_filter: bool = False):
"""
Get dataframes for similar companies
Parameters
----------
ticker : str
The ticker to create a dataframe for
info : Dict[str,Any]
The dictionary produced from the yfinance.info function
n : int
The number of similar companies to produce
no_filter : bool
True means that we do not filter based on market cap
Returns
-------
new_list : List[str, pd.DataFrame]
A list of similar companies
"""
similars = others_in_sector(ticker, info["sector"], info["industry"], no_filter)
i = 0
new_list = []
while i < n and similars:
similar_ret = [create_dataframe(similars[0], x)[0] for x in ["BS", "IS", "CF"]]
blank = [x.empty for x in similar_ret]
if True not in blank:
vals = [similars[0], similar_ret]
new_list.append(vals)
i += 1
similars.pop(0)
return new_list
@log_start_end(log=logger)
def clean_dataframes(*args) -> List[pd.DataFrame]:
"""
All dataframes in the list take on the length of the shortest dataframe
Parameters
----------
*args : List[pd.DataFrame]
List of dataframes to clean
Returns
-------
dfs : List[pd.DataFrame]
Cleaned list of dataframes
"""
min_cols = min(x.shape[1] for x in args)
dfs = [x.iloc[:, -min_cols:] for x in args]
return dfs
@log_start_end(log=logger)
def get_value(df: pd.DataFrame, row: str, column: int) -> Tuple[float, float]:
"""
Gets a specific value from the dataframe
Parameters
----------
df : pd.DataFrame
The dataframe to get the information from
row : str
The row to get the information from
column : int
The column to get the information from
Returns
-------
value : List[float]
The information in float format
"""
val1 = df.at[row, df.columns[column]]
if isinstance(val1, str):
fin_val1: float = float(val1.replace(",", "").replace("-", "-0"))
else:
fin_val1 = float(val1)
val2 = df.at[row, df.columns[column + 1]]
if isinstance(val2, str):
fin_val2: float = float(val2.replace(",", "").replace("-", "-0"))
else:
fin_val2 = float(val2)
return fin_val1, fin_val2
@log_start_end(log=logger)
def frac(num: float, denom: float) -> Union[str, float]:
"""
Converts a numerator and a denominator in a fraction, checking for invalid denominators
Parameters
----------
num : float
The numerator
denom : float
The denominator
Returns
-------
value : Union[str, float]
The fraction
"""
return "N/A" if denom == 0 else num / denom
@log_start_end(log=logger)
def generate_path(n: int, ticker: str, date: str) -> Path:
"""
Create the path to save an excel file to
Parameters
----------
n : int
The try number
ticker : str
The ticker to be saved
date : str
The date the dcf was generated
Returns
-------
path : Path
The path to save a file to
"""
val = "" if n == 0 else f"({n})"
export_folder, _ = compose_export_path(
func_name="dcf", dir_path=os.path.abspath(os.path.dirname(__file__))
)
trypath = os.path.join(
export_folder,
f"{ticker} {date}{val}.xlsx",
)
return Path(trypath)
```
|
{
"source": "Jerbuck/content-playground",
"score": 3
}
|
#### File: Jerbuck/content-playground/custom_object.py
```python
import sys
username = None
password = <PASSWORD>
base_url = None
objects = []
class CustomObject(object):
"""Data class for custom object."""
def __init__(self):
self.username = None
self.password = <PASSWORD>
self.base_url = None
self.objects = []
def print(self):
"""Print the contents of the custom object."""
print(f"\n--> Username: {self.username}")
print(f"--> Password: {self.password}")
print(f"--> Base URL: {self.base_url}")
print(f"--> Objects: {self.objects}\n")
def load(self, data):
"""Load the custom object from a dictionary."""
try:
self.username = data["username"]
self.password = data["password"]
self.base_url = data["base-url"]
self.objects = data['objects']
if (self.username == None) or \
(self.password == None) or \
(self.base_url == None) or \
(self.objects == []):
print(f"\nERROR: Load called with null data.")
sys.exit()
except:
print(f"\nERROR: Unable to parse data into custom object.")
sys.exit()
if __name__ == '__main__':
custom_object = CustomObject()
custom_object.print()
```
#### File: content-playground/readers/xml_reader.py
```python
import sys
import os
import xmltodict
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
from readers.base_reader import BaseReader
from custom_object import CustomObject
class XmlReader(BaseReader):
"""Class to parse a XML file given the file path."""
def __init__(self, file_path):
try:
with open(file_path, "r") as file:
xml_contents = file.read()
self.data = xmltodict.parse(xml_contents)['content']
except:
print(f"\nERROR: Invalid XML format.")
sys.exit()
if __name__ == '__main__':
reader = XmlReader("sample.xml")
custom_object = CustomObject()
custom_object.load(reader.data)
custom_object.print()
```
#### File: content-playground/tests/test_yaml_reader.py
```python
import unittest
import os
import sys
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
from custom_object import CustomObject
from readers.yaml_reader import YamlReader
class Test_YamlReader(unittest.TestCase):
def test_custom_object_load_with_bad_file_path_verify_sys_exit_is_called(self):
with self.assertRaises(SystemExit):
custom_object = CustomObject()
reader = YamlReader("bad-path.bad")
custom_object.load(reader.data)
def test_custom_object_load_with_invalid_file_verify_sys_exit_is_called(self):
with self.assertRaises(SystemExit):
custom_object = CustomObject()
reader = YamlReader("sample.xml") #json works regardless
custom_object.load(reader.data)
def test_custom_object_load_with_good_data_verify_field_contents_are_correct(self):
custom_object = CustomObject()
reader = YamlReader("sample.yml")
custom_object.load(reader.data)
self.assertEqual(custom_object.username, "fake-username")
self.assertEqual(custom_object.password, "<PASSWORD>")
self.assertEqual(custom_object.base_url, "https://www.fake-url.com")
self.assertEqual(len(custom_object.objects), 3)
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "Jerbuck/dnac-sdk-playground",
"score": 3
}
|
#### File: dnac-sdk-playground/dnac_sdk_playground/args_user.py
```python
import argparse
# Itemize generic arguments below:
filePath = ""
class ArgsUser(object):
def __init__(self):
self.__parse_args()
def __parse_args(self):
"""Generic argument user class for inheritance."""
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", help="provide a file path for consumption")
args = parser.parse_args()
self.filePath = args.file
```
|
{
"source": "jerbzz/agile-esp8266-indicator",
"score": 3
}
|
#### File: jerbzz/agile-esp8266-indicator/wifimgr.py
```python
import network
import socket
import ure
import time
ap_ssid = "Agile Indicator Setup"
ap_password = ""
ap_authmode = 0 # WPA2
connect_to_open_wifis = False
# If `link_to_next_webui` is set to `True` on the successfully connected page there is a link to the IP of the ESP in the newly connected WiFi.
# This is useful if the ESP shows an other web interface after the WiFiManager, because the user can just click the link and doesn't have to search the new IP of the ESP.
link_to_next_webui = True
NETWORK_PROFILES = 'wifi.dat'
wlan_ap = network.WLAN(network.AP_IF)
wlan_sta = network.WLAN(network.STA_IF)
server_socket = None
def unquote_plus(s):
r = s.replace('+', ' ').split('%')
for i in range(1, len(r)):
s = r[i]
try:
r[i] = chr(int(s[:2], 16)) + s[2:]
except ValueError:
r[i] = '%' + s
return ''.join(r)
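# Example: unquote_plus("My%20Network+Name") -> "My Network Name" (decodes '+' and %XX escapes
# from form-encoded SSIDs and passwords).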
def get_connection():
"""return a working WLAN(STA_IF) instance or None"""
# First check if there already is any connection:
if wlan_sta.isconnected():
return wlan_sta
connected = False
try:
# ESP connecting to WiFi takes time, wait a bit and try again:
time.sleep(3)
if wlan_sta.isconnected():
return wlan_sta
# Read known network profiles from file
profiles = read_profiles()
# Search WiFis in range
wlan_sta.active(True)
networks = wlan_sta.scan()
AUTHMODE = {0: "open", 1: "WEP", 2: "WPA-PSK", 3: "WPA2-PSK", 4: "WPA/WPA2-PSK"}
for ssid, bssid, channel, rssi, authmode, hidden in sorted(networks, key=lambda x: x[3], reverse=True):
ssid = ssid.decode('utf-8')
encrypted = authmode > 0
print("ssid: %s chan: %d rssi: %d authmode: %s" % (ssid, channel, rssi, AUTHMODE.get(authmode, '?')))
if encrypted:
if ssid in profiles:
password = profiles[ssid]
connected = do_connect(ssid, password)
else:
print("skipping unknown encrypted network")
elif connect_to_open_wifis:
connected = do_connect(ssid, None)
if connected:
break
except OSError as e:
print("exception", str(e))
# start web server for connection manager:
if not connected:
connected = start()
return wlan_sta if connected else None
def read_profiles():
with open(NETWORK_PROFILES) as f:
lines = f.readlines()
profiles = {}
for line in lines:
ssid, password = line.strip("\n").split(";")
profiles[ssid] = password
return profiles
def write_profiles(profiles):
lines = []
for ssid, password in profiles.items():
lines.append("%s;%s\n" % (ssid, password))
with open(NETWORK_PROFILES, "w") as f:
f.write(''.join(lines))
def do_connect(ssid, password):
wlan_sta.active(True)
if wlan_sta.isconnected():
return None
print('Trying to connect to %s...' % ssid)
wlan_sta.connect(ssid, password)
for retry in range(100):
connected = wlan_sta.isconnected()
if connected:
break
time.sleep(0.1)
print('.', end='')
if connected:
print('\nConnected. Network config: ', wlan_sta.ifconfig())
else:
print('\nFailed. Not Connected to: ' + ssid)
return connected
def send_header(client, status_code=200, content_length=None, redirect=None):
client.sendall("HTTP/1.0 {} OK\r\n".format(status_code))
if redirect:
client.sendall("Location: {}\r\n".format(redirect))
else:
client.sendall("Content-Type: text/html\r\n")
if content_length is not None:
client.sendall("Content-Length: {}\r\n".format(content_length))
client.sendall("\r\n")
def send_response(client, payload, status_code=200):
content_length = len(payload)
send_header(client, status_code, content_length)
if content_length > 0:
client.sendall(payload)
client.close()
def handle_root(client):
wlan_sta.active(True)
ssids = sorted(ssid.decode('utf-8') for ssid, *_ in wlan_sta.scan())
send_header(client)
client.sendall("""\
<html>
<h1 style="color: #5e9ca0; text-align: center;">
<span style="color: #ff0000;">
Agile Indicator WiFi Setup
</span>
</h1>
<form action="configure" method="post">
<table style="margin-left: auto; margin-right: auto;">
<tbody>
""")
while len(ssids):
ssid = ssids.pop(0)
client.sendall("""\
<tr>
<td colspan="2">
<input type="radio" name="ssid" value="{0}" />{0}
</td>
</tr>
""".format(ssid))
client.sendall("""\
<tr>
<td>Password:</td>
<td><input name="password" type="password" /></td>
</tr>
</tbody>
</table>
<p style="text-align: center;">
<input type="submit" value="Submit" />
</p>
</form>
</html>
""")
client.close()
def handle_configure(client, content):
match = ure.search("ssid=([^&]*)&password=(.*)", content)
if match is None:
send_response(client, "Parameters not found", status_code=400)
return False
# version 1.9 compatibility
try:
ssid = unquote_plus(match.group(1).decode("utf-8"))
password = unquote_plus(match.group(2).decode("utf-8"))
except UnicodeEncodeError:
ssid = unquote_plus(match.group(1))
password = unquote_plus(match.group(2))
if len(ssid) == 0:
send_response(client, "SSID must be provided", status_code=400)
return False
if do_connect(ssid, password):
response = """\
<html>
<center>
<br><br>
<h1 style="color: #5e9ca0; text-align: center;">
<span style="color: #ff0000;">
ESP successfully connected to WiFi network %(ssid)s with IP %(ip)s.
</span>
</h1>""" % dict(ssid=ssid, ip=wlan_sta.ifconfig()[0])
if link_to_next_webui:
response += """\
<p style="text-align: center;">
<a href="http://%(ip)s/">To new Interface</a><br>
(You must be connected to the set network to follow this Link)
</p>
""" % dict(ip=wlan_sta.ifconfig()[0])
response += """\
<br><br>
</center>
</html>
"""
send_response(client, response)
try:
profiles = read_profiles()
except OSError:
profiles = {}
profiles[ssid] = password
write_profiles(profiles)
time.sleep(5)
return True
else:
response = """\
<html>
<center>
<h1 style="color: #5e9ca0; text-align: center;">
<span style="color: #ff0000;">
ESP could not connect to WiFi network %(ssid)s.
</span>
</h1>
<br><br>
<form>
<input type="button" value="Go back!" onclick="history.back()"></input>
</form>
</center>
</html>
""" % dict(ssid=ssid)
send_response(client, response)
return False
def handle_not_found(client, url):
if 'favicon' in url:
send_header(client, status_code=404)
else:
send_header(client, status_code=307, redirect='/')
client.close()
def stop():
global server_socket
if server_socket:
server_socket.close()
server_socket = None
def start(port=80):
global server_socket
addr = socket.getaddrinfo('0.0.0.0', port)[0][-1]
stop()
wlan_sta.active(True)
wlan_ap.active(True)
wlan_ap.config(essid=ap_ssid, password=<PASSWORD>, authmode=ap_authmode)
server_socket = socket.socket()
server_socket.bind(addr)
server_socket.listen(1)
print('Connect to WiFi ssid ' + ap_ssid + ', default password: ' + ap_password)
print('and access the ESP via your favorite web browser at 192.168.4.1.')
print('Listening on:', addr)
while True:
if wlan_sta.isconnected():
return True
client, addr = server_socket.accept()
print('client connected from', addr)
try:
client.settimeout(5.0)
request = bytearray()
try:
while "\r\n\r\n" not in request:
request.extend(client.recv(512))
except OSError:
pass
if "HTTP" not in request:
# skip invalid requests
continue
if "POST" in request and "Content-Length: " in request:
content_length = int(ure.search("Content-Length: ([0-9]+)?", bytes(request)).group(1))
content = bytearray(request[bytes(request).index(b"\r\n\r\n") + 4:])
content_length_remaining = content_length - len(content)
while content_length_remaining > 0:
chunk = client.recv(512)
content.extend(chunk)
content_length_remaining -= len(chunk)
request = bytes(request)
print("Request is: {}".format(request))
# version 1.9 compatibility
try:
url = ure.search("(?:GET|POST) /(.*?)(?:\\?.*?)? HTTP", request).group(1).decode("utf-8").rstrip("/")
except Exception:
url = ure.search("(?:GET|POST) /(.*?)(?:\\?.*?)? HTTP", request).group(1).rstrip("/")
print("URL is {}".format(url))
if url == "":
handle_root(client)
elif url == "configure":
handle_configure(client, bytes(content))
else:
handle_not_found(client, url)
finally:
client.close()
```
|
{
"source": "jercas/Competition",
"score": 3
}
|
#### File: Competition/brandClassification/stepBased_lr_decay.py
```python
import numpy as np
def stepBased_decay(epoch):
# Initialize the base learning rate α, the drop factor, and the number of epochs between drops.
initialAlpha = 0.01
# Drop the learning rate by a factor of 0.5 every 5 epochs.
factor = 0.5
dropEvery = 5
# Compute learning rate for the current epoch.
alpha = initialAlpha * (factor ** np.floor((1 + epoch) / dropEvery))
return float(alpha)
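# Example schedule with these settings: epoch 0 -> 0.01, epoch 4 -> 0.005, epoch 9 -> 0.0025;
# because of the (1 + epoch) term the first drop lands on epoch 4 rather than epoch 5.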
```
|
{
"source": "jercas/ML_DL_aiPlayground",
"score": 2
}
|
#### File: CNN/CIFAR10/vgg19_keras.py
```python
import keras
import numpy as np
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D, AveragePooling2D
from keras.initializers import he_normal
from keras import optimizers
from keras.callbacks import LearningRateScheduler, TensorBoard
from keras.layers.normalization import BatchNormalization
from keras.utils.data_utils import get_file
num_classes = 10
batch_size = 128
epochs = 200
iterations = 391
dropout = 0.5
weight_decay = 0.0001
log_filepath = r'./vgg19_retrain_logs/'
def scheduler(epoch):
if epoch < 80:
return 0.1
if epoch < 160:
return 0.01
return 0.001
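# Piecewise-constant schedule: 0.1 for epochs below 80, 0.01 for epochs 80-159, 0.001 afterwards.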
WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg19_weights_tf_dim_ordering_tf_kernels.h5'
filepath = get_file('vgg19_weights_tf_dim_ordering_tf_kernels.h5', WEIGHTS_PATH, cache_subdir='models')
# data loading
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# data preprocessing
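# The constants below are the per-channel RGB means used by the original ImageNet-trained VGG
# models (R=123.680, G=116.779, B=103.939), presumably so inputs match the pretrained weights.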
x_train[:,:,:,0] = (x_train[:,:,:,0]-123.680)
x_train[:,:,:,1] = (x_train[:,:,:,1]-116.779)
x_train[:,:,:,2] = (x_train[:,:,:,2]-103.939)
x_test[:,:,:,0] = (x_test[:,:,:,0]-123.680)
x_test[:,:,:,1] = (x_test[:,:,:,1]-116.779)
x_test[:,:,:,2] = (x_test[:,:,:,2]-103.939)
# build model
model = Sequential()
# Block 1
model.add(Conv2D(64, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block1_conv1', input_shape=x_train.shape[1:]))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block1_conv2'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool'))
# Block 2
model.add(Conv2D(128, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block2_conv1'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(128, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block2_conv2'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool'))
# Block 3
model.add(Conv2D(256, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block3_conv1'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(256, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block3_conv2'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(256, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block3_conv3'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(256, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block3_conv4'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool'))
# Block 4
model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block4_conv1'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block4_conv2'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block4_conv3'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block4_conv4'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool'))
# Block 5
model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block5_conv1'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block5_conv2'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block5_conv3'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block5_conv4'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool'))
# model modification for cifar-10
model.add(Flatten(name='flatten'))
model.add(Dense(4096, use_bias = True, kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='fc_cifa10'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(dropout))
model.add(Dense(4096, kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='fc2'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(dropout))
model.add(Dense(10, kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='predictions_cifa10'))
model.add(BatchNormalization())
model.add(Activation('softmax'))
# load pretrained weight from VGG19 by name
model.load_weights(filepath, by_name=True)
# -------- optimizer setting -------- #
sgd = optimizers.SGD(lr=.1, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
tb_cb = TensorBoard(log_dir=log_filepath, histogram_freq=0)
change_lr = LearningRateScheduler(scheduler)
cbks = [change_lr,tb_cb]
print('Using real-time data augmentation.')
datagen = ImageDataGenerator(horizontal_flip=True,
width_shift_range=0.125,height_shift_range=0.125,fill_mode='constant',cval=0.)
datagen.fit(x_train)
model.fit_generator(datagen.flow(x_train, y_train,batch_size=batch_size),
steps_per_epoch=iterations,
epochs=epochs,
callbacks=cbks,
validation_data=(x_test, y_test))
model.save('retrain.h5')
```
#### File: ML_DL_aiPlayground/DNN/SequentialTest.py
```python
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense,Activation
def binaryClassification():
'''For a single-input model with 2 classes (binary classification)'''
    # Build the Sequential model
    # by passing a list of layers to the constructor.
    model = Sequential([
        Dense(32, input_dim=100),
        Activation('relu'),
        Dense(1, activation='sigmoid')
    ])
    '''
    Alternatively, layers can be added one at a time with .add():
    model = Sequential()
    model.add(Dense(32, input_shape=(784,)))
    model.add(Activation('relu'))
    '''
    # Specifying the input data shape:
    # only the first layer needs a shape argument; later layers infer their input shapes automatically.
    '''
    Pass an input_shape keyword argument to the first layer.
    input_shape is a tuple; entries may be None, which means any positive integer is accepted at that position.
    The batch size should not be included in input_shape.
    model.add(Dense(32, input_shape=(784,)))
    Some 2D layers, such as Dense, can instead specify the input shape implicitly via input_dim.
    Some 3D temporal layers accept input_dim and input_length to specify the input shape.
    model.add(Dense(32, input_dim=784))
    If a fixed batch size is required (common for stateful RNNs),
    pass a batch_size argument to a layer.
    For example, for an input tensor with batch size 32 and data shape (6, 8), pass batch_size=32 and input_shape=(6, 8).
    '''
    # Compile the model
    model.compile(optimizer='rmsprop',loss='binary_crossentropy',metrics=['accuracy'])
    # Create a dummy training set
    data = np.random.random((1000,100))
    labels = np.random.randint(2, size=(1000,1))
    # Train the model for 10 epochs, iterating on the data in batches of 32 samples
    model.fit(data, labels, epochs=10, batch_size=32)
def categoricalClassification():
'''For a single-input model with 10 classes (categorical classification)'''
    # Build the model
    model = Sequential()
    model.add(Dense(32, activation='relu', input_dim=100))
    model.add(Dense(10, activation='softmax'))
    # Compile the model
    model.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    # Create a dummy data set
    data = np.random.random((1000,100))
    labels = np.random.randint(10,size=(1000,1))
    # Convert the labels to one-hot encoding
    one_hot_labels = keras.utils.to_categorical(labels,num_classes=10)
    # Train the model for 10 epochs, iterating on the data in batches of 32 samples
    model.fit(data, one_hot_labels, epochs=10, batch_size=32)
binaryClassification()
#categoricalClassification()
```
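For the fixed-batch-size case mentioned in the comments above (stateful RNNs), a minimal sketch; the layer sizes, batch size, and sample shape are illustrative assumptions, not values used by the original script:
```python
from keras.models import Sequential
from keras.layers import LSTM, Dense

# A stateful RNN needs a fixed batch size: here a batch of 32, each sample of shape (6, 8).
statefulModel = Sequential()
statefulModel.add(LSTM(16, batch_input_shape=(32, 6, 8), stateful=True))
statefulModel.add(Dense(1))
statefulModel.compile(optimizer='rmsprop', loss='mse')
```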
#### File: ML_DL_aiPlayground/RNN/predicting_by_rawdata.py
```python
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.layers import Dropout
from keras.layers import Embedding
from keras.layers import LSTM
import numpy as np
import subprocess
def main():
stock_data = load_stock_trading_records("60-2016-01-01.txt")
stock_code = get_stock_code(stock_data)
for key in stock_code:
model = train(stock_data, key)
length = stock_data[:,0].size
today = predict(model, stock_data[length - 60:, 1:7])
print(today,"\n")
break
#print "testing!\n"
def load_stock_trading_records(filename):
subprocess.call(["perl", "./switch.pl"])
linetype = np.dtype({
'names':['StockCode', 'YestClose', 'TodayOpen', 'TodayHigh', 'TodayLow', 'TodayClose', 'TodayAvg',
'UpDownValue', 'UpDownRate', 'TradeSharesRate', 'TradeShares', 'Turnover',
'TradeCount', 'Amplitude', 'SuspendDays', 'PE', 'TradeDays'],
'formats':['i', 'f', 'f', 'f', 'f', 'f', 'f',
'f', 'f', 'f', 'i', 'i',
'i', 'i', 'i', 'S32', 'i']})
f = file("test_data")
f.readline()
stock_data = np.loadtxt(f, delimiter=",")
f.close()
return stock_data
def get_stock_code(stock_data):
stockcode = np.unique(stock_data[:,0])
return stockcode
def train(stock_data, stock_code):
origin_training_data = stock_data[stock_data[:,0] == stock_code]
data_dim = 6
timesteps = 60
length = origin_training_data[:,0].size
b = np.array([], dtype = np.int32)
for i in range(length - timesteps):
a = range(i,i+timesteps,1)
b = np.concatenate([b,a])
test_origin = origin_training_data[b,1:7]
test = test_origin.reshape((length - timesteps, timesteps, 6))
labels_origin = origin_training_data[timesteps:,1:7]
labels = labels_origin.reshape(length - timesteps, 6)
# expected input data shape: (batch_size, timesteps, data_dim)
model = Sequential()
model.add(LSTM(32, return_sequences=True, input_shape=(timesteps, data_dim))) # returns a sequence of vectors of dimension 32
model.add(LSTM(32, return_sequences=True)) # returns a sequence of vectors of dimension 32
model.add(Dropout(0.8))
model.add(LSTM(32)) # return a single vector of dimension 32
model.add(Dropout(0.8))
model.add(Dense(6, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
model.fit(test, labels, epochs=10, batch_size=32)
return model
def predict(model, stock_data):
inputdata = stock_data.reshape(1, 60, 6)
result = model.predict(inputdata)
# result = stock_data[-1]
return result
if __name__ == '__main__':
main()
```
|
{
"source": "jercas/MLiA_Learning_Code",
"score": 3
}
|
#### File: machinelearninginaction/Ch09/treeExplore.py
```python
from numpy import *
from Tkinter import *
import regTrees
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
def reDraw(tolS,tolN):
reDraw.f.clf() # clear the figure
reDraw.a = reDraw.f.add_subplot(111)
if chkBtnVar.get():
if tolN < 2: tolN = 2
myTree=regTrees.createTree(reDraw.rawDat, regTrees.modelLeaf,\
regTrees.modelErr, (tolS,tolN))
yHat = regTrees.createForeCast(myTree, reDraw.testDat, \
regTrees.modelTreeEval)
else:
myTree=regTrees.createTree(reDraw.rawDat, ops=(tolS,tolN))
yHat = regTrees.createForeCast(myTree, reDraw.testDat)
reDraw.a.scatter(reDraw.rawDat[:,0], reDraw.rawDat[:,1], s=5) #use scatter for data set
reDraw.a.plot(reDraw.testDat, yHat, linewidth=2.0) #use plot for yHat
reDraw.canvas.show()
def getInputs():
try: tolN = int(tolNentry.get())
except:
tolN = 10
print "enter Integer for tolN"
tolNentry.delete(0, END)
tolNentry.insert(0,'10')
try: tolS = float(tolSentry.get())
except:
tolS = 1.0
print "enter Float for tolS"
tolSentry.delete(0, END)
tolSentry.insert(0,'1.0')
return tolN,tolS
def drawNewTree():
tolN,tolS = getInputs()#get values from Entry boxes
reDraw(tolS,tolN)
root=Tk()
reDraw.f = Figure(figsize=(5,4), dpi=100) #create canvas
reDraw.canvas = FigureCanvasTkAgg(reDraw.f, master=root)
reDraw.canvas.show()
reDraw.canvas.get_tk_widget().grid(row=0, columnspan=3)
Label(root, text="tolN").grid(row=1, column=0)
tolNentry = Entry(root)
tolNentry.grid(row=1, column=1)
tolNentry.insert(0,'10')
Label(root, text="tolS").grid(row=2, column=0)
tolSentry = Entry(root)
tolSentry.grid(row=2, column=1)
tolSentry.insert(0,'1.0')
Button(root, text="ReDraw", command=drawNewTree).grid(row=1, column=2, rowspan=3)
chkBtnVar = IntVar()
chkBtn = Checkbutton(root, text="Model Tree", variable = chkBtnVar)
chkBtn.grid(row=3, column=0, columnspan=2)
reDraw.rawDat = mat(regTrees.loadDataSet('sine.txt'))
reDraw.testDat = arange(min(reDraw.rawDat[:,0]),max(reDraw.rawDat[:,0]),0.01)
reDraw(1.0, 10)
root.mainloop()
```
#### File: machinelearninginaction/Ch13/pca.py
```python
from numpy import *
def loadDataSet(fileName, delim='\t'):
fr = open(fileName)
stringArr = [line.strip().split(delim) for line in fr.readlines()]
datArr = [map(float,line) for line in stringArr]
return mat(datArr)
def pca(dataMat, topNfeat=9999999):
meanVals = mean(dataMat, axis=0)
meanRemoved = dataMat - meanVals #remove mean
covMat = cov(meanRemoved, rowvar=0)
eigVals,eigVects = linalg.eig(mat(covMat))
eigValInd = argsort(eigVals) #sort, sort goes smallest to largest
eigValInd = eigValInd[:-(topNfeat+1):-1] #cut off unwanted dimensions
redEigVects = eigVects[:,eigValInd] #reorganize eig vects largest to smallest
lowDDataMat = meanRemoved * redEigVects#transform data into new dimensions
reconMat = (lowDDataMat * redEigVects.T) + meanVals
return lowDDataMat, reconMat
def replaceNanWithMean():
datMat = loadDataSet('secom.data', ' ')
numFeat = shape(datMat)[1]
for i in range(numFeat):
meanVal = mean(datMat[nonzero(~isnan(datMat[:,i].A))[0],i]) #values that are not NaN (a number)
datMat[nonzero(isnan(datMat[:,i].A))[0],i] = meanVal #set NaN values to mean
return datMat
```
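A short usage sketch for the helpers above; 'testSet.txt' stands in for whatever two-column data file is available and is not bundled with this snippet:
```python
# Reduce a data set to its first principal component and compare the shapes.
dataMat = loadDataSet('testSet.txt')
lowDMat, reconMat = pca(dataMat, topNfeat=1)
print(shape(dataMat), '->', shape(lowDMat))   # e.g. (1000, 2) -> (1000, 1)
```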
#### File: selfImplementation/Ch05_logisticRegression/applyMain.py
```python
import numpy as np
from sigmoid import sigmoid
from gradAscent import stochasticGradAscent1
def classifyVector(trainingX, weights):
"""
distinguish different predict label by hypothesis output
Args:
trainingX: training data set
weights: optima weights/thetas
Returns:
        1.0: positive prediction (hypothesis output greater than 0.5)
        0.0: negative prediction
"""
# hypothesis function: h(θ)=g(Xθ)
prob = sigmoid(sum(trainingX * weights))
if prob > 0.5:
return 1.0
else:
return 0.0
def colicTest():
"""
open file and process data, then calculate predict error rate
Args:
Returns:
errorRate: learning model predict error rate
"""
# get data set, compose input data set
trainData = open('horseColicTraining.txt')
testData = open('horseColicTest.txt')
trainingSet = []
trainingLabels = []
# data preprocessing
# traverse all of the training examples
for line in trainData.readlines():
# process data format
currLine = line.strip().split('\t')
lineArr = []
# traverse all of the features in one examples
for i in range(21):
lineArr.append(float(currLine[i]))
# columns 0-20 : features
trainingSet.append(lineArr)
# column 21 : label
trainingLabels.append(float(currLine[21]))
# training phase
# data process over, then training model get the optimum
trainWeights = stochasticGradAscent1(np.array(trainingSet), trainingLabels, 500)
# error counter , testing data amount
errorCount = 0
numTest = 0.0
# Testing phase
for line in testData.readlines():
numTest += 1.0
currLine = line.strip().split('\t')
lineArr = []
# testing input data
for i in range(21):
lineArr.append(float(currLine[i]))
# error judgement
if int(classifyVector(np.array(lineArr), trainWeights)) != int(currLine[21]):
# predict error! , Count plus one
errorCount += 1
# calculate error rate
errorRate = (float(errorCount) / numTest)
print('The error rate of this test is:{0}'.format(errorRate))
return errorRate
def multiTest():
"""
calculate average error rate
"""
numTests = 10
errorSum = 0.0
for i in range(numTests):
errorSum += colicTest()
print('after {0} iterations the average error rate is: {1}'.format(numTests, errorSum/float(numTests)))
def main():
multiTest()
if __name__ == '__main__':
main()
```
#### File: selfImplementation/Ch05_logisticRegression/gradAscent.py
```python
import numpy as np
import random
from sigmoid import sigmoid
def batchGradAscent(dataMatIn, labelMatIn):
"""
    batch gradient ascent algorithm (traverses the whole training data set in each iteration)
-- find the optima of weights/thetas
Args:
dataMatIn: training/input data
labelMatIn: label/actual result/output data
Returns:
weights: optima weights/thetas
"""
# transform train data list into matrix
dataMatrix = np.mat(dataMatIn)
# transform label data list into matrix
labelVector = np.mat(labelMatIn).transpose()
# m: number of input examples
# n: number of features
m,n = np.shape(dataMatrix)
    # parameter settings
    # learning rate
alpha = 0.001
# maximum iterations
maxCycles = 500
# weights / thetas
weights = np.ones((n,1))
    # start gradient ascent
for i in range(maxCycles):
# hypothesis function: h(θ)=g(Xθ)
h = sigmoid(dataMatrix * weights)
        # prediction error: error = y - h
error = labelVector - h
        # gradient ascent: θ = θ + α*XT*(y - g(Xθ))
        #                    = θ + α*XT*(y - h)
        #                    = θ + α*XT*error   (any 1/m factor is absorbed into the learning rate α)
weights = weights + alpha * dataMatrix.transpose() * error
return weights
def stochasticGradAscent0(dataMatIn, labelMatIn):
"""
    stochastic gradient ascent algorithm (updates the weights one training example at a time)
-- find the optima of weights/thetas
Args:
dataMatIn: training/input data
labelMatIn: label/actual result/output data
Returns:
weights: optima weights/thetas
"""
# transform data type for calculate
dataMatIn = np.array(dataMatIn,dtype='float64')
labelMatIn = np.array(labelMatIn,dtype='float64')
m,n = np.shape(dataMatIn)
alpha = 0.01
weights = np.ones(n)
    # unlike BGA, the SGA algorithm uses scalar computations on a single example instead of matrix computations
for i in range(m):
h = sigmoid(sum(dataMatIn[i] * weights))
error = labelMatIn[i] - h
weights = weights + alpha * error * dataMatIn[i]
return weights
def stochasticGradAscent1(dataMatIn, labelMatIn, numIter=150):
"""
    improved stochastic gradient ascent algorithm (updates the weights on one randomly chosen training example at a time)
    with a dynamically decreasing learning rate alpha
-- find the optima of weights/thetas
Args:
dataMatIn: training/input data
labelMatIn: label/actual result/output data
numIter: training iteration
Returns:
weights: optima weights/thetas
"""
dataMatIn = np.array(dataMatIn,dtype='float64')
labelMatIn = np.array(labelMatIn,dtype='float64')
m,n = np.shape(dataMatIn)
weights = np.ones(n)
    # start gradient ascent
for j in range(numIter):
# update all of the features
dataIndex = range(m)
for i in range(m):
# dynamic change alpha value every iteration, at least = 0.01
alpha = 4/(1.0 + j + i) + 0.01
# change stochastic step
randIndex = int(random.uniform(0, len(dataIndex)))
# here we changed all the i to randIndex
h = sigmoid(sum(dataMatIn[randIndex] * weights))
error = labelMatIn[randIndex] - h
weights = weights + alpha * error * dataMatIn[randIndex]
return weights
```
#### File: selfImplementation/Ch05_logisticRegression/testMain.py
```python
from gradAscent import batchGradAscent,stochasticGradAscent0, stochasticGradAscent1
from plot import plot
from loadDataSet import loadDataSet
def main():
dataSet, labelSet = loadDataSet()
weights = batchGradAscent(dataSet, labelSet)
plot(weights)
weights = stochasticGradAscent0(dataSet, labelSet)
plot(weights)
weights = stochasticGradAscent1(dataSet, labelSet)
plot(weights)
if __name__ == '__main__':
main()
```
|
{
"source": "jercas/offer66-leetcode-newcode",
"score": 3
}
|
#### File: offer66-leetcode-newcode/toTheMoon/algorithm_quickSort.py
```python
"""
Basic sorting algorithm: quick sort.
Best case: the input is already in ascending order, O(nlogn), e.g. 1 2 3 4 5 6 7; with the first element as the pivot the number of swaps is minimal (note that only the number of swaps drops, the number of comparisons does not).
Worst case: the input is in descending order, O(n^2), e.g. 7 6 5 4 3 2 1; in that case the algorithm degenerates into bubble sort.
"""
def quick_sort1(data):
"""快速排序"""
if len(data) >= 2: # 递归入口及出口
mid = data[0] # 选取基准值,也可以选取第一个或最后一个元素
left, right = [], [] # 定义基准值左右两侧的列表
data.remove(mid) # 从原始数组中移除基准值
for num in data:
if num >= mid:
right.append(num)
else:
left.append(num)
return quick_sort1(left) + [mid] + quick_sort1(right)
else:
return data
def quick_sort2(data, left, right):
"""快速排序"""
# 如果start和end碰头了,说明要我排的这个子数列就剩下一个数了,就不用排序了
if not left < right:
return
mid = data[left] # 拿出第一个数当作基准数mid
low = left # low来标记左侧从基准数始找比mid大的数的位置
high = right # high来标记右侧end向左找比mid小的数的位置
# 循环,只要low和high没有碰头就一直进行,当low和high相等说明碰头了
while low < high:
# 从high开始向左,找到第一个比mid小或者等于mid的数,标记位置,(如果high的数比mid大,我们就左移high)
# 并且要确定找到之前,如果low和high碰头了,也不找了
while low < high and data[high] > mid:
high -= 1
# 跳出while后,high所在的下标就是找到的右侧比mid小的数
# 把找到的数放到左侧的空位 low 标记了这个空位
data[low] = data[high]
# 从low开始向右,找到第一个比mid大的数,标记位置,(如果low的数小于等于mid,我们就右移low)
# 并且我们要确定找到之前,如果low和high碰头了,也不找了
while low < high and data[low] <= mid:
low += 1
# 跳出while循环后low所在的下标就是左侧比mid大的数所在位置
# 我们把找到的数放在右侧空位上,high标记了这个空位
data[high] = data[low]
# 以上我们完成了一次 从右侧找到一个小数移到左侧,从左侧找到一个大数移动到右侧
# 当这个while跳出来之后相当于low和high碰头了,我们把mid所在位置放在这个空位
data[low] = mid
# 这个时候mid左侧看的数都比mid小,mid右侧的数都比mid大
# 然后我们对mid左侧所有数进行上述的排序
quick_sort2(data, left, low - 1)
# 我们mid右侧所有数进行上述排序
quick_sort2(data, low + 1, right)
if __name__ == '__main__':
    array = [2,3,5,7,1,4,6,15,5,2,7,9,10,15,9,17,12]
    q1, q2 = array.copy(), array.copy()
    print("Before sorting -> ", array)
    print("After sorting  -> ", quick_sort1(q1), '\n', quick_sort2(q2, 0, len(array)-1), q2)
```
#### File: offer66-leetcode-newcode/toTheMoon/leetcode_002_AddTwoNumbers.py
```python
"""
leetcode-2: Add Two Numbers  MEDIUM
'linked list' 'two pointers'
You are given two non-empty linked lists representing two non-negative integers. The digits are stored in reverse order and each node holds a single digit.
Add the two numbers and return the sum as a new linked list.
You may assume the two numbers do not contain any leading zero, except the number 0 itself.
Input:  (2 -> 4 -> 3) + (5 -> 6 -> 4)
Output: 7 -> 0 -> 8
Explanation: 342 + 465 = 807
"""
"""
Thinking:
1. Compared with leetcode no.1 Two Sum: Two Sum works on an array while this problem works on linked lists, so the traversal differs; Two Sum is a lookup problem
   (use target - num1 to look up num2, fastest with a hash map, binary search is also an option), whereas this one is linked-list manipulation plus edge cases --
   both the carry handling and operands of different lengths need care.
2. When adding digit by digit, track the carry -- a variable must record whether the current addition produces a carry and whether the previous one did.
3. While traversing both lists, handle operands of different lengths: two digits + one digit, one digit + None, None + None, and so on.
4. Carry out of the highest digit: after the loop, if the last addition produced a carry, append a new highest node ListNode(1).
5. Small details: (1) use integer division // to obtain the carry instead of something clumsy like x+y+c-10; (2) likewise use % 10 to build the new node value.
Test cases to consider: 10+210, None+10, 99+1, 23+5, None+None
"""
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def addTwoNumbers(self, l1, l2):
"""
:type l1: ListNode
:type l2: ListNode
:rtype: ListNode
        Time complexity:  O(max(m, n)), where m and n are the lengths of l1 and l2; 80ms, beats 73.95%
        Space complexity: O(max(m, n)), the extra list is at most max(m, n) + 1 nodes long; 11.8MB, beats 29.46%
"""
# 允许使用额外空间
head = ListNode(0)
# 保存首节点位置,方便返回结果
res = head
# 初始化进位
carry = 0
while l1 != None or l2 != None:
# 获取本位的值,如果某一数长度将其记为0
x = l1.val if l1 else 0
y = l2.val if l2 else 0
# 计算结果,注意附加上进位carry
count = carry + x + y
# 进位结果,整除获取
carry = count // 10
# 保存结果,创建新节点
head.next = ListNode(count % 10)
# 移动各指针
head = head.next
# 对俩加数链表判空,针对高位加低位数的情况
if l1 != None:
l1 = l1.next
if l2 != None:
l2 = l2.next
# 遍历完俩加数指针,如果最高位出现进位结果,需要额外添加
if carry > 0:
head.next = ListNode(1)
return res.next
```
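A small usage sketch exercising `Solution.addTwoNumbers` on the example from the problem statement, assuming `ListNode` and `Solution` above are in scope; the helper names `build_list`/`to_digits` are illustrative, not part of the original file:
```python
def build_list(digits):
    # Build a linked list from a list of digits (least-significant digit first).
    head = ListNode(0)
    cur = head
    for d in digits:
        cur.next = ListNode(d)
        cur = cur.next
    return head.next

def to_digits(node):
    # Collect a linked list back into a plain list of digits.
    out = []
    while node:
        out.append(node.val)
        node = node.next
    return out

result = Solution().addTwoNumbers(build_list([2, 4, 3]), build_list([5, 6, 4]))
print(to_digits(result))  # expected [7, 0, 8], i.e. 342 + 465 = 807
```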
#### File: offer66-leetcode-newcode/toTheMoon/leetcode_005_LongestPalindromicSubstring.py
```python
"""
leetcode-5: Longest Palindromic Substring  MEDIUM
'string' 'dynamic programming'
Given a string s, find the longest palindromic substring in s. You may assume the maximum length of s is 1000.
"""
class Solution:
def longestPalindrome(self, s: str) -> str:
n = len(s)
maxl, start = 0, 0
for i in range(n):
# s[i-maxl-1:i+1] == s[i-maxl-1:i+1][::-1] palindromic substring, left= i - maxl -1, right = i+1
if i - maxl >= 1 and s[i-maxl-1 : i+1] == s[i-maxl-1 : i+1][::-1]:
start = i - maxl - 1
maxl += 2
continue
if i - maxl >= 0 and s[i-maxl : i+1] == s[i-maxl : i+1][::-1]:
start = i - maxl
maxl += 1
return s[start: start + maxl]
if __name__ == "__main__":
Q = ["babad", "cbbd"]
A = ["bab", "bb"]
solution = Solution()
for i in range(2):
if A[i] == solution.longestPalindrome(Q[i]):
print('\n', Q[i], "-->", A[i])
print('AC')
```
#### File: offer66-leetcode-newcode/toTheMoon/leetcode_011_ContainerWithMostWater.py
```python
"""
leetcode-11: Container With Most Water  MEDIUM
'math' 'two pointers'
Given n non-negative integers a1, a2, ..., an, where each represents a point (i, ai), n vertical lines are drawn such that line i has endpoints (i, ai) and (i, 0).
Find two lines that, together with the x-axis, form a container that holds the most water.
Note: you may not slant the container, and n is at least 2.
"""
"""
Thinking:
0. Two-pointer sweep: for equal wall heights the wider base always holds more water, so start one pointer at each end, compute the current volume, and keep a running maximum;
   by the shortest-plank principle the volume is the shorter wall height times the width, so after each comparison move the pointer at the shorter wall inward (only a taller wall can improve the result);
   when the two pointers meet, the scan is done -- exit the loop and return the maximum volume (the loop condition is the same idea as in binary search).
"""
class Solution(object):
def maxArea(self, height):
"""
:type height: List[int]
:rtype: int
        Time complexity:  O(n), a single two-pointer pass; 132ms, beats 99.62%
        Space complexity: O(1), no extra space used; 13MB, beats 39.75%
"""
maxV = 0
l = 0
r = len(height) - 1
while l < r:
maxV = max(maxV, min(height[l], height[r]) * (r - l))
if height[l] < height[r]:
l += 1
else:
r -= 1
return maxV
if __name__ == "__main__":
Q = [1,8,6,2,5,4,8,3,7]
A = 49
solution = Solution()
if solution.maxArea(Q) == A:
print("The most water of the container {0} is {1}".format(Q, A))
print("AC")
```
#### File: offer66-leetcode-newcode/toTheMoon/leetcode_015_ThreeSum.py
```python
"""
leetcode-15: 3Sum  MEDIUM
'array' 'two pointers'
Given an array nums of n integers, are there elements a, b, c in nums such that a + b + c = 0? Find all unique triplets in the array that sum to zero.
Note: the solution set must not contain duplicate triplets.
"""
"""
Thinking:
1. Brute force with three nested loops is O(n^3) and simply times out.
2. Borrow the hash map idea from Two Sum to get O(n^2): fix two numbers and look the third one up in a hash map.
3. Use two pointers: fix only one number and search for the other two with a + b = -c, turning three-sum into two numbers summing to a target;
   sort the array first so duplicate results can be handled, and skip repeated values while iterating.
"""
class Solution(object):
def threeSum1(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
        Time complexity:  O(n^3), times out
        Space complexity: O(1), no extra space used
"""
nums.sort()
n = len(nums)
res = []
for i in range(n - 2):
for j in range(i + 1, n - 1):
for k in range(j + 1, n):
if nums[i] + nums[j] + nums[k] == 0:
res.append([nums[i], nums[j], nums[k]])
res = list(set([tuple(t) for t in res]))
res = [list(v) for v in res]
return res
def threeSum2(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
        Time complexity:  O(n^2), still times out
        Space complexity: O(n), a hash map trades space for time
"""
nums.sort()
n = len(nums)
res = []
for i in range(n - 2):
hashmap = {}
for j in range(i + 1, n - 1):
another = 0 - nums[i] - nums[j]
if another in hashmap:
r = [nums[i], nums[j], nums[hashmap[another]]]
res.append(r)
hashmap[nums[j]] = j
return res
def threeSum3(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
        Time complexity:  O(n^2) -- sorting plus one two-pointer scan per anchor element; 524ms, beats 92.02%
        Space complexity: O(1), no extra space used; 14.8MB, beats 83.17%
"""
nums.sort()
n = len(nums)
res = []
for i in range(n - 2):
            # The array is sorted, so if the anchor value is already greater than 0 everything after it is too and no triplet can sum to 0 -- break out of the loop.
if nums[i] > 0:
break
            # Because the array is sorted, an anchor equal to the previous one would only produce the same triplets again -- skip this iteration.
if i > 0 and nums[i] == nums[i - 1]:
continue
            # Fix one number and search for the other two with a pointer moving in from each end.
l, r = i + 1, n - 1
while l < r:
cur = nums[l] + nums[r] + nums[i]
if cur == 0:
res.append([nums[i], nums[l], nums[r]])
l += 1
r -= 1
while l < r and nums[l] == nums[l - 1]:
l += 1
while l < r and nums[r] == nums[r + 1]:
r -= 1
elif cur < 0:
l += 1
else:
r -= 1
return res
if __name__ == "__main__":
Q = [-1, 0, 1, 2, -1, -4]
A = [[-1, -1, 2], [-1, 0, 1]]
solution = Solution()
if solution.threeSum1(Q) == A and solution.threeSum3(Q) == A:
print("In {0}, the sum of {1} -> 0".format(Q, A))
print("AC")
```
#### File: offer66-leetcode-newcode/toTheMoon/leetcode_017_LetterCombinationsofaPhoneNumber.py
```python
"""
leetcode-17: Letter Combinations of a Phone Number  MEDIUM
'string' 'backtracking'
Given a string containing only digits 2-9, return all possible letter combinations the number could represent.
The digit-to-letter mapping is the same as on a telephone keypad. Note that 1 does not map to any letters.
Tips:
Although the answers above are listed in lexicographic order, the output may be in any order.
"""
"""
Thinking:
1. Backtracking finds all solutions by exhaustively exploring every candidate.
   If a candidate turns out not to be a valid solution, backtracking discards it, undoes some earlier steps, and tries again.
   Define the backtracking function backtrack(combination, next_digits), which takes the combination built so far and the digits still to be processed.
   If there are no more digits to process, the current combination is complete.
   If there are still digits: iterate over all letters mapped to the next digit,
   append the current letter to the combination, i.e. combination = combination + letter,
   and repeat with the remaining digits: backtrack(combination + letter, next_digits[1:]).
2. A non-recursive variant that builds the combinations with nested loops.
"""
class Solution(object):
num2char = {'2': ['a', 'b', 'c'],
'3': ['d', 'e', 'f'],
'4': ['g', 'h', 'i'],
'5': ['j', 'k', 'l'],
'6': ['m', 'n', 'o'],
'7': ['p', 'q', 'r', 's'],
'8': ['t', 'u', 'v'],
'9': ['w', 'x', 'y', 'z']}
def letterCombinations1(self, digits):
"""
:type digits: str
:rtype: List[str]
        Time complexity:  O(3^n * 4^m) for n digits with 3 letters and m digits with 4 letters, backtracking; 20ms, beats 92.24%
        Space complexity: O(3^n * 4^m) to hold the result list; 11.7MB, beats 30.54%
"""
def backtrack(combination, next_digits):
# if there is no more digits to check
if len(next_digits) == 0:
# the combination is done
res.append(combination)
# if there are still digits to check
else:
# iterate over all letters which map the next available digit
for letter in self.num2char[next_digits[0]]:
# append the current letter to the combination and proceed to the next digits
backtrack(combination + letter, next_digits[1:])
res = []
if digits:
backtrack("", digits)
return res
def letterCombinations2(self, digits):
"""
:type digits: str
:rtype: List[str]
        Time complexity:  O(3^n * 4^m), iterative combination building; 20ms, beats 92.24%
        Space complexity: O(3^n * 4^m) to hold the result list; 11.7MB, beats 33.46%
"""
if not digits:
return []
res = [""]
for num in digits:
next_res = []
for alp in self.num2char[num]:
for tmp in res:
next_res.append(tmp + alp)
res = next_res
return res
if __name__ == '__main__':
Q = '23'
A1 = ["ad", "ae", "af", "bd", "be", "bf", "cd", "ce", "cf"]
A2 = ['ad', 'bd', 'cd', 'ae', 'be', 'ce', 'af', 'bf', 'cf']
solution = Solution()
if solution.letterCombinations1(Q) == A1 and solution.letterCombinations2(Q) == A2:
print("Input: {0}; Output: {1}".format(Q, A1))
print('AC')
```
#### File: offer66-leetcode-newcode/toTheMoon/leetcode_078_Subsets.py
```python
"""
leetcode-78: Subsets  MEDIUM
'array' 'backtracking'
Given a set of distinct integers nums, return all possible subsets (the power set).
Hint:
The solution set must not contain duplicate subsets.
"""
class Solution(object):
def subsets(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
if len(nums) == 0:
return [[]]
res = self.subsets(nums[:-1])
res += [a + [nums[-1]] for a in res]
#print(ans, '\n')
return res
if __name__ == "__main__":
Q = [1, 2, 3]
A = [[3],[1],[2],[1,2,3],[1,3],[2,3],[1,2],[]]
solution = Solution()
ans = solution.subsets(Q)
for i in A:
if i in ans:
print(i)
print('AC')
```
#### File: offer66-leetcode-newcode/toTheMoon/leetcode_080_RemoveDuplicatesfromSortedArray II.py
```python
"""
leetcode-80: Remove Duplicates from Sorted Array II  MEDIUM
'array' 'two pointers'
Given a sorted array, remove the duplicates in place such that each element appears at most twice, and return the new length.
Do not allocate extra space for another array; modify the input array in place using O(1) extra memory.
"""
class Solution:
def removeDuplicates(self, nums: 'List[int]') -> int:
i = 0
for n in nums:
if i < 2 or n != nums[i - 2]:
nums[i] = n
i += 1
return i
if __name__ == "__main__":
Q1, Q2 = [1,1,1,2,2,3], [0,0,1,1,1,1,2,3,3]
A11, A12 = 5, 7
A21, A22 = [1, 1, 2, 2, 3], [0,0,1,1,1,1,2,3,3]
solution = Solution()
if A11 == solution.removeDuplicates(Q1):
print('equal')
for i in range(A11):
Q1[i] == A21[i]
print('match')
if A12 == solution.removeDuplicates(Q2):
print('equal')
for i in range(A12):
Q2[i] == A22[i]
print('match')
print("AC")
```
#### File: offer66-leetcode-newcode/toTheMoon/leetcode_121_BestTimetoBuyandSellStock.py
```python
"""
leetcode-121: Best Time to Buy and Sell Stock  EASY
'dynamic programming' 'array'
Given an array in which the i-th element is the price of a given stock on day i,
design an algorithm to find the maximum profit when at most one transaction is allowed (buy one share and sell it later).
Note that you cannot sell a stock before you buy one.
"""
import sys
class Solution:
def maxProfit(self, prices):
"""
:type prices: List[int]
:rtype: int
"""
if len(prices) <= 0:
return 0
minPrice = sys.maxsize
profit = 0
for price in prices:
minPrice = min(minPrice, price)
profit = max(profit, price - minPrice)
return profit
if __name__ == "__main__":
Q = [[7,1,5,3,6,4], [7,6,4,3,1]]
A = [5, 0]
solution = Solution()
for i in range(2):
if solution.maxProfit(Q[i]) == A[i]:
print("{0} ==> Max Profit:{1}".format(Q[i],A[i]))
print("AC")
```
#### File: offer66-leetcode-newcode/toTheMoon/leetcode_152_MaximumProductSubarray.py
```python
"""
leetcode-152: Maximum Product Subarray  MEDIUM
'array' 'dynamic programming'
Given an integer array nums, find the contiguous subarray (containing at least one number) that has the largest product.
Return the maximum product.
"""
import sys
class Solution:
def maxProduct1(self, nums: 'List[int]') -> int:
rev = nums[::-1]
for i in range(1, len(nums)):
nums[i] *= nums[i - 1] or 1
rev[i] *= rev[i - 1] or 1
print(nums, "+", rev)
return max(max(nums), max(rev))
def maxProduct2(self, nums: 'List[int]') -> int:
ans = -sys.maxsize
imax, imin = 1, 1
for i in range(len(nums)):
if nums[i] < 0:
tmp = imax
imax = imin
imin = tmp
imax = max(imax*nums[i], nums[i])
imin = min(imin*nums[i], nums[i])
print(nums[i], '-->', 'imax', imax, 'imin', imin)
ans = max(ans, imax)
return ans
if __name__ == "__main__":
Q = [[2,3,-2,4], [-2, 0, -1], [2, -5, -2, -4, 3]]
A = [6, 0, 24]
solution = Solution()
for i in range(3):
if A[i] == solution.maxProduct2(Q[i]) and A[i] == solution.maxProduct1(Q[i]):
Q = [[2, 3, -2, 4], [-2, 0, -1], [2, -5, -2, -4, 3]]
print(Q[i], "-->", A[i])
print('AC\n')
```
#### File: offer66-leetcode-newcode/toTheMoon/offer66_10_2_jumpFloor.py
```python
"""
offer66-10.2
'dynamic programming'
A frog can jump up either 1 step or 2 steps at a time. How many distinct ways can it jump up a staircase of n steps?
(Different orders of jumps count as different ways.)
"""
class Solution:
def jumpFloor(self, n):
fib = [1 ,1]
if n <= 1:
return fib[n]
for i in range(2, n+1):
t = fib[0] + fib[1]
fib[0] = fib[1]
fib[1] = t
return fib[1]
if __name__ == "__main__":
Q = 5
A = 8
solution = Solution()
if A == solution.jumpFloor(Q):
print("AC")
```
#### File: offer66-leetcode-newcode/toTheMoon/offer66_10_Fibonacci.py
```python
"""
offer66-10
'dynamic programming'
Everyone knows the Fibonacci sequence. Given an integer n, output the n-th term of the Fibonacci sequence
(counting from 0, with term 0 equal to 0).
"""
class Solution:
def Fibonacci(self, n):
# write code here
a, b = 0, 1
fib = [a ,b]
if n <= 1:
return fib[n]
for i in range(2, n+1):
fib.append(a + b)
a = fib[-2]
b = fib[-1]
return fib[-1]
if __name__ == "__main__":
Q = [1,2,3,4,5]
A = [1,1,2,3,5]
solution = Solution()
for i in range(len(Q)):
if A[i] == solution.Fibonacci(Q[i]):
print(Q[i],"==>",solution.Fibonacci(Q[i]))
print("AC")
```
|
{
"source": "jercas/PythonCrashCourse",
"score": 3
}
|
#### File: PythonCrashCourse/LearningCode/3_1_name.py
```python
def showname():
names = ['jer','cas','ety']
for name in names:
print(name.title())
showname()
```
#### File: PythonCrashCourse/LearningCode/8_13_userProfile.py
```python
def buildProfile(firstName,lastName,**userInfo):
profile={}
profile['firstName'] = firstName
profile['lastName'] = lastName
for key,value in userInfo.items():
profile[key] = value
return profile
userProfile = buildProfile('Jercas','Ety',
hometown = 'LuoShan',
educationBackground = 'master')
print(userProfile)
```
#### File: LearningCode/8_15and8_16_printModels/printFunction.py
```python
def printIt(message):
print(message.title())
```
|
{
"source": "jercas/PythonDataVisualization",
"score": 3
}
|
#### File: jercas/PythonDataVisualization/pieChart.py
```python
import matplotlib.pyplot as plt
def main():
    # Labels for the pie wedges
    labels = ['computer science', 'foreign languages', 'analytical chemistry', 'education',
              'humanities', 'physics', 'biology', 'math and statistics', 'engineering']
    # Sizes of the pie wedges
    sizes = [21,3,7,7,8,9,10,15,19]
    # Colors of the pie wedges; matplotlib resolves string color names automatically
    colors = ['darkolivegreen', 'gold', 'lightskyblue', 'lightcoral', 'red', 'purple', '#f280de' ,'orange', 'green']
    explode = (0,0,0,0,0,0,0,0,0.1)
    fig1, ax1 = plt.subplots()
    # Call pyplot.pie() to draw the pie chart
    ax1.pie(sizes, colors=colors,labels=labels, explode=explode, autopct='%1.1f%%', pctdistance=0.5,
            labeldistance=1.1, shadow=True, startangle=180, radius=1, counterclock=False, frame=False)
    # An equal aspect ratio keeps the pie circular
    ax1.axis('equal')
    plt.show()
if __name__ == '__main__':
main()
```
|
{
"source": "jercas/PythonPlaygroud",
"score": 3
}
|
#### File: PythonPlayground/iTunesPlaylist/findDuplicates.py
```python
import plistlib
def findDuplicates(fileName):
"""查找重复曲目"""
print("Finding duplicate tracks in "+ fileName +" ...")
# 读取播放列表
# P-list文件将对象表示为字典,而播放列表文件使用的是一个字典的字典字典(值仍为一个字典);readPlist读入一个P-list文件作为输入,返回一个字典字典
plist = plistlib.readPlist(fileName)
# 从播放列表字典中获取Tracks键的值--声轨字典存为tracks
tracks = plist['Tracks']
# 创建存储重复声轨的空字典
trackNames = {}
# 循环迭代声轨字典,获取声轨名、声轨长度,trackId-key track-value(tracks是一个字典字典,而其value仍是一个字典)
for trackId,track in tracks.items():
try:
# 获取声轨字典中声轨名和声轨长度,这两个key的value
name = track['Name']
duration = track['Total Time']
# 在存储重复声轨字典中查找该声轨名是否已在其中
if name in trackNames:
# 如已在其中,则对声轨长度做二次比较,若也符合则证明重复
# //整除,将音轨长度除以1000,将秒转换为毫秒
if duration//1000 == trackNames[name][0]//1000:
# 获取重复计数count(trackNames value中的存储顺序为0:duration,1:count)
# 故用trackNames[key][index]获取对应位置的值,即下文中(duration,count)元祖中2位置的count
count = trackNames[name][1]
# 将重复次数+1,修改重复音轨字典中对应的key的value——音轨重复次数,覆盖原位置
trackNames[name] = (duration,count+1)
else:
# 第一次遇到该音轨,将(duration,count)作为元祖值存入重复声轨字典的对应key-name中
trackNames[name] = (duration,1)
except:
# 忽略未命名音轨
pass
# 将重复音轨作为一个(name,count)元祖存储在列表中
# 新建重复音轨列表dup
dups = []
# 循环迭代重复声轨字典,此时trackNames中存储的是单个播放列表中所有的歌曲名,以name-value(duration,count)对形式存储
for key,value in trackNames.items():
# 重复次数大于1,说明有重复
if value[1]>1:
# 以(重复次数,歌曲名)二元组形式,加入重复声轨列表中
dups.append((value[1],key))
# 将重复声轨信息存储到文本信息中(有重复信息的情况下)
if len(dups)>0:
print("Found "+str(len(dups))+" duplicates. Track names saved to dup.txt")
else:
print("No duplicate tracks found!")
f = open("dups.txt","w")
for val in dups:
f.write("[{0[0]}] {0[1]}\n".format(val))
f.close()
```
#### File: PythonPlayground/iTunesPlaylist/plotStats.py
```python
from matplotlib import pyplot
import plistlib
import numpy
def plotStats(fileName):
"""收集音轨的评分和时长"""
# 将传入的p-list格式播放列表解析
plist = plistlib.readPlist(fileName)
# 获取tracks字典
tracks = plist['Tracks']
# 创建两个空列表,分别保存歌曲评分和音轨时长
ratings = []
durations = []
# 遍历音轨字典
for trackId,track in tracks.items():
try:
ratings.append(track["Album Rating"])
durations.append(track["Total Time"])
except:
# 忽略未命名音轨
pass
# 检查上述数据是否收集完成
if ratings == [] or durations == []:
print("No valid Album Rating/Total Time data in "+ fileName +" ")
return
# 数据绘图部分
# 调用numpy将音轨时长数据放到32位整数数组中
x = numpy.array(durations,numpy.int32)
y = numpy.array(ratings,numpy.int32)
# 将音轨时长转换位毫秒到位,以整体数组进行操作运作到其中每个元素中
x = x/60000.0
# subplot(nrows, ncols, plot_number)参数定义为,所绘制图有 2行-1列-下一个点应绘制在第1行
pyplot.subplot(2,1,1)
# plot()以参数x,y位置创建一个点,并用o表示用圆圈来绘制数据
pyplot.plot(x,y,'o')
# axis()将x轴、y轴设置的略微大一点儿范围,以便在图和轴之间留一些空间
pyplot.axis([0,1.05*numpy.max(x),-1,110])
# xlabel()、ylabel(),为x轴、y轴设置说明文字
pyplot.xlabel('Track duration')
pyplot.ylabel('Track rating')
# 绘制柱状图
pyplot.subplot(2,1,2)
# hist()在同一张图中的第二行中,绘制时长直方图;其中bins参数设置数据分区个数,每个分区用于添加在整个范围内的计数
pyplot.hist(x,bins=20)
pyplot.xlabel('Track duration')
pyplot.ylabel('Count')
# 于窗口中绘制图形
pyplot.show()
```
|
{
"source": "jercas/RetinaTextBoxes-",
"score": 2
}
|
#### File: RetinaTextBoxes-/Pytorch/train.py
```python
from __future__ import print_function
import time
import os
import argparse
import numpy as np
import cv2
from subprocess import Popen, PIPE
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader, Dataset
from tensorboardX import SummaryWriter
from augmentations import Augmentation_traininig
from loss import FocalLoss, OHEM_loss
from retinanet import RetinaNet
from datagen import ListDataset
from encoder import DataEncoder
from torch.autograd import Variable
# Indicate visible gpu device
device_ids = [2,3,4,6]
os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(map(str, device_ids))
# Multi-semantic transform
def str2bool(v):
return v.lower() in ("yes", "y", "true", "t", "1", "Yes", "Y", "True", "T")
# Dynamic adjust lr
def adjust_learning_rate(cur_lr, optimizer, gamma, step):
lr = cur_lr * (gamma ** (step))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
# usage:
# CUDA_VISIBLE_DEVICES= python train.py --root=./DB/ --dataset=PLATE --batch_size=8 --multi_scale=True --logdir=./logs --save_folder=./models --num_workers=6 --input_size=960 --resume=./models/ckpt_40000.pth
parser = argparse.ArgumentParser(description='PyTorch RetinaTextBoxes++ Training')
parser.add_argument('--root', default='./DB/',
type=str, help='root of the dataset dir')
parser.add_argument('--lr', default=1e-3,
type=float, help='learning rate')
parser.add_argument('--input_size', default=768,
type=int, help='Input size for training')
parser.add_argument('--batch_size', default=8,
type=int, help='Batch size for training')
parser.add_argument('--num_workers', default=8,
type=int, help='Number of workers used in data loading')
parser.add_argument('--resume',
type=str, help='resume from checkpoint')
parser.add_argument('--dataset', default='ICDAR2015',
type=str, help='select training dataset')
parser.add_argument('--multi_scale', default=False,
type=str2bool, help='Use multi-scale training')
parser.add_argument('--focal_loss', default=True,
type=str2bool, help='Use Focal loss or OHEM loss')
parser.add_argument('--logdir', default='./logs/',
type=str, help='Tensorboard log dir')
parser.add_argument('--max_iter', default=40000,
type=int, help='Number of training iterations')
parser.add_argument('--gamma', default=0.5,
type=float, help='Gamma update for SGD')
parser.add_argument('--save_interval', default=5000,
type=int, help='Frequency for saving checkpoint models')
parser.add_argument('--save_folder', default='./models/',
type=str, help='Location to save checkpoint models')
parser.add_argument('--evaluation', default=False,
type=str2bool, help='Evaluation during training')
parser.add_argument('--eval_step', default=1000,
type=int, help='Evaluation step')
parser.add_argument('--eval_device', default=2,
type=int, help='GPU device for evaluation')
parser.add_argument('--cls_thresh', default=0.5,
type=int, help='classification thresh')
parser.add_argument('--nms_thresh', default=0.25,
type=int, help='nms thresh')
args = parser.parse_args()
# confirm GPU & Focal loss use
assert torch.cuda.is_available(), 'Error: CUDA not found!'
assert args.focal_loss, "OHEM + ce_loss is not working... :("
# confirm existence of the folder for saving model and log if there are not exist, if not create them.
if not os.path.exists(args.save_folder):
os.mkdir(args.save_folder)
if not os.path.exists(args.logdir):
os.mkdir(args.logdir)
# Data load
print('==> Preparing data..')
trainset = ListDataset(root=args.root, dataset=args.dataset, train=True,
transform=Augmentation_traininig, input_size=args.input_size, multi_scale=args.multi_scale)
trainloader = DataLoader(trainset, batch_size=args.batch_size,
shuffle=True, collate_fn=trainset.collate_fn, num_workers=args.num_workers)
# Set model (focal_loss vs OHEM_CE loss)
# Backbone - se-resnet50
print('==>loss initializing...\n')
if args.focal_loss:
imagenet_pretrain = 'weights/retinanet_se50.pth'
criterion = FocalLoss()
num_classes = 1
else:
imagenet_pretrain = 'weights/retinanet_se50_OHEM.pth'
criterion = OHEM_loss()
num_classes = 2
# Training Detail option
stepvalues = (10000, 20000, 30000, 40000, 50000) if args.dataset in ["SynthText"] else (2000, 4000, 6000, 8000, 10000)
best_loss = float('inf') # best test loss
start_epoch = 0 # start from epoch 0 or last epoch
iteration = 0
cur_lr = args.lr
mean=(0.485,0.456,0.406)
var=(0.229,0.224,0.225)
step_index = 0
pEval = None
# Model
print('==>network establishing...\n')
net = RetinaNet(num_classes)
net.load_state_dict(torch.load(imagenet_pretrain))
# Resume training if there are any break off
if args.resume:
print('==> Resuming from checkpoint..', args.resume)
checkpoint = torch.load(args.resume)
net.load_state_dict(checkpoint['net'])
start_epoch = checkpoint['epoch']
iteration = checkpoint['iteration']
cur_lr = checkpoint['lr']
step_index = checkpoint['step_index']
print(" net: {0}\n start_epoch: {1}\n iteration: {2}\n current_lr: {3}\n step_index: {4}\n".format(net, start_epoch, iteration, cur_lr, step_index))
#optimizer.load_state_dict(state["optimizer"])
print('==>training detail...\n')
print("multi_scale : ", args.multi_scale)
print("input_size : ", args.input_size)
print("stepvalues : ", stepvalues)
print("start_epoch : ", start_epoch)
print("iteration : ", iteration)
print("cur_lr : ", cur_lr)
print("step_index : ", step_index)
print("gpu available : ", torch.cuda.is_available())
print("num_gpus : ", torch.cuda.device_count())
# Set data parallel training
net = torch.nn.DataParallel(net, device_ids=[0,1,2,3])
net.cuda()
# Training
print("==>training start...")
net.train()
# Freeze BN layer for pre-trained backbone
net.module.freeze_bn()
# Set optimizer -- SGD or Adam
optimizer = optim.SGD(net.parameters(), lr=cur_lr, momentum=0.9, weight_decay=1e-4) #optim.Adam(net.parameters(), lr=cur_lr)
# Encode anchor to each feature maps
encoder = DataEncoder(cls_thresh=0.5, nms_thresh=0.2)
# Tensorboard visualize recorder
writer = SummaryWriter(logdir=args.logdir)
lossest = 1
save_lossest = False
t0 = time.time()
for epoch in range(start_epoch, 10000):
if iteration > args.max_iter:
break
for inputs, loc_targets, cls_targets in trainloader:
# prepare data and cls & loc label
inputs = Variable(inputs.cuda())
loc_targets = Variable(loc_targets.cuda())
cls_targets = Variable(cls_targets.cuda())
optimizer.zero_grad()
# predict result
loc_preds, cls_preds = net(inputs)
# get the loss between prediction and ground truth
loc_loss, cls_loss = criterion(loc_preds, loc_targets, cls_preds, cls_targets)
# total loss
loss = loc_loss + cls_loss
# bp
loss.backward()
# optimizing - stochastic gradient descendent
optimizer.step()
# Recording intermediate log
if iteration % 20 == 0:
t1 = time.time()
print('iter ' + repr(iteration) + ' (epoch ' + repr(epoch) + ') || loss: %.4f || loc_loss: %.4f || cls_loss: %.4f (Time : %.1f)'\
% (loss.sum().item(), loc_loss.sum().item(), cls_loss.sum().item(), (t1 - t0)))
t0 = time.time()
# record log and visualization by tensorboard
writer.add_scalar('loc_loss', loc_loss.sum().item(), iteration)
writer.add_scalar('cls_loss', cls_loss.sum().item(), iteration)
writer.add_scalar('loss', loss.sum().item(), iteration)
# show inference image in tensorboard
infer_img = np.transpose(inputs[0].cpu().numpy(), (1,2,0))
infer_img *= var
infer_img += mean
infer_img *= 255.
infer_img = np.clip(infer_img, 0, 255)
infer_img = infer_img.astype(np.uint8)
h, w, _ = infer_img.shape
boxes, labels, scores = encoder.decode(loc_preds[0], cls_preds[0], (w,h))
boxes = boxes.reshape(-1, 4, 2).astype(np.int32)
if boxes.shape[0] is not 0:
infer_img = cv2.polylines(infer_img, boxes, True, (0,255,0), 2)
writer.add_image('image', img_tensor=infer_img, global_step=iteration, dataformats='HWC')
writer.add_scalar('input_size', h, iteration)
writer.add_scalar('learning_rate', cur_lr, iteration)
t0 = time.time()
if loss.sum().item() < lossest:
lossest = loss.sum().item()
save_lossest = True
# Saving intermediate model
if iteration % args.save_interval == 0 and iteration > 0 or save_lossest == True:
print('Saving model state at iteration : ', iteration)
state = {
'net': net.module.state_dict(),
"optimizer": optimizer.state_dict(),
'iteration' : iteration,
'epoch': epoch,
'lr' : cur_lr,
'step_index' : step_index
}
model_file = "{0}/ckpt_{1}_loss_{2}.pth".format(args.save_folder, repr(iteration), lossest)
torch.save(state, model_file)
save_lossest = False
if iteration in stepvalues:
step_index += 1
cur_lr = adjust_learning_rate(cur_lr, optimizer, args.gamma, step_index)
if iteration > args.max_iter:
break
# Evaluation while training
if args.evaluation and iteration % args.eval_step == 0:
try:
if pEval is None:
print("Evaluation started at iteration {0} on {1}...".format(iteration, args.dataset))
eval_cmd = "CUDA_VISIBLE_DEVICES=" + str(args.eval_device) + \
" python eval.py" + \
" --tune_from=" + args.save_folder + 'ckpt_' + repr(iteration) + '.pth' + \
" --input_size=1024" + \
" --output_zip=result_temp1"
pEval = Popen(eval_cmd, shell=True, stdout=PIPE, stderr=PIPE)
elif pEval.poll() is not None:
(scoreString, stdErrData) = pEval.communicate()
hmean = float(str(scoreString).strip().split(":")[3].split(",")[0].split("}")[0].strip())
writer.add_scalar('test_hmean', hmean, iteration)
print("test_hmean for {}-th iter : {:.4f}".format(iteration, hmean))
if pEval is not None:
pEval.kill()
pEval = None
except Exception as e:
print("exception happened in evaluation ", e)
if pEval is not None:
pEval.kill()
pEval = None
iteration += 1
```
|
{
"source": "jerch/django-hidefield",
"score": 2
}
|
#### File: example/exampleapp/models.py
```python
from __future__ import unicode_literals
from django.db import models
from hidefield.fields import HideField
from django.utils.encoding import python_2_unicode_compatible
class HideCharField(HideField, models.CharField):
pass
class HideTextField(HideField, models.TextField):
pass
class HideForeignKey(HideField, models.ForeignKey):
pass
@python_2_unicode_compatible
class MyModel(models.Model):
name = HideCharField(max_length=32)
text = HideTextField(hide='no-data')
parent = HideForeignKey('self', blank=True, null=True, hide='data', on_delete=models.CASCADE)
def __str__(self):
return self.name
```
|
{
"source": "jerch/django-stubs",
"score": 2
}
|
#### File: mypy_django_plugin/transformers/managers.py
```python
from mypy.checker import fill_typevars
from mypy.nodes import GDEF, Decorator, FuncDef, MemberExpr, NameExpr, RefExpr, StrExpr, SymbolTableNode, TypeInfo
from mypy.plugin import ClassDefContext, DynamicClassDefContext
from mypy.types import CallableType, Instance, TypeVarType, UnboundType, get_proper_type
from mypy_django_plugin.lib import fullnames, helpers
def create_new_manager_class_from_from_queryset_method(ctx: DynamicClassDefContext) -> None:
semanal_api = helpers.get_semanal_api(ctx)
callee = ctx.call.callee
assert isinstance(callee, MemberExpr)
assert isinstance(callee.expr, RefExpr)
base_manager_info = callee.expr.node
if base_manager_info is None:
if not semanal_api.final_iteration:
semanal_api.defer()
return
assert isinstance(base_manager_info, TypeInfo)
passed_queryset = ctx.call.args[0]
assert isinstance(passed_queryset, NameExpr)
derived_queryset_fullname = passed_queryset.fullname
if derived_queryset_fullname is None:
# In some cases, due to the way the semantic analyzer works, only passed_queryset.name is available.
# But it should be analyzed again, so this isn't a problem.
return
base_manager_instance = fill_typevars(base_manager_info)
assert isinstance(base_manager_instance, Instance)
new_manager_info = semanal_api.basic_new_typeinfo(
ctx.name, basetype_or_fallback=base_manager_instance, line=ctx.call.line
)
sym = semanal_api.lookup_fully_qualified_or_none(derived_queryset_fullname)
assert sym is not None
if sym.node is None:
if not semanal_api.final_iteration:
semanal_api.defer()
else:
# inherit from Any to prevent false-positives, if queryset class cannot be resolved
new_manager_info.fallback_to_any = True
return
derived_queryset_info = sym.node
assert isinstance(derived_queryset_info, TypeInfo)
new_manager_info.line = ctx.call.line
new_manager_info.type_vars = base_manager_info.type_vars
new_manager_info.defn.type_vars = base_manager_info.defn.type_vars
new_manager_info.defn.line = ctx.call.line
new_manager_info.metaclass_type = new_manager_info.calculate_metaclass_type()
current_module = semanal_api.cur_mod_node
current_module.names[ctx.name] = SymbolTableNode(GDEF, new_manager_info, plugin_generated=True)
if len(ctx.call.args) > 1:
expr = ctx.call.args[1]
assert isinstance(expr, StrExpr)
custom_manager_generated_name = expr.value
else:
custom_manager_generated_name = base_manager_info.name + "From" + derived_queryset_info.name
custom_manager_generated_fullname = ".".join(["django.db.models.manager", custom_manager_generated_name])
if "from_queryset_managers" not in base_manager_info.metadata:
base_manager_info.metadata["from_queryset_managers"] = {}
base_manager_info.metadata["from_queryset_managers"][custom_manager_generated_fullname] = new_manager_info.fullname
# So that the plugin will reparameterize the manager when it is constructed inside of a Model definition
helpers.add_new_manager_base(semanal_api, new_manager_info.fullname)
class_def_context = ClassDefContext(cls=new_manager_info.defn, reason=ctx.call, api=semanal_api)
self_type = fill_typevars(new_manager_info)
assert isinstance(self_type, Instance)
queryset_method_names = []
# we need to copy all methods in MRO before django.db.models.query.QuerySet
for class_mro_info in derived_queryset_info.mro:
if class_mro_info.fullname == fullnames.QUERYSET_CLASS_FULLNAME:
for name, sym in class_mro_info.names.items():
queryset_method_names.append(name)
break
for name, sym in class_mro_info.names.items():
if isinstance(sym.node, FuncDef):
func_node = sym.node
elif isinstance(sym.node, Decorator):
func_node = sym.node.func
else:
continue
helpers.copy_method_to_another_class(
class_def_context, self_type, new_method_name=name, method_node=func_node
)
# Gather names of all BaseManager methods
manager_method_names = []
for manager_mro_info in new_manager_info.mro:
if manager_mro_info.fullname == fullnames.BASE_MANAGER_CLASS_FULLNAME:
for name, sym in manager_mro_info.names.items():
manager_method_names.append(name)
# Copy/alter all methods in common between BaseManager/QuerySet over to the new manager if their return type is
# the QuerySet's self-type. Alter the return type to be the custom queryset, parameterized by the manager's model
# type variable.
for class_mro_info in derived_queryset_info.mro:
if class_mro_info.fullname != fullnames.QUERYSET_CLASS_FULLNAME:
continue
for name, sym in class_mro_info.names.items():
if name not in manager_method_names:
continue
if isinstance(sym.node, FuncDef):
func_node = sym.node
elif isinstance(sym.node, Decorator):
func_node = sym.node.func
else:
continue
method_type = func_node.type
if not isinstance(method_type, CallableType):
if not semanal_api.final_iteration:
semanal_api.defer()
return None
original_return_type = method_type.ret_type
if original_return_type is None:
continue
# Skip any method that doesn't return _QS
original_return_type = get_proper_type(original_return_type)
if isinstance(original_return_type, UnboundType):
if original_return_type.name != "_QS":
continue
elif isinstance(original_return_type, TypeVarType):
if original_return_type.name != "_QS":
continue
else:
continue
# Return the custom queryset parameterized by the manager's type vars
return_type = Instance(derived_queryset_info, self_type.args)
helpers.copy_method_to_another_class(
class_def_context,
self_type,
new_method_name=name,
method_node=func_node,
return_type=return_type,
original_module_name=class_mro_info.module_name,
)
```
|
{
"source": "jercheng/PaddleX",
"score": 2
}
|
#### File: models/utils/seg_metrics.py
```python
import numpy as np
import paddle
import paddle.nn.functional as F
def loss_computation(logits_list, labels, losses):
loss_list = []
for i in range(len(logits_list)):
logits = logits_list[i]
loss_i = losses['types'][i]
loss_list.append(losses['coef'][i] * loss_i(logits, labels))
return loss_list
def calculate_area(pred, label, num_classes, ignore_index=255):
"""
Calculate intersect, prediction and label area
Args:
pred (Tensor): The prediction by model.
label (Tensor): The ground truth of image.
num_classes (int): The unique number of target classes.
ignore_index (int): Specifies a target value that is ignored. Default: 255.
Returns:
        Tensor: The intersection area of prediction and ground truth over all classes.
        Tensor: The prediction area over all classes.
        Tensor: The ground truth area over all classes.
"""
if len(pred.shape) == 4:
pred = paddle.squeeze(pred, axis=1)
if len(label.shape) == 4:
label = paddle.squeeze(label, axis=1)
if not pred.shape == label.shape:
raise ValueError('Shape of `pred` and `label should be equal, '
'but there are {} and {}.'.format(pred.shape,
label.shape))
# Delete ignore_index
mask = label != ignore_index
pred = pred + 1
label = label + 1
pred = pred * mask
label = label * mask
pred = F.one_hot(pred, num_classes + 1)
label = F.one_hot(label, num_classes + 1)
pred = pred[:, :, :, 1:]
label = label[:, :, :, 1:]
pred_area = []
label_area = []
intersect_area = []
for i in range(num_classes):
pred_i = pred[:, :, :, i]
label_i = label[:, :, :, i]
pred_area_i = paddle.sum(pred_i)
label_area_i = paddle.sum(label_i)
intersect_area_i = paddle.sum(pred_i * label_i)
pred_area.append(pred_area_i)
label_area.append(label_area_i)
intersect_area.append(intersect_area_i)
pred_area = paddle.concat(pred_area)
label_area = paddle.concat(label_area)
intersect_area = paddle.concat(intersect_area)
return intersect_area, pred_area, label_area
def mean_iou(intersect_area, pred_area, label_area):
"""
Calculate iou.
Args:
intersect_area (Tensor): The intersection area of prediction and ground truth on all classes.
pred_area (Tensor): The prediction area on all classes.
label_area (Tensor): The ground truth area on all classes.
Returns:
np.ndarray: iou on all classes.
float: mean iou of all classes.
"""
intersect_area = intersect_area.numpy()
pred_area = pred_area.numpy()
label_area = label_area.numpy()
union = pred_area + label_area - intersect_area
class_iou = []
for i in range(len(intersect_area)):
if union[i] == 0:
iou = 0
else:
iou = intersect_area[i] / union[i]
class_iou.append(iou)
miou = np.mean(class_iou)
return np.array(class_iou), miou
def accuracy(intersect_area, pred_area):
"""
Calculate accuracy
Args:
intersect_area (Tensor): The intersection area of prediction and ground truth on all classes..
pred_area (Tensor): The prediction area on all classes.
Returns:
np.ndarray: accuracy on all classes.
float: mean accuracy.
"""
intersect_area = intersect_area.numpy()
pred_area = pred_area.numpy()
class_acc = []
for i in range(len(intersect_area)):
if pred_area[i] == 0:
acc = 0
else:
acc = intersect_area[i] / pred_area[i]
class_acc.append(acc)
macc = np.sum(intersect_area) / np.sum(pred_area)
return np.array(class_acc), macc
def kappa(intersect_area, pred_area, label_area):
"""
Calculate kappa coefficient
Args:
intersect_area (Tensor): The intersection area of prediction and ground truth on all classes..
pred_area (Tensor): The prediction area on all classes.
label_area (Tensor): The ground truth area on all classes.
Returns:
float: kappa coefficient.
"""
intersect_area = intersect_area.numpy()
pred_area = pred_area.numpy()
label_area = label_area.numpy()
total_area = np.sum(label_area)
po = np.sum(intersect_area) / total_area
pe = np.sum(pred_area * label_area) / (total_area * total_area)
kappa = (po - pe) / (1 - pe)
return kappa
def f1_score(intersect_area, pred_area, label_area):
intersect_area = intersect_area.numpy()
pred_area = pred_area.numpy()
label_area = label_area.numpy()
class_f1_sco = []
for i in range(len(intersect_area)):
if pred_area[i] + label_area[i] == 0:
f1_sco = 0
elif pred_area[i] == 0:
f1_sco = 0
else:
prec = intersect_area[i] / pred_area[i]
rec = intersect_area[i] / label_area[i]
f1_sco = 2 * prec * rec / (prec + rec)
class_f1_sco.append(f1_sco)
return np.array(class_f1_sco)
```
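A small, self-contained sketch of how these metrics chain together on dummy predictions, assuming the functions above are in scope; the shapes and class count are arbitrary examples, not values used by PaddleX itself:
```python
import paddle

# Fake a batch of 2 single-channel predictions/labels of size 4x4 with 3 classes.
pred = paddle.randint(0, 3, shape=[2, 1, 4, 4])
label = paddle.randint(0, 3, shape=[2, 1, 4, 4])

intersect_area, pred_area, label_area = calculate_area(pred, label, num_classes=3)
class_iou, miou = mean_iou(intersect_area, pred_area, label_area)
class_acc, macc = accuracy(intersect_area, pred_area)
print(miou, macc, kappa(intersect_area, pred_area, label_area))
```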
|
{
"source": "jerch/node-sixel",
"score": 2
}
|
#### File: node-sixel/wasm/wasmer_example.py
```python
from wasmer import engine, Store, Module, Instance, ImportObject, Function
from wasmer_compiler_llvm import Compiler
# some test data:
# - broken attributes --> mode 1
# - 2 lines with width 7
# - pending line with current_width 2
TEST = b'"1;1;7ABCDEFG$-ABCDEFG$-AB'
def handle_band(width: int) -> int:
print('got a line of:', width)
assert width == 7
return 0
def mode_parsed(mode: int) -> int:
print('mode selected:', mode)
assert mode == 1
return 0
# load wasm engine
store = Store(engine.JIT(Compiler))
module = Module(store, open('./decoder.wasm', 'rb').read())
import_object = ImportObject()
import_object.register("env", {
"handle_band": Function(store, handle_band),
'mode_parsed': Function(store, mode_parsed),
})
instance = Instance(module, import_object)
mem = instance.exports.memory.int8_view()
chunk_address = instance.exports.get_chunk_address()
# load test data
mem[chunk_address:] = TEST
# run
instance.exports.init(-1, 0, 256, 1)
instance.exports.decode(0, len(TEST))
print('current_width:', instance.exports.current_width())
assert instance.exports.current_width() == 2
```
|
{
"source": "jerck1/MetodosComputacionales1",
"score": 4
}
|
#### File: MetodosComputacionales1/Act_3_derivadas_newton/derivada_metodos.py
```python
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# In[2]:
def f(x,n,A1=1,A2=1,A=1):  # n: lets us switch between one function and the other
if(n==1):
return A1/x**6-A2/x**12
elif(n==2):
return A*np.cos(x)
labels={1:'A1/x**6-A2/x**12',2:'A*np.cos(x)'}
# In[3]:
def derivada_adelante(x,n,h):
return (f(x+h,n)-f(x,n))/h
def derivada_atrás(x,n,h):
return (f(x,n)-f(x-h,n))/h
def derivada_central(x,n,h):
return (f(x+h,n)-f(x-h,n))/(2*h)
# In[4]:
hs=[1e-6,0.01]
labels2={0:'adelante',1:'atrás',2:'central'}
for h in hs:
print(f"Gráficas para h={h}")
fig,ax=plt.subplots(2,3,sharex=False,sharey=False,
gridspec_kw={'hspace':0.5, 'wspace': 0.5},figsize=(10,8))
for n in [1,2]:
        x= np.linspace(0.1,1,100) if n==1 else np.linspace(0.0,10,100) # 100 points between 0.1 and 1
df1=derivada_adelante(x,n,h)
df2=derivada_atrás(x,n,h)
df3=derivada_central(x,n,h)
datos = {f'f{n}(x)': f(x,n), f'f\'{n}adelante(x)': df1
, f'f\'{n}atrás(x)': df2
, f'f\'{n}central(x)': df3}
datos = pd.DataFrame(data=datos)
for i in range(3):
ax[0][i].set_title(f"{labels2[i]}",x=.5/1.,y=1.1/1.,loc="center")
ax[n-1][i].plot(x,f(x,n),label=f"{labels[n]}")
ax[n-1][i].plot(x,df1)
print(datos[:10])
plt.savefig(f"derivadas_h={h}.png")
# # <center> Newton's Method <center>
# $$f(x^{*})=0$$
# We choose an initial point $x_0$ such that:
# $$x^{*}=x_0+h$$
# Then:
# $$f(x^*)=f(x_0+h)\simeq f(x_0)+f'(x_0)h$$
# We solve for $h$, using the fact that $f(x^{*})=0$:
# $$h=-\frac{f(x_0)}{f'(x_0)}$$
# We find $x^{*}$ in terms of $x_0$ and $h$:
#
# $$x^{*}=x_0+h\simeq x_0-\frac{f(x_0)}{f'(x_0)}$$
# The right-hand side is really only an approximation of the root $x^{*}$, which we call $x_1$:
# $$x_{1}=x_0-\frac{f(x_0)}{f'(x_0)}$$
# We now take $x_{1}$ to find a better approximation:
# $$x_{2}= x_1-\frac{f(x_1)}{f'(x_1)}$$
# and so on ($n$ times):
# $$x_{n+1}=x_{n}-\frac{f(x_{n})}{f'(x_{n})}$$
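# A quick numeric sanity check of one Newton step (added illustration, not part
# of the original notebook): for $f(x)=\cos(x)$ an extremum satisfies
# $f'(x)=-\sin(x)=0$, so a single step from $x_0=3.0$ should land close to $\pi$.
x0=3.0
print("One Newton step from x0=3.0:",x0-(-np.sin(x0))/(-np.cos(x0)))  # ~3.1425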
# In[5]:
def fp(x,n,A1=1,A2=1,A=1):
if(n==1):
return -6*A1/x**7+12*A2/x**13
if(n==2):
return -A*np.sin(x)
# In[6]:
def fpp(x,n,A1=1,A2=1,A=1):
if(n==1):
return np.longdouble(6*7*A1/x**8-12*13*A2/x**14)
if(n==2):
return -A*np.cos(x)
# ## With the analytical derivative
# In[7]:
def Newton_anal(x0,n,error):
xant=x0
xsig=xant-fp(xant,n)/fpp(xant,n)
while(np.abs(xsig-xant)>error):
xant=xsig
xsig=xant-fp(xant,n)/fpp(xant,n)
# print(fp(xant,n),fpp(xant,n),xant,xsig)
return xsig
# In[8]:
error=1e-12
for n in [1,2]:
print(f"\nExtremos para {labels[n]}:")
print("x*")
if(n==1):
seed=0.5
print("{0:9f}".format(Newton_anal(seed,n,error)))
elif(n==2):
seed=np.array([-0.5,2,6,9])
for x0 in seed:
print("{0:9f}".format(Newton_anal(x0,n,error)))
# ## With the numerical derivative
# In[9]:
def Newton_num(x0,n,error):
xant=x0
h=error/100
diff_f=derivada_adelante(xant,n,h)
diff_2_f=(derivada_adelante(xant+h,n,h)-diff_f)/h
xsig=xant-diff_f/diff_2_f
cnt=0
while(np.abs(xsig-xant)>error):
xant=xsig
diff_f=derivada_adelante(xant,n,h)
diff_2_f=(derivada_adelante(xant+h,n,h)-diff_f)/h
xsig=xant-diff_f/diff_2_f
# print(fp(xant,n),fpp(xant,n),xant,xsig)
cnt+=1
if(cnt>1000):
print("Supera 1000 iteraciones")
break
return xsig
# In[10]:
error=1e-4
for n in [1,2]:
print(f"\nExtremos para {labels[n]}:")
print("x*")
if(n==1):
seed=0.5
print("{0:9f}".format(Newton_num(seed,n,error)))
elif(n==2):
seed=np.array([-0.5,2,6,9])
for x0 in seed:
print("{0:9f}".format(Newton_num(x0,n,error)))
# ## Appendix: graphical comparison between the analytical derivative and the forward-difference derivative
# In[11]:
h=1e-6#0.01
for n in [1,2]:
    x= np.linspace(0.1,1,100) if n==1 else np.linspace(0.0,10,100) # 100 points between 0.1 and 1
df1=derivada_adelante(x,n,h)
plt.figure()
plt.plot(x,df1)
plt.plot(x,fp(x,n))
# In[ ]:
```
|
{
"source": "jercoco/QSQF",
"score": 2
}
|
#### File: QSQF/model/net_qspline_A.py
```python
import torch
import torch.nn as nn
from torch.nn.functional import pad
from torch.autograd import Variable
import logging
logger = logging.getLogger('DeepAR.Net')
class Net(nn.Module):
def __init__(self, params,device):
'''
We define a recurrent network that predicts the future values
of a time-dependent variable based on past inputs and covariates.
'''
super(Net, self).__init__()
self.params = params
self.device = device
self.lstm = nn.LSTM(input_size=params.lstm_input_size,
hidden_size=params.lstm_hidden_dim,
num_layers=params.lstm_layers,
bias=True,
batch_first=False,
dropout=params.lstm_dropout)
        # initialize LSTM forget gate bias to be 1 as recommended by
# http://proceedings.mlr.press/v37/jozefowicz15.pdf
for names in self.lstm._all_weights:
for name in filter(lambda n: "bias" in n, names):
bias = getattr(self.lstm, name)
n = bias.size(0)
start, end = n // 4, n // 2
bias.data[start:end].fill_(1.)
#Plan A:
#beta_01:[beta0,beta1]
self.beta_n1 = nn.Linear(
params.lstm_hidden_dim * params.lstm_layers, 1)
self.pre_beta_1 = nn.Linear(
params.lstm_hidden_dim * params.lstm_layers, 1)
self.pre_sigma = nn.Linear(
params.lstm_hidden_dim * params.lstm_layers, params.num_spline)
self.pre_gamma = nn.Linear(
params.lstm_hidden_dim * params.lstm_layers, params.num_spline)
# softmax to make sure Σu equals to 1
self.sigma = nn.Softmax(dim=1)
# softplus to make sure gamma is positive
self.gamma = nn.Softplus()
# softplus to make sure beta0 is positive
self.beta_1 = nn.Softplus()
def forward(self, x, hidden, cell):
_, (hidden, cell) = self.lstm(x, (hidden, cell))
# use h from all three layers to calculate mu and sigma
hidden_permute = \
hidden.permute(1, 2, 0).contiguous().view(hidden.shape[1], -1)
#Plan A:
beta_n1 = self.beta_n1(hidden_permute)
pre_beta_1 = self.pre_beta_1(hidden_permute)
beta_1 = self.beta_1(pre_beta_1)
beta_1=-beta_1
pre_sigma = self.pre_sigma(hidden_permute)
sigma = self.sigma(pre_sigma)
pre_gamma = self.pre_gamma(hidden_permute)
gamma = self.gamma(pre_gamma)
#Plan A:
return ((beta_n1,beta_1,sigma,torch.squeeze(gamma)),hidden,cell)
def init_hidden(self, input_size):
return torch.zeros(self.params.lstm_layers, input_size,
self.params.lstm_hidden_dim,
device=self.device)
def init_cell(self, input_size):
return torch.zeros(self.params.lstm_layers, input_size,
self.params.lstm_hidden_dim,
device=self.device)
def predict(self, x, hidden, cell, sampling=False):
"""
generate samples by sampling from
"""
batch_size = x.shape[1]
samples = torch.zeros(self.params.sample_times,batch_size,
self.params.pred_steps,
device=self.device)
for j in range(self.params.sample_times):
decoder_hidden = hidden
decoder_cell = cell
for t in range(self.params.pred_steps):
func_param,decoder_hidden,decoder_cell=\
self(x[self.params.pred_start+t].unsqueeze(0),
decoder_hidden,decoder_cell)
beta_n1,beta_1,sigma,gamma=func_param
                #pred_cdf is a uniform distribution
uniform = torch.distributions.uniform.Uniform(
torch.tensor([0.0], device=sigma.device),
torch.tensor([1.0], device=sigma.device))
pred_cdf=uniform.sample([batch_size])
beta_0=gamma[:,:1]-2*beta_1*sigma[:,:1]
beta_N=torch.cat((beta_n1,beta_0),dim=1)
beta=pad(gamma,(1,0))[:,:-1]
beta[:,0]=beta_0[:,0]
beta=(gamma-beta)/(2*sigma)
beta=beta-pad(beta,(1,0))[:,:-1]
beta[:,-1]=gamma[:,-1]-beta[:,:-1].sum(dim=1)
ksi=pad(torch.cumsum(sigma,dim=1),(1,0))[:,:-1]
indices=ksi<pred_cdf
pred=(beta_N*pad(pred_cdf,(1,0),value=1)).sum(dim=1)
pred=pred+((pred_cdf-ksi).pow(2)*beta*indices).sum(dim=1)
samples[j, :, t] = pred
                # the predicted value at t is used as a covariate for t+1, ..., t+lag
for lag in range(self.params.lag):
if t<self.params.pred_steps-lag-1:
x[self.params.pred_start+t+1,:,0]=pred
sample_mu = torch.mean(samples, dim=0) # mean or median ?
sample_std = samples.std(dim=0)
return samples, sample_mu, sample_std
def loss_fn(func_param, labels: Variable):
beta_n1,beta_1,sigma,gamma=func_param
beta_0=gamma[:,:1]-2*beta_1*sigma[:,:1]
beta_N=torch.cat((beta_n1,beta_0),dim=1)
beta=pad(gamma,(1,0))[:,:-1]
beta[:,0]=beta_0[:,0]
beta=(gamma-beta)/(2*sigma)
beta=beta-pad(beta,(1,0))[:,:-1]
beta[:,-1]=gamma[:,-1]-beta[:,:-1].sum(dim=1)
#calculate the maximum for each segment of the spline
ksi=torch.cumsum(sigma,dim=1)
df1=ksi.expand(sigma.shape[1],sigma.shape[0],sigma.shape[1]).T.clone()
df2=pad(ksi.T.unsqueeze(2),(1,0),'constant',value=1)
ksi=pad(ksi,(1,0))[:,:-1]
knots=df1-ksi
knots[knots<0]=0
knots=(df2*beta_N).sum(dim=2)+(knots.pow(2)*beta).sum(dim=2)
knots=pad(knots.T,(1,0))[:,:-1]#F(ksi_1~K)=0~max
diff=labels.view(-1,1)-knots
alpha_l=diff>0
alpha_A=torch.sum(alpha_l*beta,dim=1)
alpha_B=beta_N[:,1]-2*torch.sum(alpha_l*beta*ksi,dim=1)
alpha_C=beta_N[:,0]-labels+torch.sum(alpha_l*beta*ksi*ksi,dim=1)
#since A may be zero, roots can be from different methods.
not_zero=(alpha_A!=0)
alpha=torch.zeros_like(alpha_A)
#since there may be numerical calculation error,#0
idx=(alpha_B**2-4*alpha_A*alpha_C)<0#0
diff=diff.abs()
index=diff==(diff.min(dim=1)[0].view(-1,1))
index[~idx,:]=False
#index=diff.abs()<1e-4#0,1e-4 is a threshold
#idx=index.sum(dim=1)>0#0
alpha[idx]=ksi[index]#0
alpha[~not_zero]=-alpha_C[~not_zero]/alpha_B[~not_zero]
not_zero=~(~not_zero | idx)#0
delta=alpha_B[not_zero].pow(2)-4*alpha_A[not_zero]*alpha_C[not_zero]
alpha[not_zero]=(-alpha_B[not_zero]+torch.sqrt(delta))/(2*alpha_A[not_zero])
crps_1=labels*(2*alpha-1)
#lam2=lambda n:2*beta_N[:,n-1]*(1/n/(n+1)-alpha.pow(n)/n)
#crps_2=reduce(lambda a,b:a+b,[lam2(n) for n in range(1,2+1)])
crps_2=beta_N[:,0]*(1-2*alpha)+beta_N[:,1]*(1/3-alpha.pow(2))
crps_3=torch.sum(2*beta/((2+1)*(2+2))*(1-ksi).pow(2+2),dim=1)
crps_4=torch.sum(alpha_l*2*beta/(2+1)*(torch.unsqueeze(alpha,1)-ksi).pow(2+1),dim=1)
crps=crps_1+crps_2+crps_3-crps_4
crps = torch.mean(crps)
return crps
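if __name__ == "__main__":
    # Hedged usage sketch (added illustration, not part of the original file).
    # The hyper-parameter names follow the attributes accessed above; the
    # concrete values are arbitrary assumptions chosen just to exercise a
    # single forward pass.
    from types import SimpleNamespace
    params = SimpleNamespace(lstm_input_size=4, lstm_hidden_dim=8, lstm_layers=2,
                             lstm_dropout=0.1, num_spline=5, sample_times=3,
                             pred_start=16, pred_steps=4, lag=1)
    net = Net(params, device=torch.device('cpu'))
    x = torch.zeros(20, 2, params.lstm_input_size)  # (seq_len, batch, features)
    hidden, cell = net.init_hidden(2), net.init_cell(2)
    (beta_n1, beta_1, sigma, gamma), hidden, cell = net(x[:1], hidden, cell)
    print(beta_n1.shape, sigma.shape, gamma.shape)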
```
|
{
"source": "jercytryn/inception",
"score": 4
}
|
#### File: inception/image/analyze.py
```python
import numpy
from .scene import estimate_scene_description
def validate_as_foreground(image):
"""
Automatically detects whether the image is a valid foreground element, ready to insert
:Parameters:
image : `Image` or `numpy.array`
The image to validate
:Returns:
True if the image is a valid foreground, False otherwise
"""
# currently, just hack this by doing a floodfill inward and then check if the entire border
# is transparent
threshold = .01
    if len(image.shape) < 2 or image.shape[0] < 2 or image.shape[1] < 2:
return False
# get the background color
background_color = detect_bg(image)
chans = len(background_color)
# TODO: support other colors than white
# for now, just short-circuit if the detected color is not white
if tuple([1.0])*chans != background_color:
return False
# next, perform a floodfill
## Edit: for now, just check whether all border rows/columns are within a small threshold of the
# background color
for slice in (image[0,...], image[-1,...], image[:,0,...], image[:,-1,...]):
if len(slice.shape) > 1:
slice = slice.sum(axis=1) / 3
bg = numpy.array(background_color).sum() / 3
else:
bg = background_color
if not numpy.all(abs(slice - bg) < threshold):
return False
return True
def detect_bg(image):
"""
    Helper function to automatically "detect", i.e. guess at, the background color of a (usually opaque) image
:Parameters:
image : `Image` or `numpy.ndarray`
The image whose background color we wish to detect
:Returns:
The background color of the image
:Rtype:
`tuple`
"""
# HACK:
# for now, simply scan the border of the image and use the most common pixel value
rows, cols = image.shape[:2]
if len(image.shape) > 2:
chans = image.shape[2]
else:
image = image.reshape(rows,cols,1)
chans = 1
# top and left:
border_pixels = numpy.concatenate((image[:1,:,:].reshape(cols, chans), image[:,:1,:].reshape(rows,chans)))
if rows > 1:
border_pixels = numpy.concatenate((border_pixels, image[-1:,:,:].reshape(cols,chans)))
if cols > 1:
border_pixels = numpy.concatenate((border_pixels, image[:,-1:,:].reshape(rows,chans)))
# grab the most common pixel value
counter = {}
indices = {}
for pixel in border_pixels:
# ignore mostly transparent pixels
if len(pixel) < 4 or pixel[3] > .1:
val = tuple((255 * pixel).astype('uint8'))
counter[val] = counter.get(val, 0) + 1
if counter:
mode = tuple(float(a)/255 for a in max(counter.keys(), key=lambda x: counter[x]))
return mode
    return tuple([1.0])*chans # default to white
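if __name__ == "__main__":
    # Hedged usage sketch (added illustration, not part of the original module;
    # run it with `python -m` so the relative import above resolves).
    # A white canvas with a dark square in the middle should report a white
    # background and validate as a usable foreground element.
    demo = numpy.ones((16, 16, 3))
    demo[4:12, 4:12, :] = 0.2
    print(detect_bg(demo))               # expected: (1.0, 1.0, 1.0)
    print(validate_as_foreground(demo))  # expected: True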
```
#### File: image/matte/closedformmatte.py
```python
import scipy.sparse
import scipy.sparse.linalg
import scipy.ndimage
import numpy.linalg
from ..analyze import detect_bg
def alphamatte(image, **kwargs):
"""
Mattes the given image using closed form matting
:Parameters:
image : `numpy.array`
The input image to matte
scribble : `numpy.array`
An image that provides constraints on definite foreground and definite background,
background is given value of 0 and foreground a value of 1. Everything gray is unknown.
If not given, constraints are determined procedurally based on difference from background color
Default=None
epsilon : `float`
Regularizing term, default=.0000001
win_size : `int`
Window size, default=1
:Returns:
The resulting alpha channel
:Rtype:
`numpy.array`
"""
return runMatting(image, **kwargs)
def generate_scribbles(image, bg_color, bg_threshold=.000000000001, fg_threshold=.05):
"""
Auto-generate conservative scribbles from an image with a given solid background color
"""
# use a very conservative estimation of scribbles
# everything that is exactly the bg_color becomes background
# everything that is very far from the bg_color becomes foreground
image_diff = abs(image[...,:3] - bg_color[:3])
bg_mask = numpy.all(image_diff < bg_threshold, axis=2)
fg_mask = numpy.all(image_diff > fg_threshold, axis=2)
consts_map = bg_mask | fg_mask # all constraints
consts_vals = fg_mask # just foreground
return (consts_map, consts_vals)
def runMatting(image, scribble=None, epsilon=None, win_size=None):
"""
Runs the closed form matting algorithm
:Parameters:
image : `numpy.array`
The input image to matte
scribble : `numpy.array`
An image that provides constraints on definite foreground and definite background,
background is given value of 0 and foreground a value of 1. Everything gray is unknown.
If not given, constraints are determined procedurally based on difference from background color
Default=None
epsilon : `float`
Regularizing term, default=.0000001
win_size : `int`
Window size, default=1
:Returns:
The resulting alpha channel
:Rtype:
`numpy.array`
"""
if scribble is None:
consts_map, consts_vals = generate_scribbles(image, detect_bg(image))
else:
bg_mask = numpy.all(scribble[...,:3] < .05, axis=2)
fg_mask = numpy.all(scribble[...,:3] > .95, axis=2)
consts_map = bg_mask | fg_mask # all constraints
consts_vals = fg_mask # just foreground
return solveAlpha(image, consts_map, consts_vals, epsilon=epsilon, win_size=win_size)
def solveAlpha(image, consts_map, consts_vals, epsilon=None, win_size=None, lambda_val=100):
h, w, _ = image.shape[:3]
img_size = w * h
kwargs = {}
if epsilon is not None:
kwargs['epsilon'] = epsilon
if win_size is not None:
kwargs['win_size'] = win_size
A = getLaplacian1(image, consts_map, **kwargs)
D = scipy.sparse.spdiags(consts_map.flatten(1),0,img_size,img_size).tocsc();
x = scipy.sparse.linalg.spsolve((A + lambda_val*D), lambda_val * numpy.multiply(consts_map.flatten(1), consts_vals.flatten(1)))
return x.reshape(h,w,order='F').clip(0,1)
def getLaplacian1(image, consts, epsilon=.0000001, win_size=1):
neb_size = (win_size * 2 + 1)**2
h, w, c = image.shape[:3]
if (c > 3):
c = 3
img_size = w*h
#consts = scipy.ndimage.binary_erosion(consts, numpy.ones((win_size*2+1, win_size*2+1)), border_value=1)
indsM = numpy.array(range(img_size)).reshape(h,w,order='F')
tlen = sum(sum(1 - consts[win_size:-win_size, win_size:-win_size]) * (neb_size**2))
row_inds = numpy.zeros((tlen,1))
col_inds = numpy.zeros((tlen,1))
vals = numpy.zeros((tlen,1))
len_val = 0
for j in range(win_size, w-win_size):
for i in range(win_size, h-win_size):
if (consts[i,j]):
continue
win_inds = indsM[i-win_size:i+win_size+1, j-win_size:j+win_size+1].flatten(1)
winI = image[i-win_size:i+win_size+1,j-win_size:j+win_size+1,:3].reshape(neb_size, c, order='F')
win_mu = winI.mean(axis=0).transpose()
win_var = numpy.linalg.inv((winI.transpose().dot(winI)/neb_size) - win_mu.dot(win_mu.transpose()) + numpy.identity(c)*epsilon/neb_size)
winI = winI - numpy.tile(win_mu.transpose(), (neb_size, 1))
tvals = (1 + winI.dot(win_var).dot(winI.transpose())) / neb_size
row_inds[len_val:neb_size**2 + len_val] = numpy.tile(win_inds, (1,neb_size)).reshape(neb_size**2, 1, order='F')
col_inds[len_val:neb_size**2 + len_val] = numpy.tile(win_inds.transpose(), (neb_size,1)).reshape(neb_size**2, 1, order='F')
vals[len_val:neb_size**2 + len_val, 0] = tvals.flatten(1)
len_val += neb_size**2
vals = vals[:len_val].squeeze()
row_inds = row_inds[:len_val].squeeze()
col_inds = col_inds[:len_val].squeeze()
A = scipy.sparse.coo_matrix((vals, (row_inds, col_inds)), shape=(img_size, img_size)).tocsc()
sumA = A.sum(axis=1)
return (scipy.sparse.spdiags(sumA.flatten(1), 0, img_size, img_size) - A)
```
#### File: image/operation/poisson.py
```python
import numpy
import cv2
from .base import Operation
from ..image import Image
class PoissonOperation(Operation):
"""
Implements a poisson image blending operation, a la
Perez, Patrick, <NAME>, and <NAME>. "Poisson image editing." ACM Transactions on Graphics (TOG). Vol. 22. No. 3. ACM, 2003.
APA
"""
def __init__(self, source_image, dest_image, offset=(0, 0), clone_type=cv2.NORMAL_CLONE):
"""
Initializes a poisson blending operation
:Parameters:
source_image : `Image`
The image to composite
dest_image : `Image`
The image to blend source into
offset : `tuple`
A tuple of (row, column) denoting the offset into the destination image where the upper-left
corner of the source image should begin. Default=(0,0)
clone_type : `int`
A flag determining the type of blending to do. Support normal clone (cv2.NORMAL_CLONE), mixed
clone for mixing gradients (cv2.MIXED_CLONE) and feature exchange (cv2.FEATURE_EXCHANGE).
See http://docs.opencv.org/3.0-beta/modules/photo/doc/cloning.html for more details.
"""
self.source_image = self.image = source_image
self.dest_image = dest_image
self.offset = offset
self.clone_type = clone_type
self.opimage = None
def run(self):
"""
Runs the operation
:Returns:
A single image with foreground and background blended together seamlessly
:Rtype:
`Image`
"""
# TODO: add support for merging off the edge of the image
rows, cols = self.image.shape[:2]
self.image.to_rgba()
# float->int and swaps channels
opencv_source = self.source_image.opencvimage
opencv_dest = self.dest_image.opencvimage
# construct a mask with 0 corresponding to alpha of 0 in the source
self.mask_image = (self.source_image[...,3] * 255).astype('uint8')
offset = (self.offset[1] + cols / 2, self.offset[0] + rows / 2)
opencv_result = cv2.seamlessClone(opencv_source, opencv_dest,
self.mask_image, offset, self.clone_type)
self.opimage = Image(opencv_result[:, :, ::-1]) # swap channels back to rgb
return self.opimage
```
#### File: image/place/place.py
```python
import numpy
def normalize_shape(image, offset, width, height, dtype=None, expand=False):
"""
    Normalizes the shape of the given image to the given dimensions, placing its upper-left corner
    at the upper-left corner of a black image, shifted by the given offset.
:Parameters:
image : `numpy.array`
The image to normalize
offset : `tuple`
A tuple of (row, column) denoting the offset into the black image where the upper-left
corner of the source image should begin. Default=(0,0)
width : `int`
The width to normalize to in pixels
height : `int`
The height to normalize to in pixels
dtype : `basestring`
The datatype of the resulting image
expand : `bool`
If True, rather than conform the image inside the (height, width) array, the array is expanded
such that the image is not cropped, even when it exceeds the boundary of the given width/height.
Otherwise, the image is cropped outside the width/height. Default=False
"""
if dtype is None:
dtype = image.dtype
# blow up op image to match max width and height
before_rows = max(offset[0], 0)
before_cols = max(offset[1], 0)
chans = image.shape[2]
# insert the image into a new normalized (black) image
r0, c0 = abs(min(offset[0], 0)), abs(min(offset[1], 0)) # how much to offset into the image itself
r1, c1 = min(image.shape[0], height-before_rows), min(image.shape[1], width-before_cols)
if expand:
# covers all cases regardless of the offset and size of image relative to the situated width/height
height = max(image.shape[0] + before_rows, height + r0)
width = max(image.shape[1] + before_cols, width + c0)
r0, c0 = 0, 0
r1, c1 = image.shape[0], image.shape[1]
new_image = numpy.zeros((height, width, chans), dtype=dtype)
new_image[before_rows:before_rows+(r1-r0), before_cols:before_cols+(c1-c0), :] = image[r0:r1, c0:c1, :]
return new_image
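if __name__ == "__main__":
    # Hedged usage sketch (added illustration, not part of the original module):
    # place a 2x2 white RGBA patch at offset (1, 1) inside a 4x4 black canvas.
    patch = numpy.ones((2, 2, 4))
    canvas = normalize_shape(patch, (1, 1), width=4, height=4)
    print(canvas.shape)         # (4, 4, 4)
    print(canvas[1:3, 1:3, 0])  # the patch survives intact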
```
#### File: image/statadjust/colorspace.py
```python
import numpy, math
###############################################################
# color space transformations
def srgb_to_linear(image):
"""
Converts the given image from sRGB color space to linear color space
:Parameters:
image : `numpy.array`
The sRGB image
:Returns:
        The image in linear color space
"""
a = 0.055
return numpy.select([image <= 0.04045],[image/12.92],default=pow((image + a)/(1+a), 2.4))
def linear_to_srgb(image):
"""
Converts the given image from linear color space to sRGB color space
:Parameters:
image : `numpy.array`
The linear image
:Returns:
        The image in sRGB color space
"""
a = 0.055
return numpy.select([image <= 0.0031308],[12.92 * image],default=(1+a)*pow(image,1/2.4) - a)
# hsv color space
# adapted from standard python colorsys lib
# see also http://stackoverflow.com/questions/7274221/changing-image-hue-with-python-pil
def rgb_to_hsv(image):
"""
Converts the given linear color space RGB image to HSV
:Parameters:
image : `numpy.array`
The linear color image
:Returns:
The image in HSV color space
"""
hsv = numpy.zeros_like(image)
hsv[..., 3:] = image[..., 3:]
r, g, b = image[..., 0], image[..., 1], image[..., 2]
maxc = numpy.max(image[..., :3], axis=-1)
minc = numpy.min(image[..., :3], axis=-1)
v = maxc
diffvals = (maxc != minc)
hsv[diffvals, 1] = (maxc-minc)[diffvals] / maxc[diffvals]
rc, gc, bc = numpy.zeros_like(r), numpy.zeros_like(r), numpy.zeros_like(r)
rc[diffvals] = (maxc-r)[diffvals] / (maxc-minc)[diffvals]
gc[diffvals] = (maxc-g)[diffvals] / (maxc-minc)[diffvals]
bc[diffvals] = (maxc-b)[diffvals] / (maxc-minc)[diffvals]
hsv[..., 0] = numpy.select([r == maxc, g == maxc], [bc-gc, 2.0+rc-bc], default=4.0+gc-rc)
hsv[..., 0] = (hsv[..., 0]/6.0) % 1.0
hsv[..., 2] = v
return hsv
def hsv_to_rgb(image):
"""
Converts the given linear color space HSV image to RGB
:Parameters:
image : `numpy.array`
The linear color image
:Returns:
The image in RGB color space
"""
rgb = numpy.zeros_like(image)
rgb[..., 3:] = image[..., 3:]
h, s, v = image[..., 0], image[..., 1], image[..., 2]
i = (h*6.0).astype('uint8') # XXX assume truncates!
f = (h*6.0) - i
p = v*(1.0 - s)
q = v*(1.0 - s*f)
t = v*(1.0 - s*(1.0-f))
i = i%6
r = numpy.select([s == 0.0, i == 0, i == 1, i == 2, i == 3, i == 4], [v, v, q, p, p, t], default=v)
g = numpy.select([s == 0.0, i == 0, i == 1, i == 2, i == 3, i == 4], [v, t, v, v, q, p], default=p)
b = numpy.select([s == 0.0, i == 0, i == 1, i == 2, i == 3, i == 4], [v, p, p, t, v, v], default=q)
rgb[..., 0] = r
rgb[..., 1] = g
rgb[..., 2] = b
return rgb
def rgb_to_xyY(image):
"""
Converts the given linear color space RGB image to xyY color space
:Parameters:
image : `numpy.array`
The linear color image
:Returns:
The image in xyY color space
"""
# see http://www.brucelindbloom.com/index.html?Eqn_RGB_XYZ_Matrix.html
# srgb
r, g, b = image[..., 0], image[..., 1], image[..., 2]
# to XYZ first
XYZ = numpy.zeros_like(image)
XYZ[..., 3:] = image[..., 3:]
X = .4125 * r + .3576 * g + .1805 * b
Y = .2126 * r + .7152 * g + .0722 * b
Z = .0193 * r + .1192 * g + .9505 * b
XYZ[..., 0] = X
XYZ[..., 1] = Y
XYZ[..., 2] = Z
# srgb reference white:
# x=0.3127, y=0.3290; z=0.3583
# now to xyY
xyY = numpy.zeros_like(image)
# when X=Y=Z=0, set x and y to reference white
xyY[..., 0] = .3127
xyY[..., 1] = .3290
mask = numpy.ma.mask_or(numpy.ma.mask_or((X != Y),(Y != Z)),(Z != 0))
xyY[..., 3:] = image[..., 3:]
xyY[..., 0][mask] = X[mask] / (X+Y+Z)[mask]
xyY[..., 1][mask] = Y[mask] / (X+Y+Z)[mask]
xyY[..., 2] = Y
return xyY
def xyY_to_rgb(image):
"""
Converts the given linear color space xyY image to RGB color space
:Parameters:
image : `numpy.array`
The linear color image
:Returns:
The image in RGB color space
"""
# http://www.brucelindbloom.com/index.html?Eqn_xyY_to_XYZ.html
# convert to XYZ first
x, y, Y = image[..., 0], image[..., 1], image[..., 2]
XYZ = numpy.zeros_like(image)
mask = (y != 0)
XYZ[...,0][mask] = (x * Y)[mask] / y[mask]
XYZ[...,1][mask] = Y[mask]
XYZ[...,2][mask] = ((1-x-y) * Y)[mask] / y[mask]
X, Y, Z = XYZ[...,0], XYZ[...,1], XYZ[...,2]
rgb = numpy.zeros_like(image)
rgb[..., 3:] = image[..., 3:]
r = 3.2406 * X + -1.5372 * Y + -.4986 * Z
g = -.9689 * X + 1.8758 * Y + .0415 * Z
b = .0557 * X + -.2040 * Y + 1.0570 * Z
rgb[..., 0] = r
rgb[..., 1] = g
rgb[..., 2] = b
return rgb
# based off of (Wyszecki & Stiles, p.224-9)
# Note: 0.24792 is a corrected value for the error found in W&S as 0.24702
k_temp_table=numpy.array([(0, 0.18006, 0.26352, -0.24341),
(10, 0.18066, 0.26589, -0.25479),
(20, 0.18133, 0.26846, -0.26876),
(30, 0.18208, 0.27119, -0.28539),
(40, 0.18293, 0.27407, -0.30470),
(50, 0.18388, 0.27709, -0.32675),
(60, 0.18494, 0.28021, -0.35156),
(70, 0.18611, 0.28342, -0.37915),
(80, 0.18740, 0.28668, -0.40955),
(90, 0.18880, 0.28997, -0.44278),
(100, 0.19032, 0.29326, -0.47888),
(125, 0.19462, 0.30141, -0.58204),
(150, 0.19962, 0.30921, -0.70471),
(175, 0.20525, 0.31647, -0.84901),
(200, 0.21142, 0.32312, -1.0182),
(225, 0.21807, 0.32909, -1.2168),
(250, 0.22511, 0.33439, -1.4512),
(275, 0.23247, 0.33904, -1.7298),
(300, 0.24010, 0.34308, -2.0637),
(325, 0.24792, 0.34655, -2.4681),
(350, 0.25591, 0.34951, -2.9641),
(375, 0.26400, 0.35200, -3.5814),
(400, 0.27218, 0.35407, -4.3633),
(425, 0.28039, 0.35577, -5.3762),
(450, 0.28863, 0.35714, -6.7262),
(475, 0.29685, 0.35823, -8.5955),
(500, 0.30505, 0.35907, -11.324),
(525, 0.31320, 0.35968, -15.628),
(550, 0.32129, 0.36011, -23.325),
(575, 0.32931, 0.36038, -40.770),
(600, 0.33724, 0.36051, -116.45)])
def cct_to_xy(temperature):
"""
Convert the two-channel mired, tint temperature image to xy chromaticity
:Parameters:
temperature : `numpy.array`
An array of depth 2 containing mired, tint
:Returns:
The image in xy chromaticity space
"""
# adapted from original "Understanding and Improving the Realism of Image Composites" code
# http://www.brucelindbloom.com/index.html?Eqn_XYZ_to_T.html
# also see opt-prop matlab toolbox
mired = temperature[..., 0]
tint = temperature[..., 1]
k_tint_scale = -3000.0;
## Begin
# Find inverse temperature to use as index.
r = mired;
# Convert tint to offset in uv space.
offset = tint * (1.0 / k_tint_scale);
indexarray = numpy.zeros(temperature.shape[:-1], dtype='uint64')
# initialize mask to all false
mask = (indexarray != indexarray)
# Search for line pair containing coordinate.
for index in range(30):
newmask = (r < k_temp_table[index + 1][0])
newmask = (newmask | (index==29))
indexarray[newmask & (~mask)] = index
mask = mask | newmask
# Find relative weight of first line.
f = (k_temp_table[indexarray + 1,0] - r) / (k_temp_table[indexarray + 1,0] - k_temp_table[indexarray,0])
# Interpolate the black body coordinates.
u = k_temp_table[indexarray,1] * f + k_temp_table[indexarray + 1,1] * (1.0 - f)
v = k_temp_table[indexarray,2] * f + k_temp_table[indexarray + 1,2] * (1.0 - f)
# Find vectors along slope for each line.
uu1 = 1.0
vv1 = k_temp_table[indexarray,3]
uu2 = 1.0
vv2 = k_temp_table[indexarray + 1,3]
len1 = (1.0 + vv1 * vv1) ** (1/2.0)
len2 = (1.0 + vv2 * vv2) ** (1/2.0)
uu1 = uu1 / len1
vv1 = vv1 / len1
uu2 = uu2 / len2
vv2 = vv2 / len2
# Find vector from black body point.
uu3 = uu1 * f + uu2 * (1.0 - f)
vv3 = vv1 * f + vv2 * (1.0 - f)
len3 = (uu3 * uu3 + vv3 * vv3) ** (1/2.0)
uu3 = uu3 / len3
vv3 = vv3 / len3
# Adjust coordinate along this vector.
u = u + uu3 * offset
v = v + vv3 * offset
# Convert to xy coordinates.
denom = (u - 4.0 * v + 2.0);
x = 1.5 * u / denom
y = v / denom
result = numpy.zeros_like(temperature)
result[..., 0] = x
result[..., 1] = y
return result
def xyY_to_cct(image):
"""
Convert the xyY linear image to a 2-channel image containing mired, tint
:Parameters:
image : `numpy.array`
The image in xyY space
:Returns:
The color temperature image
"""
# adapted from original "Understanding and Improving the Realism of Image Composites" code
# also http://www.brucelindbloom.com/index.html?Eqn_XYZ_to_T.html
# also i_xy2cct.m in opt-prop matlab toolbox source code
k_tint_scale = -3000.0
x = image[..., 0]
y = image[..., 1]
##
# Convert to uv space.
denom = -x + 6*y + 1.5
mask = (numpy.ma.mask_or((x != y),(y != 0)))
# assuming rgb (3 channel)
uv = numpy.zeros(image.shape[:-1] + (2,),dtype='float64')
uv[...,0][mask] = (2.0 * x)[mask] / denom[mask]
uv[...,1][mask] = (3.0 * y)[mask] / denom[mask]
u = uv[..., 0]
v = uv[..., 1]
# Search for line pair coordinate is between.
last_dt = numpy.zeros(image.shape[:-1], dtype='float64')
last_dv = numpy.zeros_like(last_dt)
last_du = numpy.zeros_like(last_dt)
indexarray = numpy.zeros(image.shape[:-1], dtype='uint64')
best_dt = numpy.zeros(image.shape[:-1], dtype='float64')
best_dv = numpy.zeros_like(best_dt)
best_du = numpy.zeros_like(best_dt)
# initialize mask to all false
mask = (indexarray != indexarray)
for index in range(1, 31):
# Convert slope (of mired line) to delta-u and delta-v, with length 1.
du = 1.0
dv = k_temp_table[index][3]
length = math.sqrt (1.0 + dv * dv)
du = du / length
dv = dv / length
# Find delta from black body point to test coordinate.
uu = u - k_temp_table[index][1]
vv = v - k_temp_table[index][2]
# Find distance above or below the mired line.
dt = - uu * dv + vv * du # (du,dv) X (uu, vv). s.t., norm(du, -dv) = 1.0f
# If below line, we have found line pair.
newmask = (dt <= 0.0)
newmask = (newmask | (index == 30))
indexarray[newmask & (~mask)] = index
best_dt[newmask & (~mask)] = dt[newmask & (~mask)]
best_du[newmask & (~mask)] = du
best_dv[newmask & (~mask)] = dv
mask = mask | newmask
last_dt[~mask] = dt[~mask]
last_du[~mask] = du
last_dv[~mask] = dv
# Find fractional weight of two lines
best_dt[(best_dt > 0.0) & mask] = 0.0
best_dt[mask] = -best_dt[mask] # the distant to k_temp_table[idx] along slope
#f: weight to k_temp_table[index]
f = numpy.zeros(image.shape[:-1], dtype='float64')
m = (~(indexarray == 2))
f[m] = best_dt[m] / (last_dt + best_dt)[m]
# Interpolate the temperature.
mired = k_temp_table[indexarray-1,0] * f + k_temp_table[indexarray,0] * (1.0 - f)
#temperature = 1.0e6 / mired;
# Find delta from black body point to test coordinate.
uu = u - (k_temp_table[indexarray-1,1] * f + k_temp_table[indexarray,1] * (1.0 - f))
vv = v - (k_temp_table[indexarray-1,2] * f + k_temp_table[indexarray,2] * (1.0 - f))
# Interpolate vectors along slope (of mired lines).
du = best_du * (1.0 - f) + last_du * f
dv = best_dv * (1.0 - f) + last_dv * f
length = (du * du + dv * dv) ** (1/2.0)
m = (length != 0)
du[m] = du[m] / length[m]
dv[m] = dv[m] / length[m]
du[~m] = 0.0
dv[~m] = 0.0
# Find distance along slope (of mired lines).
tint = (uu * du + vv * dv) * k_tint_scale
result = numpy.zeros(image.shape[:-1] + (2,), dtype=image.dtype)
result[..., 0] = mired
result[..., 1] = tint
return result
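if __name__ == "__main__":
    # Hedged sanity checks (added illustration, not part of the original module):
    # the RGB<->HSV and sRGB<->linear conversions defined above should
    # round-trip a random linear image to within floating point error.
    demo = numpy.random.rand(4, 4, 3)
    print(numpy.allclose(hsv_to_rgb(rgb_to_hsv(demo)), demo, atol=1e-4))
    print(numpy.allclose(linear_to_srgb(srgb_to_linear(demo)), demo, atol=1e-4))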
```
#### File: ui/tool/insert.py
```python
from PySide import QtCore, QtGui
from .base import AbstractSelectionTool
from inception.image import Image
from inception.image.operation import floodfill, scale, poisson, merge, statadjust, shadow
from inception.image.analyze import estimate_scene_description
class InsertionTool(AbstractSelectionTool):
name = "insertion"
def __init__(self, *args, **kwargs):
super(InsertionTool, self).__init__(*args, **kwargs)
self.insertionThread = InsertionThread(parent=self)
self.insertionThread.finished.connect(self.insertionDone)
self._scene_description = None
self.source = None
def reset(self):
super(InsertionTool, self).reset()
self._scene_description = None
def endSelection(self, widget):
if not self.insertionThread.isRunning():
self.insertionThread.srcImage = self.source
self.insertionThread.options = self.options
self.insertionThread.destImage = Image.from_qimage(self._imageCopy)
self.insertionThread.destImage.scene_description = self._scene_description
self.insertionThread.bbox = (self._topLeftPoint.x(), self._topLeftPoint.y(), self._bottomRightPoint.x(), self._bottomRightPoint.y())
self.insertionThread.widget = widget
self.insertionThread.start()
def insertionDone(self):
finalImage = self.insertionThread.compImage
self._imageCopy = finalImage.qimage
self._scene_description = finalImage.scene_description
self.insertionThread.widget.setImage(self._imageCopy)
self._scene_description = self.insertionThread.destImage.scene_description
self.insertionThread.widget.update()
def endResize(self, widget):
pass
def endMoving(self, widget):
pass
def clearSelectionBackground(self, imageWidget):
pass
def paint(self, widget, *args, **kwargs):
pass
def setSourceImage(self, source):
self.source = source
def setOptions(self, **kwargs):
self.options = kwargs
def update(self, filepath):
self.setSourceImage(Image.from_filepath(filepath))
class InsertionThread(QtCore.QThread):
def __init__(self, parent=None):
super(InsertionThread, self).__init__(parent=parent)
self.options = None
self.srcImage = None
self.bbox = None
self.destImage = None
self.compImage = None
self.widget = None
def run(self):
# step 1: floodfill the source image
print("Matting: %s" % self.options['matteOp'])
mattingOps = self.options['matteOp']
image = self.srcImage
for opCls in mattingOps:
op = opCls(self.srcImage)
image = op.run()
# step 2: scale the source image
width = self.bbox[2] - self.bbox[0] + 1
height = self.bbox[3] - self.bbox[1] + 1
scaleOp = scale.ScaleOperation(image, width, height)
result = scaleOp.run()
# generate shadow
genshadow = None
if self.options['shadows']:
print("Generating shadow...")
# cache scene description
if self.destImage.scene_description is None:
print("Caching scene description...")
self.destImage.scene_description = estimate_scene_description(self.destImage)
genshadow = shadow.GenerateShadowOperation(result, self.destImage, offset=(self.bbox[1], self.bbox[0])).run()
print("Generated shadow %s" % genshadow)
if self.options['statAdjust']:
result = statadjust.StatAdjustOperation(result, self.destImage, offset=(self.bbox[1], self.bbox[0])).run()
# step 3: poisson blend/merge with the dest image
print("Merge: %s" % self.options['mergeOp'])
if issubclass(self.options['mergeOp'], merge.MergeOperation):
args = [[self.destImage, result]]
offsets=[(0,0),(self.bbox[1], self.bbox[0])]
if self.options['shadows']:
args[0].insert(1, genshadow)
offsets.insert(1, (0,0))
kwargs = dict(offsets=offsets)
else:
# TODO: support shadows (will require poisson not to merge itself or similar)
args = [result, self.destImage]
kwargs = dict(offset=(self.bbox[1], self.bbox[0]))
op = self.options['mergeOp'](*args, **kwargs)
op.run()
self.compImage = op.opimage
```
|
{
"source": "Jerdak/opengl_tutorials_python",
"score": 3
}
|
#### File: Jerdak/opengl_tutorials_python/tutorial9.py
```python
from __future__ import print_function
from OpenGL.GL import *
from OpenGL.GL.ARB import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
from OpenGL.GLUT.special import *
from OpenGL.GL.shaders import *
from glew_wish import *
from csgl import *
from PIL.Image import open as pil_open
import texture as textureutils
import common
import glfw
import sys
import os
import controls
import objloader
import vboindexer
# Global window
window = None
null = c_void_p(0)
def opengl_init():
global window
# Initialize the library
if not glfw.init():
print("Failed to initialize GLFW\n",file=sys.stderr)
return False
    # Set window hints, then open the window and create its OpenGL context
    # (hints must be set before glfw.create_window for them to take effect)
    glfw.window_hint(glfw.SAMPLES, 4)
    glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 3)
    glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 3)
    glfw.window_hint(glfw.OPENGL_FORWARD_COMPAT, GL_TRUE)
    glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE)
    window = glfw.create_window(1024, 768, "Tutorial 09", None, None) # (in the accompanying source code this variable will be global)
if not window:
print("Failed to open GLFW window. If you have an Intel GPU, they are not 3.3 compatible. Try the 2.1 version of the tutorials.\n",file=sys.stderr)
glfw.terminate()
return False
# Initialize GLEW
glfw.make_context_current(window)
glewExperimental = True
# GLEW is a framework for testing extension availability. Please see tutorial notes for
# more information including why can remove this code.a
if glewInit() != GLEW_OK:
print("Failed to initialize GLEW\n",file=stderropen.sys);
return False
return True
def c_type_fill(data,data_type):
rows = len(data)
cols = len(data[0])
t = rows * (cols * data_type)
tmp = t()
for r in range(rows):
for c in range(cols):
tmp[r][c] = data[r][c]
return tmp
def c_type_fill_1D(data,data_type):
rows = len(data)
t = rows * data_type
tmp = t()
for r in range(rows):
tmp[r] = data[r]
return tmp
def main():
# Initialize GLFW and open a window
if not opengl_init():
return
# Enable key events
glfw.set_input_mode(window,glfw.STICKY_KEYS,GL_TRUE)
glfw.set_cursor_pos(window, 1024/2, 768/2)
# Set opengl clear color to something other than red (color used by the fragment shader)
glClearColor(0.0,0.0,0.4,0.0)
# Enable depth test
glEnable(GL_DEPTH_TEST)
# Accept fragment if it closer to the camera than the former one
glDepthFunc(GL_LESS)
# Cull triangles which normal is not towards the camera
glEnable(GL_CULL_FACE)
vertex_array_id = glGenVertexArrays(1)
glBindVertexArray( vertex_array_id )
# Create and compile our GLSL program from the shaders
program_id = common.LoadShaders( ".\\shaders\\Tutorial9\\StandardShading.vertexshader",
".\\shaders\\Tutorial9\\StandardShading.fragmentshader" )
# Get a handle for our "MVP" uniform
matrix_id = glGetUniformLocation(program_id, "MVP")
view_matrix_id = glGetUniformLocation(program_id, "V")
model_matrix_id = glGetUniformLocation(program_id, "M")
# Load the texture
texture = textureutils.load_image(".\\content\\uvmap_suzanne.bmp")
# Get a handle for our "myTextureSampler" uniform
texture_id = glGetUniformLocation(program_id, "myTextureSampler")
# Read our OBJ file
vertices,faces,uvs,normals,colors = objloader.load(".\\content\\suzanne.obj")
vertex_data,uv_data,normal_data = objloader.process_obj( vertices,faces,uvs,normals,colors)
# Our OBJ loader uses Python lists, convert to ctype arrays before sending to OpenGL
vertex_data = objloader.generate_2d_ctypes(vertex_data)
uv_data = objloader.generate_2d_ctypes(uv_data)
normal_data = objloader.generate_2d_ctypes(normal_data)
indexed_vertices, indexed_uvs, indexed_normals, indices = vboindexer.indexVBO(vertex_data,uv_data,normal_data)
indexed_vertices = c_type_fill(indexed_vertices,GLfloat)
indexed_uvs = c_type_fill(indexed_uvs,GLfloat)
indexed_normals = c_type_fill(indexed_normals,GLfloat)
indices = c_type_fill_1D(indices,GLushort)
# Load OBJ in to a VBO
vertex_buffer = glGenBuffers(1);
glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer)
glBufferData(GL_ARRAY_BUFFER, len(indexed_vertices) * 4 * 3, indexed_vertices, GL_STATIC_DRAW)
uv_buffer = glGenBuffers(1)
glBindBuffer(GL_ARRAY_BUFFER, uv_buffer)
glBufferData(GL_ARRAY_BUFFER, len(indexed_uvs) * 4 * 2, indexed_uvs, GL_STATIC_DRAW)
normal_buffer = glGenBuffers(1)
glBindBuffer(GL_ARRAY_BUFFER, normal_buffer)
glBufferData(GL_ARRAY_BUFFER, len(indexed_normals) * 4 * 3, indexed_normals, GL_STATIC_DRAW)
# Generate a buffer for the indices as well
elementbuffer = glGenBuffers(1)
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, elementbuffer)
glBufferData(GL_ELEMENT_ARRAY_BUFFER, len(indices) * 2, indices , GL_STATIC_DRAW);
# vsync and glfw do not play nice. when vsync is enabled mouse movement is jittery.
common.disable_vsyc()
# Get a handle for our "LightPosition" uniform
glUseProgram(program_id);
light_id = glGetUniformLocation(program_id, "LightPosition_worldspace");
last_time = glfw.get_time()
frames = 0
while glfw.get_key(window,glfw.KEY_ESCAPE) != glfw.PRESS and not glfw.window_should_close(window):
glClear(GL_COLOR_BUFFER_BIT| GL_DEPTH_BUFFER_BIT)
current_time = glfw.get_time()
if current_time - last_time >= 1.0:
glfw.set_window_title(window,"Tutorial 9. FPS: %d"%(frames))
frames = 0
last_time = current_time
glUseProgram(program_id)
controls.computeMatricesFromInputs(window)
ProjectionMatrix = controls.getProjectionMatrix();
ViewMatrix = controls.getViewMatrix();
ModelMatrix = mat4.identity();
mvp = ProjectionMatrix * ViewMatrix * ModelMatrix;
# Send our transformation to the currently bound shader,
# in the "MVP" uniform
glUniformMatrix4fv(matrix_id, 1, GL_FALSE,mvp.data)
glUniformMatrix4fv(model_matrix_id, 1, GL_FALSE, ModelMatrix.data);
glUniformMatrix4fv(view_matrix_id, 1, GL_FALSE, ViewMatrix.data);
lightPos = vec3(4,4,4)
glUniform3f(light_id, lightPos.x, lightPos.y, lightPos.z)
# Bind our texture in Texture Unit 0
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texture);
# Set our "myTextureSampler" sampler to user Texture Unit 0
glUniform1i(texture_id, 0);
#1rst attribute buffer : vertices
glEnableVertexAttribArray(0)
glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer);
glVertexAttribPointer(
0, # attribute 0. No particular reason for 0, but must match the layout in the shader.
3, # len(vertex_data)
GL_FLOAT, # type
            GL_FALSE, # normalized?
0, # stride
null # array buffer offset (c_type == void*)
)
# 2nd attribute buffer : colors
glEnableVertexAttribArray(1)
glBindBuffer(GL_ARRAY_BUFFER, uv_buffer);
glVertexAttribPointer(
1, # attribute 1. No particular reason for 1, but must match the layout in the shader.
2, # len(vertex_data)
GL_FLOAT, # type
            GL_FALSE, # normalized?
0, # stride
null # array buffer offset (c_type == void*)
)
# 3rd attribute buffer : normals
glEnableVertexAttribArray(2);
glBindBuffer(GL_ARRAY_BUFFER, normal_buffer);
glVertexAttribPointer(
2, # attribute
3, # size
GL_FLOAT, # type
            GL_FALSE, # normalized?
0, # stride
null # array buffer offset (c_type == void*)
)
# Draw the triangles, vertex data now contains individual vertices
# so use array length
# glDrawArrays(GL_TRIANGLES, 0, len(vertex_data))
# Index buffer
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, elementbuffer)
# Draw the triangles !
glDrawElements(
GL_TRIANGLES, # mode
len(indices), # count
GL_UNSIGNED_SHORT, # type
null # element array buffer offset
)
# Not strictly necessary because we only have
glDisableVertexAttribArray(0)
glDisableVertexAttribArray(1)
glDisableVertexAttribArray(2)
# Swap front and back buffers
glfw.swap_buffers(window)
# Poll for and process events
glfw.poll_events()
frames += 1
    # !Note brackets around vertex_buffer and uv_buffer.
# glDeleteBuffers expects a list of buffers to delete
glDeleteBuffers(1, [vertex_buffer])
glDeleteBuffers(1, [uv_buffer])
glDeleteBuffers(1, [normal_buffer])
glDeleteProgram(program_id)
glDeleteTextures([texture_id])
glDeleteVertexArrays(1, [vertex_array_id])
glfw.terminate()
if __name__ == "__main__":
main()
```
|
{
"source": "jerdavies/opencv-play",
"score": 4
}
|
#### File: opencv-play/OpenCV-play/Transformations.py
```python
import cv2 as cv # self-reminder to first open VScode from anaconda prompt
import numpy as np
# method that takes in path to image and returns image as a matrix of pixels
img = cv.imread('images/147.jpg')
# display image as new window
cv.imshow('Testphoto', img)
def translate(img, x, y):
"""Translate image by x (+ive is right) and y (+ive is down) pixels.
@type img: image
@type x, y: int
@rtype: image"""
transMat = np.float32([[1, 0, x], [0, 1, y]]) # transition matrix
dimensions = (img.shape[1], img.shape[0]) # width, height
return cv.warpAffine(img, transMat, dimensions)
# Translate
translated = translate(img, 100, 100)
cv.imshow('Translated', translated)
def rotate(img, angle, rotPoint=None):
"""Rotate image by given angle (+ive is counterclockwise).
@type img: image
@type angle: float
@rytpe: image"""
(height, width) = img.shape[:2]
if rotPoint is None:
rotPoint = (width//2, height//2)
rotMat = cv.getRotationMatrix2D(rotPoint, angle, 1.0)
dimensions = (width, height)
return cv.warpAffine(img, rotMat, dimensions)
# Rotate
rotated = rotate(img, 45)
cv.imshow('Rotated', rotated)
# Flip
flip = cv.flip(img, 0) # 0 =vert flip; 1=hznt flip
cv.imshow('Flip', flip)
# Crop
cropped = img[200:400, 300:400]
cv.imshow('Cropped', cropped)
# wait indefinitely until a key is pressed
cv.waitKey(0)
```
|
{
"source": "jerdin/BW_NavmeshReader",
"score": 3
}
|
#### File: jerdin/BW_NavmeshReader/NavMeshReader.py
```python
import struct
class NavMeshPolygon(object):
def __init__( self, data ):
self.elevation=(data[0],data[1])
self.edges=[]
self.edgesCount=data[2]
# End edgesCount function
# End class NavMeshPolygon
class NavMeshReader(object):
"""
Class reading NavMesh data from buffer
data=NavMeshReader( bytes ) # get whole data from buffer
poly=data.polygons[5] # get polygon
    (x,y,adjacentIndex)=poly.edges[0] # get edge vertex
"""
def __init__( self, inp, bufLen=-1 ):
self.fmt = '=ffL'
self.fmtHeader = '=ffLL'
self.offset = 0
self.numPolys=0
self.polygons=[]
maxOffset=len( inp ) if bufLen<0 else bufLen
self.parse( inp )
        while self.offset<maxOffset: self.parse( inp ) # keep parsing while more data remains
#End __init__ function
def parse( self, inp ):
(header,girth,numPolys,numVerts)=self.readBlock(inp, self.fmtHeader)
for i in range(numPolys):
data=self.readBlock(inp, self.fmt)
self.polygons.append(NavMeshPolygon(data))
for poly in self.polygons[self.numPolys:]:
for i in range( poly.edgesCount ):
edge=self.readBlock(inp, self.fmt)
poly.edges.append(edge)
self.numPolys += numPolys
# End parse function
def readBlock( self, bytes, fmt ):
structSize = struct.calcsize(fmt)
data = struct.unpack(fmt, bytes[self.offset:self.offset+structSize])
self.offset += structSize
return data
# End readBlock function
# End class navMeshReader
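if __name__ == "__main__":
    # Hedged usage sketch (added illustration, not part of the original file):
    # build a tiny synthetic buffer holding one triangular polygon and parse it.
    buf = struct.pack('=ffLL', 0.0, 1.0, 1, 3)  # header, girth, 1 polygon, 3 verts
    buf += struct.pack('=ffL', 10.0, 12.0, 3)   # polygon: elevation pair, 3 edges
    buf += struct.pack('=ffL', 0.0, 0.0, 0)     # edge vertices (x, y, adjacentIndex)
    buf += struct.pack('=ffL', 1.0, 0.0, 0)
    buf += struct.pack('=ffL', 0.0, 1.0, 0)
    mesh = NavMeshReader(buf)
    print(mesh.numPolys, mesh.polygons[0].elevation, mesh.polygons[0].edges[0])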
```
|
{
"source": "jerdna-regeiz/splitmind",
"score": 4
}
|
#### File: splitmind/splitmind/mind.py
```python
from .splitter.tmux import Tmux
from .thinker.pwndbg import Pwndbg
class Mind():
"""A builder to create a splitmind.
It splits always on the last created split if no 'of' is given or an other split is selected.
To split the original starting point use select(None) or use an 'of' which is not defined yet.
Further kwargs are always passed as is the the underlying splitter to be able to have splitter
specific additional functionality. Parameters not consumed by the splitter are passed as split
settings to the thinker
"""
def __init__(self, splitter=Tmux, thinker=Pwndbg):
if callable(splitter):
splitter = splitter()
if callable(thinker):
thinker = thinker()
self.splitter = splitter
self.thinker = thinker
self.last = None
def left (self, *args, of=None, display=None, **kwargs):
"""Creates a split left of the current split.
:param str|split of : use this split instead of current
:param str display : the section to be displayed here
:param various args : further args are passed to the splitting cmd
:param dict kwargs : further keyword args are passed to the splitter method"""
self.last = self.splitter.left(*args, of=of or self.last, display=display, **kwargs)
return self
def right(self, *args, of=None, display=None, **kwargs):
"""Creates a split right of the current split.
:param str|split of : use this split instead of current
:param str display : the section to be displayed here
:param various args : further args are passed to the splitting cmd
:param dict kwargs : further keyword args are passed to the splitter method"""
self.last = self.splitter.right(*args, of=of or self.last, display=display, **kwargs)
return self
def above(self, *args, of=None, display=None, **kwargs):
"""Creates a split above of the current split.
:param str|split of : use this split instead of current
:param str display : the section to be displayed here
:param various args : further args are passed to the splitting cmd
:param dict kwargs : further keyword args are passed to the splitter method"""
self.last = self.splitter.above(*args, of=of or self.last, display=display, **kwargs)
return self
def below(self, *args, of=None, display=None, **kwargs):
"""Creates a split below of the current split.
:param str|split of : use this split instead of current
:param str display : the section to be displayed here
:param various args : further args are passed to the splitting cmd
:param dict kwargs : further keyword args are passed to the splitter method"""
self.last = self.splitter.below(*args, of=of or self.last, display=display, **kwargs)
return self
def show(self, display, on=None, **kwargs):
"""Does not create a split but tells to display given section on some already created split.
:param str|split on : which split to be used
:param str display : the section to be displayed here
:param dict kwargs : further keyword args are passed to the splitter method"""
self.last = self.splitter.show(on=on or self.last, display=display, **kwargs)
return self
def select(self, display):
"""Selects the given display to continue from there.
Use None for the main split"""
if display is None:
self.last = None
else:
self.last = self.splitter.get(display)
return self
def tell_splitter(self, target=None, **kwargs):
"""Tells the splitter to configure according to the passed keyword arguments.
Which arguments are available and what happens entirely depends on the implementation of the
splitter"""
if target is None:
target = self.last
self.splitter.do(target=target, **kwargs)
return self
def build(self, **kwargs):
"""Builds the splitmind, by telling the thinker where to put his thoughts
:param dict kwagrs : passed to thinker setup to applie thinker specific value
"""
self.splitter.finish(**kwargs)
self.thinker.setup(self.splitter.splits(), **kwargs)
```
```
#### File: splitmind/splitmind/models.py
```python
from abc import ABC, abstractmethod
from collections import namedtuple
class Split(ABC, namedtuple('_Split', ['id','tty', 'display', 'settings'])):
"""Represents a split capable of displaying information.
    Must be copyable without side effects"""
@abstractmethod
def size(self):
pass
```
|
{
"source": "jerdonegan/bus-factor",
"score": 3
}
|
#### File: jerdonegan/bus-factor/bus_class.py
```python
import os
import sys
import git
from git import Repo
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import PercentFormatter
class BusFactor:
def __init__(self, repo_url, critical_threshold=.5, path=None):
self.repo_url = repo_url
self._critical_threshold = critical_threshold
self.path = path
self._create_path()
self._clone_repo()
self._get_files()
self._get_authors()
self._create_dataframe()
self._get_critical_contributers()
self._plot_busfactor()
self._plot_critical_contributors()
@property
def critical_threshold(self):
"""
View the critical threshold.
"""
return self._critical_threshold
@critical_threshold.setter
def critical_threshold(self, ct):
"""
Set the critical Threshold.
Input:
ct: (Float) Critical Threshold between 0 and 1
"""
        msg = 'Critical threshold should be a float between 0 and 1'
assert (ct >= 0) & (ct <= 1), msg
self._critical_threshold = ct
self._get_critical_contributers()
self._plot_busfactor()
self._plot_critical_contributors()
def _create_path(self):
"""
        Create a folder for the git repo. If path is not set, it defaults to
        the user's home folder.
"""
if self.path is None:
self.path = os.path.expanduser(f'~/')
self.git_name = self.repo_url.rsplit('/',1)[1]
self.path = os.path.join(self.path, self.git_name)
if not os.path.exists(self.path):
os.mkdir(self.path)
def _clone_repo(self):
"""
Clone the repo.
        If the repo already exists at path, the latest changes are pulled.
        Otherwise the master branch is cloned.
"""
try:
self.repo = Repo(self.path)
self.repo.git.checkout('HEAD', force=True)
self.repo.remotes.origin.pull()
return f'Repo is in {self.path}'
except:# [git.exc.NoSuchPathError, git.exc.InvalidGitRepositoryError]:
self.repo = Repo.clone_from(self.repo_url, self.path, branch='master')
return f'Repo Cloned to {self.path}'
def _get_files(self):
"""
Creates a list of files in the repo
"""
self._files = self.repo.git.execute(
'git ls-tree --full-tree -r --name-only HEAD'
).split('\n')
def _get_authors(self):
"""
Loop through files and get authors and number of lines by each author
        for each file. Creates a dictionary of authors and their line counts.
"""
def count_lines(lines):
# Skip blank lines
used_lines = [l for l in lines if l.strip() != '']
return len(used_lines)
self.authors = {}
for file in self._files:
for commit, lines in self.repo.blame('HEAD', file):
if commit.author.name in self.authors.keys():
self.authors[commit.author.name] += count_lines(lines)
else:
self.authors[commit.author.name] = count_lines(lines)
def _create_dataframe(self):
"""
        Create a dataframe of authors and lines from the dictionary.
        The dataframe is sorted and extra columns are added.
"""
df = pd.DataFrame.from_dict(
self.authors,
orient='index'
).reset_index()
df.columns = ['author', 'lines']
df = df.reset_index(drop=True).sort_values(
'lines',
ascending=False
)
df['total_line_count'] = df.lines.cumsum()
df['author_count'] = list(range(1, len(df)+1))
df['total_line_percent'] = df.total_line_count/df.lines.sum()
df['line_percent'] = df.lines/df.lines.sum()
self.authors_df = df.reset_index(drop=True)
def _get_critical_contributers(self):
"""
        Find all the critical contributors.
        Critical contributors are the minimum number of authors needed to
        maintain the repo.
"""
self.critical_contributers_df = self.authors_df[
self.authors_df.total_line_percent < self._critical_threshold
]
# Returns the author with the most lines if the dataframe is empty
if self.critical_contributers_df.empty:
self.critical_contributers_df = pd.DataFrame(
self.authors_df.iloc[0, :]
).T
def _get_path(self, path):
"""
If a path is supplied, supplies path is returned else cwd.
"""
if path:
file_path = path
else:
file_path = os.getcwd()
return file_path
def _plot_busfactor(self, path=None):
"""
Create a plot of the bus factor.
"""
ct_label = f'{self.critical_threshold*100:.0f}% Critical Threshold'
cc = self.critical_contributers_df.shape[0]
title = self.git_name.replace('-', ' ').title()
plt.style.use('default')
fig, ax = plt.subplots(figsize=(12,8))
self.authors_df.plot('author_count', 'total_line_percent',
ax=ax, label='Total Line Percentage')
plt.xlabel('Number of Authors')
plt.ylabel('Total Line Percent')
plt.title(title)
ax.yaxis.set_major_formatter(PercentFormatter(xmax=1))
ax.yaxis.grid(True)
xlim = self.authors_df.author_count.max()*.05
ax.set_xlim(-xlim, self.authors_df.author_count.max()+xlim)
ax.set_ylim(0,1.05)
ticks =list(np.arange(0,1.01,0.1).round(1))
ax.set_yticks(ticks)
ax.axhline(self.critical_threshold, color='red', linestyle='--', label=ct_label)
ax.plot([cc], [self.critical_threshold], 'o', color='red')
ax.annotate(
cc,
xy = [cc, self.critical_threshold],
xytext = [cc, self.critical_threshold+.01],
color='red',
size=15,
horizontalalignment='right',
verticalalignment='bottom',
)
ax.legend()
self.bus_factor_plot = fig
plt.close()
def save_bus_factor_plot(self, path=None):
"""
Save the bus factor plot
"""
file_path = self._get_path(path)
gn = self.git_name.replace('-', '_')
fp = f'{file_path}/bus_factor_{gn}.png'
self.bus_factor_plot.savefig(fp)
return f'Bus Factor Plot saved as: {fp}'
def _plot_critical_contributors(self, path=None):
"""
        Create the critical contributors plot.
"""
cc = self.critical_contributers_df.shape[0]
plt.style.use('default')
fig, ax = plt.subplots(figsize=(12,8))
title = self.git_name.replace('-', ' ').title()
        title = f'{title} - Top {cc} contributors'
self.critical_contributers_df.plot.bar(
'author', 'line_percent',
ax=ax, label='Line Percentage per Author',
color='orange'
)
ax.yaxis.set_major_formatter(PercentFormatter(xmax=1))
ax.yaxis.grid(True)
plt.xticks(rotation=30)
plt.xlabel('Author')
plt.ylabel('Total Percent')
plt.title(title)
plt.tight_layout()
self.critical_contributers_fig = fig
plt.close()
def save_critical_plot(self, path=None):
"""
Save the critical contributors plot
"""
file_path = self._get_path(path)
gn = self.git_name.replace('-', '_')
fp = f'{file_path}/critical_contributers_{gn}.png'
self.critical_contributers_fig.savefig(fp)
return f'Critical Contributers Plot saved as: {fp}'
```
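For reference, a usage sketch of the analyzer above. The class name, module name, and constructor arguments are not shown in this excerpt, so `RepoBusFactor`, `bus_factor`, and the constructor parameters are hypothetical placeholders; only the public methods and the `critical_contributers_df` attribute come from the code shown, and it is assumed the constructor runs the analysis and builds the plots.
```python
# Hypothetical usage sketch: module, class name and constructor signature are assumptions.
from bus_factor import RepoBusFactor  # assumed module and class name

analyzer = RepoBusFactor(".", critical_threshold=0.8)  # assumed constructor signature
print(analyzer.critical_contributers_df)   # minimal set of authors covering the threshold
print(analyzer.save_bus_factor_plot())     # writes bus_factor_<repo>.png to the cwd
print(analyzer.save_critical_plot("/tmp")) # writes critical_contributers_<repo>.png
```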
|
{
"source": "jerdra/boutiques",
"score": 2
}
|
#### File: python/boutiques/importer.py
```python
from argparse import ArgumentParser
from jsonschema import ValidationError
from boutiques.validator import validate_descriptor
from boutiques.localExec import loadJson
from boutiques.logger import raise_error
import boutiques
import yaml
import json
import os
import os.path as op
class ImportError(Exception):
pass
class Importer():
def __init__(self, input_descriptor, output_descriptor,
input_invocation, output_invocation):
self.input_descriptor = input_descriptor
self.output_descriptor = output_descriptor
self.input_invocation = input_invocation
self.output_invocation = output_invocation
def upgrade_04(self):
"""
Differences between 0.4 and current (0.5):
-schema version (obv)
-singularity should now be represented same as docker
-walltime should be part of suggested_resources structure
I.e.
"schema-version": "0.4",
...... becomes.....
"schema-version": "0.5",
I.e.
"container-image": {
"type": "singularity",
"url": "shub://gkiar/ndmg-cbrain:master"
},
...... becomes.....
"container-image": {
"type": "singularity",
"image": "gkiar/ndmg-cbrain:master",
"index": "shub://",
},
I.e.
"walltime-estimate": 3600,
...... becomes.....
"suggested-resources": {
"walltime-estimate": 3600
},
"""
descriptor = loadJson(self.input_descriptor)
if descriptor["schema-version"] != "0.4":
raise_error(ImportError, "The input descriptor must have "
"'schema-version'=0.4")
descriptor["schema-version"] = "0.5"
if "container-image" in descriptor.keys():
if "singularity" == descriptor["container-image"]["type"]:
url = descriptor["container-image"]["url"]
img = url.split("://")
if len(img) == 1:
descriptor["container-image"]["image"] = img[0]
elif len(img) == 2:
descriptor["container-image"]["image"] = img[1]
descriptor["container-image"]["index"] = img[0] + "://"
del descriptor["container-image"]["url"]
elif ("docker" == descriptor["container-image"]["type"] and
descriptor["container-image"].get("index")):
url = descriptor["container-image"]["index"].split("://")[-1]
descriptor["container-image"]["index"] = url
if "walltime-estimate" in descriptor.keys():
descriptor["suggested-resources"] =\
{"walltime-estimate": descriptor["walltime-estimate"]}
del descriptor["walltime-estimate"]
with open(self.output_descriptor, 'w') as fhandle:
fhandle.write(json.dumps(descriptor, indent=4, sort_keys=True))
validate_descriptor(self.output_descriptor)
def get_entry_point(self, input_descriptor):
entrypoint = None
with open(os.path.join(self.input_descriptor, "Dockerfile")) as f:
content = f.readlines()
for line in content:
split = line.split()
if len(split) >= 2 and split[0] == "ENTRYPOINT":
entrypoint = split[1].strip("[]\"")
return entrypoint
def import_bids(self):
path, fil = os.path.split(__file__)
template_file = os.path.join(path, "templates", "bids-app.json")
with open(template_file) as f:
template_string = f.read()
errors = []
app_name = os.path.basename(os.path.abspath(self.input_descriptor))
version = 'unknown'
version_file = os.path.join(self.input_descriptor, "version")
if os.path.exists(version_file):
with open(version_file, "r") as f:
version = f.read().strip()
git_repo = "https://github.com/BIDS-Apps/"+app_name
entrypoint = self.get_entry_point(self.input_descriptor)
container_image = "bids/"+app_name
analysis_types = "participant\", \"group\", \"session"
if not entrypoint:
errors.append("No entrypoint found in container.")
if len(errors):
raise_error(ValidationError, "Invalid descriptor:\n"+"\n".
join(errors))
template_string = template_string.replace("@@APP_NAME@@", app_name)
template_string = template_string.replace("@@VERSION@@", version)
template_string = template_string.replace("@@GIT_REPO_URL@@", git_repo)
template_string = template_string.replace("@@DOCKER_ENTRYPOINT@@",
entrypoint)
template_string = template_string.replace("@@CONTAINER_IMAGE@@",
container_image)
template_string = template_string.replace("@@ANALYSIS_TYPES@@",
analysis_types)
with open(self.output_descriptor, "w") as f:
f.write(template_string)
def import_cwl(self):
# Read the CWL descriptor
with open(self.input_descriptor, 'r') as f:
cwl_desc = yaml.load(f)
# validate yaml descriptor?
bout_desc = {}
# Command line
if cwl_desc.get('baseCommand') is None:
raise_error(ImportError, 'Cannot find baseCommand attribute, '
'perhaps you passed a workflow document, '
'this is not supported')
if type(cwl_desc['baseCommand']) is list:
command_line = ""
for i in cwl_desc['baseCommand']:
command_line += i+" "
else:
command_line = cwl_desc['baseCommand']
if cwl_desc.get('arguments'):
for i in cwl_desc['arguments']:
if type(i) is dict:
raise_error(ImportError, 'Dict arguments not supported.')
if "$(runtime." in i:
raise_error(ImportError, 'Runtime parameters '
' are not supported:'
" "+i)
command_line += i+" "
boutiques_inputs = []
# Inputs
def position(x):
if (type(x) is dict and
x.get('inputBinding') and
x['inputBinding'].get('position')):
return x['inputBinding']['position']
return 0
sorted_inputs = sorted(cwl_desc['inputs'],
key=lambda x: (position(cwl_desc['inputs'][x])))
# Mapping between CWL and Boutiques input types
boutiques_types = {
'string': 'String',
'File': 'File',
'File?': 'File',
'boolean': 'Flag',
'int': 'Number'
# float type?
}
for cwl_input in sorted_inputs:
bout_input = {}
# Easy stuff
bout_input['id'] = cwl_input # perhaps 'idify' that
cwl_in_obj = cwl_desc['inputs'][cwl_input]
if type(cwl_in_obj) is dict and cwl_in_obj.get('name'):
bout_input['name'] = cwl_in_obj['name']
else:
bout_input['name'] = cwl_input
value_key = "[{0}]".format(cwl_input.upper())
if (type(cwl_in_obj) is dict and
cwl_in_obj.get('inputBinding') is not None):
command_line += " "+value_key
bout_input['value-key'] = value_key
# CWL type parsing
if type(cwl_in_obj) is dict:
cwl_type = cwl_in_obj['type']
else:
cwl_type = 'string'
if type(cwl_type) is dict: # It must be an array
if cwl_type['type'] != "array":
raise_error(ImportError, "Only 1-level nested "
"types of type"
" 'array' are supported (CWL input: {0})".
format(cwl_input))
if cwl_type.get('inputBinding') is not None:
raise_error(ImportError, "Input bindings of "
"array elements "
"are not supported (CWL input: {0})".
format(cwl_input))
cwl_type = cwl_type['items']
bout_input['list'] = True
if type(cwl_type) != str:
raise_error(ImportError, "Unknown type:"
" {0}".format(str(cwl_type)))
boutiques_type = boutiques_types[cwl_type.replace("[]", "")
.replace("?", "")]
bout_input['type'] = boutiques_type
if cwl_type == 'int':
bout_input['integer'] = True
if '?' in cwl_type or boutiques_type == "Flag":
bout_input['optional'] = True
# CWL input binding
if type(cwl_in_obj) is dict:
cwl_input_binding = cwl_in_obj['inputBinding']
else:
cwl_input_binding = {}
if cwl_input_binding.get('prefix'):
bout_input['command-line-flag'] = cwl_input_binding['prefix']
if (not (cwl_input_binding.get('separate') is None) and
cwl_input_binding['separate'] is False):
bout_input['command-line-flag-separator'] = ''
boutiques_inputs.append(bout_input)
# array types
if cwl_type.endswith("[]"):
bout_input['list'] = True
if cwl_input_binding.get("itemSeparator"):
if cwl_input_binding['itemSeparator'] != ' ':
raise_error(ImportError, 'Array separators wont be '
'supported until #76 is implemented')
# Outputs
def resolve_glob(glob, boutiques_inputs):
if not glob.startswith("$"):
return glob
if not glob.startswith("$(inputs."):
raise_error(ImportError, "Unsupported reference: "+glob)
input_id = glob.replace("$(inputs.", "").replace(")", "")
for i in boutiques_inputs:
if i['id'] == input_id:
return i['value-key']
raise_error(ImportError, "Unresolved reference"
" in glob: " + glob)
boutiques_outputs = []
sorted_outputs = sorted(cwl_desc['outputs'],
key=(lambda x: cwl_desc['outputs'][x].
get('outputBinding')))
for cwl_output in sorted_outputs:
bout_output = {}
bout_output['id'] = cwl_output # perhaps 'idify' that
if cwl_desc['outputs'][cwl_output].get('name'):
bout_output['name'] = cwl_desc['outputs'][cwl_output]['name']
else:
bout_output['name'] = cwl_output
cwl_out_binding = (cwl_desc['outputs'][cwl_output].
get('outputBinding'))
if cwl_out_binding and cwl_out_binding.get('glob'):
glob = cwl_out_binding['glob']
bout_output['path-template'] = resolve_glob(glob,
boutiques_inputs)
cwl_out_obj = cwl_desc['outputs'][cwl_output]
if type(cwl_out_obj.get('type')) is dict:
if (cwl_out_obj['type'].get('type')
and cwl_out_obj['type']['type'] == 'array'):
bout_output['list'] = True
else:
raise_error(ImportError, 'Unsupported output type: '
+ cwl_output['type'])
boutiques_outputs.append(bout_output)
# Boutiques descriptors have to have at least 1 output file
if len(boutiques_outputs) == 0 or cwl_desc.get('stdout'):
stdout = cwl_desc.get('stdout') or 'stdout.txt'
command_line += " > "+stdout
boutiques_outputs.append(
{
'id': 'stdout',
'name': 'Standard output',
'path-template': 'stdout.txt'
}
)
# Mandatory boutiques fields
bout_desc['command-line'] = command_line
if cwl_desc.get("doc"):
bout_desc['description'] = (cwl_desc.get("doc").
replace(os.linesep, ''))
else:
bout_desc['description'] = "Tool imported from CWL."
bout_desc['inputs'] = boutiques_inputs
# This may not be a great idea but not sure if CWL tools have names
bout_desc['name'] = op.splitext(op.basename(self.input_descriptor))[0]
bout_desc['output-files'] = boutiques_outputs
bout_desc['schema-version'] = '0.5'
bout_desc['tool-version'] = "unknown"  # perhaps there's one in the CWL
# Hints and requirements
def parse_req(req, req_type, bout_desc):
# We could support InitialWorkDirRequirement through config files
if req_type == 'DockerRequirement':
container_image = {}
container_image['type'] = 'docker'
container_image['index'] = 'index.docker.io'
container_image['image'] = req['dockerPull']
bout_desc['container-image'] = container_image
return
if req_type == 'EnvVarRequirement':
bout_envars = []
for env_var in req['envDef']:
bout_env_var = {}
bout_env_var['name'] = env_var
bout_env_var['value'] = resolve_glob(
req['envDef'][env_var],
boutiques_inputs)
bout_envars.append(bout_env_var)
bout_desc['environment-variables'] = bout_envars
return
if req_type == 'ResourceRequirement':
suggested_resources = {}
if req.get('ramMin'):
suggested_resources['ram'] = req['ramMin']
if req.get('coresMin'):
suggested_resources['cpu-cores'] = req['coresMin']
bout_desc['suggested-resources'] = suggested_resources
return
if req_type == 'InitialWorkDirRequirement':
listing = req.get('listing')
for entry in listing:
file_name = entry.get('entryname')
assert(file_name is not None)
template = entry.get('entry')
for i in boutiques_inputs:
if i.get("value-key"):
template = template.replace("$(inputs."+i['id']+")",
i.get("value-key"))
template = template.split(os.linesep)
assert(template is not None)
name = op.splitext(file_name)[0]
boutiques_outputs.append(
{
'id': name,
'name': name,
'path-template': file_name,
'file-template': template
})
return
raise_error(ImportError, 'Unsupported requirement: '+str(req))
for key in ['requirements', 'hints']:
if(cwl_desc.get(key)):
for i in cwl_desc[key]:
parse_req(cwl_desc[key][i], i, bout_desc)
# enum types?
# Write descriptor
with open(self.output_descriptor, 'w') as f:
f.write(json.dumps(bout_desc, indent=4, sort_keys=True))
validate_descriptor(self.output_descriptor)
if self.input_invocation is None:
return
# Convert invocation
def get_input(descriptor_inputs, input_id):
for inp in descriptor_inputs:
if inp['id'] == input_id:
return inp
return False
boutiques_invocation = {}
with open(self.input_invocation, 'r') as f:
cwl_inputs = yaml.load(f)
for input_name in cwl_inputs:
if get_input(bout_desc['inputs'], input_name)['type'] != "File":
input_value = cwl_inputs[input_name]
else:
input_value = cwl_inputs[input_name]['path']
boutiques_invocation[input_name] = input_value
with open(self.output_invocation, 'w') as f:
f.write(json.dumps(boutiques_invocation, indent=4, sort_keys=True))
boutiques.invocation(self.output_descriptor,
"-i", self.output_invocation)
```
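A minimal usage sketch of the `Importer` class above, based only on the constructor and methods shown; the descriptor and invocation file names are placeholders.
```python
from boutiques.importer import Importer

# Upgrade a 0.4 descriptor to 0.5 (no invocation files are needed on this path).
Importer("tool_v04.json", "tool_v05.json", None, None).upgrade_04()

# Convert a CWL tool description to a Boutiques descriptor; passing an input
# invocation also produces and validates a Boutiques invocation.
Importer("tool.cwl", "tool.json", "job.yml", "invocation.json").import_cwl()
```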
#### File: python/boutiques/puller.py
```python
import requests
import urllib
import os
from boutiques.logger import raise_error, print_info
from boutiques.publisher import ZenodoError
try:
# Python 3
from urllib.request import urlopen
from urllib.request import urlretrieve
except ImportError:
# Python 2
from urllib2 import urlopen
from urllib import urlretrieve
class Puller():
def __init__(self, zid, verbose=False, sandbox=False):
# remove zenodo prefix
try:
self.zid = zid.split(".", 1)[1]
except IndexError:
raise_error(ZenodoError, "Zenodo ID must be prefixed by "
"'zenodo', e.g. zenodo.123456")
self.verbose = verbose
self.sandbox = sandbox
self.cache_dir = os.path.join(os.path.expanduser('~'), ".cache",
"boutiques")
self.cached_fname = os.path.join(self.cache_dir,
"zenodo-{0}.json".format(self.zid))
def pull(self):
# return cached file if it exists
if os.path.isfile(self.cached_fname):
if(self.verbose):
print_info("Found cached file at %s"
% self.cached_fname)
return self.cached_fname
from boutiques.searcher import Searcher
searcher = Searcher(self.zid, self.verbose, self.sandbox,
exact_match=True)
r = searcher.zenodo_search()
for hit in r.json()["hits"]["hits"]:
file_path = hit["files"][0]["links"]["self"]
file_name = file_path.split(os.sep)[-1]
if hit["id"] == int(self.zid):
if not os.path.exists(self.cache_dir):
os.makedirs(self.cache_dir)
if(self.verbose):
print_info("Downloading descriptor %s"
% file_name)
downloaded = urlretrieve(file_path, self.cached_fname)
print("Downloaded descriptor to " + downloaded[0])
return downloaded[0]
raise_error(ZenodoError, "Descriptor not found")
```
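A usage sketch for `Puller`; the Zenodo ID below is a placeholder, the lookup hits Zenodo over the network, and the descriptor is cached under ~/.cache/boutiques.
```python
from boutiques.puller import Puller

# The ID must carry the 'zenodo.' prefix; the numeric part is a placeholder.
cached_path = Puller("zenodo.123456", verbose=True).pull()
print(cached_path)  # e.g. ~/.cache/boutiques/zenodo-123456.json
```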
#### File: boutiques/tests/test_example2.py
```python
import os
import subprocess
from unittest import TestCase
from boutiques import __file__ as bfile
import boutiques as bosh
class TestExample2(TestCase):
def get_examples_dir(self):
return os.path.join(os.path.dirname(bfile),
"schema", "examples")
def test_example2_no_exec(self):
example2_dir = os.path.join(self.get_examples_dir(), "example2")
ret = bosh.execute("simulate",
os.path.join(example2_dir,
"example2.json"),
"-i",
os.path.join(example2_dir,
"invocation.json"))
assert(ret.stdout != ""
and ret.stderr == ""
and ret.exit_code == 0
and ret.error_message == "")
def test_example2_exec(self):
example2_dir = os.path.join(self.get_examples_dir(), "example2")
ret = bosh.execute("launch",
os.path.join(example2_dir, "example2.json"),
os.path.join(example2_dir, "invocation.json"))
print(ret)
assert(ret.stdout == ""
and ret.stderr == ""
and ret.exit_code == 0
and ret.error_message == "")
ret = bosh.execute("launch",
os.path.join(example2_dir, "example2.json"),
"-x",
os.path.join(example2_dir, "invocation.json"))
print(ret)
assert(ret.stdout == ""
and ret.stderr == ""
and ret.exit_code == 0
and ret.error_message == "")
def test_example2_no_exec_random(self):
example2_dir = os.path.join(self.get_examples_dir(), "example2")
self.assertFalse(bosh.execute("simulate",
os.path.join(example2_dir,
"example2.json")).exit_code)
```
#### File: boutiques/tests/test_no_spaces.py
```python
import os
import subprocess
import pytest
from unittest import TestCase
from boutiques import __file__ as bfile
import boutiques as bosh
class TestNoSpaces(TestCase):
def get_examples_dir(self):
return os.path.join(os.path.dirname(bfile),
"schema", "examples")
def test_no_spaces(self):
out = bosh.execute("simulate",
os.path.join(self.get_examples_dir(),
"no_spaces.json"))
assert(' ' not in out.stdout)
```
|
{
"source": "jere357/fishermansfriend",
"score": 2
}
|
#### File: fishermansfriend/openCV/fisherman2.py
```python
import sys, os
sys.path.append(os.path.join(os.getcwd(),'python/'))
import numpy as np
import pdb
import time
import pyautogui
import cv2 as cv
def detect_screenshot(template):
t1= time.time()
tempstring = 'temppic.jpg'
pyautogui.screenshot(tempstring)
w, h = template.shape[::-1]
img = cv.imread(tempstring, 0)
res = cv.matchTemplate(img, template, cv.TM_CCOEFF)
min_val, max_val, min_loc, max_loc = cv.minMaxLoc(res)
top_left = max_loc  # with cv.TM_CCOEFF the best match is at max_loc; min_loc applies to the TM_SQDIFF methods
bottom_right = (top_left[0] + w, top_left[1] + h)
"""
cv.rectangle(img,top_left, bottom_right, 255, 2)
plt.subplot(121),plt.imshow(res,cmap = 'gray')
plt.title('Matching Result'), plt.xticks([]), plt.yticks([])
plt.subplot(122),plt.imshow(img,cmap = 'gray')
plt.title('Detected Point'), plt.xticks([]), plt.yticks([])
plt.show()
"""
bobber_loc = ((top_left[0] + bottom_right[0])/2, (top_left[1] + bottom_right[1])/2)
pyautogui.moveTo(bobber_loc[0], bobber_loc[1])
t2= time.time()
#print(res)
#print("harddisk loading/saving time : {}s".format(round(t2-t1, 4)))
return bobber_loc
def calculate_distance(p1,p2):
#the distance is not square-rooted because the pixel counts are very small; the Manhattan distance below works fine
#if you play at a lower resolution you may want to use the Euclidean distance instead (commented out below)
#return np.sqrt((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2)
return (abs(p1[0]-p2[0]) + abs(p1[1]-p2[1]))
if( __name__ == '__main__'):
fish_goal = 400
distance_sensitivity = 3 #variable for detection moment -- this depends on your resolution
#wait just a lil bit
time.sleep(4)
#if you're AFK for some time your character sits down and can't cast fishing
pyautogui.press("w")
fish_counter = 0
template = cv.imread("wcbobber.png", 0)
while(fish_counter < fish_goal):
time_begin = time.time()
#begin fishing - T keybind
print("stiscen T")
pyautogui.press("t")
#wait until bobber appears on screen
time.sleep(1.4)
r = detect_screenshot(template)
if(len(r) == 0):
#some mistake occured - recast fishing
continue
last_point = (r[0],r[1])
#if a bobber is even found:
if(len(r) > 0):
#confidence = r[0][1]
last_point = (r[0],r[1])
#move cursor to bobber
pyautogui.moveTo(r[0] ,r[1])
while(True):
distance_array = [3]
time_current = time.time()
current_point = (r[0],r[1])
distance = calculate_distance(current_point, last_point)
r = detect_screenshot(template)
if(time_current - time_begin > 20):
#taking way too long, recast fishing
break
if(len(r)==0):
break
#print("confidence: {} kooridnate: {},{} distance = {}".format(confidence, round(r[0][2][0],2) ,round(r[0][2][1],2), round(distance,3)))
#bobber went into the water
if(distance > distance_sensitivity*(sum(distance_array) / len(distance_array)) and len(distance_array) > 0):
#time.sleep(abs(np.random.normal(0.7,0.15)))
print("distance: {}, treshold: {}".format(distance, distance_sensitivity*(sum(distance_array) / len(distance_array))))
distance_array.append(distance)
time.sleep(0.3)
pyautogui.click(button= "right")
print("fish caught")
break
pyautogui.moveTo(current_point)
#it would sometimes hold the right click pressed and move at the same time (I think), which would drag the camera somewhere unimportant
#this sleep waits for the previous bobber to disappear
time.sleep(3.2)
#log out once youre done
pyautogui.press("enter")
pyautogui.hotkey("shift", "7")
pyautogui.typewrite("/camp")
pyautogui.press("enter")
```
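A standalone sketch of the template-matching step above, assuming the script's directory is the working directory, the `wcbobber.png` template is present, and a display is available (pyautogui needs one). Importing `fisherman2` is safe because the fishing loop sits behind the `__main__` guard.
```python
import cv2 as cv
from fisherman2 import detect_screenshot, calculate_distance

template = cv.imread("wcbobber.png", 0)  # grayscale bobber template
loc = detect_screenshot(template)        # screenshots, matches, moves the cursor
print("bobber located at", loc)
print("Manhattan distance from the origin:", calculate_distance(loc, (0, 0)))
```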
|
{
"source": "Jere3y/alipay_dc_client",
"score": 3
}
|
#### File: alipay_dc_client/alipay_dc_client/alipay_dc_client_exception.py
```python
class AliPayException(Exception):
def __init__(self, code, message):
self.__code = code
self.__message = message
def to_unicode(self):
return "AliPayException: code:{}, message:{}".format(self.__code, self.__message)
def __str__(self):
return self.to_unicode()
def __repr__(self):
return self.to_unicode()
class AliPayValidationError(Exception):
pass
```
#### File: alipay_dc_client/alipay_dc_client/client.py
```python
import json
import logging
from datetime import datetime
import hashlib
import OpenSSL
from urllib.parse import quote_plus
from urllib.request import urlopen
from base64 import decodebytes, encodebytes
# common cryptographic algorithms
from Crypto.Hash import SHA256
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5
from alipay_dc_client.alipay_dc_client_exception import AliPayException, AliPayValidationError
SUPPORT_ALG_LIST = (
b'rsaEncryption',
b'md2WithRSAEncryption',
b'md5WithRSAEncryption',
b'sha1WithRSAEncryption',
b'sha256WithRSAEncryption',
b'sha384WithRSAEncryption',
b'sha512WithRSAEncryption'
)
def get_app_cert_sn(cert_str):
"""
Compute the SN (serial number hash) of an app certificate
"""
cert = _load_certificate(cert_str)
return _get_cert_sn_from_certificate(cert)
def get_alipay_root_cert_sn(root_cert_string):
"""
The root certificate string actually contains several certificates; the result is the SNs from _get_cert_sn_from_certificate(cert) joined with '_'
:param root_cert_string:
:return:
"""
cert_str_list = [i for i in root_cert_string.split('\n\n') if i]
certs = [_load_certificate(cert) for cert in cert_str_list]
root_cert_sn_list = []
for cert in certs:
try:
sign_alg = cert.get_signature_algorithm()
except ValueError:
continue
if sign_alg in SUPPORT_ALG_LIST:
cert_sn = _get_cert_sn_from_certificate(cert)
root_cert_sn_list.append(cert_sn)
return "_".join(root_cert_sn_list)
def _get_cert_sn_from_certificate(cert):
cert_issuer = cert.get_issuer()
name = f'CN={cert_issuer.CN},OU={cert_issuer.OU},O={cert_issuer.O},C={cert_issuer.C}'
string = name + str(cert.get_serial_number())
return hashlib.md5(string.encode()).hexdigest()
def _load_certificate(cert_str):
"""
Load a certificate with FILETYPE_PEM
:param cert_str: the certificate as a str
:return: the loaded certificate
"""
return OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_str)
def _sorted_data(data):
for k, v in data.items():
if isinstance(v, dict):
# dump dict-typed values to JSON strings
data[k] = json.dumps(v, separators=(',', ':'))
return sorted(data.items())
class AlipayDcClient:
"""
Digital certificate version of the Alipay client
"""
def __init__(
self,
appid,
app_notify_url,
app_private_key_string,
app_public_key_cert_string,
alipay_public_key_cert_string,
alipay_root_cert_string,
debug=False
):
"""
AlipayDcClient(
appid='',
app_notify_url='http://example.com',
app_private_key_string='',
app_public_key_cert_string='',
alipay_public_key_cert_string='',
alipay_root_cert_string='',
)
:param appid:
:param app_notify_url:
:param app_private_key_string:
:param app_public_key_cert_string:
:param alipay_public_key_cert_string:
:param alipay_root_cert_string:
:param debug:
"""
# appid
self._appid = str(appid)
# async notification URL
self._app_notify_url = app_notify_url
# only RSA2 is supported
self._sign_type = "RSA2"
# Alipay root certificate SN
self._alipay_root_cert_sn = get_alipay_root_cert_sn(alipay_root_cert_string)
# app public key certificate SN
self._app_cert_sn = get_app_cert_sn(app_public_key_cert_string)
# application private key
self._app_private_key = RSA.importKey(app_private_key_string)
# Alipay public key
alipay_public_key_cert = _load_certificate(alipay_public_key_cert_string)
alipay_public_key_string = OpenSSL.crypto.dump_publickey(
OpenSSL.crypto.FILETYPE_PEM, alipay_public_key_cert.get_pubkey()
).decode("utf-8")
self._alipay_public_key_string = alipay_public_key_string
self._alipay_public_key = RSA.importKey(self._alipay_public_key_string)
# debug log
if debug:
logging.basicConfig(level=logging.DEBUG)
self._gateway = "https://openapi.alipay.com/gateway.do"
def api(self, api_name, biz_content: dict = None, **kwargs):
"""
Generic interface: pass an api name and the request is generated automatically.
:param api_name: api name, e.g. alipay.trade.order.settle
:param biz_content: the biz_content parameter; omit it if the api does not use one
:param kwargs: other api parameters that do not belong inside biz_content
:return:
"""
data = self._build_request_body(api_name,
return_url=None,
notify_url=None,
biz_content=biz_content,
**kwargs)
logging.debug("请求参数=" + str(data))
response_key = f"{api_name}_response".replace(".", "_")
logging.debug("response_key=" + response_key)
return self.verified_sync_response(data, response_key)
@property
def appid(self):
return self._appid
@property
def sign_type(self):
return self._sign_type
@property
def app_private_key(self):
return self._app_private_key
@property
def alipay_public_key(self):
return self._alipay_public_key
def _sign(self, unsigned_string):
# compute the signature
key = self.app_private_key
signer = PKCS1_v1_5.new(key)
signature = signer.sign(SHA256.new(unsigned_string.encode()))
# base64-encode, convert to a unicode string and strip newlines
sign = encodebytes(signature).decode().replace("\n", "")
return sign
def _build_request_body(self,
method: str,
return_url: str = None,
notify_url: str = None,
biz_content: dict = None,
**kwargs):
data = {
"app_id": self._appid,
"method": method,
"charset": "utf-8",
"sign_type": self._sign_type,
"timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
"version": "1.0",
}
if biz_content is not None:
data["biz_content"] = biz_content
data.update(kwargs)
if return_url is not None:
data["return_url"] = return_url
if method in (
"alipay.trade.app.pay",
"alipay.trade.wap.pay",
"alipay.trade.page.pay",
"alipay.trade.pay",
"alipay.trade.precreate",
) and (notify_url or self._app_notify_url):
data["notify_url"] = notify_url or self._app_notify_url
data["app_cert_sn"] = self.app_cert_sn
data["alipay_root_cert_sn"] = self.alipay_root_cert_sn
return data
def sign_data(self, data):
data.pop("sign", None)
ordered_items = _sorted_data(data)
raw_string = "&".join("{}={}".format(k, v) for k, v in ordered_items)
sign = self._sign(raw_string)
unquoted_items = ordered_items + [('sign', sign)]
signed_string = "&".join("{}={}".format(k, quote_plus(v)) for k, v in unquoted_items)
return signed_string
def _verify(self, raw_content, signature):
"""
Verify a signature.
:param raw_content: the string that was signed
:param signature: the signature to verify
:return:
"""
key = self.alipay_public_key
signer = PKCS1_v1_5.new(key)
digest = SHA256.new()
digest.update(raw_content.encode())
return bool(signer.verify(digest, decodebytes(signature.encode())))
def verify(self, data, signature):
if "sign_type" in data:
sign_type = data.pop("sign_type")
if sign_type != self._sign_type:
raise AliPayException(None, "Unknown sign type: {}".format(sign_type))
# build the sorted key=value string
unsigned_items = _sorted_data(data)
message = "&".join(u"{}={}".format(k, v) for k, v in unsigned_items)
return self._verify(message, signature)
def verified_sync_response(self, data, response_type):
url = self._gateway + "?" + self.sign_data(data)
logging.debug("请求地址=" + url)
raw_string = urlopen(url).read().decode()
logging.debug("支付宝返回数据=" + raw_string)
return self._verify_alipay_response(raw_string, response_type)
def _verify_alipay_response(self, raw_string, response_type):
"""
return response if verification succeeded, raise exception if not
As per issue #69, json.loads(raw_string)[response_type] should not be returned directly;
use json.loads(plain_content) instead.
A failed response looks like this:
{
"alipay_trade_query_response": {
"sub_code": "isv.invalid-app-id",
"code": "40002",
"sub_msg": "无效的AppID参数",
"msg": "Invalid Arguments"
}
}
"""
response = json.loads(raw_string)
# the response does not contain a sign field
if "sign" not in response.keys():
result = response[response_type]
raise AliPayException(
code=result.get("code", "0"),
message=raw_string
)
sign = response["sign"]
if response_type not in response.keys():
# sometimes the returned key is error_response
plain_content = self._get_signed_string(raw_string, "error_response")
else:
plain_content = self._get_signed_string(raw_string, response_type)
logging.debug("待签名字符串为=" + plain_content)
if not self._verify(plain_content, sign):
raise AliPayValidationError
return json.loads(plain_content)
def _get_signed_string(self, raw_string: str, response_key):
"""
https://docs.open.alipay.com/200/106120
"""
# brace matching: push on '{', pop on '}'
# when the stack is empty, the braces are balanced and the signed span is complete
stack = []
logging.debug(f"_get_signed_string-->response_key={response_key}")
start_index = raw_string.find("{", raw_string.find(response_key))
logging.debug(f"_get_signed_string-->start_index={start_index}")
end_index = start_index
for i, char in enumerate(raw_string[start_index:], start_index):
logging.debug(f"_get_signed_string-->for={i}->{char}")
if char == "{":
stack.append("{")
elif char == "}":
stack.pop()
if len(stack) == 0:
end_index = i + 1
break
logging.debug(f"_get_signed_string-->end_index={end_index}")
signed_str = raw_string[start_index:end_index]
return signed_str
@property
def app_cert_sn(self):
return getattr(self, "_app_cert_sn")
@property
def alipay_root_cert_sn(self):
return getattr(self, "_alipay_root_cert_sn")
```
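A usage sketch for the client above; the app id and certificate file names are placeholders, the import path follows the file header, and a real call needs valid credentials plus network access to the Alipay gateway.
```python
from alipay_dc_client.client import AlipayDcClient

def read(path):
    with open(path) as f:
        return f.read()

client = AlipayDcClient(
    appid="2021000000000000",                      # placeholder
    app_notify_url="https://example.com/notify",
    app_private_key_string=read("app_private_key.pem"),
    app_public_key_cert_string=read("appCertPublicKey.crt"),
    alipay_public_key_cert_string=read("alipayCertPublicKey_RSA2.crt"),
    alipay_root_cert_string=read("alipayRootCert.crt"),
)
result = client.api("alipay.trade.query",
                    biz_content={"out_trade_no": "20150320010101001"})
print(result)
```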
|
{
"source": "JerecTharen/NuzlockerPY",
"score": 4
}
|
#### File: NuzlockerPY/ConsoleCommander/ConsoleCommander.py
```python
class ConsoleCommander:
cmd = None
def __init__(self, cmdDict):
self.cmdDict = cmdDict
self.cmd = ''
self.PromptCommand()
def PromptCommand(self):
try:
self.cmd = input('\nPlease enter a command: ').lower()
except NameError:#only applies to linux environment
print('You did not enter \" before and after your command. Try again.')
self.PromptCommand()
print('You entered - ', self.cmd)
self.ProcessCommand()
def ProcessCommand(self):
if(self.cmd == 'exit'):
print('Have a great day!')
elif(self.cmd == 'help'):
for key in self.cmdDict.keys():
print('\n\r' + key + ',')
self.PromptCommand()
else:
try:
self.cmdDict[self.cmd]()
self.PromptCommand()
except KeyError:
print('Command not recognized. Please try again.')
self.PromptCommand()
```
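A usage sketch: the constructor immediately enters the prompt loop, so this is interactive; the dict maps command names to zero-argument callables, and `help`/`exit` are built in. The import path is an assumption based on the file header.
```python
from ConsoleCommander.ConsoleCommander import ConsoleCommander  # assumed import path

def greet():
    print("Hello from NuzlockerPY!")

# Type "greet", "help" or "exit" at the prompt.
ConsoleCommander({"greet": greet})
```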
#### File: NuzlockerPY/ConsoleHelpers/PrintHelper.py
```python
class PrintHelper:
@staticmethod
def PrintBanner(printStr, sep='='):
bannerStr = sep * len(printStr)
print(bannerStr)
print(printStr)
print(bannerStr)
```
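A usage sketch for the static banner helper; the import path is an assumption based on the file header.
```python
from ConsoleHelpers.PrintHelper import PrintHelper  # assumed import path

PrintHelper.PrintBanner("Welcome to NuzlockerPY")  # framed with '='
PrintHelper.PrintBanner("Run complete", sep="-")   # framed with '-'
```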
|
{
"source": "jeredbare/paradigm",
"score": 3
}
|
#### File: paradigm/src/inetaccess.py
```python
import requests
import json
import datetime
def build_response_dict(build_dict, target, http, https, timestamp):
build_dict['fqdn'] = target['name']
build_dict['domain'] = target['domain']
build_dict['ip'] = target['ip']
build_dict['cidr'] = target['cidr']
build_dict['loc'] = target['desc']
build_dict['http'] = http
build_dict['https'] = https
build_dict['timestamp'] = timestamp
return build_dict
def scan_site(targets):
ok_responses = 0
json_response = {}
scan_data = []
for target in targets:
response_dict = {'fqdn': '', 'domain': '', 'ip': [], 'cidr': '', 'loc': [], 'http': '', 'https': '', 'timestamp': []}
try:
http = requests.get("http://{}".format(target['name']), timeout=2)
https = requests.get("https://{}".format(target['name']), timeout=2)
t = datetime.datetime.now()
scan_data.append(build_response_dict(response_dict, target, str(http.status_code), str(https.status_code), str(t.strftime("%d/%m/%Y %H:%M:%S"))))
if http.status_code != 200 or https.status_code != 200:
continue
ok_responses = ok_responses + 1
except requests.exceptions.ConnectionError as e:
scan_data.append(build_response_dict(response_dict, target, "No Response", "No Response", "----"))
continue
except requests.exceptions.ConnectTimeout as f:
scan_data.append(build_response_dict(response_dict, target, "No Response", "No Response", "----"))
continue
result_score = round((ok_responses / len(targets)) * 100)
json_response["result_score"] = str(result_score)
json_response["scan_data"] = scan_data
return json_response
```
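A usage sketch for `scan_site`: it expects an iterable of target dicts with the keys consumed by `build_response_dict`, and it issues real HTTP/HTTPS requests to each target. The host below is a placeholder and the import mirrors the test module.
```python
from src import inetaccess

targets = [{
    "name": "example.com",      # FQDN to probe
    "domain": "example.com",
    "ip": "192.0.2.1",
    "cidr": "192.0.2.0/24",
    "desc": "TESTING",
}]
report = inetaccess.scan_site(targets)
print(report["result_score"])   # percentage of targets answering 200 on both schemes
print(report["scan_data"])      # per-target status codes and timestamps
```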
#### File: paradigm/tests/inetaccess_test.py
```python
import unittest
from src import inetaccess
class ScanDataTest(unittest.TestCase):
def test_no_response(self):
expected_results = {
"result_score": "0.0",
"scan_data": [
{
"fqdn": "testing.notworking.com.co",
"domain": "notworking.com.co",
"ip": "0.0.0.0",
"cidr": "0.0.0.0/24",
"loc": "TESTING",
"http": "No Response",
"https": "No Response"
},
]
}
scan_results = inetaccess.scan_site('./tests/test_resources/scan_test.json')
self.assertEqual(scan_results, expected_results)
```
|
{
"source": "jerefarrher/skmrifilter",
"score": 3
}
|
#### File: skmrifilter/skmrifilter/metrics.py
```python
import numpy as np
def dice_coefficient(y_true, y_pred, axis=(1, 2, 3), epsilon=0.00001):
"""Compute mean dice coefficient over all classes.
Parameters
----------
y_true: Tensor
tensor of ground truth values for all classes.
shape (num_classes, x_dim, y_dim, z_dim)
y_pred: Tensor
tensor of predictions for all classes.
shape (num_classes, x_dim, y_dim, z_dim)
axis: Tuple
spatial axes to sum over when computing numerator and
denominator of dice coefficient.
epsilon: Float
constant to avoid dividing by 0.
Returns
-------
dice_coefficient: Float
computed value of dice coefficient.
"""
dice_numerator = 2.0 * np.sum(y_true * y_pred, axis=axis) + epsilon
dice_denominator = (
np.sum(y_true, axis=axis) + np.sum(y_pred, axis=axis) + epsilon
)
dice_coefficient = np.mean((dice_numerator) / (dice_denominator))
return dice_coefficient
def dice_loss(y_true, y_pred):
"""Compute dice_loss for all classes.
Parameters
----------
y_true: Tensor
tensor of ground truth values for all classes.
shape (num_classes, x_dim, y_dim, z_dim)
y_pred: Tensor
tensor of predictions for all classes.
shape (num_classes, x_dim, y_dim, z_dim).
"""
return 1 - dice_coefficient(y_true, y_pred)
```
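A small worked example for the Dice metric above on a one-class toy volume, assuming the package is importable as `skmrifilter`.
```python
import numpy as np
from skmrifilter.metrics import dice_coefficient, dice_loss

y_true = np.zeros((1, 4, 4, 4))
y_pred = np.zeros((1, 4, 4, 4))
y_true[0, :2] = 1.0  # 32 positive voxels
y_pred[0, :3] = 1.0  # 48 predicted voxels, 32 of them overlapping

print(dice_coefficient(y_true, y_pred))  # 2*32 / (32 + 48) = 0.8
print(dice_loss(y_true, y_pred))         # 1 - 0.8 = 0.2
```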
#### File: skmrifilter/tests/test_data_transformer.py
```python
import numpy.testing as npt
from skimage.restoration import (
denoise_bilateral,
denoise_tv_chambolle,
denoise_wavelet,
)
from sklearn.pipeline import Pipeline
from skmrifilter.transformer import ImageFilterTransformer
def test_simple_transformer(noisy_image, noisy_wavelet):
t_wavelet = ImageFilterTransformer(denoise_wavelet)
transformed_image = t_wavelet.transform(noisy_image)
npt.assert_array_almost_equal(transformed_image, noisy_wavelet, decimal=15)
def test_pipe_no_kwargs(noisy_image, noisy_pipeline):
t_tv_chambelle = ImageFilterTransformer(denoise_tv_chambolle)
t_wavelet = ImageFilterTransformer(denoise_wavelet)
pipeline = Pipeline(
steps=[
(t_tv_chambelle.__repr__(), t_tv_chambelle),
(t_wavelet.__repr__(), t_wavelet),
]
)
transformed_image = pipeline.transform(noisy_image)
npt.assert_array_almost_equal(
noisy_pipeline, transformed_image, decimal=15
)
def test_pipeline_kwargs(noisy_image, noisy_pipeline_kwargs):
t_tv_chambelle = ImageFilterTransformer(denoise_tv_chambolle)
t_bilateral = ImageFilterTransformer(denoise_bilateral, multichannel=True)
t_wavelet = ImageFilterTransformer(denoise_wavelet)
pipe = Pipeline(
steps=[
(t_tv_chambelle.__repr__(), t_tv_chambelle),
(t_bilateral.__repr__(), t_bilateral),
(t_wavelet.__repr__(), t_wavelet),
]
)
pipe_transformed = pipe.transform(noisy_image)
npt.assert_array_almost_equal(
pipe_transformed, noisy_pipeline_kwargs, decimal=15
)
```
|
{
"source": "JereKoskela/arginfer",
"score": 3
}
|
#### File: arginfer/arginfer/argbook.py
```python
import collections
import math
from sortedcontainers import SortedSet
from sortedcontainers import SortedList
import bintrees
import pickle
import numpy as np
import pandas as pd
class Segment(object):
"""
A class representing a single segment. Each segment has a left and right, denoting
the loci over which it spans, a node giving the node to which it belongs in an ARG,
a prev and next, giving the previous and next segments in the chain,
and samples representing the samples underneath the segment.
"""
def __init__(self):
self.left = None
self.right = None
self.node = None
self.prev = None
self.next = None
self.samples = bintrees.AVLTree()
def __str__(self):
s = "({}:{}-{}->{}: prev={} next={})".format(
self.left, self.right, self.node.index, self.samples,repr(self.prev),
repr(self.next))
return s
def __lt__(self, other):
return ((self.left, self.right)
< (other.left, other.right))
def copy(self):
''':return a copy of this segment'''
s = Segment()
s.left = self.left
s.right = self.right
s.node = self.node
s.samples = self.samples
s.next = self.next
s.prev = self.prev
return s
def contains(self, x):
return x >= self.left and x < self.right
def is_mrca(self, sample_size):#V3
assert len(self.samples) <= sample_size
return len(self.samples) == sample_size
def equal_samples(self, other):
'''is self.samples == other.samples'''
# self.samples.is_subset(other.samples) and other.samples.is_subset(self.samples)
return sorted(self.samples) == sorted(other.samples)
def union_samples(self, other):
# s = self.samples[:]
# f = s[:]
# s.extend(other.samples)
return self.samples.union(other.samples)
def equal(self, other):
if self.node is None and other.node is None:
return self.left == other.left and \
self.right == other.right and self.node == other.node and \
sorted(self.samples) == sorted(other.samples)
elif self.node is not None and other.node is not None:
return self.left == other.left and \
self.right == other.right and self.node.index == other.node.index and \
sorted(self.samples) == sorted(other.samples)
else:
return False
def defrag_segment_chain(self):
y = self
while y.prev is not None:
x = y.prev
if x.right == y.left and x.node.index == y.node.index and y.equal_samples(x):
x.right = y.right
x.next = y.next
if y.next is not None:
y.next.prev = x
y = x
def get_first_segment(self):
'''get the fist segment in a chain of segments'''
seg = self
while seg.prev is not None:
seg = seg.prev
return seg
def get_variants(self, data):
'''get all snps of data that are in self
TODO: find an efficient way
'''
seg_variants = bintrees.AVLTree()
for item in data.keys():
if self.contains(item):
seg_variants[item] = item
return seg_variants
def add_snps(self, node , data):
'''add snps to node that are in this segment'''
for snp in data.keys():
if self.contains(snp) and \
sorted(self.samples) == sorted(data[snp]):
node.snps[snp] = snp
def get_seg_variants(self, data):
'''TODO: implement efficiently'''
assert self is not None
seg_variants = bintrees.AVLTree()
for item in data.keys():
if self.contains(item):
seg_variants[item] = item
return seg_variants
def get_intersect(self, start, end):
'''
return the intersection of self and [start, end)
'''
ret = []
if self.left <= start and self.right > start:
if self.right<= end:
ret = [start, self.right]
else:
ret = [start, end]
elif self.left > start and self.left < end:
if self.right<=end:
ret = [self.left, self.right]
else:
ret= [self.left, end]
return ret
class Node(object):
"""
A class representing a single node. Each node has a left and right child,
and a left and right parent. If a node arises from a recombination, then
left_child == right_child, while if it ends in a coalescence, then
left_parent == right_parent. Each node also has a time at which it
appears in the tree, and a list of segments of ancestral material, and a list of
snps. The snps represent mutations arising on the branch of which the node is a
child. The fact that snps are stored on a single branch is for computational
convenience; the MCMC algorithm marginalises over all contiguous branches which
subtend the same leaves at the site of the snp, and hence the snp could just as well
be stored on any one of them.
"""
def __init__(self, index):
self.left_child = None
self.right_child = None
self.left_parent = None
self.right_parent = None
self.first_segment = None
self.snps = bintrees.AVLTree()
self.time = None
self.breakpoint = None
self.index = index
def copy(self):
'''a copy of the node'''
cpy_node = Node(self.index)
s = self.first_segment
if s is not None:
x = s.copy()
cpy_node.first_segment = x
x.node = cpy_node
x = x.next
while x is not None:
s = x.copy()
s.prev.next = s
x.node = cpy_node
x = x.next
else:
cpy_node.first_segment = None
cpy_node.time = self.time
cpy_node.breakpoint = self.breakpoint
cpy_node.snps = self.snps.copy()
return cpy_node
def contains(self, x):
seg = self.first_segment
while seg is not None:
if seg.contains(x):
return True
seg = seg.next
return False
def x_segment(self, x):
'''return the segment containing x
given that we know self contains x'''
seg = self.first_segment
while seg is not None:
if seg.contains(x):
return seg
seg = seg.next
raise ValueError("x is not in node")
def num_links(self):
seg = self.first_segment
left = seg.left
while seg.next is not None:
seg = seg.next
return seg.right - left -1
def is_leaf(self):
return self.left_child == None
def is_root(self):
return self.left_parent == None
def equal(self, other):
'''
two nodes are exactly the same, to verify if
the original node changes after some updating.
'''
if self is not None and other is not None:
if self.time != other.time or\
sorted(self.snps) != sorted(other.snps) or\
self.index != other.index:
return False
else:
seg = self.first_segment
sego = other.first_segment
while seg is not None and sego is not None:
if not seg.equal(sego):
return False
seg = seg.next
sego = sego.next
if seg is None and sego is None:
return True
else:
return False
else:
raise ValueError("one or both nodes are NONE")
def arg_node_age(self):
'''the arg branch length of a node '''
if self.left_parent is not None:
return self.left_parent.time - self.time
else:
return 0
def upward_path(self, x):
'''for position x check if we can move upward.
this is used in finding the branch length at
position x in a tree'''
if self.left_parent is None:
block = True
return self, block
elif self.left_parent.index is not self.right_parent.index:
if self.left_parent.contains(x) + self.right_parent.contains(x) != 1:
print("in upward_path x is", x, "left_aprent", self.left_parent.index,
"right parent",self.right_parent.index, "node", self.index)
assert self.left_parent.contains(x) + self.right_parent.contains(x) == 1
block = False
if self.left_parent.contains(x):
return self.left_parent, block
else:
return self.right_parent, block
else:#CA
sib = self.sibling()
#--- after spr before clean up, sib might be NAM
if sib.first_segment != None and sib.contains(x):
block = True
return self.left_parent, block
else:
block = False
return self.left_parent, block
def tree_node_age(self, x, return_parent_time= False):
'''
the tree branch length of
node self, at position x
:param x: the site
:param return_parent_time: if we only want to
report the parent time ---> used in the case of allele age
'''
node = self
child_time = node.time
block = False
while not block:
node, block = node.upward_path(x)
assert node.time - child_time > 0
if not return_parent_time:
return node.time - child_time
else:
return node.time
def sibling(self):
'''
Find and return the sibling node of u
where u is a child of a CA
'''
assert self.left_parent is not None
assert self.left_parent.index == self.right_parent.index
p = self.left_parent
v = p.left_child
if v.index == self.index:
v = p.right_child
assert v.index is not self.index
return v
def push_snp_down(self, x):
# Push the snp at position x down one branch from node to one of its children
# provided only one is ancestral at x.
if self.left_child is None:
block = True
return self, block
elif self.left_child is not self.right_child:
if self.left_child.contains(x) and self.right_child.contains(x):
block = True
return self, block
elif self.left_child.contains(x):
self.left_child.snps.__setitem__(x, x)
self.snps.discard(x)
block = False
return self.left_child, block
else:
self.right_child.snps.__setitem__(x, x)
self.snps.discard(x)
block = False
return self.right_child, block
else:# rec
self.left_child.snps.__setitem__(x, x)
self.snps.discard(x)
block = False
return self.left_child, block
def get_tail(self):
seg = self.first_segment
while seg.next is not None:
seg = seg.next
return seg
def get_variants(self, data):
'''get all snps in data that lie in the segments of self
TODO: find an efficient way; looping over all data SNPs
for each segment is inefficient
'''
node_variants = bintrees.AVLTree()
seg = self.first_segment
while seg is not None:
for item in data.keys():
if seg.contains(item):
node_variants[item] = item
seg = seg.next
return node_variants
def update_child(self, oldchild, newchild):
'''update self child from oldchild to newchild'''
if self.left_child != None:
if self.left_child.index == oldchild.index:
self.left_child = newchild
if self.right_child != None:
if self.right_child.index == oldchild.index:
self.right_child = newchild
def reconnect(self, child):# BUG7
'''from child--> self--> parent: TO child ---> parent '''
leftparent = self.left_parent
rightparent = self.right_parent
child.left_parent = leftparent
child.right_parent = rightparent
child.breakpoint = self.breakpoint
leftparent.update_child(self, child)
rightparent.update_child(self, child)
def is_invisible_recomb(self):
'''self is a recomb child, check if the recomb is invisible'''
if self.left_parent.left_parent.index == \
self.right_parent.left_parent.index:# invisible
return True
else:
return False
class ARG(object):
'''
Ancestral Recombination Graph
'''
def __init__(self):
self.nodes = {}
self.roots = bintrees.AVLTree()# root indexes
self.rec = bintrees.AVLTree() # arg rec parents nodes
self.coal = bintrees.AVLTree() # arg CA parent node
self.num_ancestral_recomb = 0
self.num_nonancestral_recomb = 0
self.branch_length = 0
self.nextname = 1 # next node index
self.available_names = SortedSet()
def __iter__(self):
'''iterate over nodes in the arg'''
return list(self.nodes)
def __len__(self):
'''number of nodes'''
return len(self.nodes)
def __getitem__(self, index):
'''returns node by key: item'''
return self.nodes[index]
def __setitem__(self, index, node):
'''adds a node to the ARG'''
node.index = index
self.add(node)
def __contains__(self, index):
'''if ARG contains node key '''
return index in self.nodes
def copy(self):
'''return a copy of the ARG'''
arg = ARG()
for node in self.nodes.values():
arg.nodes[node.index] = node.copy()
# connect nodes
for node in self.nodes.values():
node2 = arg.__getitem__(node.index)
if node.left_child != None:
node2.left_child = arg.__getitem__(node.left_child.index)
node2.right_child = arg.__getitem__(node.right_child.index)
if node.left_parent != None:
node2.left_parent = arg.__getitem__(node.left_parent.index)
node2.right_parent = arg.__getitem__(node.right_parent.index)
arg.roots = self.roots.copy()# root indexes
arg.rec = self.rec.copy()# arg rec parents nodes
arg.coal = self.coal.copy() # arg CA parent node
arg.num_ancestral_recomb = self.num_ancestral_recomb
arg.num_nonancestral_recomb = self.num_nonancestral_recomb
arg.branch_length = self.branch_length
arg.nextname = self.nextname # next node index
arg.available_names = self.available_names.copy()
return arg
def equal(self, other):
'''if self is equal to other (structural equality)
TODO : complete this'''
if self.__len__() != other.__len__():
return False
else:
for node in self.nodes.values():
if node.index not in other:
return False
if not node.equal(other[node.index]):
return False
return True
def leaves(self, node=None):
"""
Iterates over the leaves of the ARG.
"""
if node is None:
for node in self.nodes.values():
if node.left_child == None:
yield node
else:
for node in self.preorder(node):
if node.left_child == None:
yield node
def preorder(self, node=None):
"""
Iterates through nodes in preorder traversal.
"""
visit = set()
if node is None:
node = self.__getitem__(self.roots.max_key())
queue = [node]
for node in queue:
if node in visit:
continue
yield node
visit.add(node)
if node.left_child != None:
queue.append(node.left_child)
if node.left_child.index != node.right_child.index:
queue.append(node.right_child)
def postorder(self, node=None):
"""
Iterates through nodes in postorder traversal.
"""
visit = collections.defaultdict(lambda: 0)
queue = list(self.leaves(node))
for node in queue:
yield node
if node.left_parent!= None:
visit[node.left_parent] +=1
if node.left_parent.left_child.index != node.left_parent.right_child.index:
num_child = 2
else:
num_child =1
# if all child has been visited then queue parent
if visit[node.left_parent] == num_child:
queue.append(node.left_parent)
if node.right_parent.index != node.left_parent.index:
visit[node.right_parent] +=1
# if all child has been visited then queue parent
if visit[node.right_parent] == num_child:
queue.append(node.right_parent)
def set_roots(self):
self.roots.clear()
for node in self.nodes.values():
if node.left_parent is None:
self.roots[node.index] = node.index
def get_times(self):
'''return a sorted set of the ARG node.time'''
times = SortedSet()
for node in self.nodes.values():
times.add(node.time)
return times
def get_higher_nodes(self, t):
''':return nodes.index of nodes with node.time >= t
TODO: a more efficient search option
'''
return [key for key in self.nodes if self.nodes[key].time >= t]
#==========================
# node manipulation
def alloc_segment(self, left = None, right = None, node = None,
samples = bintrees.AVLTree(), prev = None, next = None):
"""
alloc a new segment
"""
s = Segment()
s.left = left
s.right = right
s.node = node
s.samples = samples
s.next = next
s.prev = prev
return s
def alloc_node(self, index = None, time = None,
left_child = None, right_child = None):
"""
alloc a new Node
"""
node = Node(index)
node.time = time
node.first_segment = None
node.left_child = left_child
node.right_child = right_child
node.left_parent = None
node.right_parent = None
node.breakpoint = None
node.snps = bintrees.AVLTree()
return node
def store_node(self, segment, node):
'''store node with segments: segment'''
x = segment
if x is not None:
while x.prev is not None:
x = x.prev
s = self.alloc_segment(x.left, x.right, node, x.samples.copy())
node.first_segment = s
x.node = node
x = x.next
while x is not None:
s = self.alloc_segment(x.left, x.right, node, x.samples.copy(), s)
s.prev.next = s
x.node = node
x = x.next
else:#
node.first_segment = None
self.nodes[node.index] = node
def copy_node_segments(self, node):
'''
copy the segments of a node,
in CA event or Rec events, we need to copy the first node
in order to make changes on them
'''
x = node.first_segment
if x is None:
return None
else:
assert x.prev is None
s = self.alloc_segment(x.left, x.right, node, x.samples.copy())
x.node = node
x = x.next
while x is not None:
s = self.alloc_segment(x.left, x.right, node, x.samples.copy(), s)
s.prev.next = s
x.node = node
x = x.next
return s
def get_available_names(self):
'''get free names from 0 to max(nodes)'''
self.available_names = SortedSet()
current_names = SortedSet(self.__iter__())
counter = 0
prev = current_names[0]
while counter < len(current_names):
if current_names[counter] != prev + 1:
self.available_names.update(range(prev+1, current_names[counter]))
prev = current_names[counter]
counter += 1
def new_name(self):
'''returns a new name for a node'''
if self.available_names:
name = self.available_names.pop()
else:
name = self.nextname
self.nextname += 1
return name
def add(self, node):
''' add a ready node to the ARG:
'''
self.nodes[node.index] = node
return node
def rename(self, oldindex, newindex):
'''renames a node in the ARG'''
node = self.nodes[oldindex]
node.index = newindex
del self.nodes[oldindex]
self.nodes[newindex] = node
def total_branch_length(self):
'''the ARG total branch length'''
total_material = 0
for node in self.nodes.values():
if node.left_parent is not None:
age = node.left_parent.time - node.time
seg = node.first_segment
while seg is not None:
total_material += ((seg.right - seg.left)* age)
seg = seg.next
return total_material
#=======================
#spr related
def detach(self, node, sib):
'''
Detaches a specified coalescence node from the rest of the ARG
'''
# print("Detach()",node.index, "sib", sib.index, "p",node.left_parent.index)
assert node.left_parent.index == node.right_parent.index
parent = node.left_parent
sib.left_parent = parent.left_parent
sib.right_parent = parent.right_parent
sib.breakpoint = parent.breakpoint
grandparent = parent.left_parent
if grandparent is not None:
grandparent.update_child(parent, sib)
grandparent = parent.right_parent
grandparent.update_child(parent, sib)
def reattach(self, u, v, t, new_names):
# Reattaches node u above node v at time t; new_names is an AVLTree of all
# new node indexes in the new ARG during MCMC
assert t > v.time
# assert v.left_parent == None or t < v.left_parent.time
if u.left_parent is None:# new_name
new_name = self.new_name()
new_names[new_name] = new_name
# self.coal[new_name] = new_name # add the new CA parent to the ARG.coal
parent = self.add(self.alloc_node(new_name))
parent.left_child = u
u.left_parent = parent
u.right_parent = parent
else:
assert u.left_parent.index == u.right_parent.index
parent = u.left_parent
parent.time = t
parent.breakpoint = v.breakpoint
v.breakpoint = None
parent.left_parent = v.left_parent
grandparent = v.left_parent
if grandparent is not None:
grandparent.update_child(v, parent)
parent.right_parent = v.right_parent
grandparent = v.right_parent
if grandparent is not None:
grandparent.update_child(v, parent)
v.left_parent = parent
v.right_parent = parent
if parent.left_child.index == u.index:
parent.right_child = v
else:
parent.left_child = v
return new_names
def push_mutation_down(self, node, x):
'''
for a given node, push the mutation (at x) as far down as possible.
Normally mutations automatically stay at their lowest possible position;
this might be useful for the initial ARG
'''
block = False
while not block:
node, block = node.push_snp_down(x)
def push_all_mutations_down(self, node):
'''push down all mutations on node as low as possible'''
snp_keys = [k for k in node.snps]
for x in snp_keys:
self.push_mutation_down(node, x)
# iter = len(node.snps)
# i = 0
#
# while iter > 0:
# x = node.snps[i]
# self.push_mutation_down(node, x)
# iter -= 1
# if node.snps and len(node.snps) > i:
# if node.snps[i] == x:
# i += 1
def find_tmrca(self, node, x):
'''
check the parent of node to see
if it is mrca for site x
'''
if node.left_parent is None:
block = True
return node, block
elif node.left_parent.index is not node.right_parent.index:
assert node.left_parent.contains(x) + node.right_parent.contains(x) == 1
block = False
if node.left_parent.contains(x):
return node.left_parent, block
else:
return node.right_parent, block
elif node.left_parent.contains(x):
block = False
return node.left_parent, block
else:# it is mrca for x
block = True
return node.left_parent, block
def tmrca(self, x):
'''tmrca for site x
1. start from a leaf
2. follow the path of x until its mrca
'''
node = self.__getitem__(0)
block = False
while not block:
node, block = self.find_tmrca(node, x)
return node.time
def total_tmrca(self, sequence_length):
'''
return the tmrca of all the sites in the ARG
'''
break_points = self.breakpoints(only_ancRec= True, set= True)
break_points.add(0)
break_points.add(sequence_length)
tot_tmrca = np.zeros(int(sequence_length))
count =0
while count < len(break_points)-1:
x_tmrca= self.tmrca(break_points[count])
tot_tmrca[int(break_points[count]):int(break_points[count+1])] = x_tmrca
count +=1
return tot_tmrca
def mean_tmrca(self, sequence_length):
'''return a value for tmrca of the ARG, which is the mean over all trmrcas'''
break_points = self.breakpoints(only_ancRec= True, set= True)
break_points.add(0)
break_points.add(sequence_length)
tmrca_list = []
count =0
while count < len(break_points)-1:
x_tmrca= self.tmrca(break_points[count])
tmrca_list.append(x_tmrca*(int(break_points[count+1])-int(break_points[count])))
count += 1
return np.mean(tmrca_list)
def allele_age(self):
''':return a pd df with four columns:
1. site: the genomic position of the SNP
2. recent age: the most recent age for the allele
3. mid age: the midpoint of node age and its parent (tree node) time
4. latest age: the latest time (back in time) for the mutation
The df is sorted based on site.
'''
#find the nodes with mutations
snp_nodes = [] # nodes with len(snps) > 0
for node in self.nodes.values():
if node.snps:
snp_nodes.append(node)
# now for each node and find age for each mut
age_df = pd.DataFrame(columns=["site", "recent age", "mid age", "latest age"])
for node in snp_nodes:
# num_branches = collections.defaultdict(list)
node_time = node.time
for x in node.snps:
parent_age = node.tree_node_age(x, return_parent_time=True)
age_df.loc[age_df.shape[0]] =[x, node_time,
(node_time+parent_age)/2, parent_age]
age_df.sort_values(by=['site'], ascending=True, inplace=True)
age_df.reset_index(inplace=True, drop=True)
return age_df
def invisible_recombs(self):
'''return the proportion of invisible recombs '''
invis_count=0
for node in self.nodes.values():
if node.breakpoint != None and node.is_invisible_recomb():
invis_count +=1
return invis_count/(self.num_ancestral_recomb+self.num_nonancestral_recomb)
#@property
def breakpoints(self, only_ancRec= False, set= True):
'''
:param only_ancRec: if True, return only ancestral recombination breakpoints
:param set: if True, only unique positions are returned (a SortedSet), otherwise a SortedList with repetition
:return: either a set/list of all recombination breakpoints,
or only the ancestral ones
'''
if set:
br = SortedSet()
else:
br = SortedList()
if not only_ancRec:
for node in self.nodes.values():
if node.breakpoint != None:
br.add(node.breakpoint)
else:
for node in self.nodes.values():
if node.breakpoint != None and\
node.contains(node.breakpoint):#ancestral
br.add(node.breakpoint)
return br
#========== probabilites
def log_likelihood(self, mutation_rate, data):
'''
log_likelihood of mutations on a given ARG up to a normalising constant
that depends on the pattern of observed mutations, but not on the ARG
or the mutation rate.
Note that after an SPR move and before clean up we might have NAM lineages;
this method takes that into account.
The number of mutations is len(data), i.e. the number of snps.
'''
snp_nodes = [] # nodes with len(snps) > 0
total_material = 0
number_of_mutations = 0
        #get total material and nodes with snps
for node in self.nodes.values():
if node.first_segment != None:
assert node.left_parent != None
age = node.left_parent.time - node.time
seg = node.first_segment
assert seg.prev == None
while seg is not None:
total_material += ((seg.right - seg.left)* age)
seg = seg.next
if node.snps:
number_of_mutations += len(node.snps)
snp_nodes.append(node)
self.branch_length = total_material
# print("number_of_mutations", number_of_mutations, "m", len(data))
assert number_of_mutations == len(data) # num of snps
if mutation_rate == 0:
if number_of_mutations == 0:
ret = 0
else:
ret = -float("inf")
else:
ret = number_of_mutations * math.log(total_material * mutation_rate) -\
(total_material * mutation_rate)
# now calc prob of having this particular mutation pattern
for node in snp_nodes:
# num_branches = collections.defaultdict(list)
for x in node.snps:
potential_branch_length = node.tree_node_age(x)
ret += math.log(potential_branch_length / total_material)
# # verify the mutation is on the correct spot
verify_mutation_node(node, data)
return ret
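    # Worked sketch of the likelihood above (illustrative numbers only, not from any
    # dataset): with total_material = 12.5, mutation_rate = 0.1 and three snps sitting
    # on branches whose tree_node_age(x) values are 2.0, 1.5 and 4.0, the returned
    # value would be
    #   3 * log(12.5 * 0.1) - 12.5 * 0.1
    #     + log(2.0 / 12.5) + log(1.5 / 12.5) + log(4.0 / 12.5)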
def log_prior(self, sample_size, sequence_length, recombination_rate, Ne,
NAM = True, new_roots = False , kuhner = False):
'''
        probability of the ARG under the coalescent with recombination.
        This is evaluated after a move and before clean up, so there might be some
        extra NAM lineages; we ignore them.
        :param NAM: whether non-ancestral material (NAM) nodes are allowed. Note that after
            an SPR move and before the clean up step there might be some NAM nodes in the
            ARG, which is ok. But after clean up or on the initial ARG there should not be any.
'''
# order nodes by time
#TODO: find an efficient way to order nodes
ordered_nodes = [v for k, v in sorted(self.nodes.items(),
key = lambda item: item[1].time)]
number_of_lineages = sample_size
number_of_links = number_of_lineages * (sequence_length - 1)
number_of_nodes = self.__len__()
counter = sample_size
time = 0
ret = 0
rec_count = 0
coal_count = 0
roots = bintrees.AVLTree()
new_coal = bintrees.AVLTree()
if kuhner:
self.rec.clear()
self.num_ancestral_recomb = 0
self.num_nonancestral_recomb = 0
while counter < number_of_nodes:
node = ordered_nodes[counter]
            assert node.time >= time # make sure it is ordered
rate = (number_of_lineages * (number_of_lineages - 1)
/ (4*Ne)) + (number_of_links * (recombination_rate))
# ret -= rate * (node.time - time)
if node.left_child.index == node.right_child.index: #rec
assert node.left_child.first_segment != None
assert node.left_child.left_parent.first_segment != None
assert node.left_child.right_parent.first_segment != None
ret -= rate * (node.time - time)
gap = node.left_child.num_links()-\
(node.left_child.left_parent.num_links() +
node.left_child.right_parent.num_links())
ret += math.log(recombination_rate)
assert gap >= 1
if gap == 1:
self.num_ancestral_recomb += 1
else:
self.num_nonancestral_recomb += 1
number_of_links -= gap
number_of_lineages += 1
if kuhner:# add rec
self.rec[node.index] = node.index
self.rec[ordered_nodes[counter+1].index] = ordered_nodes[counter+1].index
counter += 2
time = node.time
rec_count += 1
elif node.left_child.first_segment != None and\
node.right_child.first_segment != None:
ret -= rate * (node.time - time)
ret -= math.log(2*Ne)
if node.first_segment == None:
node_numlink = 0
number_of_lineages -= 2
counter += 1
if new_roots:
roots[node.index] = node.index
else:
node_numlink = node.num_links()
number_of_lineages -= 1
counter += 1
lchild_numlink = node.left_child.num_links()
rchild_numlink = node.right_child.num_links()
number_of_links -= (lchild_numlink + rchild_numlink) - node_numlink
time = node.time
coal_count += 1
if new_roots:
new_coal[node.index] = node.index
else:
counter += 1
if not NAM:
assert node.left_child.first_segment != None
assert node.right_child.first_segment != None
if new_roots:
return ret, roots, new_coal
else:
return ret
def dump(self, path = ' ', file_name = 'arg.arg'):
output = path + "/" + file_name
with open(output, "wb") as file:
pickle.dump(self, file)
def load(self, path = ' '):
with open(path, "rb") as file:
return pickle.load(file)
def verify(self):
'''
verify arg:
1. a node with parent must have seg
2. a node with no parent a. must be in roots b. different child
3. node.parent_time > node.time
4. arg name == node.index
5. recomb parent must have self.snps.empty()
6. nodes with child = None must be leaf
7. number coal + rec + roots check
8. seg.samples is not empty, seg.left < seg.right
'''
for node in self.nodes.values():
assert self.nodes[node.index].index == node.index
if node.left_parent is None: #roots
if node.first_segment is not None:
print("in verrify node is ", node.index)
self.print_state()
assert node.first_segment == None
assert node.index in self.roots
assert node.breakpoint == None
assert node.left_child.index != node.right_child.index
assert node.right_parent == None
assert node.index in self.coal
assert node.time > node.left_child.time
assert node.time > node.right_child.time
else: # rest
assert node.first_segment != None
assert node.first_segment.prev == None
assert node.get_tail().next == None
assert node.index not in self.roots
assert node.left_parent.time > node.time
if node.left_child is None: #leaves
assert node.right_child is None
assert node.time == 0
if node.left_parent.index != node.right_parent.index:
assert node.breakpoint != None
assert node.left_parent.left_child.index ==\
node.left_parent.right_child.index
assert node.right_parent.left_child.index ==\
node.right_parent.right_child.index
assert node.right_parent.left_child.index == node.index
assert not node.left_parent.snps
assert not node.right_parent.snps
assert node.left_parent.time == node.right_parent.time
assert node.left_parent.index in self.rec
assert node.right_parent.index in self.rec
if node.left_parent.first_segment.left > node.right_parent.first_segment.left:
print("in verify node", node.index)
print("node.left_parent", node.left_parent.index)
print("node.right_parent", node.right_parent.index)
assert node.left_parent.first_segment.left < node.right_parent.first_segment.left
else:
assert node.left_parent.index in self.coal
assert node.left_parent.left_child.index !=\
node.left_parent.right_child.index
assert node.breakpoint == None
if node.first_segment is not None:
seg = node.first_segment
assert seg.prev is None
while seg is not None:
assert seg.samples
assert seg.left < seg.right
assert seg.node.index == node.index
seg = seg.next
def print_state(self):
print("self.arg.coal", self.coal)
print("self.arg.rec", self.rec)
print("self.arg.roots", self.roots)
print("node", "time", "left", "right", "l_chi", "r_chi", "l_par", "r_par",
"l_bp", "snps", "fir_seg_sam",
sep="\t")
for j in self.nodes:
node = self.__getitem__(j)
if node.left_parent is not None or node.left_child is not None:
s = node.first_segment
if s is None:
print(j, "%.5f" % node.time, "root", "root",
node.left_child.index,
node.right_child.index,
node.left_parent,node.right_parent,
node.breakpoint,
node.snps ,None, sep="\t")
while s is not None:
l = s.left
r = s.right
if node.left_child is None:
print(j, "%.5f" % node.time, l,r, "Leaf", "Leaf",
node.left_parent.index,node.right_parent.index,
node.breakpoint,
node.snps, s.samples, sep="\t")#
elif node.left_parent is None:
print(j, "%.5f" % node.time, l, r,
node.left_child.index,
node.right_child.index, "Root", "Root",
node.breakpoint,
node.snps ,s.samples, sep="\t")
else:
print( j, "%.5f" % node.time, l, r,
node.left_child.index, node.right_child.index,
node.left_parent.index, node.right_parent.index,
node.breakpoint,
node.snps, s.samples, sep="\t")
s = s.next
#============== verification
def verify_mutation_node(node, data):
'''
verify node is the lowest possible position
the mutation can sit on.
'''
for x in node.snps:
        # both children have x
# left_child is not right_child
# for the segment containing x on node, samples == data[x]
if node.left_child is not None:
assert node.left_child.index is not node.right_child.index
assert node.left_child.contains(x) and node.right_child.contains(x)
node_samples = node.x_segment(x).samples
assert sorted(node_samples) == sorted(data[x])
```
|
{
"source": "jereliu/aden-tf",
"score": 2
}
|
#### File: calibre/model/gaussian_process.py
```python
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability import edward2 as ed
from tensorflow.python.ops.distributions.util import fill_triangular
import calibre.util.distribution as dist_util
import calibre.util.inference as inference_util
tfd = tfp.distributions
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
""" Kernel function """
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
def square_dist(X, X2=None, ls=1.):
"""Computes Square distance between two sets of features.
Referenced from GPflow.kernels.Stationary.
Args:
X: (tf.Tensor) First set of features of dim N x D.
X2: (tf.Tensor or None) Second set of features of dim N2 x D.
ls: (float) value for length scale.
Returns:
(tf.Tensor) A N x N2 tensor for ||x-x'||^2 / ls**2
Raises:
(ValueError) If feature dimension of X and X2 disagrees.
"""
N, D = X.shape
X = X / ls
Xs = tf.reduce_sum(tf.square(X), axis=1)
if X2 is None:
dist = -2 * tf.matmul(X, X, transpose_b=True)
dist += tf.reshape(Xs, (-1, 1)) + tf.reshape(Xs, (1, -1))
return tf.clip_by_value(dist, 0., np.inf)
N2, D2 = X2.shape
if D != D2:
raise ValueError('Dimension of X and X2 does not match.')
X2 = X2 / ls
X2s = tf.reduce_sum(tf.square(X2), axis=1)
dist = -2 * tf.matmul(X, X2, transpose_b=True)
dist += tf.reshape(Xs, (-1, 1)) + tf.reshape(X2s, (1, -1))
return tf.clip_by_value(dist, 0., np.inf)
def rbf(X, X2=None, ls=1., ridge_factor=0.):
"""Defines RBF kernel.
    k(x, x') = exp(-||x - x'||**2 / (2 * ls**2))
Args:
X: (tf.Tensor) First set of features of dim N x D.
X2: (tf.Tensor or None) Second set of features of dim N2 x D.
ls: (float) value for length scale
ridge_factor: (float32) ridge factor to stabilize Cholesky decomposition.
Returns:
        (tf.Tensor) A N x N2 tensor for exp(-||x - x'||**2 / (2 * ls**2))
"""
N, _ = X.shape.as_list()
if ridge_factor and X2 is None:
ridge_mat = ridge_factor * tf.eye(N, dtype=tf.float32)
else:
ridge_mat = 0
return tf.exp(-square_dist(X, X2, ls=ls) / 2) + ridge_mat
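# Minimal usage sketch (illustrative, TF 1.x style as used in this module); the toy
# array `X_toy` below is hypothetical and not part of the module:
#
#     X_toy = np.random.normal(size=(5, 2)).astype(np.float32)
#     K_toy = rbf(tf.constant(X_toy), ls=0.5, ridge_factor=1e-3)
#     with tf.Session() as sess:
#         K_val = sess.run(K_toy)   # (5, 5) kernel matrix, diagonal ~ 1 + ridge_factor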
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
""" Gaussian Process Prior """
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
def prior(X, ls, kernel_func=rbf,
ridge_factor=1e-3, name=None):
"""Defines Gaussian Process prior with kernel_func.
Args:
X: (np.ndarray of float32) input training features.
with dimension (N, D).
kernel_func: (function) kernel function for the gaussian process.
Default to rbf.
ls: (float32) length scale parameter.
ridge_factor: (float32) ridge factor to stabilize Cholesky decomposition.
name: (str) name of the random variable
Returns:
(ed.RandomVariable) A random variable representing the Gaussian Process,
dimension (N,)
"""
X = tf.convert_to_tensor(X, dtype=tf.float32)
N, _ = X.shape.as_list()
K_mat = kernel_func(X, ls=ls, ridge_factor=ridge_factor)
return ed.MultivariateNormalTriL(loc=tf.zeros(N, dtype=tf.float32),
scale_tril=tf.cholesky(K_mat),
name=name)
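# Example sketch (hypothetical shapes, not part of the original module): drawing the
# GP prior over 10 one-dimensional inputs and evaluating one realisation under TF 1.x.
#
#     X_toy = np.linspace(-1., 1., 10).reshape(-1, 1).astype(np.float32)
#     f_rv = prior(X_toy, ls=0.5, name="gp_f")   # ed.RandomVariable of shape (10,)
#     with tf.Session() as sess:
#         f_draw = sess.run(f_rv.value)          # one sample from the prior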
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
""" Predictive Sampling functions """
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
def sample_posterior_mean(X_new, X, f_sample, ls, kernel_func=rbf, ridge_factor=1e-3):
"""Sample posterior mean for f^*.
Posterior for f_new is conditionally independent from other parameters
    in the model, therefore its conditional posterior mean
can be obtained by sampling from the posterior conditional f^* | f:
In particular, we care about posterior predictive mean, i.e.
E(f^*|f) = K(X^*, X)K(X, X)^{-1}f
Args:
X_new: (np.ndarray of float) testing locations, N_new x D
X: (np.ndarray of float) training locations, N x D
f_sample: (np.ndarray of float) M samples of posterior GP sample, N x M
ls: (float) training lengthscale
kernel_func: (function) kernel function.
ridge_factor: (float32) small ridge factor to stabilize Cholesky decomposition.
Returns:
(np.ndarray) N_new x M vectors of posterior predictive mean samples
"""
Kx = kernel_func(X, X_new, ls=ls)
K = kernel_func(X, ls=ls, ridge_factor=ridge_factor)
# add ridge factor to stabilize inversion.
K_inv_f = tf.matrix_solve(K, f_sample)
return tf.matmul(Kx, K_inv_f, transpose_a=True)
def sample_posterior_full(X_new, X, f_sample, ls,
kernel_func=rbf,
kernel_func_xn=None,
kernel_func_nn=None,
ridge_factor=1e-3,
return_mean=False, return_vcov=False):
"""Sample posterior predictive distribution.
Sample posterior conditional from f^* | f ~ MVN, where:
E(f*|f) = K(X*, X)K(X, X)^{-1}f
Var(f*|f) = K(X*, X*) - K(X*, X)K(X, X)^{-1}K(X, X*)
Args:
X_new: (np.ndarray of float32) testing locations, N_new x D
X: (np.ndarray of float32) training locations, N x D
f_sample: (np.ndarray of float32) M samples of posterior GP sample,
N_obs x N_sample
ls: (float) training lengthscale
kernel_func: (function) kernel function for distance among X.
kernel_func_xn: (function or None) kernel function for distance between X and X_new,
if None then set to kernel_func.
kernel_func_nn: (function or None) kernel function for distance among X_new,
if None then set to kernel_func.
ridge_factor: (float32) small ridge factor to stabilize Cholesky decomposition.
Returns:
        (np.ndarray of float32) N_new x M matrix of posterior predictive samples
            (or the predictive mean / covariance if return_mean / return_vcov is True).
"""
X = tf.convert_to_tensor(X, dtype=tf.float32)
X_new = tf.convert_to_tensor(X_new, dtype=tf.float32)
f_sample = tf.convert_to_tensor(f_sample, dtype=tf.float32)
N_new, _ = X_new.shape.as_list()
N, M = f_sample.shape.as_list()
if kernel_func_xn is None:
kernel_func_xn = kernel_func
if kernel_func_nn is None:
kernel_func_nn = kernel_func
# compute basic components
Kxx = kernel_func_nn(X_new, X_new, ls=ls)
Kx = kernel_func_xn(X, X_new, ls=ls)
K = kernel_func(X, ls=ls, ridge_factor=ridge_factor)
K_inv = tf.matrix_inverse(K)
# compute conditional mean and variance.
mu_sample = tf.matmul(Kx, tf.matmul(K_inv, f_sample), transpose_a=True)
Sigma = Kxx - tf.matmul(Kx, tf.matmul(K_inv, Kx), transpose_a=True)
# sample
with tf.Session() as sess:
cond_means, cond_cov, Kxx_val = sess.run([mu_sample, Sigma, Kxx])
if return_mean:
return cond_means.astype(np.float32)
if return_vcov:
return cond_cov.astype(np.float32)
f_new_centered = np.random.multivariate_normal(
mean=[0] * N_new, cov=cond_cov, size=M).T
f_new = f_new_centered + cond_means
return f_new.astype(np.float32)
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
""" Variational Family, Mean-field """
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
def variational_mfvi(X, mfvi_mixture=False, n_mixture=1, name="", **kwargs):
"""Defines the mean-field variational family for Gaussian Process.
Args:
X: (np.ndarray of float32) input training features, with dimension (N, D).
mfvi_mixture: (float32) Whether to output variational family with a
mixture of MFVI.
n_mixture: (int) Number of MFVI mixture component to add.
name: (str) name for variational parameters.
kwargs: Dict of other keyword variables.
For compatibility purpose with other variational family.
Returns:
        q_f: (ed.RandomVariable) variational family.
        qf_mean, qf_sdev: (tf.Variable) variational parameters for q_f.
        mixture_par_list: (list) additional mixture parameters, empty unless mfvi_mixture=True.
"""
X = tf.convert_to_tensor(X, dtype=tf.float32)
N, D = X.shape.as_list()
# define variational parameters
qf_mean = tf.get_variable(shape=[N], name='{}_mean'.format(name))
qf_sdev = tf.exp(tf.get_variable(shape=[N], name='{}_sdev'.format(name)))
# define variational family
mixture_par_list = []
if mfvi_mixture:
gp_dist = tfd.MultivariateNormalDiag(loc=qf_mean, scale_diag=qf_sdev,
name=name)
q_f, mixture_par_list = inference_util.make_mfvi_sgp_mixture_family(
n_mixture=n_mixture, N=N, gp_dist=gp_dist, name=name)
else:
q_f = ed.MultivariateNormalDiag(loc=qf_mean, scale_diag=qf_sdev,
name=name)
return q_f, qf_mean, qf_sdev, mixture_par_list
def variational_mfvi_sample(n_sample, qf_mean, qf_sdev,
mfvi_mixture=False, mixture_par_list=None,
**kwargs):
"""Generates f samples from GPR mean-field variational family.
Args:
n_sample: (int) number of samples to draw
qf_mean: (tf.Tensor of float32) mean parameters for variational family
qf_sdev: (tf.Tensor of float32) standard deviation for variational family.
mfvi_mixture: (bool) Whether to sample from a MFVI mixture
mixture_par_list: (list of np.ndarray) List of mixture distribution
parameters, containing:
mixture_logits: mixture logit for sgp-mfvi_mix family
mixture_logits_mfvi_mix: mixture logit within mfvi_mix family
qf_mean_mfvi, qf_sdev_mfvi:
variational parameters for mfvi_mix family
kwargs: Dict of other keyword variables.
For compatibility purpose with other variational family.
Returns:
(np.ndarray) sampled values.
"""
"""Generates f samples from GPR mean-field variational family."""
q_f = tfd.MultivariateNormalDiag(loc=qf_mean, scale_diag=qf_sdev, )
q_f_sample = q_f.sample(n_sample)
if mfvi_mixture:
(mixture_logits, mixture_logits_mfvi_mix,
mean_mfvi_mix, sdev_mfvi_mix) = mixture_par_list
q_f_sample_mfvi = inference_util.sample_mfvi_mixture_family(
N_sample=n_sample,
mixture_logits=mixture_logits_mfvi_mix,
mean_mfvi_mix=mean_mfvi_mix,
sdev_mfvi_mix=sdev_mfvi_mix, )
mix_prob = tf.nn.softmax(mixture_logits)
q_f_sample = tf.tensordot(
tf.stack([q_f_sample_mfvi, q_f_sample], axis=-1), mix_prob,
axes=[[-1], [0]])
return q_f_sample
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
""" Variational Family: Sparse Gaussian Process """
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
"""Implements the Sparse GP VI method by [1].
Select a set of inducing points Z, then:
Original posterior:
p(Y, F, U) = p(Y|F) p(F|U) p(U), where:
p(Y|F) ~ MVN(Y| Mu = F,
Sigma = s^2 * I)
p(F|U) ~ MVN(F| Mu = Kxz Kzz^{-1} U
Sigma = Kxx - Kxz Kzz^{-1} Kxz^T )
p(U) ~ MVN(U| Mu = 0, Sigma = Kzz)
Variational posterior:
q(Y) = p(Y|F)
q(F|U) = p(F|U)
q(U|m, S) ~ MVN(U| Mu = m, Sigma = S)
Consequently, U can be marginalized out, such that q(F) becomes
q(F|m, S) ~ MVN(F| Mu = Kxz Kzz^{-1} m
Sigma = Kxx - Kxz Kzz^{-1} (Kzz - S) Kzz^{-1} Kxz^T)
"""
def variational_sgpr(X, Z, ls=1., kernel_func=rbf, ridge_factor=1e-3,
mfvi_mixture=False, n_mixture=1,
name="", **kwargs):
"""Defines the mean-field variational family for GPR.
Args:
X: (np.ndarray of float32) input training features, with dimension (Nx, D).
Z: (np.ndarray of float32) inducing points, with dimension (Nz, D).
ls: (float32) length scale parameter.
kernel_func: (function) kernel function.
ridge_factor: (float32) small ridge factor to stabilize Cholesky decomposition
mfvi_mixture: (float32) Whether to output variational family with a
mixture of MFVI.
n_mixture: (int) Number of MFVI mixture component to add.
name: (str) name for the variational parameter/random variables.
kwargs: Dict of other keyword variables.
For compatibility purpose with other variational family.
Returns:
        q_f: (ed.RandomVariable) variational family.
        qf_mean, qf_cov: (tf.Tensor) variational parameters for q_f.
        mixture_par_list: (list) additional mixture parameters, empty unless mfvi_mixture=True.
"""
X = tf.convert_to_tensor(X, dtype=tf.float32)
Z = tf.convert_to_tensor(Z, dtype=tf.float32)
Nx, Nz = X.shape.as_list()[0], Z.shape.as_list()[0]
# 1. Prepare constants
# compute matrix constants
Kxx = kernel_func(X, ls=ls)
Kxz = kernel_func(X, Z, ls=ls)
Kzz = kernel_func(Z, ls=ls, ridge_factor=ridge_factor)
# compute null covariance matrix using Cholesky decomposition
Kzz_chol_inv = tf.matrix_inverse(tf.cholesky(Kzz))
Kzz_inv = tf.matmul(Kzz_chol_inv, Kzz_chol_inv, transpose_a=True)
Kxz_Kzz_chol_inv = tf.matmul(Kxz, Kzz_chol_inv, transpose_b=True)
Kxz_Kzz_inv = tf.matmul(Kxz, Kzz_inv)
Sigma_pre = Kxx - tf.matmul(Kxz_Kzz_chol_inv, Kxz_Kzz_chol_inv, transpose_b=True)
# 2. Define variational parameters
# define free parameters (i.e. mean and full covariance of f_latent)
m = tf.get_variable(shape=[Nz], name='{}_mean_latent'.format(name))
    s = tf.get_variable(shape=[Nz * (Nz + 1) // 2], name='{}_cov_latent_s'.format(name))
L = fill_triangular(s, name='{}_cov_latent_chol'.format(name))
S = tf.matmul(L, L, transpose_b=True, name='{}_cov_latent'.format(name))
# compute sparse gp variational parameter
# (i.e. mean and covariance of P(f_obs | f_latent))
qf_mean = tf.tensordot(Kxz_Kzz_inv, m, [[1], [0]], name='{}_mean'.format(name))
qf_cov = (Sigma_pre +
tf.matmul(Kxz_Kzz_inv,
tf.matmul(S, Kxz_Kzz_inv, transpose_b=True)) +
ridge_factor * tf.eye(Nx, dtype=tf.float32))
# define variational family
mixture_par_list = []
if mfvi_mixture:
gp_dist = tfd.MultivariateNormalFullCovariance(loc=qf_mean,
covariance_matrix=qf_cov)
q_f, mixture_par_list = inference_util.make_mfvi_sgp_mixture_family(
n_mixture=n_mixture, N=Nx,
gp_dist=gp_dist, name=name)
else:
q_f = ed.MultivariateNormalFullCovariance(loc=qf_mean,
covariance_matrix=qf_cov,
name=name)
return q_f, qf_mean, qf_cov, mixture_par_list
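# Usage sketch (hypothetical arrays, not part of the original module): building the
# sparse GP variational family with 50 training points and 10 inducing points.
#
#     X_toy = np.random.uniform(-1., 1., size=(50, 1)).astype(np.float32)
#     Z_toy = np.linspace(-1., 1., 10).reshape(-1, 1).astype(np.float32)
#     q_f, qf_mean, qf_cov, _ = variational_sgpr(X_toy, Z_toy, ls=0.5, name="q_f")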
def variational_sgpr_sample(n_sample, qf_mean, qf_cov,
mfvi_mixture=False, mixture_par_list=None, **kwargs):
"""Generates f samples from GPR mean-field variational family.
Args:
n_sample: (int) number of samples to draw
qf_mean: (tf.Tensor of float32) mean parameters for
variational family
qf_cov: (tf.Tensor of float32) covariance for parameters for
variational family
mfvi_mixture: (bool) Whether to sample from a MFVI-SGP mixture
mixture_par_list: (list of np.ndarray) List of mixture distribution
parameters, containing [mixture_logits, qf_mean_mfvi, qf_sdev_mfvi].
kwargs: Dict of other keyword variables.
For compatibility purpose with other variational family.
Returns:
(np.ndarray) sampled values.
"""
q_f = tfd.MultivariateNormalFullCovariance(loc=qf_mean,
covariance_matrix=qf_cov, )
q_f_sample = q_f.sample(n_sample)
if mfvi_mixture:
(mixture_logits, mixture_logits_mfvi_mix,
mean_mfvi_mix, sdev_mfvi_mix) = mixture_par_list
q_f_sample_mfvi = inference_util.sample_mfvi_mixture_family(
N_sample=n_sample,
mixture_logits=mixture_logits_mfvi_mix,
mean_mfvi_mix=mean_mfvi_mix,
sdev_mfvi_mix=sdev_mfvi_mix, )
mix_prob = tf.nn.softmax(mixture_logits)
q_f_sample = tf.tensordot(
tf.stack([q_f_sample_mfvi, q_f_sample], axis=-1), mix_prob,
axes=[[-1], [0]])
return q_f_sample
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
""" Variational family III: Decoupled Gaussian Process """
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
"""Implements the Decoupled GP (DGP) VI method by [2].
Select a set of inducing points Zm and Zs, then:
Original posterior:
p(Y, F, U) = p(Y|F) p(F|U) p(U), where:
p(Y|F) ~ MVN(Y| Mu = F,
Sigma = s^2 * I)
p(F|U) ~ MVN(F| Mu = Kxm Kmm^{-1} U
Sigma = Kxx - Kxs Kss^{-1} Kxs^T )
p(U) ~ MVN(U| Mu = 0, Sigma = Kss)
Variational posterior:
q(Y) = p(Y|F)
q(F|U) = p(F|U)
q(U|m, S) ~ DGP
Consequently, q(F) becomes
q(F|m, S) ~ MVN(F| Mu = Kxm m
Sigma = Kxx - Kxs (Kss + S^{-1})^{-1} Kxs^T)
In practice, to make the problem unconstrained, we model S = LL^T.
Then
(Kss + S^{-1})^{-1} = L H^{-1} L^T,
where H = I + L^T Kss L.
"""
def variational_dgpr(X, Z, Zm=None, ls=1., kernel_func=rbf, ridge_factor=1e-3,
mfvi_mixture=False, n_mixture=1, name="", **kwargs):
"""Defines the mean-field variational family for GPR.
Args:
X: (np.ndarray of float32) input training features, with dimension (Nx, D).
Z: (np.ndarray of float32) inducing points, shape (Ns, D).
Zm: (np.ndarray of float32 or None) inducing points for mean, shape (Nm, D).
If None then same as Z
ls: (float32) length scale parameter.
kernel_func: (function) kernel function.
ridge_factor: (float32) small ridge factor to stabilize Cholesky decomposition
mfvi_mixture: (float32) Whether to output variational family with a
mixture of MFVI.
n_mixture: (int) Number of MFVI mixture component to add.
name: (str) name for the variational parameter/random variables.
kwargs: Dict of other keyword variables.
For compatibility purpose with other variational family.
Returns:
        q_f: (ed.RandomVariable) variational family.
        qf_mean, qf_cov: (tf.Tensor) variational parameters for q_f.
        mixture_par_list: (list) additional mixture parameters, empty unless mfvi_mixture=True.
"""
X = tf.convert_to_tensor(X)
Zs = tf.convert_to_tensor(Z)
Zm = tf.convert_to_tensor(Zm) if Zm is not None else Zs
Nx, Nm, Ns = X.shape.as_list()[0], Zm.shape.as_list()[0], Zs.shape.as_list()[0]
# 1. Prepare constants
# compute matrix constants
Kxx = kernel_func(X, ls=ls)
Kmm = kernel_func(Zm, ls=ls)
Kxm = kernel_func(X, Zm, ls=ls)
Kxs = kernel_func(X, Zs, ls=ls)
Kss = kernel_func(Zs, ls=ls, ridge_factor=ridge_factor)
# 2. Define variational parameters
# define free parameters (i.e. mean and full covariance of f_latent)
m = tf.get_variable(shape=[Nm, 1], name='{}_mean_latent'.format(name))
    s = tf.get_variable(shape=[Ns * (Ns + 1) // 2], name='{}_cov_latent_s'.format(name))
L = fill_triangular(s, name='{}_cov_latent_chol'.format(name))
# components for KL objective
H = tf.eye(Ns) + tf.matmul(L, tf.matmul(Kss, L), transpose_a=True)
cond_cov_inv = tf.matmul(L, tf.matrix_solve(H, tf.transpose(L)))
func_norm_mm = tf.matmul(m, tf.matmul(Kmm, m), transpose_a=True)
log_det_ss = tf.log(tf.matrix_determinant(H))
cond_norm_ss = tf.reduce_sum(tf.multiply(Kss, cond_cov_inv))
# compute sparse gp variational parameter (i.e. mean and covariance of P(f_obs | f_latent))
qf_mean = tf.squeeze(tf.tensordot(Kxm, m, [[1], [0]]), name='{}_mean'.format(name))
qf_cov = (Kxx -
tf.matmul(Kxs, tf.matmul(cond_cov_inv, Kxs, transpose_b=True)) +
ridge_factor * tf.eye(Nx, dtype=tf.float32)
)
# 3. Define variational family
mixture_par_list = []
if mfvi_mixture:
gp_dist = dist_util.VariationalGaussianProcessDecoupledDistribution(
loc=qf_mean,
covariance_matrix=qf_cov,
func_norm_mm=func_norm_mm,
log_det_ss=log_det_ss,
cond_norm_ss=cond_norm_ss)
q_f, mixture_par_list = inference_util.make_mfvi_sgp_mixture_family(
n_mixture=n_mixture, N=Nx,
gp_dist=gp_dist, name=name)
else:
q_f = dist_util.VariationalGaussianProcessDecoupled(loc=qf_mean,
covariance_matrix=qf_cov,
func_norm_mm=func_norm_mm,
log_det_ss=log_det_ss,
cond_norm_ss=cond_norm_ss,
name=name)
return q_f, qf_mean, qf_cov, mixture_par_list
variational_dgpr_sample = variational_sgpr_sample
```
#### File: calibre/util/calibration.py
```python
import tqdm
import numpy as np
import tensorflow as tf
from sklearn.model_selection import ShuffleSplit
def build_training_dataset(y_pred_sample, y_obs, X_obs, num_cdf_eval=100):
"""Building training dataset for nonparametric calibration.
Specifically, assume N observations and K CDF evaluation point, learn C
by building a classification dataset with size N*K as below:
for k in 1:K, for i in 1:N:
label: I(Y_i < t_k)
feature_1: t_k
feature_2: F_pred( t_k | X_i)
feature_3: X_i
    Both features and labels are organized in batches of shape
[n_cdf_eval, n_obs]
Args:
y_pred_sample: (np.ndarray) Samples from posterior predictive for each
observed y, with dimension (n_obs, n_posterior_sample).
y_obs: (np.ndarray) Observed y, with dimension (n_obs, 1).
X_obs: (np.ndarray) Observed X corresponding to y_obs,
with dimension (n_obs, n_feature)
num_cdf_eval: (int) Number of CDF evaluation for each y_obs.
Returns:
(dict of np.ndarray): Dictionary of np.ndarrays of labels and
features. It contains below key-value pair:
- "label": shape (n_cdf_eval, n_obs, 1)
- "feature_t": shape (n_cdf_eval, n_obs, 1)
- "feature_cdf": shape (n_cdf_eval, n_obs, 1)
- "feature_x": shape (n_cdf_eval, n_obs, n_feature)
Raises:
(ValueError): If dimension of y_pred_sample different from len(y_obs)
(ValueError): Shape of np.ndarray in dataset does not conform with
expected batch shape.
(ValueError): ndim of np.ndarray in dataset is not 2 or 3.
"""
n_obs, n_sample = y_pred_sample.shape
if n_obs != len(y_obs):
raise ValueError(
"First dimension of y_pred_sample must be same as len(y_obs). "
"Expected: {}, Observed: {}".format(len(y_obs), n_obs, ))
t_vals = np.linspace(np.min(y_obs), np.max(y_obs), num_cdf_eval)
# create and fill data dictionary
data_dict = dict()
data_dict["label"] = np.asarray(
        [y_obs < t_val for t_val in t_vals])  # (n_cdf_eval, n_obs, ...)
data_dict["feature_t"] = np.repeat(np.expand_dims(t_vals, -1),
repeats=n_obs, axis=-1)
data_dict["feature_cdf"] = np.asarray(
[np.mean(y_pred_sample < t_val, -1) for t_val in t_vals]
)
data_dict["feature_x"] = np.repeat(np.expand_dims(X_obs, 0),
repeats=num_cdf_eval, axis=0)
# check dimensions
for key, value in data_dict.items():
if value.shape[:2] != (num_cdf_eval, n_obs):
raise ValueError(
"Shape of '{}' does not conform with expected batch shape.\n"
"Observed: ({}, {}), Expected: ({}, {})".format(
key, value.shape[0], value.shape[1],
num_cdf_eval, n_obs,
)
)
if value.ndim != 3:
if value.ndim == 2:
data_dict[key] = np.expand_dims(value, axis=-1)
else:
raise ValueError(
"ndim of '{}' is expected to be either 2 or 3, "
"observed {}".format(key, value.ndim))
return data_dict
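# Shape sketch (hypothetical sizes, for illustration only): with n_obs = 100 observed
# points, n_posterior_sample = 500 and num_cdf_eval = 50, the call
#
#     data = build_training_dataset(y_pred_sample, y_obs, X_obs, num_cdf_eval=50)
#
# returns arrays of shape (50, 100, 1) for "label", "feature_t" and "feature_cdf",
# and (50, 100, n_feature) for "feature_x".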
def build_input_pipeline(train_data_dict, test_data_dict,
train_batch_size=1000,
test_batch_size=100, seed=100):
"""Build an Iterator switching between train and heldout data."""
# extract label and feature, organize into np.ndarrays
label_train = train_data_dict["label"]
label_test = test_data_dict["label"]
feature_train = np.concatenate(
[train_data_dict[key] for key in train_data_dict.keys()
if "feature" in key], axis=-1)
feature_test = np.concatenate(
[test_data_dict[key] for key in test_data_dict.keys()
if "feature" in key], axis=-1)
# organize into np.ndarrays
n_train_data = label_train.size
n_test_data = label_test.size
label_train = label_train.reshape(n_train_data, 1).astype(np.int32)
label_test = label_test.reshape(n_test_data, 1).astype(np.int32)
feature_train = feature_train.reshape(n_train_data,
feature_train.shape[-1]).astype(np.float32)
feature_test = feature_test.reshape(n_test_data,
feature_test.shape[-1]).astype(np.float32)
# Build an iterator over training batches.
training_dataset = tf.data.Dataset.from_tensor_slices(
(feature_train, label_train))
training_batches = training_dataset.shuffle(
50000, reshuffle_each_iteration=True).repeat().batch(
train_batch_size)
training_iterator = training_batches.make_one_shot_iterator()
# Build a iterator over the heldout set with batch_size=heldout_size,
# i.e., return the entire heldout set as a constant.
heldout_dataset = tf.data.Dataset.from_tensor_slices(
(feature_test, label_test))
heldout_frozen = (heldout_dataset.take(n_test_data).repeat().batch(
test_batch_size))
heldout_iterator = heldout_frozen.make_one_shot_iterator()
# Combine these into a feedable iterator that can switch between
# training and validation inputs.
handle = tf.placeholder(tf.string, shape=[])
feedable_iterator = tf.data.Iterator.from_string_handle(
handle, training_batches.output_types, training_batches.output_shapes)
features, labels = feedable_iterator.get_next()
return (features, labels, handle, training_iterator, heldout_iterator,
n_train_data, n_test_data)
# def build_local_calibration_dataset(X_obs, Y_obs, Y_sample, n_eval = 5):
# """Building training dataset for localized calibration.
#
# Specifically, assume N observations, learn a calibration function
# C( F(y_obs|x), x ): F x X -> [0, 1]
#
# by running monotonic regression on below dataset:
#
# feature 1: F_ij = F_i(y_j)
# feature 2: x_i
# label: P_{ij} = I(y_i < y_j)
#
# where y_i,x_i are elements in Y_obs and X_obs,
# and F_i(y_j) = F(y<y_j|x_i) is the model cdf evaluated at x_i.
#
# Args:
# X_obs: (tf.Tensor) Observed x, with dimension (n_obs, p).
# Y_obs: (tf.Tensor) Observed y, with dimension (n_obs, ).
# Y_sample: (tf.Tensor) Samples from posterior predictive for each
# observed y, with dimension (n_obs, n_posterior_sample).
# n_eval: (int) Number of y_j's to evaluate F_i's at.
#
# Returns:
# (ValueError): If sample size indicated in Y_sample different
# from that of Y_obs.
# """
# X_obs = tf.convert_to_tensor(X_obs, dtype=tf.float32)
# Y_obs = tf.convert_to_tensor(Y_obs.squeeze(), dtype=tf.float32)
# Y_sample = tf.convert_to_tensor(Y_sample, dtype=tf.float32)
#
# # check model dimension
# n_obs, = Y_obs.shape.as_list()
# n_obs_1, n_sample = Y_sample.shape.as_list()
#
# if n_obs != n_obs_1:
# raise ValueError(
# "First dimension of y_pred_sample must be same as len(y_obs). "
# "Expected: {}, Observed: {}".format(n_obs, n_obs_1, ))
#
# # selects evaluation points
# y_eval =
#
# # prepare features
# # prepare feature 1: model cdf
#
#
# # prepare feature 2
# # compute empirical cdf evaluations
# F_obs = tf.reduce_mean(
# tf.cast(Y_sample < tf.expand_dims(Y_obs, -1),
# dtype=tf.float32), axis=-1)
#
# P_obs = tf.reduce_mean(
# tf.cast(tf.expand_dims(F_obs, -1) <
# tf.expand_dims(F_obs, 0), dtype=tf.float32), axis=0)
#
# return {"feature": F_obs, "label": P_obs}
def build_calibration_dataset(Y_obs, Y_sample):
"""Building training dataset for nonparametric calibration.
Specifically, assume N observations, learn a calibration function
P(Y<F(y_obs|x)): F -> [0, 1]
by running monotonic regression on below dataset:
feature: F_obs = F(y < y_obs | x_obs)
label: P(F < F_obs)
where P(F < F_obs) is the empirical cdf built from all F_obs'
Args:
Y_obs: (tf.Tensor) Observed y, with dimension (n_obs, ).
Y_sample: (tf.Tensor) Samples from posterior predictive for each
observed y, with dimension (n_obs, n_posterior_sample).
Returns:
(dict of tf.Tensor): Dictionary of tf.Tensor of labels and
features. It contains below key-value pair:
- "label": shape (n_obs, )
- "feature": shape (n_obs, )
Raises:
(ValueError): If sample size indicated in Y_sample different
from that of Y_obs.
"""
Y_obs = tf.convert_to_tensor(Y_obs.squeeze(), dtype=tf.float32)
Y_sample = tf.convert_to_tensor(Y_sample, dtype=tf.float32)
# check model dimension
n_obs, = Y_obs.shape.as_list()
n_obs_1, n_sample = Y_sample.shape.as_list()
if n_obs != n_obs_1:
raise ValueError(
"First dimension of y_pred_sample must be same as len(y_obs). "
"Expected: {}, Observed: {}".format(n_obs, n_obs_1, ))
# compute empirical cdf evaluations
F_obs = tf.reduce_mean(
tf.cast(Y_sample < tf.expand_dims(Y_obs, -1),
dtype=tf.float32), axis=-1)
P_obs = tf.reduce_mean(
tf.cast(tf.expand_dims(F_obs, -1) <
tf.expand_dims(F_obs, 0), dtype=tf.float32), axis=0)
return {"feature": F_obs, "label": P_obs}
def sample_ecdf(n_sample, base_sample, quantile, y_range=None, seed=None):
"""Sample observations form 1D empirical cdf using inverse CDF method.
Here empirical cdf is defined by base_sample and the
corresponding quantiles.
Args:
n_sample: (int) Number of samples.
base_sample: (np.ndarray of float32) Base samples to sample
from, shape (n_sample0, )
quantile: (np.ndarray of float32) Quantiles corresponding to
the base samples.
        y_range: (tuple) (lower, upper) limits of the data.
Returns:
(np.ndarray of float32) Sample of shape (n_sample,) corresponding
to the empirical cdf.
"""
quantile = quantile.squeeze()
# for i in range(1, len(quantile)):
# quantile[i] = np.max([quantile[i], quantile[i-1]])
base_sample = np.sort(base_sample.squeeze())
    # adjust sample if quantile doesn't cover full range
min_quantile, max_quantile = quantile[0], quantile[-1]
if y_range:
if max_quantile < 1.:
additional_sample_size = int(
((1 - max_quantile) / (max_quantile - min_quantile)) * len(base_sample))
sample_limit_lower = np.max(base_sample)
sample_limit_higher = y_range[1]
additional_sample = np.random.uniform(low=sample_limit_lower,
high=sample_limit_higher,
size=additional_sample_size)
base_sample = np.concatenate([base_sample, additional_sample])
if min_quantile > 0.:
additional_sample_size = int(
(min_quantile / (1 - min_quantile)) * len(base_sample))
sample_limit_lower = y_range[0]
sample_limit_higher = np.min(base_sample)
additional_sample = np.random.uniform(low=sample_limit_lower,
high=sample_limit_higher,
size=additional_sample_size)
base_sample = np.concatenate([base_sample, additional_sample])
if len(base_sample) > len(quantile):
base_sample = base_sample[np.random.choice(len(base_sample),
len(quantile),
replace=False)]
elif len(base_sample) < len(quantile):
quantile = quantile[np.random.choice(len(quantile),
len(base_sample),
replace=False)]
quantile = np.sort(quantile.squeeze())
quantile = np.sort(quantile.squeeze())
base_sample = np.sort(base_sample.squeeze())
# identify sample id using inverse CDF lookup
np.random.seed(seed)
sample_prob = np.random.sample(size=n_sample)
sample_id = np.sum(np.expand_dims(sample_prob, 0) >
np.expand_dims(quantile, 1), axis=0) - 1
return base_sample[sample_id]
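# Usage sketch (illustrative arrays only): re-sample 1000 points from an empirical cdf
# defined by 100 base samples and their (possibly model-calibrated) quantiles.
#
#     base = np.random.normal(size=100)
#     quant = np.linspace(0., 1., 100)
#     new_sample = sample_ecdf(n_sample=1000, base_sample=base, quantile=quant, seed=42)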
def resample_ecdf_batch(n_sample, base_sample_batch, quantile_batch,
y_range=None, seed=None, verbose=False):
"""Sample observations form 1D empirical cdf using inverse CDF method.
Args:
n_sample: (int) Number of samples.
base_sample_batch: (np.ndarray of float32) Base samples to sample
from, shape (n_batch, n_original_sample, )
quantile_batch: (np.ndarray of float32) Quantiles corresponding to
the base samples, shape (n_batch, n_quantiles, )
        y_range: (tuple) (lower, upper) limits of the data.
verbose: (bool) If True then print progress.
Returns:
(np.ndarray of float32) Sample of shape (n_batch, n_sample,)
corresponding to the empirical cdf.
Raises:
(ValueError) Batch size between base_sample_batch and
quantile_batch disagree.
"""
n_batch0, _ = base_sample_batch.shape
n_batch, _ = quantile_batch.shape
if n_batch != n_batch0:
raise ValueError(
"Batch sizes for base samples ({}) and "
"for quantiles ({}) disagree".format(n_batch0, n_batch))
# constrain quantile values to be within [0., 1.]
quantile_batch[quantile_batch > 1.] = 1.
quantile_batch[quantile_batch < 0.] = 0.
# process by batch
calibrated_sample_batch = []
batch_range = tqdm.tqdm(range(n_batch)) if verbose else range(n_batch)
for batch_id in batch_range:
base_sample = base_sample_batch[batch_id]
quantile = quantile_batch[batch_id]
calibrated_sample = sample_ecdf(n_sample=n_sample,
base_sample=base_sample,
quantile=quantile,
y_range=y_range,
seed=seed)
calibrated_sample_batch.append(calibrated_sample)
return np.asarray(calibrated_sample_batch)
```
|
{
"source": "jereliu/cabernet",
"score": 3
}
|
#### File: cabernet/controller/bne.py
```python
import collections
import numpy as np
import model
import inference.vi as vi
import inference.predictor as predictor
import inference.cdf as cdf
import util.dtype as dtype_util
DEFAULT_GP_LOG_LS_RESID = np.log(0.2).astype(np.float32)
DEFAULT_CDF_LOG_LS_RESID = np.log(0.3).astype(np.float32)
DEFAULT_N_POST_SAMPLE = 500
DEFAULT_CALIB_PERCENTILES_TRAIN = np.linspace(.0001, .9999, num=15).astype(dtype_util.NP_DTYPE)
DEFAULT_CALIB_PERCENTILES_PRED = np.linspace(.0001, .9999, num=100).astype(dtype_util.NP_DTYPE)
DEFAULT_OUTPUT_PERCENTILES = np.array([10, 25, 50, 75, 90]).astype(dtype_util.NP_DTYPE)
BNE_SUMMARY_NAMES = ("mean", "median", "var", "quantiles", "mean_cdf")
BNESummary = collections.namedtuple("BNESummary",
field_names=BNE_SUMMARY_NAMES)
BNESummary.__new__.__defaults__ = (None,) * len(BNESummary._fields)
# TODO(jereliu): model diagnosis for skewness and kurtosis.
class BNE(object):
def __init__(self,
X, y, base_pred,
X_new, base_pred_new,
X_calib=None, y_calib=None,
X_calib_induce=None, base_pred_calib_induce=None,
log_ls_system=DEFAULT_GP_LOG_LS_RESID,
log_ls_random=DEFAULT_CDF_LOG_LS_RESID,
calib_percentiles_train=DEFAULT_CALIB_PERCENTILES_TRAIN,
calib_percentiles_pred=DEFAULT_CALIB_PERCENTILES_PRED,
pred_percentiles=DEFAULT_OUTPUT_PERCENTILES):
"""Initializer."""
# names of summary statistics
self.summary_names = BNE_SUMMARY_NAMES
self.posterior_summary = BNESummary()
# initialize data
self.X_train = X
self.y_train = y
self.base_pred_train = base_pred
self.X_calib = self.X_train if X_calib is None else X_calib
self.y_calib = self.y_train if y_calib is None else y_calib
self.X_calib_induce = self.X_calib if X_calib_induce is None else X_calib_induce
self.base_pred_calib_induce = (self.base_pred_train if base_pred_calib_induce is None
else base_pred_calib_induce)
self.X_test = X_new
self.base_pred_test = base_pred_new
# initialize hyper parameters
self.log_ls_system = log_ls_system
self.log_ls_random = log_ls_random
self.calib_percentiles_train = calib_percentiles_train
self.calib_percentiles_pred = calib_percentiles_pred
self.pred_percentiles = pred_percentiles
# initialize internal parameters
# model
self.system_model = None
self.random_model = None
# initialize model estimator, predictor and sessions
self.system_estimator = None
self.random_estimator = None
self.system_predictor = None
self.random_predictor = None
self.random_summarizer = None
self.system_session = None
self.random_session = None
# initialize containers for posterior samples at training/predict locations
self.system_model_sample_train = None
self.random_model_sample_train = None
self.system_model_sample_calib = None # for training random component model
self.system_model_sample_pred = None
self.random_model_sample_pred = None
self.system_model_quantile_calib = None # for training random component model
self.system_model_quantile_pred = None
self.random_model_quantile_pred = None
def run_model(self, system_model_kwargs, random_model_kwargs):
"""Estimates and generates predictive samples/quantiles for full model."""
self.run_system_model(**system_model_kwargs)
self.run_random_model(**random_model_kwargs)
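    # Usage sketch (hypothetical data and kwargs; see controller/experiment.py for a
    # concrete configuration built via config.default_kwargs):
    #
    #     bne_model = BNE(X=X_train, y=y_train, base_pred=base_pred_train,
    #                     X_new=X_test, base_pred_new=base_pred_test)
    #     bne_model.run_model(system_model_kwargs, random_model_kwargs)
    #     summary = bne_model.posterior_summary   # BNESummary namedtuple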
def run_system_model(self,
log_ls_system=None, restart_model=True,
n_sample=DEFAULT_N_POST_SAMPLE,
**estimate_kwargs):
"""Estimates and generates predictive samples/quantiles for sys model."""
# define model
if log_ls_system:
self.log_ls_system = log_ls_system
if not self.system_model or restart_model:
self.system_model = (
model.HierarchicalGP(X=self.X_train, y=self.y_train,
base_pred=self.base_pred_train,
resid_log_ls=self.log_ls_system)
)
# ESTIMATION
self._estimate_system_model(**estimate_kwargs)
# PREDICTION
self.system_predictor = predictor.Predictor(estimator=self.system_estimator)
# sample posterior, train, calibration and pred location
self.system_model_sample_train = self._sample_system_model(n_sample)
self.system_model_sample_calib = (
self._pred_sample_system_model(X_new=self.X_calib_induce,
base_pred_new=self.base_pred_calib_induce))
self.system_model_sample_pred = (
self._pred_sample_system_model(X_new=self.X_test,
base_pred_new=self.base_pred_test))
# sample quantiles, calibration location (for training random component model)
self.system_model_quantile_calib = (
self._pred_quantiles_system_model(sample_dict=self.system_model_sample_calib,
perc_eval=self.calib_percentiles_train))
# sample quantiles, predictive location (for prediction output)
self.system_model_quantile_pred = (
self._pred_quantiles_system_model(sample_dict=self.system_model_sample_pred,
perc_eval=self.calib_percentiles_pred))
def run_random_model(self,
log_ls_random=None, restart_model=True,
n_sample=DEFAULT_N_POST_SAMPLE,
**estimate_kwargs):
"""Estimates and generates predictive samples/quantiles for rand model."""
# define model
if log_ls_random:
self.log_ls_random = log_ls_random
if not self.random_model or restart_model:
self.random_model = (
model.MonoGP(X=self.X_calib, y=self.y_calib,
X_induce=self.X_calib_induce,
cdf_sample_induce=self.system_model_quantile_calib,
log_ls=self.log_ls_random)
)
# ESTIMATION
self._estimate_random_model(**estimate_kwargs)
# PREDICTION
self.random_predictor = predictor.Predictor(estimator=self.random_estimator)
# sample posterior, train and pred locations
self.random_model_sample_train = self._sample_random_model(n_sample)
self.random_model_sample_pred = (
self._pred_sample_random_model(X_new=self.X_test,
quant_pred_new=self.system_model_quantile_pred))
# SUMMARIZE
self.random_summarizer = (
cdf.CDFMoments(estimator=self.random_estimator,
cdf_sample_dict=self.random_model_sample_pred))
self.posterior_summary = BNESummary(**self._pred_summary_random_model())
def _estimate_system_model(self,
step_size=1e-2,
max_steps=50000, model_dir=None,
save_step=500, verbose=True,
restart_estimate=False,
**vi_kwargs):
"""Estimates systematic component model."""
# estimation
if not self.system_estimator:
# in case of first run, configure estimator graph
self.system_estimator = vi.VIEstimator(model=self.system_model)
self.system_estimator.config(step_size=step_size, **vi_kwargs)
elif restart_estimate:
# in case of re-run and want to restart, re-config
# estimator graph and erase current session.
self.system_estimator.config(step_size=step_size, **vi_kwargs)
self.system_session = None
else:
# in case of re-run and want to reuse current setting, do nothing
pass
self.system_session = self.system_estimator.run(sess=self.system_session,
max_steps=max_steps,
model_dir=model_dir,
save_step=save_step,
verbose=verbose)
def _estimate_random_model(self,
step_size=1e-2,
max_steps=10000, model_dir=None,
save_step=500, verbose=True,
restart_estimate=False,
**vi_kwargs):
"""Estimates random component model."""
# estimation
if not self.random_estimator:
# in case of first run, configure estimator graph
self.random_estimator = vi.VIEstimator(model=self.random_model)
self.random_estimator.config(step_size=step_size, **vi_kwargs)
elif restart_estimate:
# in case of re-run and want to restart, re-config
# estimator graph and erase current session.
self.random_estimator.config(step_size=step_size, **vi_kwargs)
self.random_session = None
else:
# in case of re-run and want to reuse current setting, do nothing
pass
self.random_session = self.random_estimator.run(sess=self.random_session,
max_steps=max_steps,
model_dir=model_dir,
save_step=save_step,
verbose=verbose)
def _sample_system_model(self, n_sample):
"""Generates posterior samples for model parameters at training locations."""
if not self.system_predictor:
raise ValueError("Predictor for systematic component model empty.")
if not self.system_session:
raise ValueError("Session for systematic component model empty.")
# posterior sampling for training sample
# in-sample posterior sample, training locations
self.system_predictor.config(sample_type="post_sample",
rv_dict=self.system_estimator.model.model_param,
n_sample=n_sample)
sample_dict = self.system_predictor.run(sess=self.system_session)
return sample_dict["post_sample"]
def _sample_random_model(self, n_sample):
"""Generates posterior samples for model parameters at training locations."""
if not self.random_predictor:
raise ValueError("Predictor for random component model is empty.")
if not self.random_session:
raise ValueError("Session for systematic component model is empty.")
# posterior sampling for training sample
# in-sample posterior sample, training locations
self.random_predictor.config(sample_type="post_sample",
rv_dict=self.random_estimator.model.model_param,
n_sample=n_sample)
sample_dict = self.random_predictor.run(sess=self.random_session)
return sample_dict["post_sample"]
def _pred_sample_system_model(self, X_new, base_pred_new):
"""Generates posterior samples at predictive locations."""
if not self.system_model_sample_train:
raise ValueError("Train sample for systematic component model empty.")
if not self.system_session:
raise ValueError("Session for systematic component model empty.")
self.system_predictor.config(sample_type="pred_sample",
X_new=X_new,
base_pred_new=base_pred_new,
post_sample_dict=self.system_model_sample_train)
sample_dict = self.system_predictor.run(sess=self.system_session)
return sample_dict["pred_sample"]
def _pred_sample_random_model(self, X_new, quant_pred_new):
"""Generates posterior samples at predictive locations."""
if not self.system_model_sample_train:
raise ValueError("Train sample for systematic component model empty.")
if not self.system_session:
raise ValueError("Session for systematic component model empty.")
self.random_predictor.config(sample_type="pred_sample",
X_new=X_new,
quant_dict_new=quant_pred_new,
post_sample_dict=self.random_model_sample_train,
verbose=True)
sample_dict = self.random_predictor.run(sess=self.random_session)
return sample_dict["pred_sample"]
def _pred_quantiles_system_model(self, sample_dict, perc_eval):
"""Generates predictive quantiles at predictive locations."""
if not self.system_model_sample_pred:
raise ValueError("Predictive sample for systematic component model empty.")
if not self.system_session:
raise ValueError("Session for systematic component model empty.")
self.system_predictor.config(sample_type="pred_quant",
perc_eval=perc_eval,
sample_dict=sample_dict)
sample_dict = self.system_predictor.run(sess=self.system_session)
return sample_dict["pred_quant"]
def _pred_summary_random_model(self):
"""Computes summary statistics from the estimated CDFs."""
for summary_name in self.summary_names:
self.random_summarizer.config(summary_name,
percentiles=self.pred_percentiles / 100.)
return self.random_summarizer.run()
@property
def log_ls_system(self):
return self.__log_ls_system
@log_ls_system.setter
def log_ls_system(self, value):
self.__log_ls_system = value.astype(dtype_util.NP_DTYPE)
@property
def log_ls_random(self):
return self.__log_ls_random
@log_ls_random.setter
def log_ls_random(self, value):
self.__log_ls_random = value.astype(dtype_util.NP_DTYPE)
```
#### File: cabernet/controller/experiment.py
```python
import os
import functools
import numpy as np
import critique
import util.io as io_util
import util.data as data_util
import util.gp_flow as gp_util
import util.dtype as dtype_util
import util.experiment as experiment_util
import controller.bne as bne
import controller.config as config
import matplotlib.pyplot as plt
DEFAULT_KERN_FUNC_NAMES = ("rbf_0.25", "rbf_1", "period1.5")
DEFAULT_KERN_FUNC_DICT = {
key: value for
key, value in gp_util.DEFAULT_KERN_FUNC_DICT_GPY.items()
if key in DEFAULT_KERN_FUNC_NAMES}
DEFAULT_N_INDUCE_POINTS = 20
DEFAULT_GP_LOG_LS_RESID = np.log(0.2).astype(dtype_util.NP_DTYPE)
DEFAULT_CDF_LOG_LS_RESID = np.log(0.3).astype(dtype_util.NP_DTYPE)
DEFAULT_PERCENTILES_TRAIN = np.linspace(.0001, .9999, num=15).astype(dtype_util.NP_DTYPE)
DEFAULT_PERCENTILES_PRED = np.linspace(.0001, .9999, num=100).astype(dtype_util.NP_DTYPE)
class Experiment(object):
def __init__(self,
experiment_name,
N_train, N_test, N_calib, N_calib_induce, N_valid,
data_gen_func, save_addr):
self.name = experiment_name
self.N_train = N_train
self.N_test = N_test
self.N_calib = N_calib
self.N_calib_induce = N_calib_induce
self.N_valid = N_valid
self.data_gen_func = data_gen_func
self.save_addr = save_addr
self.save_addr_data_plot = "{}/plot_data".format(self.save_addr)
self.save_addr_base_models = (
"{}/base/n_train_{}_n_test_{}_n_calib_{}/".format(
self.save_addr, N_train, N_test, N_calib))
def run(self, **kwargs):
"""Executes full experiment."""
self.prepare_data(**kwargs)
self.plot_data()
self.get_base_models()
self.prepare_vi_data(**kwargs)
self.run_bne_model(**kwargs)
self.compute_metrics()
def prepare_data(self,
seed_train=1000, seed_test=1000,
seed_calib=1000, **kwargs):
# feature generation functions
data_gen_func_x = data_util.gaussian_mix
data_gen_func_x_test = functools.partial(data_util.gaussian_mix,
sd_scale=2.5)
(self.X_train, self.y_train,
self.X_test, self.y_test,
self.X_valid, self.y_valid_sample,
self.calib_sample_id,
self.calib_sample_id_induce) = experiment_util.generate_data_1d(
N_train=self.N_train, N_test=self.N_test,
N_calib=self.N_calib, N_calib_induce=self.N_calib_induce,
N_valid=self.N_valid,
noise_sd=None,
data_gen_func=self.data_gen_func,
data_gen_func_x=data_gen_func_x,
data_gen_func_x_test=data_gen_func_x_test,
data_range=(-6., 6.), valid_range=(-6., 6.),
seed_train=seed_train, seed_test=seed_test,
seed_calib=seed_calib)
self.y_valid = self.y_valid_sample[:, 0]
# calibration data
self.X_calib = self.X_valid[self.calib_sample_id]
self.y_calib = self.y_valid[self.calib_sample_id]
self.X_calib_induce = self.X_valid[self.calib_sample_id_induce]
self.X_test = self.X_calib
self.y_test = self.y_calib
def plot_data(self):
os.makedirs(self.save_addr_data_plot, exist_ok=True)
plt.ioff()
# plot data
plt.figure(figsize=(12, 6))
plt.scatter(np.repeat(self.X_valid, 100),
self.y_valid_sample[:, :100].flatten(), marker=".", s=1)
plt.savefig("{}/data_valid_{}".format(self.save_addr_data_plot,
self.name))
plt.close()
plt.scatter(self.X_train, self.y_train, marker="o", s=5.)
plt.savefig("{}/data_train_{}".format(self.save_addr_data_plot,
self.name))
plt.close()
plt.scatter(self.X_test, self.y_test, marker="o", s=5.)
plt.savefig("{}/data_test_{}".format(self.save_addr_data_plot,
self.name))
plt.close()
plt.ion()
def get_base_models(self):
load_res = None
while load_res is None:
try:
load_res = io_util.load_results(["base_test_pred.pkl",
"base_valid_pred.pkl"],
file_addr=self.save_addr_base_models)
except FileNotFoundError:
self.prepare_base_models()
self.pred_dict_test = load_res["base_test_pred.pkl"]
self.pred_dict_valid = load_res["base_valid_pred.pkl"]
self.pred_dict_calib_induce = {
key: value[self.calib_sample_id_induce]
for key, value in self.pred_dict_valid.items()}
def prepare_base_models(self):
os.makedirs(self.save_addr_base_models, exist_ok=True)
y_valid_mean = np.mean(self.y_valid_sample, axis=1)
gp_util.fit_base_gp_models(self.X_train, self.y_train,
self.X_test, self.y_test,
self.X_valid, y_valid_mean,
kern_func_dict=DEFAULT_KERN_FUNC_DICT,
n_valid_sample=1000,
save_addr_prefix=self.save_addr_base_models,
y_range=[-2.5, 2.5])
def prepare_vi_data(self, n_induce=DEFAULT_N_INDUCE_POINTS, **kwargs):
        induce_index = np.linspace(0, self.N_valid - 1, n_induce).astype(int)
self.X_induce = self.X_valid[induce_index, ...]
self.X_induce_mean = self.X_test
def run_bne_model(self,
system_model_kwargs=None, random_model_kwargs=None,
**kwargs):
"""Configure and executes BNE model."""
if system_model_kwargs is None or random_model_kwargs is None:
self.system_model_kwargs, self.random_model_kwargs = (
config.default_kwargs(self.X_induce, self.X_induce_mean))
# initiate model
self.bne_model = bne.BNE(X=self.X_test, y=self.y_test, base_pred=self.pred_dict_test,
X_new=self.X_valid, base_pred_new=self.pred_dict_valid,
X_calib=self.X_calib, y_calib=self.y_calib,
X_calib_induce=self.X_calib_induce,
base_pred_calib_induce=self.pred_dict_calib_induce,
log_ls_system=DEFAULT_GP_LOG_LS_RESID,
log_ls_random=DEFAULT_CDF_LOG_LS_RESID,
calib_percentiles_train=DEFAULT_PERCENTILES_TRAIN,
calib_percentiles_pred=DEFAULT_PERCENTILES_PRED)
# run inference
self.bne_model.run_model(self.system_model_kwargs,
self.random_model_kwargs)
def compute_metrics(self):
self.eval_metrics = critique.EvalMetrics(bne_model=self.bne_model,
X_valid=self.X_valid,
y_valid_sample=self.y_valid_sample)
```
#### File: cabernet/util/kernel.py
```python
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
def square_dist(X, X2=None, ls=1.):
"""Computes Square distance between two sets of features.
Referenced from GPflow.kernels.Stationary.
Args:
X: (tf.Tensor) First set of features of dim N x D.
X2: (tf.Tensor or None) Second set of features of dim N2 x D.
ls: (float) value for length scale.
Returns:
(tf.Tensor) A N x N2 tensor for ||x-x'||^2 / ls**2
Raises:
(ValueError) If feature dimension of X and X2 disagrees.
"""
N, D = X.shape
X = X / ls
Xs = tf.reduce_sum(tf.square(X), axis=1)
if X2 is None:
dist = -2 * tf.matmul(X, X, transpose_b=True)
dist += tf.reshape(Xs, (-1, 1)) + tf.reshape(Xs, (1, -1))
return tf.clip_by_value(dist, 0., np.inf)
N2, D2 = X2.shape
if D != D2:
raise ValueError('Dimension of X and X2 does not match.')
X2 = X2 / ls
X2s = tf.reduce_sum(tf.square(X2), axis=1)
dist = -2 * tf.matmul(X, X2, transpose_b=True)
dist += tf.reshape(Xs, (-1, 1)) + tf.reshape(X2s, (1, -1))
return tf.clip_by_value(dist, 0., np.inf)
def rbf(X, X2=None, ls=1., ridge_factor=0.):
"""Defines RBF kernel.
    k(x, x') = exp(-||x - x'||**2 / (2 * ls**2))
Args:
X: (tf.Tensor) First set of features of dim N x D.
X2: (tf.Tensor or None) Second set of features of dim N2 x D.
ls: (float) value for length scale
ridge_factor: (float32) ridge factor to stabilize Cholesky decomposition.
Returns:
        (tf.Tensor) A N x N2 tensor for exp(-||x - x'||**2 / (2 * ls**2))
"""
N, _ = X.shape.as_list()
if ridge_factor and X2 is None:
ridge_mat = ridge_factor * tf.eye(N, dtype=tf.float32)
else:
ridge_mat = 0
return tf.exp(-square_dist(X, X2, ls=ls) / 2) + ridge_mat
```
#### File: cabernet/util/visual.py
```python
import os
import pathlib
import tqdm
import pandas as pd
import numpy as np
import scipy.stats as stats
import scipy.signal as signal
import statsmodels.nonparametric.api as smnp
import matplotlib.pyplot as plt
import seaborn as sns
from deprecated.calibration import coverage
import util.metric as metric_util
from matplotlib.colors import BoundaryNorm
def gpr_1d_visual(pred_mean,
pred_cov=None, pred_quantiles=[],
pred_samples=None,
X_train=None, y_train=None,
X_test=None, y_test=None, X_induce=None,
compute_rmse=True, rmse_id=None,
quantile_colors=None, quantile_alpha=0.1,
y_range=None, add_reference=False,
quantile_shade_legend=None,
title="", save_addr="", fontsize=12,
quantile_colors_norm=None, ax=None,
smooth_mean=False, smooth_quantile=True,
pred_mean_color='blue',
pred_mean_alpha=0.25, figsize=None):
"""Plots the GP posterior predictive mean and uncertainty.
Args:
pred_mean: (np.ndarray) posterior predictive mean at X_test
pred_cov: (np.ndarray or None) posterior predictive variance at X_test
pred_quantiles: (list of tuples) list of tuples of (upper, lower)
of np.ndarrays for the predictive quantiles.
Ignored if pred_cov is not None.
pred_samples: (list of np.ndarray) list of np.ndarray of samples from posterior.
X_train: (np.ndarray) X values in training dataset.
y_train: (np.ndarray) y values in training dataset.
X_test: (np.ndarray) X values in test dataset.
y_test: (np.ndarray) y values in test dataset.
X_induce: (np.ndarray) X values marking the position of inducing points.
compute_rmse: (bool) Whether to compute test RMSE.
rmse_id: (np.ndarray of int or None) Subset of X_test to compute
rmse on. If None then all X_test are used.
quantile_shade_legend: (list of str or None) Legend names for quantile shades. If None then no
legend will be added.
title: (str) Title of the image.
save_addr: (str) Address to save image to.
fontsize: (int) font size for title and axis labels
Raises:
(ValueError) If y_test is not multiple of X_test.
"""
# TODO(jereliu): Write a save function decorator.
if save_addr:
pathlib.Path(save_addr).parent.mkdir(parents=True, exist_ok=True)
plt.ioff()
if ax is None:
_, ax = plt.subplots(figsize=figsize)
# plot predictions:
X_test = np.unique(X_test, axis=0)
# posterior predictive
if isinstance(pred_mean, np.ndarray):
pred_mean = pred_mean.squeeze()[:len(X_test)]
if smooth_mean:
# compute window length for filter
window_len = len(pred_mean) // 15
if window_len % 2 == 0:
# savgol_filter requires odd window size
window_len = window_len + 1
pred_mean = signal.savgol_filter(pred_mean, window_len, polyorder=3)
ax.plot(X_test.squeeze(), pred_mean.squeeze(),
c=pred_mean_color, alpha=pred_mean_alpha)
# posterior confidence interval based on std
if isinstance(pred_cov, np.ndarray):
pred_cov = pred_cov.squeeze()[:len(X_test)]
    # compute three sets of predictive quantiles (mean +/- 1, 2, 3 sd)
pred_quantiles = [(pred_mean + np.sqrt(pred_cov),
pred_mean - np.sqrt(pred_cov)),
(pred_mean + 2 * np.sqrt(pred_cov),
pred_mean - 2 * np.sqrt(pred_cov)),
(pred_mean + 3 * np.sqrt(pred_cov),
pred_mean - 3 * np.sqrt(pred_cov))]
# posterior quantile
if isinstance(pred_quantiles, list):
if quantile_colors is None:
quantile_colors = ["black"] * len(pred_quantiles)
shade_list = []
if isinstance(quantile_alpha, float):
quantile_alpha = [quantile_alpha]
if len(quantile_alpha) == 1:
quantile_alpha = list(quantile_alpha) * len(pred_quantiles)
for id, (upper, lower) in enumerate(pred_quantiles):
upper = upper.squeeze()[:len(X_test)]
lower = lower.squeeze()[:len(X_test)]
if smooth_quantile:
# compute window length for filter
window_len = len(upper) // 8
if window_len % 2 == 0:
# savgol_filter requires odd window size
window_len = window_len + 1
upper = signal.savgol_filter(upper, window_len, polyorder=3)
lower = signal.savgol_filter(lower, window_len, polyorder=3)
if isinstance(quantile_colors, np.ndarray):
quantile_shade = rainbow_fill_between(ax, X_test.squeeze(), upper, lower,
colors=quantile_colors,
norm=quantile_colors_norm,
alpha=quantile_alpha[id])
else:
# first wash out previous color
ax.fill_between(X_test.squeeze(), upper, lower,
color="white",
edgecolor=None, linewidth=0.0)
quantile_shade = ax.fill_between(X_test.squeeze(), upper, lower,
color=quantile_colors[id],
alpha=quantile_alpha[id],
edgecolor=None, linewidth=0.0)
shade_list.append(quantile_shade)
if quantile_shade_legend:
plt.legend(shade_list, quantile_shade_legend)
# plot training data
if isinstance(X_train, np.ndarray):
if X_train.size < 50:
ax.plot(X_train.squeeze(), y_train.squeeze(), 'o',
c='red', markeredgecolor='black')
elif X_train.size < 100:
ax.plot(X_train.squeeze(), y_train.squeeze(), '.',
c='red', alpha=.5)
else:
ax.scatter(X_train.squeeze(), y_train.squeeze(), marker='.',
c='red', alpha=.5, s=1)
if isinstance(X_induce, np.ndarray):
for x_vertical in X_induce:
plt.axvline(x=x_vertical, c='black', alpha=.05)
# posterior samples
if isinstance(pred_samples, list):
for pred_sample in pred_samples:
pred_sample = pred_sample.squeeze()[:len(X_test)]
ax.plot(X_test.squeeze(), pred_sample,
color='teal', alpha=.01, linewidth=2)
# plot ground truth
if y_test is not None:
# compute rmse
if compute_rmse and pred_mean is not None:
if isinstance(rmse_id, np.ndarray):
test_rmse = metric_util.rmse(y_test[rmse_id],
pred_mean[rmse_id])
else:
test_rmse = metric_util.rmse(y_test, pred_mean)
title = '{}, RMSE={:.4f}'.format(title, test_rmse)
# plot y_test
if isinstance(X_test, np.ndarray):
y_X_ratio = len(y_test) / len(X_test)
if y_X_ratio.is_integer():
y_X_ratio = int(y_X_ratio)
for fold_index in range(y_X_ratio):
index_start = int(fold_index * len(X_test))
index_end = int((fold_index + 1) * len(X_test))
y_test_plot = y_test.squeeze()[index_start:index_end]
ax.plot(X_test.squeeze(), y_test_plot, c='black')
else:
raise ValueError("y_test must be multiple of X_test.")
ax.set_title(title, fontsize=fontsize)
if y_range is not None:
ax.set_ylim(y_range)
if add_reference:
ax.axhline(y=0, c='black')
if save_addr:
plt.savefig(save_addr)
plt.close()
plt.ion()
return ax
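# Minimal usage sketch (illustrative only; the arrays below are placeholders
# for real posterior output at X_test plus the train/test splits).
#   ax = gpr_1d_visual(pred_mean, pred_cov=pred_var,
#                      X_train=X_train, y_train=y_train,
#                      X_test=X_test, y_test=y_test,
#                      title="GP posterior", save_addr="plots/gp_1d.png")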
def gpr_2d_visual(pred_mean, pred_cov,
X_train, y_train, X_test, y_test,
title="", save_addr="", fontsize=12):
if save_addr:
pathlib.Path(save_addr).parent.mkdir(parents=True, exist_ok=True)
plt.ioff()
# prediction surface
n_reshape = int(np.sqrt(pred_mean.size))
pred_mean_plot = pred_mean.reshape(n_reshape, n_reshape)
X_valid = X_test.reshape(n_reshape, n_reshape, 2)
x_grid, y_grid = X_valid[:, :, 0], X_valid[:, :, 1]
ax = plt.axes(projection='3d')
if isinstance(X_train, np.ndarray):
ax.scatter(X_train[:, 0], X_train[:, 1], y_train, c="black")
ax.plot_surface(X=x_grid, Y=y_grid, Z=pred_mean_plot, cmap='inferno')
ax.set_zlim(np.min(y_test), np.max(y_test))
# optionally, compute RMSE
if pred_mean.size == y_test.size:
rmse = metric_util.rmse(y_test, pred_mean)
title = "{}, RMSE={:.4f}".format(title, rmse)
plt.title(title, fontsize=fontsize)
if save_addr:
plt.savefig(save_addr)
plt.close()
plt.ion()
def plot_base_prediction(base_pred, model_names,
X_valid, y_valid=None,
X_train=None, y_train=None,
X_test=None, y_test=None,
ax=None, y_range=[-4.5, 4.5],
save_addr="", title_size=12, legend_size=12,
**kwargs):
if save_addr:
pathlib.Path(save_addr).parent.mkdir(parents=True, exist_ok=True)
plt.ioff()
base_pred_plot = np.asarray([base_pred[model_name]
for model_name in model_names])
# prepare for plotting predictions
sns_data = pd.DataFrame(
{"x": np.tile(X_valid.squeeze(), reps=len(base_pred)),
"y": np.concatenate(base_pred_plot),
"model": np.repeat(model_names, repeats=X_valid.shape[0])})
# plot baseline predictions.
if not ax:
fig, ax = plt.subplots(1, 1)
sns.lineplot(x="x", y="y", hue="model", alpha=0.7,
data=sns_data, ax=ax, **kwargs)
if isinstance(y_train, np.ndarray):
ax.plot(X_train.squeeze(), y_train.squeeze(),
'o', c='red', markeredgecolor='black')
if isinstance(y_test, np.ndarray):
ax.plot(X_test.squeeze(), y_test.squeeze(),
'o', c='blue', markeredgecolor='black')
if isinstance(y_valid, np.ndarray):
ax.plot(X_valid, y_valid, c='black')
if y_range is not None:
ax.set_ylim(y_range)
ax.set_title("Base Model Predictions", fontsize=title_size)
ax.legend(loc='lower left', prop={'size': legend_size})
if save_addr:
plt.savefig(save_addr)
plt.close()
plt.ion()
def plot_ensemble_weight_mean_1d(X, weight_sample, model_names="",
ax_mean=None,
save_addr_prefix=""):
"""Plots the posterior mean and median of weight sample for K models.
Args:
X: (np.ndarray of float32) A 1D array of feature values, dimension (N_obs, )
weight_sample: (np.ndarray of float32) Sample of model ensemble weights
dimension (N_sample, N_obs, num_models).
model_names: (list of str) list of model names, dimension (num_models, ).
save_addr_prefix: (str) Prefix for save address.
"""
_, _, num_models = weight_sample.shape
weight_mean = np.nanmean(weight_sample, axis=0)
# plot posterior mean
if save_addr_prefix:
pathlib.Path(save_addr_prefix).mkdir(parents=True, exist_ok=True)
plt.ioff()
if not ax_mean:
fig_mean, ax_mean = plt.subplots(1, 1)
for k in range(num_models):
ax_mean.plot(X.squeeze(), weight_mean[:, k],
label=model_names[k] if model_names else "")
ax_mean.set_ylim(-0.05, 1.05)
ax_mean.set_title("Ensemble Weights, Posterior Mean")
if model_names:
ax_mean.legend(loc='upper left')
if save_addr_prefix:
plt.savefig("{}_weight_mean.png".format(save_addr_prefix))
plt.close()
plt.ion()
def plot_ensemble_weight_median_1d(X, weight_sample, model_names="",
ax_median=None,
save_addr_prefix=""):
"""Plots the posterior mean and median of weight sample for K models.
Args:
X: (np.ndarray of float32) A 1D array of feature values, dimension (N_obs, )
weight_sample: (np.ndarray of float32) Sample of model ensemble weights
dimension (N_sample, N_obs, num_models).
model_names: (list of str) list of model names, dimension (num_models, ).
save_addr_prefix: (str) Prefix for save address.
"""
_, _, num_models = weight_sample.shape
weight_median = np.nanpercentile(weight_sample, q=50, axis=0)
weight_lower = np.nanpercentile(weight_sample, q=25, axis=0)
weight_upper = np.nanpercentile(weight_sample, q=75, axis=0)
# plot posterior median
if save_addr_prefix:
pathlib.Path(save_addr_prefix).mkdir(parents=True, exist_ok=True)
plt.ioff()
if not ax_median:
fig_med, ax_median = plt.subplots(1, 1)
for k in range(num_models):
# plot median
ax_median.plot(X.squeeze(), weight_median[:, k],
label=model_names[k] if model_names else "")
# plot 50% confidence interval
ax_median.fill_between(X.squeeze(),
y1=weight_lower[:, k], y2=weight_upper[:, k],
alpha=0.1)
ax_median.set_ylim(-0.05, 1.05)
ax_median.set_title("Ensemble Weights, Posterior Median")
if model_names:
ax_median.legend(loc='upper left')
if save_addr_prefix:
plt.savefig("{}_weight_median.png".format(save_addr_prefix))
plt.close()
plt.ion()
# plot posterior mean
if save_addr_prefix:
plt.ioff()
def plot_ensemble_weight_mean_2d(X, weight_sample, model_names,
save_addr_prefix=""):
"""Plots the posterior mean and median of weight sample for K models.
Args:
X: (np.ndarray of float32) A 1D array of feature values, dimension (N_obs, )
weight_sample: (np.ndarray of float32) Sample of model ensemble weights
dimension (N_sample, N_obs, num_models).
model_names: (list of str) list of model names, dimension (num_models, ).
save_addr_prefix: (str) Prefix for save address.
"""
_, _, num_models = weight_sample.shape
weight_mean = np.nanmean(weight_sample, axis=0)
# plot posterior mean
if save_addr_prefix:
pathlib.Path("{}/weight_mean/".format(save_addr_prefix)).mkdir(
parents=True, exist_ok=True)
for k in range(num_models):
gpr_2d_visual(weight_mean[:, k], None,
None, None, X, np.array([-0.05, 1.05]),
title="Ensemble Posterior Mean, {}".format(model_names[k]),
save_addr="{}/weight_mean/{}.png".format(
save_addr_prefix, model_names[k]))
def prob_calibration_1d(Y_obs, Y_sample, title="", save_addr="", fontsize=12):
"""Plots the reliability diagram (i.e. CDF for F^{-1}(y) ) for 1D prediction.
Args:
Y_obs: (np.ndarray of float32) N observations of dim (N, 1)
Y_sample: (np.ndarray of float32) Samples of size M corresponding
to the N observations. dim (N, M)
title: (str) Title of the image.
save_addr: (str) Address to save image to.
fontsize: (int) font size for title and axis labels
"""
if save_addr:
pathlib.Path(save_addr).parent.mkdir(parents=True, exist_ok=True)
plt.ioff()
ecdf_sample = metric_util.ecdf_eval(Y_obs, Y_sample)
ecdf_func = metric_util.make_empirical_cdf_1d(ecdf_sample)
ecdf_eval = np.linspace(0, 1, 1000)
ecdf_valu = ecdf_func(ecdf_eval)
fig, ax = plt.subplots()
ax.plot(ecdf_eval, ecdf_eval, c="black")
ax.plot(ecdf_eval, ecdf_valu)
total_variation = np.mean(np.abs(ecdf_eval - ecdf_valu))
plt.title("Reliability Index, {}, Score: {:.3f}".format(
title, total_variation), fontsize=fontsize)
plt.xlabel(r"Empirical CDF for $\hat{F}(Y_i|X_i)$", fontsize=fontsize)
plt.ylabel("Expected CDF $Uniform(0, 1)$", fontsize=fontsize)
if save_addr:
plt.savefig(save_addr)
plt.close()
plt.ion()
def coverage_index_1d(Y_obs, Y_sample, title="", save_addr="", fontsize=12):
"""Plots the reliability diagram (i.e. CDF for F^{-1}(y) ) for 1D prediction.
Args:
Y_obs: (np.ndarray of float32) N observations of dim (N_obs, 1)
Y_sample: (np.ndarray of float32) Samples of size M corresponding
to the N observations. dim (N_obs, N_sample)
title: (str) Title of the image.
save_addr: (str) Address to save image to.
fontsize: (int) font size for title and axis labels
"""
if save_addr:
pathlib.Path(save_addr).parent.mkdir(parents=True, exist_ok=True)
plt.ioff()
nom_coverage, obs_coverage = metric_util.credible_interval_coverage(
Y_obs, Y_sample)
fig, ax = plt.subplots()
ax.plot(nom_coverage, nom_coverage, c="black")
ax.plot(nom_coverage, obs_coverage)
total_variation = np.mean(np.abs(obs_coverage - nom_coverage))
plt.title("Coverage Index, {}, Score: {:.3f}".format(
title, total_variation), fontsize=fontsize)
plt.xlabel("Claimed Credible Interval Coverage", fontsize=fontsize)
plt.ylabel("Observed Credible Interval Coverage", fontsize=fontsize)
if save_addr:
plt.savefig(save_addr)
plt.close()
plt.ion()
def marginal_calibration_1d(Y_obs, Y_sample, title="", save_addr=""):
"""Plots the reliability diagram (i.e. CDF for F^{-1}(y) ) for 1D prediction.
Args:
Y_obs: (np.ndarray of float32) N observations of dim (N, 1)
Y_sample: (np.ndarray of float32) Monte Carlo Samples of size M
corresponding to the N observations. dim (N, M)
title: (str) Title of the image.
save_addr: (str) Address to save image to.
"""
if save_addr:
pathlib.Path(save_addr).parent.mkdir(parents=True, exist_ok=True)
plt.ioff()
ecdf_eval = np.linspace(np.min(Y_obs), np.max(Y_obs), 1000)
ecdf_obsv = metric_util.make_empirical_cdf_1d(Y_obs)
ecdf_pred = metric_util.make_empirical_cdf_1d(Y_sample)
ecdf_sample_obsv = ecdf_obsv(ecdf_eval)
ecdf_sample_pred = ecdf_pred(ecdf_eval)
fig, ax = plt.subplots()
ax.plot((0, 1), (0, 1), c="black")
ax.plot(ecdf_sample_obsv, ecdf_sample_pred)
plt.xlabel("Empirical Distribution")
plt.ylabel("Predictive Distribution")
plt.title("Marginal Calibration, {}".format(title))
if save_addr:
plt.savefig(save_addr)
plt.close()
plt.ion()
def corr_matrix(corr_mat, ax=None, model_names="auto", save_addr=""):
"""Visualize correlation matrix."""
if save_addr:
pathlib.Path(save_addr).parent.mkdir(parents=True, exist_ok=True)
plt.ioff()
if not ax:
fig, ax = plt.subplots(1, 1)
# mask upper triangle
  mask = np.zeros_like(corr_mat, dtype=bool)
mask[np.triu_indices_from(mask)] = True
cmap = sns.diverging_palette(240, 10, sep=160, n=256, as_cmap=True)
sns.heatmap(corr_mat,
mask=mask, cmap=cmap,
annot=True, annot_kws={'color': 'white'},
xticklabels=model_names,
yticklabels=model_names,
vmin=-1., vmax=1., center=0,
square=True, linewidths=.5,
ax=ax)
plt.yticks(rotation=0)
if save_addr:
plt.savefig(save_addr)
plt.close()
plt.ion()
def model_composition_1d(X_value, corr_mat, weight_sample,
base_pred, X_valid, y_valid, X_train, y_train,
model_names, save_addr=""):
"""Plot aligned graph with base prediction at left and correlation at right."""
if save_addr:
pathlib.Path(save_addr).parent.mkdir(parents=True, exist_ok=True)
plt.ioff()
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(18, 5))
# First plot: Base Model Fit
plot_base_prediction(base_pred=base_pred,
X_valid=X_valid, y_valid=y_valid,
X_train=X_train, y_train=y_train, ax=ax1)
ax1.axvline(X_value, c='red', alpha=0.5, linewidth=2)
ax1.set(adjustable='box-forced')
# Second plot: Mean prediction
plot_ensemble_weight_mean_1d(X=X_valid,
weight_sample=weight_sample,
model_names=model_names,
ax_mean=ax2)
ax2.axvline(X_value, c='red', alpha=0.5, linewidth=2)
ax2.set(adjustable='box-forced')
corr_matrix(corr_mat, model_names=model_names, ax=ax3)
ax3.set_title("X={}".format(X_value))
ax3.set(adjustable='box-forced')
if save_addr:
plt.savefig(save_addr)
plt.close()
plt.ion()
def posterior_heatmap_2d(plot_data, X,
X_monitor=None,
cmap='inferno_r',
norm=None, norm_method="percentile",
save_addr=''):
"""Plots colored 2d heatmap using scatterplot.
Args:
plot_data: (np.ndarray) plot data whose color to visualize over
2D surface, shape (N, ).
X: (np.ndarray) locations of the plot data, shape (N, 2).
X_monitor: (np.ndarray or None) Locations to plot data points to.
cmap: (str) Name of color map.
norm: (BoundaryNorm or None) Norm values to adjust color map.
If None then a new norm will be created according to norm_method.
norm_method: (str) The name of method to compute norm values.
See util.visual.make_color_norm for detail.
save_addr: (str) Address to save image to.
Returns:
(matplotlib.colors.BoundaryNorm) A color norm object for color map
to be passed to a matplotlib.pyplot function.
"""
if save_addr:
pathlib.Path(save_addr).parent.mkdir(parents=True, exist_ok=True)
plt.ioff()
if not norm:
norm = make_color_norm(plot_data, method=norm_method)
# 2d color plot using scatter
plt.figure(figsize=(10, 8))
plt.scatter(x=X[:, 0], y=X[:, 1],
s=3,
c=plot_data, cmap=cmap, norm=norm)
cbar = plt.colorbar()
# plot monitors
if isinstance(X_monitor, np.ndarray):
plt.scatter(x=X_monitor[:, 0], y=X_monitor[:, 1],
s=10, c='black')
# adjust plot window
plt.xlim((np.min(X[:, 0]), np.max(X[:, 0])))
plt.ylim((np.min(X[:, 1]), np.max(X[:, 1])))
if save_addr:
plt.savefig(save_addr, bbox_inches='tight')
plt.close()
plt.ion()
else:
plt.show()
return norm
def make_color_norm(color_data, method="percentile"):
"""Makes color palette norm for heatmap plots.
Args:
color_data: (np.ndarray or list) Either a single numpy array or
a list of numpy array that records numeric values to adjust
color map to.
method: (str) The name of method to compute norm values:
percentile: Adjust norm to the raw percentile of color_data.
residual: Adjust norm to the symmetric range of
[-min(abs(data)), -max(abs(data))].
Color norm values will space out evenly in between the range.
residual_percentile: Similar to 'residual'.
But color norm values will be adjusted with respect to the
percentile of abs(data).
Returns:
(matplotlib.colors.BoundaryNorm) A color norm object for color map
to be passed to a matplotlib.pyplot function.
"""
if isinstance(color_data, list):
color_data = np.concatenate(color_data)
if method == "percentile":
levels = np.percentile(color_data,
np.linspace(0, 100, 101))
elif method == "residual":
abs_max = np.max(np.abs(color_data))
levels = np.linspace(-abs_max, abs_max, 101)
elif method == "residual_percentile":
abs_levels = np.percentile(np.abs(color_data),
np.linspace(0, 100, 101))
levels = np.sort(np.concatenate([-abs_levels, abs_levels]))
else:
raise ValueError("Method {} is not supported".format(method))
return BoundaryNorm(levels, 256)
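# Minimal usage sketch (illustrative only; `plot_values` and `locations` are
# placeholder arrays of shapes (N,) and (N, 2) respectively).
#   norm = make_color_norm(plot_values, method="residual")
#   posterior_heatmap_2d(plot_values, X=locations, norm=norm, cmap="RdBu_r")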
def scaled_1d_kde_plot(data, shade, bandwidth='scott',
vertical=False, legend=False, ax=None,
density_scale=None, **kwargs):
"""Plot a univariate kernel density estimate on one of the axes.
  Adapted from _univariate_kdeplot from seaborn but allows the user to
  scale density estimates using density_scale.
"""
if ax is None:
ax = plt.gca()
# Calculate the KDE
kde = smnp.KDEUnivariate(data.astype('double'))
kde.fit(bw=bandwidth)
x, y = kde.support, kde.density
if density_scale:
y = density_scale * y / np.max(y)
# Make sure the density is nonnegative
y = np.amax(np.c_[np.zeros_like(y), y], axis=1)
# Flip the data if the plot should be on the y axis
if vertical:
x, y = y, x
# Check if a label was specified in the call
label = kwargs.pop("label", None)
# Otherwise check if the data object has a name
if label is None and hasattr(data, "name"):
label = data.name
# Decide if we're going to add a legend
legend = label is not None and legend
label = "_nolegend_" if label is None else label
# Use the active color cycle to find the plot color
facecolor = kwargs.pop("facecolor", None)
line, = ax.plot(x, y, **kwargs)
color = line.get_color()
line.remove()
kwargs.pop("color", None)
facecolor = color if facecolor is None else facecolor
# Draw the KDE plot and, optionally, shade
ax.plot(x, y, color=color, label=label, **kwargs)
shade_kws = dict(
facecolor=facecolor,
alpha=kwargs.get("alpha", 0.25),
clip_on=kwargs.get("clip_on", True),
zorder=kwargs.get("zorder", 1),
)
if shade:
if vertical:
ax.fill_betweenx(y, 0, x, **shade_kws)
else:
ax.fill_between(x, 0, y, **shade_kws)
# Set the density axis minimum to 0
ax.set_ylim(0, auto=None)
# Draw the legend here
handles, labels = ax.get_legend_handles_labels()
return ax, x, y
def add_vertical_segment(x, height, **kwargs):
"""Adds a vertical segment to plot."""
plt.plot([x, x], [0, height], **kwargs)
def compare_local_cdf_1d(X_pred, y_post_sample, y_true_sample,
n_x_eval=100, n_cdf_eval=1000, n_max_sample=100,
y_eval_grid=None,
save_addr='', **local_ecdf_kwargs):
"""
Args:
X_pred: (np.ndarray) feature locations, size (N, 1)
y_post_sample: (np.ndarray) y samples from model distribution, size (N, M_post_sample)
y_true_sample: (np.ndarray) y samples from true distribution. size (N, M_true_sample)
n_x_eval: (int) Number of locations to compute cdfs at within range of X_eval .
n_cdf_eval: (int) Number of cdf evaluations.
n_max_sample: (int) Maximum number of sample to take to compute ecdf.
save_addr: (str) Parent address to save figures to.
Raises:
(ValueError) If save_addr is None
"""
if not save_addr:
raise ValueError('save_addr cannot be None.')
local_ecdf_kwargs['y_eval_grid'] = y_eval_grid
(ecdf_diff, ecdf_true, ecdf_modl,
X_eval, y_eval_grid, X_pred, y_true_sample) = (
metric_util.ecdf_l1_dist(X_pred, y_post_sample, y_true_sample,
n_x_eval=n_x_eval, n_cdf_eval=n_cdf_eval,
n_max_sample=n_max_sample,
return_addtional_data=True,
**local_ecdf_kwargs))
if save_addr:
os.makedirs(save_addr, exist_ok=True)
plt.ioff()
for x_id in tqdm.tqdm(range(len(X_eval))):
save_name = os.path.join(save_addr, "{}.png".format(x_id))
#
plt.figure(figsize=(14, 6))
plt.subplot(221)
plt.scatter(X_pred, y_true_sample, marker='.', s=0.1)
plt.axvline(x=X_eval[x_id], c='red')
plt.subplot(223)
plt.plot(X_eval, ecdf_diff)
plt.axvline(x=X_eval[x_id], c='red')
plt.ylim(0, 0.2)
plt.title("L1 Distance = {:3f}".format(np.mean(ecdf_diff)))
#
plt.subplot(122)
quantile_val = np.linspace(0, 1, n_cdf_eval)
y_eval_data = y_eval_grid[x_id] if y_eval_grid.ndim > 1 else y_eval_grid
plt.plot(y_eval_data, ecdf_modl[x_id])
plt.plot(y_eval_data, ecdf_true[x_id])
plt.title("x = {:.3f}".format(X_eval[x_id]))
plt.legend(('Model CDF', 'Data CDF'))
if save_addr:
plt.savefig(save_name,
bbox_inches='tight', pad_inches=0)
plt.close()
if save_addr:
plt.ion()
""" Helper functions """
# Plot a rectangle
def rect(ax, x, y, w, h, c, **kwargs):
# Varying only in x
  if len(c.shape) == 1:
rect = plt.Rectangle((x, y), w, h, color=c, ec=c, **kwargs)
ax.add_patch(rect)
# Varying in x and y
else:
# Split into a number of bins
N = c.shape[0]
    hb = h / float(N)
yl = y
for i in range(N):
yl += hb
rect = plt.Rectangle((x, yl), w, hb,
color=c[i, :], ec=c[i, :], **kwargs)
ax.add_patch(rect)
# Fill a contour between two lines
def rainbow_fill_between(ax, X, Y1, Y2,
colors=None, norm=None,
cmap=plt.get_cmap("RdBu_r"), **kwargs):
plt.plot(X, Y1, lw=0) # Plot so the axes scale correctly
dx = X[1] - X[0]
N = X.size
# Pad a float or int to same size as x
if (type(Y2) is float or type(Y2) is int):
Y2 = np.array([Y2] * N)
# No colors -- specify linear
if norm is not None and colors is not None:
cmap_norm = norm(colors)
colors = cmap(cmap_norm)
# if colors is None:
# colors = []
# for n in range(N):
# colors.append(cmap(n / float(N)))
# # Varying only in x
# elif len(colors.shape) is 1:
# colors = cmap((colors - colors.min())
# / (colors.max() - colors.min()))
# # Varying only in x and y
# else:
# cnp = np.array(colors)
# colors = np.empty([colors.shape[0], colors.shape[1], 4])
# for i in range(colors.shape[0]):
# for j in range(colors.shape[1]):
# colors[i, j, :] = cmap((cnp[i, j] - cnp[:, :].min())
# / (cnp[:, :].max() - cnp[:, :].min()))
colors = np.array(colors)
# Create the patch objects
for (color, x, y1, y2) in zip(colors, X, Y1, Y2):
rect(ax, x, y2, dx, y1 - y2, color)
return ax
def add_color_bar(color_data, norm, cmap=plt.get_cmap("RdBu_r"),
h_w_ratio=10, ytick_num=10, ax=None,
color_label=None,
orientation="vertical"):
"""Plot a color bar to axis according to specified color range."""
if not ax:
_, ax = plt.subplots()
if not color_label:
color_label = color_data
N_color_data = color_data.size
# produce color data
color_data_norm = norm(color_data)
colors = cmap(color_data_norm)
# reshape so it is displayed horizontally/vertically
if orientation == "vertical":
colors = np.expand_dims(colors, axis=1)
colors = np.repeat(colors, N_color_data // h_w_ratio, axis=1)
else:
colors = np.expand_dims(colors, axis=0)
colors = np.repeat(colors, N_color_data // h_w_ratio, axis=0)
# plot
ax.imshow(colors, origin='lower')
# adjust tick
tick_id = np.arange(0, N_color_data + 1, step=N_color_data // ytick_num)
tick_id[-1] = N_color_data - 1
if orientation == "vertical":
ax.yaxis.set_ticks(tick_id)
ax.set_yticklabels(np.round(color_data[tick_id], 1))
ax.set_xticklabels([])
else:
ax.xaxis.set_ticks(tick_id)
ax.set_xticklabels(np.round(color_data[tick_id], 1))
ax.set_yticklabels([])
return ax
"""Default color norm"""
SIGNIFICANT_NORM = make_color_norm(
[np.linspace(0, 0.05, 40),
np.linspace(0.05, 0.95, 20),
np.linspace(0.95, 1, 40)],
method="percentile")
UNC_COLOR_PALETTE = {
"para": "#ED553B",
"str_system": "#20639B",
"str_random": "#173F5F",
"alea": "grey"
}
```
|
{
"source": "jereliu/feature-selection-bnn",
"score": 2
}
|
#### File: feature-selection-bnn/model/spinn.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import numbers
import argparse
from tensorflow.python.platform import tf_logging as logging
def group_lasso_regularizer(scale, scope=None):
if isinstance(scale, numbers.Integral):
raise ValueError('scale cannot be an integer: %s' % scale)
if isinstance(scale, numbers.Real):
if scale < 0.:
raise ValueError('Setting a scale less than 0 on a regularizer: %g' %
scale)
if scale == 0.:
logging.info('Scale of 0 disables regularizer.')
return lambda _: None
def group_lasso(weights, name=None):
"""Applies group regularization to weights."""
with tf.name_scope(scope, 'group2_regularizer', [weights]) as name:
my_scale = tf.convert_to_tensor(scale,
dtype=weights.dtype.base_dtype,
name='scale')
return tf.multiply(
my_scale,
tf.reduce_sum(tf.sqrt(tf.reduce_sum(tf.square(weights), 1))),
name=name)
return group_lasso
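# Minimal usage sketch (illustrative only): the penalty below is
# scale * sum over rows of the l2 norm of each row, so whole input features
# (rows of a first-layer weight matrix) can be driven to zero.
#   demo_weights = tf.constant([[1., 2.], [3., 4.], [0., 0.]])
#   demo_penalty = group_lasso_regularizer(scale=0.1)(demo_weights)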
def spinn_model(features, labels, mode, params):
net = tf.feature_column.input_layer(features,
params['feature_columns'])
if params['hidden_units'][0] == 0:
regularizer = tf.contrib.layers.l1_regularizer(scale=params['reg'])
response = tf.layers.dense(net, params['n_response'],
activation=None,
kernel_regularizer=regularizer)
else:
regularizer = group_lasso_regularizer(scale=params['reg'])
net = tf.layers.dense(net,
units=params['hidden_units'][0],
activation=tf.nn.relu,
kernel_regularizer=regularizer)
if len(params['hidden_units']) >= 2:
for units in params['hidden_units'][1:]:
net = tf.layers.dense(net,
units=units, activation=tf.nn.relu)
response = tf.layers.dense(net, params['n_response'],
activation=None)
response = tf.squeeze(response)
# Compute predictions.
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
"response": response,
}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
# Compute loss.
mse_loss = tf.losses.mean_squared_error(labels=labels,
predictions=response)
loss = tf.losses.get_total_loss(add_regularization_losses=True)
# Compute evaluation metrics.
mse = tf.metrics.mean_squared_error(labels=labels,
predictions=response)
metrics = {'MSE': mse}
tf.summary.scalar("MSE", mse[1])
if mode == tf.estimator.ModeKeys.EVAL:
return tf.estimator.EstimatorSpec(
mode, loss=loss, eval_metric_ops=metrics)
# Create training op.
assert mode == tf.estimator.ModeKeys.TRAIN
optimizer = tf.train.AdagradOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(loss,
global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode,
loss=loss, train_op=train_op)
```
#### File: feature-selection-bnn/util/bnn.py
```python
import tensorflow_probability.python.edward2 as ed
def get_variable_dict(model_fn, X):
"""Return an OrderedDict of model variables.
Args:
model_fn: (function) A model function.
X: (Tensor or ndarray) A Tensor of input variables
Returns:
(OrderedDict of tf.Variables) Return a ordered dictionary of variable names
and corresponding tf.Variables
"""
with ed.tape() as rv_dict:
_ = model_fn(X)
return rv_dict
def make_log_prob_fn(model_fn, X, y):
"""Makes a log likelihood function for MCMC training.
Args:
model_fn: (function) A model function.
X: (Tensor or ndarray) A Tensor of input variables
y: (Tensor or ndarray) A Tensor of response variables
Returns:
(function): a log likelihood function for MCMC training
"""
bnn_log_joint = ed.make_log_joint_fn(model_fn)
rv_names = list(get_variable_dict(model_fn, X).keys())
def bnn_log_prob_fn(*rv_positional_args):
rv_kwargs = dict(zip(rv_names, rv_positional_args))
rv_kwargs.pop('y', None)
return bnn_log_joint(X, y=y, **rv_kwargs)
return bnn_log_prob_fn
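# Minimal usage sketch (illustrative only; `bnn_model_fn` is a hypothetical
# Edward2 model function defined elsewhere, X_train / y_train are placeholders).
#   rv_dict = get_variable_dict(bnn_model_fn, X_train)
#   init_state = [rv.distribution.sample() for rv in rv_dict.values()]
#   target_log_prob_fn = make_log_prob_fn(bnn_model_fn, X_train, y_train)
#   # target_log_prob_fn(*init_state) can then be passed to a tfp.mcmc kernel.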
```
#### File: feature-selection-bnn/util/visual.py
```python
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
def plot_var_imp(imp_sample, true_var_imp, n_variable=50):
"""Plots violin plots of variable importance v.s. truth."""
# produce pandas data frame for plotting
n_sample, _ = imp_sample.shape
feature_names = ["x_{}".format(p) for p in range(n_variable)]
var_imp_data = pd.DataFrame(
{"feature": np.tile(feature_names, n_sample),
"importance": np.hstack(imp_sample[:, :n_variable])})
sns.violinplot(x="feature", y="importance", data=var_imp_data)
plt.scatter(x=range(n_variable),
y=true_var_imp[:n_variable], c="red")
def plot_prediction(pred_sample, true_func, save_addr="./tmp/"):
"""Plots model prediction v.s. truth."""
pred_mean = np.mean(pred_sample, 0)
plt.scatter(pred_mean, true_func)
plt.plot(np.arange(np.min(true_func), np.max(true_func), 0.1),
np.arange(np.min(true_func), np.max(true_func), 0.1), c='orange')
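# Minimal usage sketch (illustrative only; arrays are placeholders for real
# posterior samples).
#   plot_var_imp(imp_sample, true_var_imp, n_variable=50)  # imp_sample: (n_sample, n_feature)
#   plot_prediction(pred_sample, true_func)                # pred_sample: (n_sample, n_obs)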
```
|
{
"source": "jereliu/GPflow-Slim",
"score": 2
}
|
#### File: GPflow-Slim/gpflowSlim/conditionals.py
```python
import tensorflow as tf
from . import settings
from .decors import name_scope
from .features import InducingPoints
@name_scope()
def conditional(Xnew, X, kern, f, *, full_cov=False, q_sqrt=None, white=False):
"""
Given f, representing the GP at the points X, produce the mean and
(co-)variance of the GP at the points Xnew.
Additionally, there may be Gaussian uncertainty about f as represented by
q_sqrt. In this case `f` represents the mean of the distribution and
q_sqrt the square-root of the covariance.
Additionally, the GP may have been centered (whitened) so that
p(v) = N(0, I)
f = L v
thus
p(f) = N(0, LL^T) = N(0, K).
In this case `f` represents the values taken by v.
The method can either return the diagonals of the covariance matrix for
each output (default) or the full covariance matrix (full_cov=True).
We assume K independent GPs, represented by the columns of f (and the
last dimension of q_sqrt).
:param Xnew: data matrix, size N x D.
:param X: data points, size M x D.
:param kern: GPflow kernel.
:param f: data matrix, M x K, representing the function values at X,
for K functions.
:param q_sqrt: matrix of standard-deviations or Cholesky matrices,
size M x K or M x M x K.
:param white: boolean of whether to use the whitened representation as
described above.
:return: two element tuple with conditional mean and variance.
"""
num_data = tf.shape(X)[0] # M
Kmm = kern.K(X) + tf.eye(num_data, dtype=settings.float_type) * settings.numerics.jitter_level
Kmn = kern.K(X, Xnew)
if full_cov:
Knn = kern.K(Xnew)
else:
Knn = kern.Kdiag(Xnew)
return base_conditional(Kmn, Kmm, Knn, f, full_cov=full_cov, q_sqrt=q_sqrt, white=white)
@name_scope()
def feature_conditional(Xnew, feat, kern, f, *, full_cov=False, q_sqrt=None, white=False):
Kmm = feat.Kuu(kern, jitter=settings.numerics.jitter_level)
Kmn = feat.Kuf(kern, Xnew)
if full_cov:
Knn = kern.K(Xnew)
else:
Knn = kern.Kdiag(Xnew)
return base_conditional(Kmn, Kmm, Knn, f, full_cov=full_cov, q_sqrt=q_sqrt, white=white)
@name_scope()
def base_conditional(Kmn, Kmm, Knn, f, *, full_cov=False, q_sqrt=None, white=False):
# compute kernel stuff
num_func = tf.shape(f)[1] # K
Lm = tf.cholesky(Kmm)
# Compute the projection matrix A
A = tf.matrix_triangular_solve(Lm, Kmn, lower=True)
# compute the covariance due to the conditioning
if full_cov:
fvar = Knn - tf.matmul(A, A, transpose_a=True)
shape = tf.stack([num_func, 1, 1])
else:
fvar = Knn - tf.reduce_sum(tf.square(A), 0)
shape = tf.stack([num_func, 1])
fvar = tf.tile(tf.expand_dims(fvar, 0), shape) # K x N x N or K x N
# another backsubstitution in the unwhitened case
if not white:
A = tf.matrix_triangular_solve(tf.transpose(Lm), A, lower=False)
# construct the conditional mean
fmean = tf.matmul(A, f, transpose_a=True)
if q_sqrt is not None:
if q_sqrt.get_shape().ndims == 2:
LTA = A * tf.expand_dims(tf.transpose(q_sqrt), 2) # K x M x N
elif q_sqrt.get_shape().ndims == 3:
L = tf.matrix_band_part(tf.transpose(q_sqrt, (2, 0, 1)), -1, 0) # K x M x M
A_tiled = tf.tile(tf.expand_dims(A, 0), tf.stack([num_func, 1, 1]))
LTA = tf.matmul(L, A_tiled, transpose_a=True) # K x M x N
else: # pragma: no cover
raise ValueError("Bad dimension for q_sqrt: %s" %
str(q_sqrt.get_shape().ndims))
if full_cov:
fvar = fvar + tf.matmul(LTA, LTA, transpose_a=True) # K x N x N
else:
fvar = fvar + tf.reduce_sum(tf.square(LTA), 1) # K x N
fvar = tf.transpose(fvar) # N x K or N x N x K
return fmean, fvar
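# Note (a sketch of the math, not from the original file): base_conditional
# implements the standard GP conditional. With A = Lm^{-1} Kmn (Lm the Cholesky
# factor of Kmm), the covariance is Knn - A^T A; in the non-whitened case A
# gets an extra backsubstitution by Lm^T, the mean is A^T f, and a given
# q_sqrt adds the (L^T A)^T (L^T A) term to the covariance.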
@name_scope()
def uncertain_conditional(Xnew_mu, Xnew_var, feat, kern, q_mu, q_sqrt, *,
full_cov_output=False, full_cov=False, white=False):
"""
Calculates the conditional for uncertain inputs Xnew, p(Xnew) = N(Xnew_mu, Xnew_var).
See ``conditional`` documentation for further reference.
:param Xnew_mu: mean of the inputs, size N x Din
:param Xnew_var: covariance matrix of the inputs, size N x Din x Din
:param feat: gpflow.InducingFeature object, only InducingPoints is supported
:param kern: gpflow kernel or ekernel object.
:param q_mu: mean inducing points, size M x Dout
:param q_sqrt: cholesky of the covariance matrix of the inducing points, size M x M x Dout
    :param full_cov_output: boolean whether to compute covariance between output dimensions.
Influences the shape of return value ``fvar``. Default is False
:param white: boolean whether to use whitened representation. Default is False.
:return fmean, fvar: mean and covariance of the conditional, size ``fmean`` is N x Dout,
size ``fvar`` depends on ``full_cov_output``: if True ``f_var`` is N x Dout x Dout,
if False then ``f_var`` is N x Dout
"""
# TODO: Tensorflow 1.3 doesn't support broadcasting in``tf.matmul`` and
# ``tf.matrix_triangular_solve``. This is reported in issue 216.
# As a temporary workaround, we are using ``tf.einsum`` for the matrix
# multiplications and tiling in the triangular solves.
# The code that should be used once the bug is resolved is added in comments.
if not isinstance(feat, InducingPoints):
raise NotImplementedError
if full_cov:
# TODO: ``full_cov`` True would return a ``fvar`` of shape N x N x D x D,
# encoding the covariance between input datapoints as well.
# This is not implemented as this feature is only used for plotting purposes.
raise NotImplementedError
num_data = tf.shape(Xnew_mu)[0] # number of new inputs (N)
num_func = tf.shape(q_mu)[1] # output dimension (D)
q_sqrt_r = tf.matrix_band_part(tf.transpose(q_sqrt, (2, 0, 1)), -1, 0) # D x M x M
eKuf = tf.transpose(feat.eKfu(kern, Xnew_mu, Xnew_var)) # M x N
Kuu = feat.Kuu(kern, jitter=settings.numerics.jitter_level) # M x M
Luu = tf.cholesky(Kuu) # M x M
if not white:
q_mu = tf.matrix_triangular_solve(Luu, q_mu, lower=True)
Luu_tiled = tf.tile(Luu[None, :, :], [num_func, 1, 1]) # remove line once issue 216 is fixed
q_sqrt_r = tf.matrix_triangular_solve(Luu_tiled, q_sqrt_r, lower=True)
Li_eKuf = tf.matrix_triangular_solve(Luu, eKuf, lower=True) # M x N
fmean = tf.matmul(Li_eKuf, q_mu, transpose_a=True)
eKff = kern.eKdiag(Xnew_mu, Xnew_var) # N
eKuffu = feat.eKufKfu(kern, Xnew_mu, Xnew_var) # N x M x M
Luu_tiled = tf.tile(Luu[None, :, :], [num_data, 1, 1]) # remove this line, once issue 216 is fixed
Li_eKuffu_Lit = tf.matrix_triangular_solve(Luu_tiled, tf.matrix_transpose(eKuffu), lower=True)
Li_eKuffu_Lit = tf.matrix_triangular_solve(Luu_tiled, tf.matrix_transpose(Li_eKuffu_Lit), lower=True) # N x M x M
cov = tf.matmul(q_sqrt_r, q_sqrt_r, transpose_b=True) # D x M x M
if full_cov_output:
fvar = (
tf.matrix_diag(tf.tile((eKff - tf.trace(Li_eKuffu_Lit))[:, None], [1, num_func])) +
tf.matrix_diag(tf.einsum("nij,dji->nd", Li_eKuffu_Lit, cov)) +
# tf.matrix_diag(tf.trace(tf.matmul(Li_eKuffu_Lit, cov))) +
tf.einsum("ig,nij,jh->ngh", q_mu, Li_eKuffu_Lit, q_mu) -
# tf.matmul(q_mu, tf.matmul(Li_eKuffu_Lit, q_mu), transpose_a=True) -
tf.matmul(fmean[:, :, None], fmean[:, :, None], transpose_b=True)
)
else:
fvar = (
(eKff - tf.trace(Li_eKuffu_Lit))[:, None] +
tf.einsum("nij,dji->nd", Li_eKuffu_Lit, cov) +
tf.einsum("ig,nij,jg->ng", q_mu, Li_eKuffu_Lit, q_mu) -
fmean ** 2
)
return fmean, fvar
```
#### File: GPflow-Slim/gpflowSlim/conjugate_gradient.py
```python
import tensorflow as tf
def dot(a, b):
with tf.name_scope("dot"):
return tf.reduce_sum(a*b)
def vec(X):
with tf.name_scope("vec"):
X = tf.transpose(X)
return tf.reshape(X, [-1, 1])
def cgsolver(K1, K2, b, C, max_iter=100, tol=1e-6):
delta = tol * tf.norm(b)
m = tf.shape(K1)[0]
n = tf.shape(K2)[0]
def body(x, k, r_norm_sq, r, p):
P = tf.transpose(tf.reshape(C * p, [n, m]))
Ap = C * vec(tf.matmul(tf.matmul(K1, P), K2))
Ap += p
alpha = r_norm_sq / dot(p, Ap)
x = x + alpha * p
r = r - alpha * Ap
r_norm_sq_prev = r_norm_sq
r_norm_sq = dot(r, r)
beta = r_norm_sq / r_norm_sq_prev
p = r + beta * p
return [x, k+1, r_norm_sq, r, p]
def cond(x, k, r_norm_sq, r, p):
return tf.logical_and(
tf.less(delta, r_norm_sq),
tf.less(k, max_iter))
r = b
loop_vars = [
tf.zeros_like(b), tf.constant(0),
dot(r, r), r, r]
return tf.while_loop(
cond, body, loop_vars)[0]
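# Note (a sketch, not from the original file): cgsolver runs conjugate
# gradients on the system (C * K * C + I) x = b, where K is the
# Kronecker-structured kernel built from K1 and K2 (ordering follows the vec()
# convention above) and C is a diagonal scaling given as a vector; only
# matrix products with K1 and K2 are used, so the full kernel is never formed.
#   x = cgsolver(K1, K2, b, C, max_iter=200, tol=1e-8)   # illustrative call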
```
#### File: gpflowSlim/models/kgpr.py
```python
from __future__ import absolute_import
import tensorflow as tf
import numpy as np
from .. import likelihoods
from .. import settings
from ..decors import name_scope
from ..mean_functions import Zero
from .model import Model
from ..conjugate_gradient import vec, cgsolver, dot
class KGPR(Model):
def __init__(self, X1, X2, Y, kern1, kern2, mask, mean_function=None, obs_var=0.1, **kwargs):
"""
X is a data matrix, size N x D
Y is a data matrix, size N x R
kern, mean_function are appropriate GPflow objects
"""
if 'name' in kwargs:
with tf.variable_scope(kwargs['name']):
self.likelihood = likelihoods.Gaussian(var=obs_var)
self.kern1 = kern1
self.kern2 = kern2
self.mean_function = mean_function or Zero()
else:
self.likelihood = likelihoods.Gaussian(var=obs_var)
self.kern1 = kern1
self.kern2 = kern2
self.mean_function = mean_function or Zero()
self.X1 = X1
self.X2 = X2
self.Y = Y
self.N = np.prod(Y.shape)
self.M = self.N - np.sum(mask)
self.noise = vec(self.likelihood.variance * np.ones_like(Y) + mask * 1e6)
Model.__init__(self, **kwargs)
self._parameters = self.mean_function.parameters + self.kern1.parameters \
+ self.kern2.parameters + self.likelihood.parameters
@name_scope('likelihood')
def _build_likelihood(self):
"""
Construct a tensorflow function to compute the likelihood.
\log p(Y | theta).
"""
K1 = self.kern1.K(self.X1)
K2 = self.kern2.K(self.X2)
e1, _ = tf.self_adjoint_eig(K1)
e2, _ = tf.self_adjoint_eig(K2)
e1 = tf.expand_dims(e1, 0)
e2 = tf.expand_dims(e2, 1)
e, _ = tf.nn.top_k(tf.reshape(tf.matmul(e2, e1), [-1]), k=self.M)
e = e * self.M / self.N
logdet = tf.reduce_sum(tf.log(e + self.likelihood.variance))
y = vec(self.Y)
C = self.noise ** (-0.5)
Ay = C * cgsolver(K1, K2, C * y, C)
quadratic = dot(y, Ay)
return - 0.5 * logdet - 0.5 * quadratic - 0.5 * self.M * np.log(2 * np.pi)
@name_scope('predict')
def _build_predict(self, Xnew1, Xnew2):
"""
Xnew is a data matrix, point at which we want to predict
This method computes
p(F* | Y )
where F* are points on the GP at Xnew, Y are noisy observations at X.
"""
K1 = self.kern1.K(self.X1)
K2 = self.kern2.K(self.X2)
K1u = self.kern1.K(self.X1, Xnew1)
K2u = self.kern2.K(self.X2, Xnew2)
m = tf.shape(K1)[0]
n = tf.shape(K2)[0]
y = vec(self.Y)
C = self.noise ** (-0.5)
Ky = C * cgsolver(K1, K2, C * y, C)
fmean = tf.matmul(tf.matmul(K1u, tf.transpose(tf.reshape(Ky, [n, m])), transpose_a=True), K2u)
return fmean
def predict_f(self, Xnew1, Xnew2):
"""
Compute the mean and variance of the latent function(s) at the points
Xnew.
"""
return self._build_predict(Xnew1, Xnew2)
```
|
{
"source": "jerelynlee/census",
"score": 2
}
|
#### File: censusweb/api/views.py
```python
import simplejson
from django.conf import settings
from django.http import HttpResponse, HttpResponseNotFound, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.contrib.gis.shortcuts import render_to_kml, render_to_kmz
from django.contrib.gis.geos import Point
from django.template import RequestContext, Template, Context
from django.core.urlresolvers import reverse
from boundaryservice.models import Boundary,BoundarySet
import csv
import help_text
import mongoutils
import utils
from datetime import datetime
DATA_ALTERNATIVES = ['2000', '2010', 'delta', 'pct_change']
BOUNDARY_TYPES = [x[0] for x in BoundarySet.objects.values_list('slug').distinct()]
def homepage(request):
return render_to_response('homepage.html', {
'help_text': help_text,
'settings': settings,
},
context_instance=RequestContext(request))
def generic_view(request, template=None, **kwargs):
return render_to_response(template, { 'settings': settings }, context_instance=RequestContext(request))
def download_data_for_region(request, sumlev='', containerlev='', container='', datatype=''):
print sumlev, containerlev
if sumlev == '140' and containerlev == '040':
geo_list = utils.fetch_tracts_by_state(container)
elif sumlev == '140' and containerlev == '050':
geo_list = utils.fetch_tracts_by_county(container)
elif sumlev == '060' and containerlev == '050':
geo_list = utils.fetch_county_subdivisions_by_county(container)
elif sumlev == '160' and containerlev == '040':
geo_list = utils.fetch_places_by_state(container)
elif sumlev == '050' and containerlev == '040':
geo_list = utils.fetch_counties_by_state(container)
elif sumlev == '060' and containerlev == '040':
geo_list = utils.fetch_county_subdivisions_by_state(container)
geoids = ','.join([g[1] for g in geo_list])
if datatype == 'csv':
return data_as_csv(request, geoids)
elif datatype == 'json':
return data_as_json(request, geoids)
def get_tables_for_request(request):
tables = request.GET.get("tables", None)
if tables:
tables = tables.split(",")
else:
tables = settings.DEFAULT_TABLES
return tables
# --- JSON ---
def data_as_json(request, geoids):
tables = get_tables_for_request(request)
geographies = {}
geoids_list = filter(lambda g: bool(g), geoids.split(','))
for g in utils.fetch_geographies(geoids_list):
del g['xrefs']
for table in g["data"]["2010"].keys():
if table not in tables:
del g["data"]["2010"][table]
# Not all data has 2000 values
try:
del g["data"]["2000"][table]
del g["data"]["delta"][table]
del g["data"]["pct_change"][table]
except KeyError:
continue
geographies[g['geoid']] = g
return HttpResponse(simplejson.dumps(geographies), mimetype='application/json')
# --- CSV ---
def data_as_csv(request, geoids):
tables = get_tables_for_request(request)
labelset = mongoutils.get_labelset()
response = HttpResponse(mimetype="text/csv")
w = csv.writer(response)
w.writerow(_csv_row_header(tables, labelset))
geoids_list = filter(lambda g: bool(g), geoids.split(','))
for g in utils.fetch_geographies(geoids_list):
csvrow = _csv_row_for_geography(g, tables, labelset)
w.writerow(csvrow)
now = datetime.now()
date_string = "%s-%s-%s-%s" % (now.year, now.month, now.day, now.microsecond)
response['Content-Disposition'] = "attachment; filename=ire-census-%s.csv" % date_string
return response
def _csv_row_header(tables, labelset):
row = ["sumlev", "geoid", "name"]
for table in tables:
        # Fail gracefully if a table isn't loaded (as in tests)
try:
labels = labelset['tables'][table]['labels']
except KeyError:
continue
for statistic in sorted(labels.keys()):
for alternative in DATA_ALTERNATIVES:
if alternative == '2010':
row.append(statistic)
else:
row.append("%s.%s" % (statistic,alternative))
return row
def _csv_row_for_geography(geography, tables, labelset):
row = [
geography['sumlev'],
geography['geoid'],
geography['metadata']['NAME']
]
for table in tables:
        # Fail gracefully if a table isn't loaded (as in tests)
try:
labels = labelset['tables'][table]['labels']
except KeyError:
continue
for statistic in sorted(labels.keys()):
for alternative in DATA_ALTERNATIVES:
try:
row.append( geography['data'][alternative][table][statistic] )
except KeyError:
row.append('')
return row
# --- KML ---
def data_as_kml(request, geoids, format='kml'):
tables = get_tables_for_request(request)
geoid_list = filter(lambda g: bool(g), geoids.split(','))
boundaries = dict((b.external_id, b) for b in Boundary.objects.filter(external_id__in=geoid_list))
json_data = dict((j['geoid'], j) for j in utils.fetch_geographies(geoid_list))
labelset = mongoutils.get_labelset()
placemarks = [
_create_placemark_dict(boundaries[geoid], json_data[geoid], tables, labelset) for geoid in geoid_list
]
if format == 'kmz':
render = render_to_kmz
else:
render = render_to_kml
return render('gis/kml/placemarks.kml', {'places' : placemarks})
def _create_placemark_dict(b, j, tables, labelset):
"""
    Each placemark should have a name, a description, and kml which includes <ExtendedData>
"""
p = {
'name': b.display_name,
'description': 'Summary Level: %(sumlev)s; GeoID: %(geoid)s' % (j),
}
kml_context = _build_kml_context_for_template(b, j, tables, labelset)
shape = b.simple_shape.transform(4326, clone=True)
p['kml'] = shape.kml + KML_EXTENDED_DATA_TEMPLATE.render(Context(kml_context))
return p
KML_EXTENDED_DATA_TEMPLATE = Template("""
<ExtendedData>
{% for datum in data %}
<Data name="{{datum.name}}">{% if datum.display_name %}
<displayName><![CDATA[{{datum.display_name}}]]></displayName>{% endif %}
<value><![CDATA[{{datum.value}}]]></value>
</Data>
{% endfor %}
</ExtendedData>""")
def _build_kml_context_for_template(b, j, tables, labelset):
kml_context = { 'data': [] }
for table in tables:
        # Fail gracefully if a table isn't loaded (as in tests)
try:
labels = labelset['tables'][table]['labels']
except KeyError:
continue
for statistic in sorted(labels.keys()):
for alternative in DATA_ALTERNATIVES:
#print "t: %s, a: %s, s: %s" % (table, alternative, statistic)
try:
datum = {
'value': j['data'][alternative][table][statistic]
}
if alternative == '2010':
datum['name'] = statistic
else:
datum['name'] = "%s.%s" % (statistic, alternative)
datum['display_name'] = labels[statistic]['text']
kml_context['data'].append(datum)
except KeyError:
pass
return kml_context
def generate_sql(request, file_ids=None, table_ids=None, aggregate=None):
if aggregate == 'all_files':
sql = utils.generate_create_sql_by_file()
return HttpResponse(sql,mimetype='text/plain')
elif aggregate == 'all_tables':
sql = utils.generate_sql_by_table()
return HttpResponse(sql,mimetype='text/plain')
elif aggregate == 'all_table_views':
sql = utils.generate_views_by_table()
return HttpResponse(sql,mimetype='text/plain')
elif aggregate is not None:
return HttpResponseNotFound()
if file_ids:
ids = map(int,file_ids.split(','))
sql = utils.generate_create_sql_by_file(file_numbers=ids)
return HttpResponse(sql,mimetype='text/plain')
if table_ids:
table_ids = table_ids.split(',')
sql = utils.generate_sql_by_table(table_ids)
return HttpResponse(sql,mimetype='text/plain')
return HttpResponseNotFound()
def map_contains(request):
point = request.REQUEST.get('point',None)
try:
lat,lng = point.split(',',1)
point = Point(float(lng),float(lat))
except:
raise TypeError("A point must be provided as a comma-separated string, 'lat,lng'")
types = request.REQUEST.get('types',[])
if types:
types = [x for x in types.split(',') if x in BOUNDARY_TYPES]
if not types: raise ValueError("None of the specified types are valid. Use one or more of (%s) separated by commas." % ','.join(BOUNDARY_TYPES))
else:
types = BOUNDARY_TYPES
boundaries = Boundary.objects.filter(shape__contains=point,set__slug__in=types)
geoids = sorted(x[0] for x in boundaries.values_list('external_id'))
geoids = ','.join(geoids)
url = reverse('map',kwargs={'geoids': geoids}) + "#%.6f,%.6f" % (point.y,point.x)
return HttpResponseRedirect(url)
```
|
{
"source": "Jerem2360/multitools",
"score": 2
}
|
#### File: c/_c_types/_base_types.py
```python
from ._const import *
from typing import TypeVar
import ctypes
# a wrapper for our CObject class to handle _ctypes._SimpleCData and it's _type_ attribute.
def _SimpleCDataWrapper(t_: str = "O") -> type:
class _Wrap(SimpleCData):
_type_ = t_
def __sup__(self, *args, **kwargs):
return super(*args, **kwargs)
return _Wrap
def _CObjectFromCData(data: CData) -> tuple[type, str]:
tp = type(data)
if not issubclass(tp, CData):
return NoneType
tpid = data._type_
return _SimpleCDataWrapper(tpid), tpid
# metaclass for our CObject class and its __typename__ property.
class _CObjectMeta(type):
__slots__ = ["__tprepr__", "_tpname", "__tpid__", "__tpsize__", "__tporigin__"]
def __new__(mcs, *args, **kwargs):
        cls = super().__new__(mcs, *args, **kwargs)
cls = meta_default_classattr(cls, "__tprepr__", "Object")
cls = meta_default_classattr(cls, "_tpname", PY_OBJECT)
cls = meta_default_classattr(cls, "__tpid__", TYPE_INFO[PY_OBJECT][1])
cls = meta_default_classattr(cls, "__tpsize__", TYPE_INFO[PY_OBJECT][0])
cls = meta_default_classattr(cls, "__tporigin__", ctypes.py_object)
return cls
def __init__(cls, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def __typename__(cls):
return cls._tpname
@__typename__.setter
def __typename__(cls, value):
tpsize, tpid = TYPE_INFO[value]
cls.__tpid__ = tpid
cls.__tpsize__ = tpsize
cls._tpname = value
_T = TypeVar("_T")
class CObject(object, metaclass=_CObjectMeta):
__ctype_be__ = None
__ctype_le__ = None
__tporigin__ = ctypes.py_object
__slots__ = ["_data", "__origin__"]
def __init_subclass__(cls, **kwargs):
cls.__metaclass__ = _CObjectMeta
cls.__init_subclass__ = CObject.__init_subclass__
def __init__(self, value):
self.__origin__ = value
        if issubinstance(value, CData):  # TODO: replace with a cleaner check.
data_type, tpid = _CObjectFromCData(value)
if not (tpid == self.__tpid__):
raise TypeError(f"Expected python equivalent for '{self.__class__.__name__}', "
f"got '{type(value)}' instead.")
value = value.value
else:
data_type = _SimpleCDataWrapper(self.__tpid__)
if self.__ctype_be__ is not None:
data_type.__ctype_be__ = self.__ctype_be__
if self.__ctype_le__ is not None:
data_type.__ctype_le__ = self.__ctype_le__
self._data = data_type(value)
self._data.__repr__ = self.__repr__
type(self).__origin__ = data_type
# _SimpleCDataWrapper(c_api_type_indicator)(CData_value)
def __repr__(self):
return f"<C '{self.__tprepr__}' object at 0x{str(hex(id(self))).removeprefix('0x').upper()}>"
@property
def value(self):
return self._data.value
@value.setter
def value(self, __v):
self._data.value = __v
def super(self, *args, **kwargs):
return self._data.__sup__(*args, **kwargs)
CData = property(lambda self: self._data)
class CObjectPtr(CObject):
def __init__(self, value: ctypes.pointer):
super().__init__(value)
def __repr__(self):
void_p = ctypes.cast(self._data, ctypes.c_void_p)
addr = ctypes.cast(void_p, ctypes.c_int)
return f"<Pointer to {addr.value} at 0x{str(hex(id(self))).removeprefix('0x').upper()}>"
class _StructMeta(type):
__slots__ = ["_handle"]
def __new__(mcs, *args, **kwargs):
cls = super().__new__(mcs, *args, **kwargs)
return cls
def __repr__(cls):
return f"<C struct '{cls.__name__}'>"
@property
def __tporigin__(cls):
return cls._handle
class CStruct(CObject):
__fields__ = []
def __new__(cls, *args, **kwargs):
buffer = cls.__repr__
cls.__repr__ = lambda s: cls._handle.__repr__()
if buffer == CStruct.__repr__:
buffer = None
ctypesStruct = cls._BuildCtypesStructure()
        self = super().__new__(cls)
self._handle = ctypesStruct(*args, **kwargs)
if buffer is not None:
self._handle.__repr__ = buffer
return self
def __init__(self, *args, **kwargs):
self._handle.__init__(*args, **kwargs)
super().__init__(self._handle)
@classmethod
def _BuildCtypesStructure(cls):
class Struct(ctypes.Structure):
_fields_ = cls._ConvertFields()
return Struct
@classmethod
def _ConvertFields(cls) -> list[tuple[str, CData]]:
result = []
for field in cls.__fields__:
name, tp = field
new_tp = tp.__origin__
result.append((name, new_tp))
return result
def __getattr__(self, item):
if item == "__init__":
raise TypeError("Structs don't have constructors.")
return super().__getattribute__(item)
def __repr__(self):
return self._handle.__repr__()
class PyObject(CObject):
__typename__ = PY_OBJECT
__tprepr__ = "PyObject*"
__tporigin__ = ctypes.py_object
def __repr__(self):
try:
return super().__repr__()
except ValueError:
return "%s(<NULL>)" % type(self.CData).__name__
check_size(PyObject, "P")
class CShort(CObject):
__typename__ = C_SHORT
__tprepr__ = "short"
__tporigin__ = ctypes.c_short
check_size(CShort)
class CUShort(CObject):
__typename__ = C_USHORT
__tprepr__ = "unsigned short"
__tporigin__ = ctypes.c_ushort
check_size(CUShort)
class CLong(CObject):
__typename__ = C_LONG
__tprepr__ = "long"
__tporigin__ = ctypes.c_long
check_size(CLong)
class CULong(CObject):
__typename__ = C_ULONG
__tprepr__ = "unsigned long"
__tporigin__ = ctypes.c_ulong
check_size(CULong)
class CInt(CObject):
__typename__ = C_INT
__tprepr__ = "int"
__tporigin__ = ctypes.c_int
check_size(CInt)
class CUInt(CObject):
__typename__ = C_UINT
__tprepr__ = "unsigned int"
__tporigin__ = ctypes.c_uint
check_size(CUInt)
from struct import calcsize
if calcsize("i") == calcsize("l"):
CInt = CLong
CUInt = CULong
del calcsize
class CFloat(CObject):
__typename__ = C_FLOAT
__tprepr__ = "float"
__tporigin__ = ctypes.c_float
check_size(CFloat)
class CDouble(CObject):
__typename__ = C_DOUBLE
__tprepr__ = "double"
__tporigin__ = ctypes.c_double
check_size(CDouble)
class CLongDouble(CObject):
__typename__ = C_LONGDOUBLE
__tprepr__ = "long double"
__tporigin__ = ctypes.c_longdouble
check_size(CLongDouble)
class CLongLong(CObject):
__typename__ = C_LONGLONG
__tprepr__ = "long long"
__tporigin__ = ctypes.c_longlong
check_size(CLongLong)
class CULongLong(CObject):
__typename__ = C_ULONGLONG
__tprepr__ = "unsigned long long"
__tporigin__ = ctypes.c_ulonglong
from struct import calcsize
if calcsize("l") == calcsize("q"):
CLongLong = CLong
CULongLong = CULong
del calcsize
class CByte(CObject):
__typename__ = C_BYTE
__tprepr__ = "char[] (bytes)"
__tporigin__ = ctypes.c_byte
CByte.__ctype_le__ = CByte.__ctype_be__ = ctypes.c_byte
check_size(CByte)
class CUByte(CObject):
__typename__ = C_UBYTE
__tprepr__ = "unsigned char[] (bytes)"
__tporigin__ = ctypes.c_ubyte
CUByte.__ctype_le__ = CUByte.__ctype_be__ = ctypes.c_ubyte
check_size(CUByte)
class CChar(CObject):
__typename__ = C_CHAR
__tprepr__ = "char"
__tporigin__ = ctypes.c_char
def __class_getitem__(cls, item: int):
return ctypes.c_char * item
CChar.__ctype_le__ = CChar.__ctype_be__ = ctypes.c_char
check_size(CChar)
class CCharPtr(CObject):
__typename__ = C_CHAR_P
__tprepr__ = "char* || char[]"
__tporigin__ = ctypes.c_char_p
def lookup(self):
return ctypes.c_void_p.from_buffer(self._data).value
check_size(CCharPtr, "P")
class CVoidPtr(CObject):
__typename__ = C_VOID_P
__tprepr__ = "void*"
__tporigin__ = ctypes.c_void_p
def lookup(self):
return ctypes.c_void_p.from_buffer(self._data)
check_size(CVoidPtr)
class CBool(CObject):
__typename__ = C_BOOL
__tprepr__ = "bool"
__tporigin__ = ctypes.c_bool
class CWchar(CObject):
__typename__ = C_WCHAR
__tprepr__ = "wchar"
__tporigin__ = ctypes.c_wchar
class CWcharPtr(CObject):
__typename__ = C_WCHAR_P
__tprepr__ = "wchar* || wchar[]"
__tporigin__ = ctypes.c_wchar_p
def lookup(self):
return ctypes.c_void_p.from_buffer(self._data).value
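# Minimal usage sketch (illustrative only): wraps a Python int in the C 'int'
# wrapper and reads the value back through the underlying ctypes object.
#   x = CInt(42)
#   x.value        # -> 42
#   repr(x)        # -> "<C 'int' object at 0x...>"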
```
#### File: multi_tools/c/__init__.py
```python
from . import c_module
from os import PathLike
class modes:
CDECL = c_module.CDECL
STDCALL = c_module.STDCALL
def c_import(path: PathLike, mode: int = modes.CDECL) -> c_module.CModule:
return c_module.c_import(path, mode=mode)
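# Minimal usage sketch (illustrative only; "example.dll" is a placeholder path).
#   lib = c_import("example.dll", mode=modes.STDCALL)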
```
#### File: multitools/multi_tools/common.py
```python
import builtins
from ctypes import c_bool, c_char_p, c_double, c_long, c_void_p, c_float, c_int, py_object
from _ctypes import CFuncPtr
ERROR_PREFIX = "$*1:"
class CallResults:
SUCCESS = 0
FAILURE = 1
@staticmethod
def D_FAILURE(error_name: str, error_text: str):
return f"$*1:{error_name}:{error_text}"
@staticmethod
def FAILURE_INFO(failure: str):
if not failure.startswith(ERROR_PREFIX):
raise TypeError("Not a detailed failure.")
data_str = failure.removeprefix(ERROR_PREFIX)
data = data_str.split(":")
if data[0] in globals():
return data[0], data[1]
raise AttributeError(f"'builtins' object has no attribute '{data[0]}'")
class Dll:
@staticmethod
def basic_type_wrap(value):
if isinstance(value, bool):
value = c_bool(value)
elif isinstance(value, float):
value = c_double(value)
elif isinstance(value, int):
value = c_long(value)
elif isinstance(value, str):
value = bytes(value, encoding="utf-8")
else:
value = py_object(value)
return value
@staticmethod
def basic_type_unwrap(value):
if isinstance(value, c_char_p):
value = str(value.value)
elif isinstance(value, c_bool):
value = bool(value.value)
elif isinstance(value, (c_double, c_float)):
value = float(value.value)
elif isinstance(value, (c_long, c_int)):
value = int(value.value)
elif isinstance(value, (str, bool, float, int, CFuncPtr)):
pass
elif isinstance(value, c_void_p):
if isinstance(value.value, CFuncPtr):
pass
else:
raise ValueError("Return value of function is a C pointer that cannot be converted to PyObject*.")
else:
raise ValueError("Return value of function is a C value that cannot be converted to PyObject*.")
if isinstance(value, str):
if value.startswith(ERROR_PREFIX):
error_name, error_text = CallResults.FAILURE_INFO(value)
raise globals()[error_name](error_text)
return value
@staticmethod
def wrap_self(value):
return py_object(value)
def format_id(o: object):
return "0x" + str(hex(id(o))).removeprefix("0x").upper()
```
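A minimal usage sketch for the helpers above (the `multi_tools.common` import path is assumed from the file header); it sticks to numeric and boolean values, since strings are wrapped to `bytes` and are not accepted by `basic_type_unwrap`.
```python
# Hypothetical usage sketch; assumes this file is importable as multi_tools.common.
from multi_tools.common import Dll, format_id

wrapped = Dll.basic_type_wrap(3.5)       # -> ctypes.c_double(3.5)
print(Dll.basic_type_unwrap(wrapped))    # 3.5

wrapped = Dll.basic_type_wrap(True)      # bool is checked before int -> ctypes.c_bool(True)
print(Dll.basic_type_unwrap(wrapped))    # True

wrapped = Dll.basic_type_wrap(42)        # -> ctypes.c_long(42)
print(Dll.basic_type_unwrap(wrapped))    # 42

print(format_id(object()))               # e.g. '0x7F3A2C1B90'
```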
#### File: multi_tools/c/wrapper.py
```python
from . import types
import ctypes
from ctypes import wintypes
class TypeWrap:
@staticmethod
def _common_name(name: str):
key = getattr(wintypes, name)
value = getattr(types.win32, name)
return {key: value}
@staticmethod
def _reversed_common_name(name: str):
key = getattr(types.win32, name)
value = getattr(wintypes, name)
return {key: value}
@classmethod
def _common_names(cls, *names: str):
result = {k: v for n in names for k, v in cls._common_name(n).items()}
return result
@classmethod
def _reversed_common_names(cls, *names: str):
result = {k: v for n in names for k, v in cls._reversed_common_name(n).items()}
return result
argTypesMapping = {
# standard C types:
types.PyObject: ctypes.py_object,
types.CShort: ctypes.c_short,
types.CUShort: ctypes.c_ushort,
types.CLong: ctypes.c_long,
types.CULong: ctypes.c_ulong,
types.CInt: ctypes.c_int,
types.CUInt: ctypes.c_uint,
types.CFloat: ctypes.c_float,
types.CDouble: ctypes.c_double,
types.CLongDouble: ctypes.c_longdouble,
types.CLongLong: ctypes.c_longlong,
types.CULongLong: ctypes.c_ulonglong,
types.CByte: ctypes.c_byte,
types.CUByte: ctypes.c_ubyte,
types.CChar: ctypes.c_char,
types.CCharPtr: ctypes.c_char_p,
types.CVoidPtr: ctypes.c_void_p,
types.CBool: ctypes.c_bool,
types.CWchar: ctypes.c_wchar,
types.CWcharPtr: ctypes.c_wchar_p,
# Windows.h types:
**_reversed_common_names(
"BYTE", "WORD", "DWORD", "CHAR", "WCHAR", "INT", "UINT"
"FLOAT", "DOUBLE", "BOOLEAN", "BOOL", "VARIANT_BOOL",
"LONG",
"ULONG", "SHORT", "USHORT", "WPARAM", "LPARAM", "ATOM", "LANGID",
"COLORREF", "LGRPID", "LCTYPE", "LCID", "HANDLE", "HACCEL", "HBITMAP",
"HBRUSH", "HCOLORSPACE", "HDC", "HDESK", "HDWP", "HENHMETAFILE",
"HFONT", "HGDIOBJ", "HGLOBAL", "HHOOK", "HICON", "HINSTANCE", "HKEY",
"HKL", "HLOCAL", "HMENU", "HMETAFILE", "HMODULE", "HMONITOR",
"HPALETTE", "HPEN", "HRGN", "HRSRC", "HSTR", "HTASK", "HWINSTA", "HWND",
"SC_HANDLE", "SERVICE_STATUS_HANDLE", "LARGE_INTEGER", "ULARGE_INTEGER",
),
# Windows.h structs:
**_reversed_common_names(
"RECT", "RECTL", "SMALL_RECT", "POINT", "POINTL", "SIZE", "SIZEL",
"FILETIME", "MSG", "WIN32_FIND_DATAA", "WIN32_FIND_DATAW",
),
}
retTypesMapping = {
# standard C types:
ctypes.py_object: types.PyObject,
ctypes.c_short: types.CShort,
ctypes.c_ushort: types.CUShort,
ctypes.c_long: types.CLong,
ctypes.c_ulong: types.CULong,
ctypes.c_int: types.CInt,
ctypes.c_uint: types.CUInt,
ctypes.c_float: types.CFloat,
ctypes.c_double: types.CDouble,
ctypes.c_longdouble: types.CLongDouble,
ctypes.c_longlong: types.CLongLong,
ctypes.c_ulonglong: types.CULongLong,
ctypes.c_byte: types.CByte,
ctypes.c_ubyte: types.CUByte,
ctypes.c_char: types.CChar,
ctypes.c_char_p: types.CCharPtr,
ctypes.c_void_p: types.CVoidPtr,
ctypes.c_bool: types.CBool,
ctypes.c_wchar: types.CWchar,
ctypes.c_wchar_p: types.CWcharPtr,
# Windows.h types:
**_common_names(
"BYTE", "WORD", "DWORD", "CHAR", "WCHAR", "INT", "UINT"
"FLOAT", "DOUBLE", "BOOLEAN", "BOOL", "VARIANT_BOOL", "LONG",
"ULONG", "SHORT", "USHORT", "WPARAM", "LPARAM", "ATOM", "LANGID",
"COLORREF", "LGRPID", "LCTYPE", "LCID", "HANDLE", "HACCEL", "HBITMAP",
"HBRUSH", "HCOLORSPACE", "HDC", "HDESK", "HDWP", "HENHMETAFILE",
"HFONT", "HGDIOBJ", "HGLOBAL", "HHOOK", "HICON", "HINSTANCE", "HKEY",
"HKL", "HLOCAL", "HMENU", "HMETAFILE", "HMODULE", "HMONITOR",
"HPALETTE", "HPEN", "HRGN", "HRSRC", "HSTR", "HTASK", "HWINSTA", "HWND",
"SC_HANDLE", "SERVICE_STATUS_HANDLE", "LARGE_INTEGER", "ULARGE_INTEGER",
),
# Windows.h structs:
**_common_names(
"RECT", "RECTL", "SMALL_RECT", "POINT", "POINTL", "SIZE", "SIZEL",
"FILETIME", "MSG", "WIN32_FIND_DATAA", "WIN32_FIND_DATAW",
),
}
def __init__(self, func, *args):
"""
Internal helper for wrapping C function calls and converting return values to c.types.CObject subclass instances when a mapping exists.
"""
_args = []
for arg in args:
argtype = type(arg)
if argtype in self.argTypesMapping:
true_arg = arg.__origin__
else:
true_arg = arg
_args.append(true_arg)
result = func(*_args)
if type(result) in self.retTypesMapping:
restype = self.retTypesMapping[type(result)]
res = restype(result)
else:
res = result
self._retval = res
ret = property(lambda self: self._retval)
def call_with_wrap(func, *args, **kwargs):
"""
Call an external function as
func(*args, *kwargs)
and return its result.
Keyword arguments are converted to positional arguments and are
added to the end of the standard positional arguments.
"""
kwds = []
for key in kwargs:
kwds.append(kwargs.get(key))
wrapped = TypeWrap(func, *args, *kwds)
return wrapped.ret
```
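A small illustration of `call_with_wrap` with a plain Python callable (a sketch; the import path is assumed). The argument and return mappings only apply to the `types.*` wrappers and Windows types above, so plain Python values pass straight through and the wrapped call behaves like a normal function call.
```python
# Sketch: plain Python arguments are not in argTypesMapping, so they pass through unchanged,
# and a plain Python return value is handed back as-is via TypeWrap.ret.
from multi_tools.c.wrapper import call_with_wrap  # assumed import path

def add(a, b):
    return a + b

result = call_with_wrap(add, 2, b=3)  # the keyword argument is appended positionally
print(result)  # 5
```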
#### File: multi_tools/graphical/_user32_impl.py
```python
from ._user32_const import *
from ..system import DllImport
from ctypes import c_void_p as _VoidPtr, Structure as _Struct
@DllImport(PATH)
def CreateWindowExA(dwExStyle: DWORD, lpClassName: str, lpWindowName: str, dwStyle: DWORD, X: int, Y: int,
nWidth: int, nHeight: int, hWndParent: HWND, hMenu: HMENU, hInstance: HINSTANCE,
lpParam: LPVOID) -> HWND:
"""
Create and return a new HWND window handle.
Return None upon failure.
"""
```
#### File: multi_tools/math/errors.py
```python
from multi_tools.errors import exceptions
class InfiniteDivisionError(exceptions.ErrorImitation):
def __init__(self, text="Division by infinity", immediateRaise=True):
"""
An error that occurs when something is divided by an infinite().
"""
exceptions.ErrorImitation.__init__(self, name="InfiniteDivisionError", text=text, immediateRaise=immediateRaise)
```
#### File: multi_tools/math/geometrical.py
```python
from typing import overload, Union
def format_id(obj):
hex_ = hex(id(obj))
inter = str(hex_).split('0x')[-1].upper()
return '0x' + inter
def typename(obj):
return eval("type(obj).__name__", {'obj': obj, **globals()}, locals())
class geometry_object(object):
__slots__ = []
__type__ = 'default'
__axes__ = '[]'
def __init_subclass__(cls, **kwargs):
if 'axes' in kwargs:
if not isinstance(kwargs.get('axes'), (list, tuple)):
raise TypeError(f"'axes' parameter must be a list[str] or tuple[str], not '{type(kwargs.get('axes'))}'.")
result = '['
last = len(kwargs.get('axes')) - 1
count = 0
generics_types = []
for axis in kwargs.get('axes'):
paramtype = eval(f"list[{typename(axis)}]", {'axis': axis, **globals()}, locals())
if not paramtype in generics_types:
generics_types.append(paramtype)
if not isinstance(axis, str):
raise TypeError(f"'axes' parameter must be a list[str] or tuple[str], not '{paramtype}'.")
result += axis
if count < last:
result += ', '
else:
result += ']'
count += 1
cls.__axes__ = result
def __repr__(self):
return "<Geometrical object at {0}, axes={1}".format(format_id(self), self.__axes__)
class _2DGeometricalObject(geometry_object, axes=('x', 'y')):
__type__ = 'default'
def __init_subclass__(cls, **kwargs):
if 'type' in kwargs:
cls.__type__ = kwargs.get('type')
def __repr__(self):
return f"<Geometry object at {format_id(self)}, type={self.__type__}, axes={self.__axes__}>"
class Point(_2DGeometricalObject, type='point'):
def __init__(self, x: Union[int, float], y: Union[int, float]):
self._coordinates = [float(x), float(y)]
@property
def x(self):
return self._coordinates[0]
@property
def y(self):
return self._coordinates[1]
class Vector2D(_2DGeometricalObject, type='vector'):
__slots__ = ['_coordinates']
@overload
def __init__(self, p1: Point, p2: Point): ...
@overload
def __init__(self, x: Union[int, float], y: Union[int, float]): ...
def __init__(self, *args):
"""
Create a vector object in a virtual 2D space.
Two signatures are possible:
Vector2D(number, number) and
Vector2D(Point, Point)
For each of them, the vector's coordinates are deduced and assigned to self._coordinates.
self.x is the x coordinate of the vector,
and self.y is the y coordinate of the vector.
To get the length of the vector, use len(vector).
"""
if len(args) == 2:
if isinstance(args[0], (int, float)) and isinstance(args[1], (int, float)):
self._coordinates = [float(args[0]), float(args[1])]
elif isinstance(args[0], Point) and isinstance(args[1], Point):
x = args[1].x - args[0].x
y = args[1].y - args[0].y
self._coordinates = [float(x), float(y)]
else:
raise TypeError(f"Expected argument types ('Point', 'Point') or ('int', 'int'), got {tuple(typename(i) for i in args)} instead.")
else:
raise TypeError(f"Expected argument types ('Point', 'Point') or ('int', 'int'), got {tuple(typename(i) for i in args)} instead.")
def __len__(self):
"""
Implement len(self)
"""
return (self._coordinates[0] ** 2) + (self._coordinates[1] ** 2)
def __repr__(self):
"""
Implement repr(self)
"""
return f"<'Vector2D' object at {format_id(self)}, x={self._coordinates[0]}, y={self._coordinates[1]}>"
def _dot_product(self, other):
"""
The dot product between two vectors, here it's self and other.
"""
return (self._coordinates[0] * other.x) + (self._coordinates[1] * other.y)
def _product(self, item):
"""
The product between a vector and a real number, here it's self and item
"""
x = item * self._coordinates[0]
y = item * self._coordinates[1]
return Vector2D(x, y)
def __mul__(self, other):
"""
Implement self * other
Choose between dot product and product depending
on other's type.
"""
if isinstance(other, (int, float)):
return self._product(other)
elif isinstance(other, Vector2D):
return self._dot_product(other)
else:
raise TypeError(f"'Vector2D' object cannot be multiplied by '{typename(other)}' object.")
@property
def x(self):
return self._coordinates[0]
@property
def y(self):
return self._coordinates[1]
```
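A brief sketch of the 2D vector API defined above (the `multi_tools.math.geometrical` import path is assumed): both constructor signatures, the scalar product, and the dot product dispatched through `__mul__`.
```python
# Sketch exercising the Vector2D constructors and __mul__ dispatch shown above.
from multi_tools.math.geometrical import Point, Vector2D  # assumed import path

p1, p2 = Point(0, 0), Point(3, 4)

v = Vector2D(p1, p2)        # coordinates deduced from two Points -> x=3.0, y=4.0
w = Vector2D(1, 2)          # direct coordinates

print(v.x, v.y)             # 3.0 4.0
print(v * 2)                # scalar product -> a new Vector2D with x=6.0, y=8.0
print(v * w)                # dot product -> 3.0*1.0 + 4.0*2.0 = 11.0
```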
#### File: math/geometry/bases.py
```python
from typing import Literal
def format_id(obj):
hex_ = hex(id(obj))
inter = str(hex_).split('0x')[-1].upper()
return '0x' + inter
def typename(obj):
"""
Get the string name of a type.
"""
return eval("type(obj).__name__", {'obj': obj, **globals()}, locals())
class GeometryObject(object):
"""
An empty common base for all geometry objects.
"""
pass
```
#### File: multi_tools/stdio/__init__.py
```python
import sys
class stdin:
"""
Contains only the useful functions of sys.stdin.
"""
@staticmethod
def read() -> str:
return sys.stdin.read()
@staticmethod
def fileno() -> int:
return sys.stdin.fileno()
@staticmethod
def seek(offset: int):
return sys.stdin.seek(offset)
class stdout:
"""
Contains only the useful functions of sys.stdout.
"""
@staticmethod
def write(text: str) -> None:
sys.stdout.write(text)
@staticmethod
def fileno() -> int:
return sys.stdout.fileno()
@staticmethod
def seek(offset: int):
return sys.stdout.seek(offset)
class stderr:
"""
Contains only the useful functions of sys.stderr.
"""
@staticmethod
def write(text: str) -> None:
sys.stderr.write(text)
@staticmethod
def fileno() -> int:
return sys.stderr.fileno()
@staticmethod
def seek(offset: int):
return sys.stderr.seek(offset)
```
#### File: multi_tools/system/path.py
```python
import os
from multi_tools import config
from typing import Union
PathType = Union[str, os.PathLike]
class Path:
@staticmethod
def is_bslash(path: PathType):
return '\\' in path
@staticmethod
def is_nslash(path: PathType):
return '/' in path
@staticmethod
def conventionalize(path: PathType):
conv = config.Path.slash_convention
result = path.replace('\\', conv)
return result
@staticmethod
def win_path(path: PathType):
conv = config.Path.win_convention
result = path.replace('/', conv)
return result
```
|
{
"source": "Jerem-35/GanExamples",
"score": 2
}
|
#### File: Jerem-35/GanExamples/acGAN_MNIST.py
```python
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torchvision.utils import save_image
######################################################################################################################################
######################################################################################################################################
##
##  Implementation of acGAN on the MNIST dataset
##  I referred to the following examples to develop this sample:
## https://github.com/yunjey/pytorch-tutorial/blob/master/tutorials/03-advanced/generative_adversarial_network/main.py
## https://github.com/TuXiaokang/ACGAN.PyTorch/blob/master/main.py
######################################################################################################################################
######################################################################################################################################
def denorm(x):
out = (x + 1) / 2
return out.clamp(0, 1)
# Choose GPU or CPU
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print("Current Device " , device)
# Change here learning parameters
batch_size = 50
latent_size = 64
hidden_size = 256
image_size = 784
lr = 0.0002
nb_classes = 10
num_epochs = 50 # Results become interesting from epoch ~20
resul_dir = 'ResulacGan'
if not os.path.exists(resul_dir):
os.makedirs(resul_dir)
dataTransform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=(0.5, 0.5, 0.5), # 3 for RGB channels
std=(0.5, 0.5, 0.5))])
# MNIST Train data set
train_loader = torch.utils.data.DataLoader(datasets.MNIST('mnist_data',
download=True,
train=True,
transform=dataTransform),
batch_size=batch_size,
shuffle=True)
#Discriminator model
class Discriminator(nn.Module):
def __init__(self , input_size, hidden_size, output_size):
super(Discriminator, self).__init__()
self.linear1 = nn.Linear(input_size , hidden_size)
self.linear2 = nn.Linear(hidden_size , hidden_size)
self.linear3 = nn.Linear(hidden_size, output_size)
self.linearAUx = nn.Linear(hidden_size , nb_classes)
#image and label
def forward(self, x ):
x = F.leaky_relu(self.linear1(x), 0.2)
x = F.leaky_relu(self.linear2(x), 0.2)
output = torch.sigmoid((self.linear3(x)))
aux = self.linearAUx(x)
return output, aux
# Generator Model
class Generator(nn.Module):
def __init__(self , input_size, hidden_size, output_size):
super(Generator, self).__init__()
self.linear1 = nn.Linear(input_size, hidden_size)
self.linear2 = nn.Linear(hidden_size , hidden_size)
self.linear3 = nn.Linear(hidden_size, output_size)
self.label_embedding = nn.Embedding(nb_classes, latent_size)
# x random y labels
def forward(self, x, y):
x = torch.mul(self.label_embedding(y), x)
#x = torch.cat((self.label_embedding(y), x), -1)
x = F.relu(self.linear1(x))
x = F.relu(self.linear2(x))
x = self.linear3(x)
return torch.tanh(x)  # Tried sigmoid instead: did not work
#initialize discriminator and generator
D = Discriminator(image_size, hidden_size, 1).to(device)
G = Generator(latent_size, hidden_size, image_size).to(device)
#Adam optimization
optimizerD = torch.optim.Adam(D.parameters(), lr)
optimizerG = torch.optim.Adam(G.parameters(), lr)
criterion_output = nn.BCELoss()
criterion_aux = nn.CrossEntropyLoss()
total_step = len(train_loader)
for epoch in range(num_epochs):
for batch_idx, (x, target) in enumerate(train_loader):
images = x.reshape(batch_size, -1).to(device)
realLabel = torch.ones(batch_size, 1).to(device)
fakeLabel = torch.zeros(batch_size, 1).to(device)
target = torch.LongTensor(target).to(device)
# TRAIN D
# On true data
predictR, predictRLabel = D(images) #image from the real dataset
loss_real_output = criterion_output(predictR, realLabel) # compare vs label =1 (D is supposed to "understand" that the image is real)
loss_real_aux = criterion_aux(predictRLabel , target)
real_score = predictR
# On fake data
latent_value = torch.randn((batch_size, latent_size)).to(device)
gen_labels = torch.LongTensor(np.random.randint(0, nb_classes, batch_size)).to(device)
fake_images = G(latent_value , gen_labels) #generate a fake image
predictF, predictLabelF = D(fake_images)
loss_fake_output = criterion_output(predictF , fakeLabel) # compare vs label =0 (D is supposed to "understand" that the image generated by G is fake)
loss_fake_aux = criterion_aux(predictLabelF, gen_labels)
fake_score = predictF
lossD = loss_real_output + loss_real_aux + loss_fake_output +loss_fake_aux
optimizerD.zero_grad()
optimizerG.zero_grad()
lossD.backward()
optimizerD.step()
# TRAIN G
latent_value = torch.randn((batch_size, latent_size)).to(device)
gen_labels = torch.LongTensor(np.random.randint(0, nb_classes, batch_size)).to(device)
fake_images= G(latent_value , gen_labels) #Generate a fake image
predictG, predictLabelG = D(fake_images)
lossG_output = criterion_output(predictG , realLabel) # Compare vs label = 1 (We want to optimize G to fool D, predictG must tend to 1)
lossG_aux = criterion_aux( predictLabelG, gen_labels)
lossG = lossG_output+ lossG_aux
optimizerD.zero_grad()
optimizerG.zero_grad()
lossG.backward()
optimizerG.step()
if (batch_idx+1) % 200 == 0:
print("Epoch: "+str(epoch)+"/"+str(num_epochs)+ " -- Batch:"+ str(batch_idx+1)+"/"+str(total_step))
print(" GenLoss "+str(round(lossG.item(), 3))+ " -- DiscLoss "+str(round(lossD.item(), 3)))
print(" D(x): "+str(round(real_score.mean().item(), 3))+ " -- D(G(z)):"+str(round(fake_score.mean().item(), 3)))
# Save real images
if (epoch+1) == 1:
images = images.reshape(images.size(0), 1, 28, 28)
save_image(denorm(images), os.path.join(resul_dir, 'real_images.png'))
# Save sampled images
with torch.no_grad() :
latent_value = torch.randn((60, latent_size)).to(device)
gen_labels = np.zeros(0)
for i in range(nb_classes):
gen_labels = np.append(gen_labels, np.ones(6)*i )
gen_labels = torch.from_numpy(gen_labels)
gen_labels = gen_labels.type(torch.LongTensor).to(device)
fake_images = G(latent_value , gen_labels) #generate a fake image
fake_images = fake_images.reshape(fake_images.size(0), 1, 28, 28)
save_image(denorm(fake_images), os.path.join(resul_dir, 'fake_images-{}.png'.format(epoch+1)))
# generate samples for all labels
nbImageToGenerate = 8*8
for i in range(10):
latent_value = torch.randn((nbImageToGenerate, latent_size)).to(device)
gen_labels = torch.LongTensor(np.full(nbImageToGenerate , i )).to(device)
fake_images = G(latent_value , gen_labels) #Generate a fake image
fake_images = fake_images.reshape(fake_images.size(0), 1, 28, 28)
save_image(denorm(fake_images), os.path.join(resul_dir, 'GeneratedSample-{}.png'.format(i)))
```
|
{
"source": "jeremaihloo/aiorm",
"score": 3
}
|
#### File: aiorm/orm/fields.py
```python
import uuid
class Field(object):
def __init__(self, name, column_type, primary_key, default):
self.name = name
self.column_type = column_type
self.primary_key = primary_key
self.default = default
def __str__(self):
return '%s %s %s' % (self.name, self.column_type, 'primary key' if self.primary_key else '')
class StringField(Field):
def __init__(self, name=None, primary_key=False, default=None, ddl='varchar(100)', null=True, unique=False):
super().__init__(name, ddl, primary_key, default)
CharField = StringField
class BooleanField(Field):
def __init__(self, name=None, default=False):
super().__init__(name, 'boolean', False, default)
class IntegerField(Field):
def __init__(self, name=None, primary_key=False, default=0):
super().__init__(name, 'bigint', primary_key, default)
class FloatField(Field):
def __init__(self, name=None, primary_key=False, default=0.0):
super().__init__(name, 'real', primary_key, default)
DateTimeField = FloatField
class TextField(Field):
def __init__(self, name=None, default=None, null=True):
super().__init__(name, 'text', False, default)
class UUIDField(Field):
def __init__(self, name=None, primary_key=False, default=uuid.uuid4):
super(UUIDField, self).__init__(name, 'VARCHAR(40)', primary_key, lambda: str(default()))
class ForeignKeyField(Field):
def __init__(self, fk_model, name=None, related_name=None, default=None, null=True):
self.fk_model = fk_model
self.related_name = related_name
if isinstance(fk_model, str):
column_type = None
else:
column_type = fk_model.__mappings__[fk_model.__primary_key__].column_type
super(ForeignKeyField, self).__init__(name, column_type, False, default)
```
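A quick sketch of declaring a few fields with the classes above (the `aiorm.orm.fields` import path is assumed); `Field.__str__` renders a rough column description.
```python
# Sketch: field declarations as they might appear on a model class.
from aiorm.orm.fields import StringField, IntegerField, TextField, UUIDField  # assumed import path

name_field = StringField(name='name', primary_key=True)
age_field = IntegerField(name='age')
bio_field = TextField(name='bio')
uid_field = UUIDField(name='uid')

print(name_field)            # name varchar(100) primary key
print(age_field)             # age bigint
print(uid_field.default())   # a fresh uuid4 rendered as a string
```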
#### File: aiorm/tests/contexts.py
```python
import pytest
from sample.bench import DemoDbContext
from sample.models import configs, DemoUser
pytestaio = pytest.mark.asyncio
@pytest.fixture()
async def context(event_loop):
ctx = DemoDbContext(event_loop, **configs)
await ctx.begin()
return ctx
@pytestaio
async def test_context_show_tables(context: DemoDbContext):
tables = await context.show_tables()
assert isinstance(tables, list)
@pytestaio
async def test_context_create_table(context: DemoDbContext):
await context.create_tables([DemoUser])
tables = await context.show_tables()
assert 'DemoUser' in tables
@pytestaio
async def test_context_count(context:DemoDbContext):
query = context.users.select_query().where(DemoUser.name == 'jeremaihloo')
count = await context.users.count(query)
print(count)
```
|
{
"source": "jeremander/nerdcal",
"score": 3
}
|
#### File: nerdcal/nerdcal/ifc.py
```python
from bisect import bisect
from dataclasses import dataclass
from datetime import time, timedelta, tzinfo
from itertools import accumulate
from operator import add
from typing import List, Optional
from nerdcal._base import check_int, Date, Datetime, days_before_year, is_leap_year, parse_isoformat_date
MIN_YEAR = 1
MAX_YEAR = 9999
MAX_ORDINAL = 3652059 # IFCDate.max.toordinal()
MIN_MONTH = 1
MAX_MONTH = 13
DAYS_IN_MONTH = 28
DAYS_IN_WEEK = 7
@dataclass(order = True, frozen = True)
class IFCDate(Date):
"""Concrete date type for IFC.
There are 13 months, consisting of 28 days each.
The additional month, Sol, occurs between June and July.
However, for simplicity, Year Day will be represented as December 29, and Leap Day will be represented as June 29."""
year: int
month: int
day: int
def __post_init__(self) -> None:
"""Perform validation on the year, month, and day.
Raise a ValueError if any entries are invalid."""
check_int(self.year)
check_int(self.month)
check_int(self.day)
if not MIN_YEAR <= self.year <= MAX_YEAR:
raise ValueError(f'year must be in {MIN_YEAR}..{MAX_YEAR}', self.year)
if not MIN_MONTH <= self.month <= MAX_MONTH:
raise ValueError(f'month must be in {MIN_MONTH}..{MAX_MONTH}', self.month)
dim = self._days_in_month(self.year)[self.month - 1]
if not (1 <= self.day <= dim):
raise ValueError(f'day must be in 1..{dim}', self.day)
# Accessors
def get_year(self) -> int:
return self.year
# Helpers
@classmethod
def _days_in_month(cls, year: int) -> List[int]:
"""List of number of days in each month."""
return [DAYS_IN_MONTH + 1 if ((month == 6) and is_leap_year(year)) or (month == 13) else DAYS_IN_MONTH for month in range(MIN_MONTH, MAX_MONTH + 1)]
@classmethod
def _days_before_month(cls, year: int) -> List[int]:
"""List of number of days before the start of each month."""
return list(accumulate([0] + cls._days_in_month(year), add))[:-1]
# Month and weekday names
@classmethod
def month_names(cls) -> List[str]:
"""Full names of each month."""
return ['January', 'February', 'March', 'April', 'May', 'June', 'Sol', 'July', 'August', 'September', 'October', 'November', 'December']
@classmethod
def month_abbrevs(cls) -> List[str]:
"""Abbreviated names of each month (3 letters, for use with ctime())."""
return ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Sol', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
@classmethod
def weekday_names(cls) -> List[str]:
return ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday']
@classmethod
def weekday_abbrevs(cls) -> List[str]:
return ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat']
# Additional constructors
@classmethod
def _from_year_and_ordinal(cls, year: int, n: int) -> 'IFCDate':
dbm = cls._days_before_month(year)
month = bisect(dbm, n)
day = n - dbm[month - 1]
return cls(year, month, day + 1)
@classmethod
def fromisoformat(cls, date_string: str) -> 'IFCDate':
if not isinstance(date_string, str):
raise TypeError('fromisoformat: argument must be str')
return cls(*parse_isoformat_date(date_string))
# Standard conversions
def toordinal(self) -> int:
return days_before_year(self.year) + self._days_before_month(self.year)[self.month - 1] + self.day
def replace(self, year: int = None, month: int = None, day: int = None) -> 'IFCDate':
"""Return a new IFCDate with new values for the specified fields."""
return type(self)(year or self.year, month or self.month, day or self.day)
# Computations
def weekday(self) -> int:
"""Return day of the week as a number 0..6 (if a proper weekday), 7 (if Year Day), or 8 (if Leap Day)."""
if (self.month, self.day) == (13, 29):
return 7
elif (self.month, self.day) == (6, 29):
return 8
day_of_year = self.toordinal() - days_before_year(self.year)
if (self.month >= 7) and self.is_leap_year():
return day_of_year % DAYS_IN_WEEK
return (day_of_year - 1) % DAYS_IN_WEEK
# Conversions to string
def _ctime_date(self) -> str:
if (self.month, self.day) == (6, 29):
return 'Leap Day '
elif (self.month, self.day) == (13, 29):
return 'Year Day '
weekday = self.weekday()
weekday_name = self.weekday_abbrevs()[weekday]
month_name = self.month_abbrevs()[self.month - 1]
return '{} {} {:2d}'.format(weekday_name, month_name, self.day)
def strftime(self, fmt: str) -> str:
# TODO: revamp strftime to handle IFC month/day numbers/names
raise NotImplementedError
def isoformat(self) -> str:
return f'{self.year:04d}-{self.month:02d}-{self.day:02d}'
IFCDate.min = IFCDate(1, 1, 1)
IFCDate.max = IFCDate(9999, 13, 29)
IFCDate.resolution = timedelta(days = 1)
@dataclass(order = True, frozen = True)
class IFCDatetime(Datetime):
"""Concrete datetime type for IFC.
IFCDatetime(year, month, day[, hour[, minute[, second[, microsecond[,tzinfo]]]]])
The year, month, and day arguments are required."""
_date_class = IFCDate
year: int
month: int
day: int
hour: int = 0
minute: int = 0
second: int = 0
microsecond: int = 0
tzinfo: Optional[tzinfo] = None
def __post_init__(self) -> None:
"""Perform validation on the entries.
Raise a ValueError if any entries are invalid."""
# validate by constructing sub-objects
self._date_class(self.year, self.month, self.day)
self._time_class(self.hour, self.minute, self.second, self.microsecond, self.tzinfo)
# Additional constructors
@classmethod
def _combine(cls, date: IFCDate, time: time, tzinfo: Optional[tzinfo] = None) -> 'IFCDatetime':
return cls(date.year, date.month, date.day, time.hour, time.minute, time.second, time.microsecond, tzinfo)
@classmethod
def strptime(cls, date_string: str, format: str) -> 'IFCDatetime':
import _strptime
# TODO: implement this
raise NotImplementedError
# Standard conversions
def date(self) -> 'IFCDate':
return self._date_class(self.year, self.month, self.day)
def time(self) -> time:
return self._time_class(self.hour, self.minute, self.second, self.microsecond)
def timetz(self) -> time:
return self._time_class(self.hour, self.minute, self.second, self.microsecond, self.tzinfo)
def replace(self, year: int = None, month: int = None, day: int = None, hour: int = None, minute: int = None, second: int = None, microsecond: int = None, tzinfo: tzinfo = None) -> 'IFCDatetime':
return type(self)(year or self.year, month or self.month, day or self.day, hour or self.hour, minute or self.minute, second or self.second, microsecond or self.microsecond, tzinfo or self.tzinfo)
# Conversions to string
def ctime(self) -> str:
date_str = self.date()._ctime_date()
return '{} {:02d}:{:02d}:{:02d} {:04d}'.format(date_str, self.hour, self.minute, self.second, self.year)
def isoformat(self, sep: str = 'T', timespec: str = 'auto') -> str:
s = self.todatetime().isoformat(sep = sep, timespec = timespec)
return self.date().isoformat() + s[10:]
IFCDatetime.min = IFCDatetime(1, 1, 1)
IFCDatetime.max = IFCDatetime(9999, 13, 29, 23, 59, 59, 999999)
IFCDatetime.resolution = timedelta(microseconds = 1)
```
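A short sketch of the IFC date API above (the `nerdcal.ifc` import path is assumed; the helpers imported from `nerdcal._base` are assumed available): construction, ISO formatting, month lookup, and the Year Day sentinel.
```python
# Sketch: basic IFCDate usage grounded in the methods defined above.
from nerdcal.ifc import IFCDate  # assumed import path

d = IFCDate(2024, 7, 1)                      # month 7 is Sol, the extra IFC month
print(d.isoformat())                         # '2024-07-01'
print(IFCDate.month_names()[d.month - 1])    # 'Sol'

year_day = IFCDate(2024, 13, 29)             # Year Day is represented as December 29
print(year_day.weekday())                    # 7 (the sentinel value for Year Day)

d2 = d.replace(day=15)                       # new IFCDate with the day changed
print(d2.isoformat())                        # '2024-07-15'
```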
|
{
"source": "jeremander/splitsio",
"score": 3
}
|
#### File: splitsio/splitsio/api.py
```python
from dataclasses import dataclass, Field, field
from datetime import datetime
import dateutil.parser
import numpy as np
import pandas as pd
from operator import attrgetter
from typing import Any, cast, Counter, List, NamedTuple, Optional, Sequence, Type
from dataclasses_json import config
from marshmallow import fields
from splitsio.query import SplitsIOData
CategoryCounts = NamedTuple('CategoryCounts', [('category', 'Category'), ('numRuns', int)])
############
# DATETIME #
############
class IsoDatetime(datetime):
"""Custom datetime class with ISO formatting."""
@classmethod
def isoparse(cls, timestamp: str) -> 'IsoDatetime':
s = timestamp.strip("'").rstrip('Z')
dt = dateutil.parser.isoparse(s)
return datetime.__new__(cls, dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.microsecond, dt.tzinfo) # type: ignore
def __repr__(self) -> str:
return repr(str(self))
def __str__(self) -> str:
return self.isoformat(timespec = 'milliseconds') + 'Z'
isoparse = lambda ts : None if (ts is None) else IsoDatetime.isoparse(ts)
isoformat = lambda dt : None if (dt is None) else str(dt)
def isodatetime(**kwargs: Any) -> Field: # type: ignore
"""dataclass field rendering a datetime object as a string when parsing/formatting."""
return field(metadata = config(encoder = isoformat, decoder = isoparse, mm_field = fields.DateTime(format = 'iso')), **kwargs)
##############
# DATA MODEL #
##############
@dataclass
class Category(SplitsIOData):
"""A Category is a ruleset for a Game (Any%, 100%, MST, etc.) and an optional container for Runs. Its canonical ID string is a base 10 number, e.g. "312", "1456", "11"."""
id: str
name: str
created_at: Optional[datetime] = isodatetime(default = None, repr = False)
updated_at: Optional[datetime] = isodatetime(default = None, repr = False)
@classmethod
def collection(cls) -> str:
return "categories"
@property
def canonical_id(self) -> str:
return self.id
def runs(self) -> Sequence['Run']:
"""Runs for the category."""
return self.get_associated(Run)
def runners(self) -> Sequence['Runner']:
"""Runners for the category."""
return self.get_associated(Runner)
@dataclass
class Game(SplitsIOData):
"""A Game is a collection of information about a game, and a container for Categories. Its canonical ID string is its Speedrun.com shortname, e.g. "sms", "sm64", "portal"."""
id: str
name: str
shortname: Optional[str]
created_at: Optional[datetime] = isodatetime(default = None, repr = False)
updated_at: Optional[datetime] = isodatetime(default = None, repr = False)
categories: Optional[List[Category]] = field(default = None, repr = False)
@classmethod
def collection(cls) -> str:
return "games"
@property
def canonical_id(self) -> str:
return self.name if (self.shortname is None) else self.shortname
@classmethod
def all(cls) -> Sequence['Game']:
"""Obtains the list of all games."""
return cls.query(cls.collection())
@classmethod
def search(cls, name: str) -> Sequence['Game']:
"""Obtains the list of games matching a given search term."""
return cls.query(cls.collection() + '?search=' + name)
def runs(self) -> Sequence['Run']:
"""Runs for the game."""
return self.get_associated(Run)
def runners(self) -> Sequence['Runner']:
"""Runners for the game."""
return self.get_associated(Runner)
def category_counts(self) -> List[CategoryCounts]:
"""Returns categories along with number of runs for that category."""
if (self.categories is None):
return []
run_ctr: Counter[str] = Counter()
for run in self.runs():
if run.category:
run_ctr[run.category.id] += 1
items = [CategoryCounts(cat, run_ctr[cat.id]) for cat in self.categories]
# sort by decreasing number of runs
items.sort(key = attrgetter('numRuns'), reverse = True)
return items
@dataclass
class Runner(SplitsIOData):
"""A Runner is a user who has at least one run tied to their account. Its canonical ID string is their Splits.io username all-lowercased, e.g. "glacials", "batedurgonnadie", "snarfybobo"."""
id: str
twitch_id: Optional[str]
twitch_name: Optional[str]
display_name: str
name: str
avatar: Optional[str] = field(default = None, repr = False)
created_at: Optional[datetime] = isodatetime(default = None, repr = False)
updated_at: Optional[datetime] = isodatetime(default = None, repr = False)
@classmethod
def collection(cls) -> str:
return "runners"
@property
def canonical_id(self) -> str:
return self.name.lower()
def runs(self) -> Sequence['Run']:
"""The runner's runs."""
return self.get_associated(Run)
def pbs(self) -> Sequence['Run']:
"""The runner's personal best runs."""
return self.get_associated(Run, 'pbs')
def games(self) -> Sequence[Game]:
"""Games for which the runner has at least one speedrun."""
return self.get_associated(Game)
def categories(self) -> Sequence[Category]:
"""Categories the runner has participated in."""
return self.get_associated(Category)
@dataclass
class History(SplitsIOData):
"""History of a split on some previous run."""
attempt_number: int
realtime_duration_ms: Optional[int]
gametime_duration_ms: Optional[int]
started_at: Optional[datetime] = isodatetime(default = None)
ended_at: Optional[datetime] = isodatetime(default = None)
@classmethod
def collection(cls) -> str:
return "histories"
@property
def canonical_id(self) -> str:
raise NotImplementedError
def is_complete(self) -> bool:
"""Returns True if the run is complete (not reset).
It is considered complete if it has a stored realtime or gametime duration."""
return (self.realtime_duration_ms is not None) or (self.gametime_duration_ms is not None)
def duration(self) -> Optional[float]:
"""Gets the duration in milliseconds. First tries realtime, then gametime, then elapsed time between start and end timestamps."""
dur = self.realtime_duration_ms if self.realtime_duration_ms is not None else self.gametime_duration_ms
if (dur is None):
start = self.started_at
end = self.ended_at
if (start is not None) and (end is not None):
dur = (end - start).total_seconds() * 1000
return dur
@dataclass
class Segment(SplitsIOData):
"""A Segment maps to a single piece of a run, also called a split. Its canonical ID string is a UUID, e.g. "c198a25f-9f8a-43cd-92ab-472a952f9336"."""
id: str
name: str
display_name: str
segment_number: int
realtime_start_ms: int
realtime_duration_ms: int
realtime_end_ms: int
realtime_shortest_duration_ms: Optional[int]
realtime_gold: bool
realtime_skipped: bool
realtime_reduced: bool
gametime_start_ms: int
gametime_duration_ms: int
gametime_end_ms: int
gametime_shortest_duration_ms: Optional[int]
gametime_gold: bool
gametime_skipped: bool
gametime_reduced: bool
histories: Optional[List[History]] = field(default = None, repr = False)
@classmethod
def collection(cls) -> str:
return "segments"
@property
def canonical_id(self) -> str:
return self.id
@dataclass
class Run(SplitsIOData):
"""A Run maps 1:1 to an uploaded splits file. Its canonical ID string is a base 36 number, e.g. "1b" "3nm" "1vr"."""
id: str
srdc_id: Optional[str]
realtime_duration_ms: int
realtime_sum_of_best_ms: Optional[int]
gametime_duration_ms: int
gametime_sum_of_best_ms: Optional[int]
default_timing: str
program: str
attempts: Optional[int]
image_url: Optional[int]
parsed_at: datetime = isodatetime()
created_at: datetime = isodatetime()
updated_at: datetime = isodatetime()
video_url: Optional[str] = None
game: Optional[Game] = None
category: Optional[Category] = None
runners: List[Runner] = field(default_factory = lambda : [])
segments: Optional[List[Segment]] = field(default = None, repr = False)
histories: Optional[List[History]] = field(default = None, repr = False)
@classmethod
def collection(cls) -> str:
return "runs"
@property
def canonical_id(self) -> str:
return self.id
@classmethod
def from_id(cls: Type['Run'], id_: str, historic: bool = False, **params: Any) -> 'Run':
"""If historic = True, additionally retrieves all historic run data."""
params['historic'] = 1 if historic else 0
run = super(Run, cls).from_id(id_, **params)
if run.histories: # sort chronologically ascending
run.histories = run.histories[::-1]
return run
@property
def completed_attempts(self) -> List[History]:
"""Returns all completed run attempts.
A completed attempt is one whose last segment has been completed."""
segments = [] if (self.segments is None) else self.segments
if (self.histories is None) or (len(segments) == 0):
return []
completed_attempt_numbers = {history.attempt_number for history in segments[-1].histories} # type: ignore
return [history for history in self.histories if (history.attempt_number in completed_attempt_numbers)]
def segment_durations(self, complete: bool = True, clean: bool = False) -> pd.DataFrame:
"""Returns a table of segment durations, in seconds.
Rows are attempts (in chronological order); columns are segments.
If complete = True, only includes completed attempts.
If clean = True, only includes attempts where each segment is completed (i.e. no missing splits).
Missing splits are assigned zero duration."""
if (self.histories is None):
raise ValueError("cannot get segment durations without run histories")
segments = [] if (self.segments is None) else self.segments
if complete and (len(segments) > 0):
attempt_numbers = [h.attempt_number for h in self.histories if h.is_complete()]
else:
attempt_numbers = [h.attempt_number for h in self.histories]
attempt_number_indices = {j : i for (i, j) in enumerate(attempt_numbers)}
arr = np.zeros((len(attempt_number_indices), len(segments)), dtype = float)
arr[:] = np.nan
# fill in segment durations
for (j, seg) in enumerate(segments):
for h in seg.histories: # type: ignore
attempt_number = h.attempt_number
if (attempt_number in attempt_number_indices):
arr[attempt_number_indices[attempt_number], j] = h.duration() / 1000 # type: ignore
df = pd.DataFrame(arr, index = pd.Index(attempt_numbers, name = 'attempt'), columns = [seg.name for seg in segments])
if clean: # zero values are invalid
return df.replace(0.0, np.nan).dropna(axis = 0, how = 'any')
return df.dropna(axis = 0, how = 'all').fillna(0.0)
def split_durations(self, complete: bool = True, clean: bool = False) -> pd.DataFrame:
"""Returns a table of split durations (cumulative segment durations), in seconds.
Rows are attempts (in chronological order); columns are splits."""
seg_durs = self.segment_durations(complete, clean)
split_durs = seg_durs.cumsum(axis = 1)
split_durs['total'] = split_durs[split_durs.columns[-1]]
histories = cast(List[History], self.histories)
attempt_number_indices = {h.attempt_number : i for (i, h) in enumerate(histories)}
true_totals = []
for j in split_durs.index:
history = histories[attempt_number_indices[j]]
dur = history.duration()
true_totals.append(None if (dur is None) else (dur / 1000))
split_durs['true_total'] = true_totals
return split_durs
```
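A hedged usage sketch for the classes above. These calls go through the `SplitsIOData` base class from `splitsio.query`, which is not shown here, so the exact query behavior (and the availability of `from_id` on the base class) is an assumption; the calls also hit the live splits.io API.
```python
# Sketch only: assumes SplitsIOData provides from_id/query constructors that perform the HTTP requests.
from splitsio.api import Game, Runner, Run  # assumed import path

game = Game.from_id('sms')                       # look up a game by its Speedrun.com shortname
for category, num_runs in game.category_counts():
    print(category.name, num_runs)               # categories sorted by decreasing run count

runner = Runner.from_id('glacials')
pbs = runner.pbs()                                # the runner's personal-best runs
run = Run.from_id(pbs[0].id, historic=True)       # historic=True loads segment/run histories
durations = run.segment_durations(clean=True)     # pandas DataFrame: attempts x segments, in seconds
print(durations.mean())                           # average duration per segment
```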
|
{
"source": "jeremayaaa/OOP-58001",
"score": 4
}
|
#### File: jeremayaaa/OOP-58001/Manalang_FinalExam.py
```python
from tkinter import *
class SemGrade:
def __init__(self, win):
self.lbl1 = Label(win, text='First Number:')
self.lbl2 = Label(win, text='Second Number:')
self.lbl3 = Label(win, text='Third Number:')
self.lbl4 = Label(win, text=' Lowest Number Among the Three:')
self.t1 = Entry(bd=3)
self.t2 = Entry(bd=3)
self.t3 = Entry(bd=3)
self.t4 = Entry(bd=3)
self.b1 = Button(win, text=' Compute ', command=self.compute)
self.b1.place(x=150, y=150)
self.lbl1.place(x=70, y=50)
self.t1.place(x=180, y=50)
self.lbl2.place(x=70, y=80)
self.t2.place(x=180, y=80)
self.lbl3.place(x=70, y=110)
self.t3.place(x=180, y=110)
self.lbl4.place(x=70, y=190)
self.t4.place(x=130, y=220)
def compute(self):
self.t4.delete(0, 'end')
L = []
num1 = int(self.t1.get())
L.append(num1)
num2 = int(self.t2.get())
L.append(num2)
num3 = int(self.t3.get())
L.append(num3)
self.t4.insert(END, str(min(L)))
window = Tk()
mywin = SemGrade(window)
window.title('Finding the Least Number')
window.geometry("400x300+10+10")
window.mainloop()
```
|
{
"source": "jeremcs/dvc",
"score": 2
}
|
#### File: dvc/repo/add.py
```python
import os
from dvc.stage import Stage
def add(repo, fname, recursive=False, no_commit=False):
fnames = _collect_filenames_to_add(repo, fname, recursive)
stages = _create_stagefiles(repo, fnames, no_commit)
repo.check_dag(repo.stages() + stages)
for stage in stages:
stage.dump()
repo.remind_to_git_add()
return stages
def _collect_filenames_to_add(repo, fname, recursive):
if recursive and os.path.isdir(fname):
fnames = _collect_valid_filenames_from_directory(repo, fname)
else:
fnames = [fname]
return fnames
def _collect_valid_filenames_from_directory(repo, fname):
fnames = []
for file_path in _file_paths(fname):
if _is_valid_file_to_add(file_path, repo):
fnames.append(file_path)
return fnames
def _file_paths(directory):
for root, _, files in os.walk(directory):
for f in files:
yield os.path.join(root, f)
def _is_valid_file_to_add(file_path, repo):
if Stage.is_stage_file(file_path):
return False
if os.path.basename(file_path) == repo.scm.ignore_file:
return False
if repo.scm.is_tracked(file_path):
return False
return True
def _create_stagefiles(repo, fnames, no_commit):
stages = []
with repo.state:
for f in fnames:
stage = Stage.create(repo=repo, outs=[f], add=True)
if stage is None:
continue
stage.save()
if not no_commit:
stage.commit()
stages.append(stage)
return stages
```
|
{
"source": "JeremDona/spatiotemporal_variable_separation",
"score": 2
}
|
#### File: var_sep/data/chairs.py
```python
import os
import numpy as np
import torch
from PIL import Image
class Chairs(object):
max_length = 62
def __init__(self, train, data_root, nt_cond, seq_len=15, image_size=64):
self.train = train
self.nt_cond = nt_cond
assert seq_len <= self.max_length
self.seq_len = seq_len
assert image_size == 64
self.image_size = image_size
self.data_root = os.path.join(data_root, 'rendered_chairs')
self.sequences = sorted(os.listdir(self.data_root))
self.sequences.remove('all_chair_names.mat')
rng = np.random.RandomState(42)
rng.shuffle(self.sequences)
if self.train:
self.start_idx = 0
self.stop_idx = int(len(self.sequences) * 0.85)
else:
self.start_idx = int(len(self.sequences) * 0.85)
self.stop_idx = len(self.sequences)
def get_sequence(self, index, chosen_idx=None, chosen_id_st=None):
index, idx = divmod(index, self.stop_idx - self.start_idx)
if chosen_idx is not None:
idx = chosen_idx
obj_dir = self.sequences[self.start_idx + idx]
dname = os.path.join(self.data_root, obj_dir)
index, id_st = divmod(index, self.max_length)
if chosen_id_st is not None:
id_st = chosen_id_st
assert index == 0
sequence = []
for i in range(id_st, id_st + self.seq_len):
fname = os.path.join(dname, 'renders', f'{i % self.max_length}.png')
sequence.append(np.array(Image.open(fname)))
sequence = np.array(sequence)
return sequence
def __getitem__(self, index):
sequence = torch.tensor(self.get_sequence(index) / 255).permute(0, 3, 1, 2).float()
return sequence[:self.nt_cond], sequence[self.nt_cond:]
def __len__(self):
return (self.max_length) * (self.stop_idx - self.start_idx)
```
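A usage sketch for the dataset class above; the `data_root` path is a placeholder and assumes the `rendered_chairs` folder is present on disk.
```python
# Sketch: conditioning / target split produced by __getitem__.
dataset = Chairs(train=True, data_root='/path/to/data', nt_cond=5, seq_len=15)
cond, target = dataset[0]
print(len(dataset))             # 62 * number of training sequences
print(cond.shape)               # (5, C, 64, 64)  -- C is the image channel count
print(target.shape)             # (10, C, 64, 64) -- the remaining seq_len - nt_cond frames
print(cond.min(), cond.max())   # pixel values scaled to [0, 1]
```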
#### File: var_sep/networks/mlp_encdec.py
```python
import torch
import numpy as np
import torch.nn as nn
from var_sep.networks.mlp import MLP
from var_sep.networks.utils import activation_factory
class MLPEncoder(nn.Module):
def __init__(self, input_size, hidden_size, output_size, nlayers):
super(MLPEncoder, self).__init__()
self.mlp = MLP(input_size, hidden_size, output_size, nlayers)
def forward(self, x, return_skip=False):
x = x.view(len(x), -1)
return self.mlp(x)
class MLPDecoder(nn.Module):
def __init__(self, latent_size, hidden_size, output_shape, nlayers, last_activation, mixing):
super(MLPDecoder, self).__init__()
self.output_shape = output_shape
self.mixing = mixing
self.mlp = MLP(latent_size, hidden_size, np.prod(np.array(output_shape)), nlayers)
self.last_activation = activation_factory(last_activation)
def forward(self, z1, z2, skip=None):
if self.mixing == 'concat':
z = torch.cat([z1, z2], dim=1)
else:
z = z1 * z2
x = self.mlp(z)
x = self.last_activation(x)
return x.view([-1] + self.output_shape)
```
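A small shape-check sketch for the encoder/decoder pair above. It assumes the var_sep package is importable, that `var_sep.networks.mlp.MLP` maps `input_size` to `output_size`, and that `'sigmoid'` is a name accepted by `activation_factory` (all assumptions, not confirmed by the code shown).
```python
import torch
from var_sep.networks.mlp_encdec import MLPEncoder, MLPDecoder  # assumed import path

# 'sigmoid' is an assumed activation name for activation_factory.
enc = MLPEncoder(input_size=64 * 64, hidden_size=128, output_size=20, nlayers=3)
dec = MLPDecoder(latent_size=40, hidden_size=128, output_shape=[1, 64, 64],
                 nlayers=3, last_activation='sigmoid', mixing='concat')

x = torch.rand(8, 1, 64, 64)
z = enc(x)                      # input is flattened before the MLP -> (8, 20)
z_s, z_t = z, z                 # stand-ins for spatial/temporal codes of size 20 each
out = dec(z_s, z_t)             # concat mixing -> latent of size 40 -> reshaped to (8, 1, 64, 64)
print(out.shape)
```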
#### File: test/mnist/test.py
```python
import argparse
import os
import random
import torch
import numpy as np
import torch.nn.functional as F
from collections import defaultdict
from torch.utils.data import DataLoader
from tqdm import tqdm
from var_sep.data.moving_mnist import MovingMNIST
from var_sep.utils.helper import load_json
from var_sep.test.utils import load_model, _ssim_wrapper
def load_dataset(args, train=False):
return MovingMNIST.make_dataset(args.data_dir, 64, args.nt_cond, args.nt_cond + args.nt_pred, 4, True,
args.n_object, train)
def main(args):
##################################################################################################################
# Setup
##################################################################################################################
# -- Device handling (CPU, GPU)
if args.device is None:
device = torch.device('cpu')
else:
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.device)
device = torch.device('cuda:0')
torch.cuda.set_device(0)
# Seed
random.seed(args.test_seed)
np.random.seed(args.test_seed)
torch.manual_seed(args.test_seed)
# Load XP config
xp_config = load_json(os.path.join(args.xp_dir, 'params.json'))
xp_config.device = device
xp_config.data_dir = args.data_dir
xp_config.xp_dir = args.xp_dir
xp_config.nt_pred = args.nt_pred
##################################################################################################################
# Load test data
##################################################################################################################
print('Loading data...')
test_dataset = load_dataset(xp_config, train=False)
test_loader = DataLoader(test_dataset, batch_size=args.batch_size, pin_memory=True)
train_dataset = load_dataset(xp_config, train=True)
train_loader = DataLoader(train_dataset, batch_size=args.batch_size, pin_memory=True)
nc = 1
size = 64
##################################################################################################################
# Load model
##################################################################################################################
print('Loading model...')
sep_net = load_model(xp_config, args.epoch)
##################################################################################################################
# Eval
##################################################################################################################
print('Generating samples...')
torch.set_grad_enabled(False)
train_iterator = iter(train_loader)
nt_test = xp_config.nt_cond + args.nt_pred
predictions = []
content_swap = []
cond_swap = []
target_swap = []
cond = []
gt = []
results = defaultdict(list)
# Evaluation is done by batch
for batch in tqdm(test_loader, ncols=80, desc='evaluation'):
# Data
x_cond, x_target = batch
bsz = len(x_cond)
x_cond = x_cond.to(device)
x_target = x_target.to(device)
cond.append(x_cond.cpu().mul(255).byte().permute(0, 1, 3, 4, 2))
gt.append(x_target.cpu().mul(255).byte().permute(0, 1, 3, 4, 2))
# Prediction
x_pred, _, s_code, _ = sep_net.get_forecast(x_cond, nt_test)
x_pred = x_pred[:, xp_config.nt_cond:]
# Content swap
x_swap_cond, x_swap_target = next(train_iterator)
x_swap_cond = x_swap_cond[:bsz].to(device)
x_swap_target = x_swap_target[:bsz].to(device)
x_swap_cond_byte = x_swap_cond.cpu().mul(255).byte()
x_swap_target_byte = x_swap_target.cpu().mul(255).byte()
cond_swap.append(x_swap_cond_byte.permute(0, 1, 3, 4, 2))
target_swap.append(x_swap_target_byte.permute(0, 1, 3, 4, 2))
x_swap_pred = sep_net.get_forecast(x_swap_cond, nt_test, init_s_code=s_code)[0]
x_swap_pred = x_swap_pred[:, xp_config.dt:]
content_swap.append(x_swap_pred.cpu().mul(255).byte().permute(0, 1, 3, 4, 2))
# Pixelwise quantitative eval
x_target = x_target.view(-1, args.nt_pred, nc, size, size)
mse = torch.mean(F.mse_loss(x_pred, x_target, reduction='none'), dim=[3, 4])
metrics_batch = {
'mse': mse.mean(2).mean(1).cpu(),
'psnr': 10 * torch.log10(1 / mse).mean(2).mean(1).cpu(),
'ssim': _ssim_wrapper(x_pred, x_target).mean(2).mean(1).cpu()
}
predictions.append(x_pred.cpu().mul(255).byte().permute(0, 1, 3, 4, 2))
# Compute metrics for best samples and register
for name in metrics_batch.keys():
results[name].append(metrics_batch[name])
##################################################################################################################
# Print results
##################################################################################################################
print('\n')
print('Results:')
for name in results.keys():
res = torch.cat(results[name]).numpy()
results[name] = res
print(name, res.mean())
##################################################################################################################
# Save samples
##################################################################################################################
np.savez_compressed(os.path.join(args.xp_dir, 'results.npz'), **results)
np.savez_compressed(os.path.join(args.xp_dir, 'predictions.npz'), predictions=torch.cat(predictions).numpy())
np.savez_compressed(os.path.join(args.xp_dir, 'gt.npz'), gt=torch.cat(gt).numpy())
np.savez_compressed(os.path.join(args.xp_dir, 'cond.npz'), cond=torch.cat(cond).numpy())
np.savez_compressed(os.path.join(args.xp_dir, 'content_swap.npz'), content_swap=torch.cat(content_swap).numpy())
np.savez_compressed(os.path.join(args.xp_dir, 'cond_swap.npz'), target_swap=torch.cat(cond_swap).numpy())
np.savez_compressed(os.path.join(args.xp_dir, 'target_swap.npz'), target_swap=torch.cat(target_swap).numpy())
if __name__ == '__main__':
p = argparse.ArgumentParser(prog="PDE-Driven Spatiotemporal Disentanglement (Moving MNIST testing)")
p.add_argument('--data_dir', type=str, metavar='DIR', required=True,
help='Directory where the dataset is saved.')
p.add_argument('--xp_dir', type=str, metavar='DIR', required=True,
help='Directory where the model configuration file and checkpoints are saved.')
p.add_argument('--epoch', type=int, metavar='EPOCH', default=None,
help='If specified, loads the checkpoint of the corresponding epoch number.')
p.add_argument('--batch_size', type=int, metavar='BATCH', default=16,
help='Batch size used to compute metrics.')
p.add_argument('--nt_pred', type=int, metavar='PRED', required=True,
help='Total of frames to predict.')
p.add_argument('--device', type=int, metavar='DEVICE', default=None,
help='GPU where the model should be placed when testing (if None, on the CPU)')
p.add_argument('--test_seed', type=int, metavar='SEED', default=1,
help='Manual seed.')
args = p.parse_args()
main(args)
```
#### File: var_sep/utils/helper.py
```python
import json
import os
import torch
import yaml
def save(elem_xp_path, sep_net, epoch_number=None):
to_save = True
append = f'_{epoch_number}' if epoch_number is not None else ''
while to_save:
try:
torch.save(sep_net.Et, os.path.join(elem_xp_path, f'ov_Et{append}.pt'))
torch.save(sep_net.Es, os.path.join(elem_xp_path, f'ov_Es{append}.pt'))
torch.save(sep_net.decoder, os.path.join(elem_xp_path, f'decoder{append}.pt'))
torch.save(sep_net.t_resnet, os.path.join(elem_xp_path, f't_resnet{append}.pt'))
to_save = False
except:
print("unable to save all files")
# The following code is adapted from SRVP https://github.com/edouardelasalles/srvp; see license notice and copyrights
# below.
# # Copyright 2020 <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class DotDict(dict):
"""
Dot notation access to dictionary attributes.
"""
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def load_yaml(path):
"""
Loads a yaml input file.
"""
with open(path, 'r') as f:
opt = yaml.safe_load(f)
return DotDict(opt)
def load_json(path):
"""
Loads a json input file.
"""
with open(path, 'r') as f:
opt = json.load(f)
return DotDict(opt)
```
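A quick sketch of the DotDict behavior defined above (the `var_sep.utils.helper` import path is assumed).
```python
# Sketch: DotDict gives attribute-style access to a plain dict; missing keys return None (dict.get).
from var_sep.utils.helper import DotDict  # assumed import path

opt = DotDict({'nt_cond': 5, 'device': 'cuda:0'})
print(opt.nt_cond)        # 5
print(opt.batch_size)     # None (missing keys do not raise)

opt.batch_size = 16       # __setattr__ writes through to the underlying dict
print(opt['batch_size'])  # 16
```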
|
{
"source": "Jeremiad/Flexget",
"score": 2
}
|
#### File: components/bittorrent/torrent_size.py
```python
from loguru import logger
from flexget import plugin
from flexget.event import event
logger = logger.bind(name='torrent_size')
class TorrentSize:
"""
Provides file size information when dealing with torrents
"""
@plugin.priority(200)
def on_task_modify(self, task, config):
for entry in task.entries:
if 'torrent' in entry:
size = entry['torrent'].size / 1024 / 1024
logger.debug('{} size: {} MB', entry['title'], size)
entry['content_size'] = size
@event('plugin.register')
def register_plugin():
plugin.register(TorrentSize, 'torrent_size', builtin=True, api_ver=2)
```
#### File: components/failed/db.py
```python
from datetime import datetime, timedelta
from loguru import logger
from sqlalchemy import Column, DateTime, Integer, String, Unicode
from sqlalchemy.schema import Index
from flexget import db_schema
from flexget.event import event
from flexget.utils.sqlalchemy_utils import table_add_column
SCHEMA_VER = 3
FAIL_LIMIT = 100
logger = logger.bind(name='failed.db')
Base = db_schema.versioned_base('failed', SCHEMA_VER)
@db_schema.upgrade('failed')
def upgrade(ver, session):
if ver is None or ver < 1:
raise db_schema.UpgradeImpossible
if ver == 1:
table_add_column('failed', 'reason', Unicode, session)
ver = 2
if ver == 2:
table_add_column('failed', 'retry_time', DateTime, session)
ver = 3
return ver
class FailedEntry(Base):
__tablename__ = 'failed'
id = Column(Integer, primary_key=True)
title = Column(Unicode)
url = Column(String)
tof = Column(DateTime)
reason = Column(Unicode)
count = Column(Integer, default=1)
retry_time = Column(DateTime)
def __init__(self, title, url, reason=None):
self.title = title
self.url = url
self.reason = reason
self.tof = datetime.now()
def __str__(self):
return '<Failed(title=%s)>' % self.title
def to_dict(self):
return {
'id': self.id,
'title': self.title,
'url': self.url,
'added_at': self.tof,
'reason': self.reason,
'count': self.count,
'retry_time': self.retry_time,
}
# create indexes, used when creating tables
columns = Base.metadata.tables['failed'].c
Index('failed_title_url', columns.title, columns.url, columns.count)
@event('manager.db_cleanup')
def db_cleanup(manager, session):
# Delete everything older than 30 days
session.query(FailedEntry).filter(
FailedEntry.tof < datetime.now() - timedelta(days=30)
).delete()
# Of the remaining, always keep latest 25. Drop any after that if fail was more than a week ago.
keep_num = 25
keep_ids = [
fe.id for fe in session.query(FailedEntry).order_by(FailedEntry.tof.desc())[:keep_num]
]
if len(keep_ids) == keep_num:
query = session.query(FailedEntry)
query = query.filter(FailedEntry.id.notin_(keep_ids))
query = query.filter(FailedEntry.tof < datetime.now() - timedelta(days=7))
query.delete(synchronize_session=False)
def get_failures(session, count=None, start=None, stop=None, sort_by=None, descending=None):
query = session.query(FailedEntry)
if count:
return query.count()
if descending:
query = query.order_by(getattr(FailedEntry, sort_by).desc())
else:
query = query.order_by(getattr(FailedEntry, sort_by))
return query.slice(start, stop).all()
```
#### File: components/pending_approval/api.py
```python
from math import ceil
from flask import jsonify, request
from flask_restx import inputs
from sqlalchemy.orm.exc import NoResultFound
from flexget.api import APIResource, api
from flexget.api.app import (
BadRequest,
NotFoundError,
base_message_schema,
etag,
pagination_headers,
success_response,
)
from . import db
pending_api = api.namespace('pending', description='View and manage pending entries')
class ObjectsContainer:
pending_entry_object = {
'type': 'object',
'properties': {
'id': {'type': 'integer'},
'task_name': {'type': 'string'},
'title': {'type': 'string'},
'url': {'type': 'string'},
'approved': {'type': 'boolean'},
'added': {'type': 'string', 'format': 'date-time'},
},
}
pending_entry_list = {'type': 'array', 'items': pending_entry_object}
operation_object = {
'type': 'object',
'properties': {'operation': {'type': 'string', 'enum': ['approve', 'reject']}},
'required': ['operation'],
'additionalProperties': False,
}
pending_entry_schema = api.schema_model('pending.entry', ObjectsContainer.pending_entry_object)
pending_entry_list_schema = api.schema_model(
'pending.entry_list', ObjectsContainer.pending_entry_list
)
operation_schema = api.schema_model('pending.operation', ObjectsContainer.operation_object)
filter_parser = api.parser()
filter_parser.add_argument('task_name', help='Filter by task name')
filter_parser.add_argument('approved', type=inputs.boolean, help='Filter by approval status')
sort_choices = ('added', 'task_name', 'title', 'url', 'approved')
pending_parser = api.pagination_parser(parser=filter_parser, sort_choices=sort_choices)
just_task_parser = filter_parser.copy()
just_task_parser.remove_argument('approved')
description = 'Either \'approve\' or \'reject\''
@pending_api.route('/')
class PendingEntriesAPI(APIResource):
@etag
@api.response(NotFoundError)
@api.response(200, model=pending_entry_list_schema)
@api.doc(parser=pending_parser)
def get(self, session=None):
"""List all pending entries"""
args = pending_parser.parse_args()
# Filter params
task_name = args.get('task_name')
approved = args.get('approved')
# Pagination and sorting params
page = args['page']
per_page = args['per_page']
sort_by = args['sort_by']
sort_order = args['order']
# Handle max size limit
if per_page > 100:
per_page = 100
descending = sort_order == 'desc'
start = per_page * (page - 1)
stop = start + per_page
kwargs = {
'task_name': task_name,
'approved': approved,
'start': start,
'stop': stop,
'descending': descending,
'sort_by': sort_by,
'session': session,
}
total_items = session.query(db.PendingEntry).count()
if not total_items:
return jsonify([])
pending_entries = [pending.to_dict() for pending in db.list_pending_entries(**kwargs)]
total_pages = int(ceil(total_items / float(per_page)))
if page > total_pages:
raise NotFoundError('page %s does not exist' % page)
# Actual results in page
actual_size = min(per_page, len(pending_entries))
# Get pagination headers
pagination = pagination_headers(total_pages, total_items, actual_size, request)
# Created response
rsp = jsonify(pending_entries)
# Add link header to response
rsp.headers.extend(pagination)
return rsp
@api.validate(operation_schema, description=description)
@api.response(201, model=pending_entry_list_schema)
@api.response(204, 'No entries modified')
@api.doc(parser=just_task_parser)
def put(self, session=None):
"""Approve/Reject the status of pending entries"""
args = filter_parser.parse_args()
data = request.json
approved = data['operation'] == 'approve'
task_name = args.get('task_name')
pending_entries = []
for entry in db.list_pending_entries(session, task_name=task_name):
if entry.approved is not approved:
entry.approved = approved
pending_entries.append(entry.to_dict())
rsp = jsonify(pending_entries)
rsp.status_code = 201 if pending_entries else 204
return rsp
@api.response(200, model=base_message_schema)
@api.doc(parser=filter_parser)
def delete(self, session=None):
"""Delete pending entries"""
args = filter_parser.parse_args()
# Filter params
task_name = args.get('task_name')
approved = args.get('approved')
deleted = session.query(db.PendingEntry)
if task_name:
deleted = deleted.filter(db.PendingEntry.task_name == task_name)
if approved:
deleted = deleted.filter(db.PendingEntry.approved == approved)
deleted = deleted.delete()
        return success_response('deleted %s pending entries' % deleted)
@pending_api.route('/<int:entry_id>/')
@api.doc(params={'entry_id': 'ID of the entry'})
@api.response(NotFoundError)
class PendingEntryAPI(APIResource):
@etag
@api.response(200, model=pending_entry_schema)
def get(self, entry_id, session=None):
"""Get a pending entry by ID"""
try:
entry = db.get_entry_by_id(session, entry_id)
except NoResultFound:
raise NotFoundError('No pending entry with ID %s' % entry_id)
return jsonify(entry.to_dict())
@api.response(201, model=pending_entry_schema)
@api.response(BadRequest)
@api.validate(operation_schema, description=description)
def put(self, entry_id, session=None):
"""Approve/Reject the status of a pending entry"""
try:
entry = db.get_entry_by_id(session, entry_id)
except NoResultFound:
raise NotFoundError('No pending entry with ID %s' % entry_id)
data = request.json
approved = data['operation'] == 'approve'
operation_text = 'approved' if approved else 'pending'
if entry.approved is approved:
raise BadRequest('Entry with id {} is already {}'.format(entry_id, operation_text))
entry.approved = approved
session.commit()
rsp = jsonify(entry.to_dict())
rsp.status_code = 201
return rsp
@api.response(200, model=base_message_schema)
def delete(self, entry_id, session=None):
"""Delete a pending entry"""
try:
entry = db.get_entry_by_id(session, entry_id)
except NoResultFound:
raise NotFoundError('No pending entry with ID %s' % entry_id)
session.delete(entry)
return success_response('successfully deleted entry with ID %s' % entry_id)
```
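The paging in the GET handler above is simple offset arithmetic; a small illustrative helper (hypothetical name) showing how `start`, `stop` and `total_pages` are derived:

```python
from math import ceil

def page_window(page, per_page, total_items, max_per_page=100):
    per_page = min(per_page, max_per_page)  # enforce the per-page cap
    start = per_page * (page - 1)           # offset of the first row on this page
    stop = start + per_page                 # one past the last row on this page
    total_pages = int(ceil(total_items / float(per_page)))
    return start, stop, total_pages

# e.g. page_window(page=3, per_page=50, total_items=120) -> (100, 150, 3)
```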
#### File: components/seen/seen.py
```python
from loguru import logger
from flexget import plugin
from flexget.event import event
from . import db
logger = logger.bind(name='seen')
class FilterSeen:
"""
Remembers previously downloaded content and rejects them in
subsequent executions. Without this plugin FlexGet would
download all matching content on every execution.
This plugin is enabled on all tasks by default.
See wiki for more information.
"""
schema = {
'oneOf': [
{'type': 'boolean'},
{'type': 'string', 'enum': ['global', 'local']},
{
'type': 'object',
'properties': {
'local': {'type': 'boolean'},
'fields': {
'type': 'array',
'items': {'type': 'string'},
"minItems": 1,
"uniqueItems": True,
},
},
},
]
}
def __init__(self):
# remember and filter by these fields
self.fields = ['title', 'url', 'original_url']
self.keyword = 'seen'
def prepare_config(self, config):
if config is None:
config = {}
elif isinstance(config, bool):
if config is False:
return config
else:
config = {'local': False}
elif isinstance(config, str):
config = {'local': config == 'local'}
config.setdefault('local', False)
config.setdefault('fields', self.fields)
return config
@plugin.priority(plugin.PRIORITY_FIRST)
def on_task_filter(self, task, config, remember_rejected=False):
"""Filter entries already accepted on previous runs."""
config = self.prepare_config(config)
if config is False:
logger.debug('{} is disabled', self.keyword)
return
fields = config.get('fields')
local = config.get('local')
for entry in task.entries:
# construct list of values looked
values = []
for field in fields:
if field not in entry:
continue
if entry[field] not in values and entry[field]:
values.append(str(entry[field]))
if values:
logger.trace('querying for: {}', ', '.join(values))
# check if SeenField.value is any of the values
found = db.search_by_field_values(
field_value_list=values, task_name=task.name, local=local, session=task.session
)
if found:
logger.debug(
"Rejecting '{}' '{}' because of seen '{}'",
entry['url'],
entry['title'],
found.value,
)
se = (
task.session.query(db.SeenEntry)
.filter(db.SeenEntry.id == found.seen_entry_id)
.one()
)
entry.reject(
'Entry with %s `%s` is already marked seen in the task %s at %s'
% (found.field, found.value, se.task, se.added.strftime('%Y-%m-%d %H:%M')),
remember=remember_rejected,
)
def on_task_learn(self, task, config):
"""Remember succeeded entries"""
config = self.prepare_config(config)
if config is False:
logger.debug('disabled')
return
fields = config.get('fields')
local = config.get('local')
if isinstance(config, list):
fields.extend(config)
for entry in task.accepted:
self.learn(task, entry, fields=fields, local=local)
# verbose if in learning mode
if task.options.learn:
logger.info("Learned '{}' (will skip this in the future)", entry['title'])
def learn(self, task, entry, fields=None, reason=None, local=False):
"""Marks entry as seen"""
# no explicit fields given, use default
if not fields:
fields = self.fields
se = db.SeenEntry(entry['title'], str(task.name), reason, local)
remembered = []
for field in fields:
if field not in entry:
continue
# removes duplicate values (eg. url, original_url are usually same)
if entry[field] in remembered:
continue
remembered.append(entry[field])
sf = db.SeenField(str(field), str(entry[field]))
se.fields.append(sf)
logger.debug("Learned '{}' (field: {}, local: {})", entry[field], field, local)
# Only add the entry to the session if it has one of the required fields
if se.fields:
task.session.add(se)
def forget(self, task, title):
"""Forget SeenEntry with :title:. Return True if forgotten."""
se = task.session.query(db.SeenEntry).filter(db.SeenEntry.title == title).first()
if se:
logger.debug("Forgotten '{}' ({} fields)", title, len(se.fields))
task.session.delete(se)
return True
@event('plugin.register')
def register_plugin():
plugin.register(FilterSeen, 'seen', builtin=True, api_ver=2)
```
#### File: Flexget/flexget/log.py
```python
import codecs
import collections
import contextlib
import functools
import logging
import logging.handlers
import os
import sys
import threading
import uuid
import warnings
from typing import Iterator, Union, List, Deque, Optional
import loguru
from loguru import logger
from flexget import __version__
from flexget.utils.tools import io_encoding
# A level more detailed than INFO
VERBOSE = 15
# environment variables to modify rotating log parameters from defaults of 1 MB and 9 files
ENV_MAXBYTES = 'FLEXGET_LOG_MAXBYTES'
ENV_MAXCOUNT = 'FLEXGET_LOG_MAXCOUNT'
LOG_FORMAT = (
'<green>{time:YYYY-MM-DD HH:mm:ss}</green> <level>{level: <8}</level> '
'<cyan>{name: <13}</cyan> <bold>{extra[task]: <15}</bold> {message}'
)
# Stores current `session_id` to keep track of originating thread for log calls
local_context = threading.local()
@contextlib.contextmanager
def capture_logs(*args, **kwargs) -> Iterator:
    """Takes the same arguments as `logger.add`, but this sink will only log messages emitted within this context."""
old_id = get_log_session_id()
session_id = local_context.session_id = old_id or str(uuid.uuid4())
existing_filter = kwargs.pop('filter', None)
kwargs.setdefault('format', LOG_FORMAT)
def filter_func(record):
if record['extra'].get('session_id') != session_id:
return False
if existing_filter:
return existing_filter(record)
return True
kwargs['filter'] = filter_func
log_sink = logger.add(*args, **kwargs)
try:
with logger.contextualize(session_id=session_id):
yield
finally:
local_context.session_id = old_id
logger.remove(log_sink)
def get_log_session_id() -> str:
return getattr(local_context, 'session_id', None)
def record_patcher(record: 'loguru.Record') -> None:
# If a custom name was bound to the logger, move it from extra directly into the record
name = record['extra'].pop('name', None)
if name:
record['name'] = name
class InterceptHandler(logging.Handler):
"""Catch any stdlib log messages from our deps and propagate to loguru."""
def emit(self, record: logging.LogRecord):
# Get corresponding Loguru level if it exists
level: Union[str, int]
try:
level = logger.level(record.levelname).name
except ValueError:
level = record.levelno
# Find caller from where originated the logged message
frame, depth = logging.currentframe(), 2
while frame.f_code.co_filename == logging.__file__:
frame = frame.f_back
depth += 1
logger.bind(name=record.name).opt(depth=depth, exception=record.exc_info).log(
level, record.getMessage()
)
_logging_configured = False
_startup_buffer: List['loguru.Record'] = []
_startup_buffer_id: Optional[int] = None
_logging_started = False
# Stores the last 100 debug messages
debug_buffer: Deque['loguru.Message'] = collections.deque(maxlen=100)
def initialize(unit_test: bool = False) -> None:
"""Prepare logging."""
# Remove default loguru sinks
logger.remove()
global _logging_configured, _logging_started, _buff_handler
if _logging_configured:
return
if 'dev' in __version__:
warnings.filterwarnings('always', category=DeprecationWarning, module='flexget.*')
warnings.simplefilter('once', append=True)
logger.level('VERBOSE', no=VERBOSE, color='<bold>', icon='👄')
logger.__class__.verbose = functools.partialmethod(logger.__class__.log, 'VERBOSE')
logger.configure(extra={'task': '', 'session_id': None}, patcher=record_patcher)
_logging_configured = True
# with unit test we want pytest to add the handlers
if unit_test:
_logging_started = True
return
    # Store any log messages in a buffer until the `start` function is run
global _startup_buffer_id
_startup_buffer_id = logger.add(
lambda message: _startup_buffer.append(message.record), level='DEBUG', format=LOG_FORMAT
)
    # Add a handler that stores the last 100 debug lines to `debug_buffer` for use in crash reports
logger.add(
lambda message: debug_buffer.append(message),
level='DEBUG',
format=LOG_FORMAT,
backtrace=True,
diagnose=True,
)
std_logger = logging.getLogger()
std_logger.addHandler(InterceptHandler())
def start(
filename: str = None, level: str = 'INFO', to_console: bool = True, to_file: bool = True
) -> None:
"""After initialization, start file logging."""
global _logging_started
assert _logging_configured
if _logging_started:
return
if level == 'NONE':
return
# Make sure stdlib logger is set so that dependency logging gets propagated
logging.getLogger().setLevel(logger.level(level).no)
if to_file and filename:
logger.add(
filename,
level=level,
rotation=int(os.environ.get(ENV_MAXBYTES, 1000 * 1024)),
retention=int(os.environ.get(ENV_MAXCOUNT, 9)),
encoding='utf-8',
format=LOG_FORMAT,
)
# without --cron we log to console
if to_console:
if not sys.stdout:
logger.debug("No sys.stdout, can't log to console.")
else:
# Make sure we don't send any characters that the current terminal doesn't support printing
safe_stdout = codecs.getwriter(io_encoding)(sys.stdout.buffer, 'replace')
colorize = None
# Auto-detection for colorize doesn't seem to work properly for PyCharm.
if "PYCHARM_HOSTED" in os.environ:
colorize = True
logger.add(safe_stdout, level=level, format=LOG_FORMAT, colorize=colorize)
# flush what we have stored from the plugin initialization
global _startup_buffer, _startup_buffer_id
if _startup_buffer_id:
logger.remove(_startup_buffer_id)
for record in _startup_buffer:
level, message = record['level'].name, record['message']
logger.patch(lambda r: r.update(record)).log(level, message)
_startup_buffer = []
_startup_buffer_id = None
_logging_started = True
```
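`capture_logs` forwards its arguments straight to `logger.add` and scopes the resulting sink to the surrounding context. A minimal usage sketch, assuming FlexGet is installed; the in-memory buffer and the plain `{message}` format are only for illustration:

```python
import io

from loguru import logger

from flexget.log import capture_logs

buf = io.StringIO()
with capture_logs(buf, level='DEBUG', format='{message}'):
    logger.info('captured: emitted inside the context')
logger.info('not captured: the temporary sink was removed on exit')
print(buf.getvalue())
```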
#### File: plugins/cli/doc.py
```python
import sys
from loguru import logger
from flexget import options
from flexget.event import event
from flexget.plugin import plugins
from flexget.terminal import console
logger = logger.bind(name='doc')
def trim(docstring):
if not docstring:
return ''
# Convert tabs to spaces (following the normal Python rules)
# and split into a list of lines:
lines = docstring.expandtabs().splitlines()
# Determine minimum indentation (first line doesn't count):
indent = sys.maxsize
for line in lines[1:]:
stripped = line.lstrip()
if stripped:
indent = min(indent, len(line) - len(stripped))
# Remove indentation (first line is special):
trimmed = [lines[0].strip()]
if indent < sys.maxsize:
for line in lines[1:]:
trimmed.append(line[indent:].rstrip())
# Strip off trailing and leading blank lines:
while trimmed and not trimmed[-1]:
trimmed.pop()
while trimmed and not trimmed[0]:
trimmed.pop(0)
# Return a single string:
return '\n'.join(trimmed)
def print_doc(manager, options):
plugin_name = options.doc
plugin = plugins.get(plugin_name, None)
if plugin:
if not plugin.instance.__doc__:
console('Plugin %s does not have documentation' % plugin_name)
else:
console('')
console(trim(plugin.instance.__doc__))
console('')
else:
console('Could not find plugin %s' % plugin_name)
@event('options.register')
def register_parser_arguments():
parser = options.register_command('doc', print_doc, help='display plugin documentation')
parser.add_argument('doc', metavar='<plugin name>', help='name of plugin to show docs for')
```
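`trim()` is the PEP 257 docstring-trimming recipe: common indentation is removed and surrounding blank lines are stripped. An illustrative check (the import path is assumed):

```python
from flexget.plugins.cli.doc import trim  # assumed import path for this file

doc = "First line.\n\n        Indented body line.\n        Another line.\n    "
assert trim(doc) == "First line.\n\nIndented body line.\nAnother line."
```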
#### File: plugins/clients/nzbget.py
```python
from loguru import logger
from flexget import plugin
from flexget.event import event
logger = logger.bind(name='nzbget')
class OutputNzbget:
"""
Example::
nzbget:
url: http://nzbget:12345@localhost:6789/xmlrpc
category: movies
priority: 0
top: False
"""
schema = {
'type': 'object',
'properties': {
'url': {'type': 'string'},
'category': {'type': 'string', 'default': ''},
'priority': {'type': 'integer', 'default': 0},
'top': {'type': 'boolean', 'default': False},
},
'required': ['url'],
'additionalProperties': False,
}
def on_task_output(self, task, config):
from xmlrpc.client import ServerProxy
params = dict(config)
server = ServerProxy(params["url"])
for entry in task.accepted:
if task.options.test:
logger.info('Would add into nzbget: {}', entry['title'])
continue
# allow overriding the category
if 'category' in entry:
params['category'] = entry['category']
try:
server.appendurl(
entry['title'] + '.nzb',
params['category'],
params['priority'],
params['top'],
entry['url'],
)
logger.info('Added `{}` to nzbget', entry['title'])
except Exception as e:
logger.critical('rpc call to nzbget failed: {}', e)
entry.fail('could not call appendurl via RPC')
@event('plugin.register')
def register_plugin():
plugin.register(OutputNzbget, 'nzbget', api_ver=2)
```
#### File: plugins/clients/rtorrent.py
```python
import os
import re
import socket
from io import BytesIO
from time import sleep
from datetime import datetime
from urllib.parse import urljoin, urlparse, urlsplit
from xmlrpc import client as xmlrpc_client
from loguru import logger
from requests.auth import HTTPBasicAuth, HTTPDigestAuth
from flexget import plugin
from flexget.config_schema import one_or_more
from flexget.entry import Entry
from flexget.event import event
from flexget.utils.bittorrent import Torrent, is_torrent_file
from flexget.utils.pathscrub import pathscrub
from flexget.utils.template import RenderError
logger = logger.bind(name='rtorrent')
class _Method:
# some magic to bind an XML-RPC method to an RPC server.
# supports "nested" methods (e.g. examples.getStateName)
def __init__(self, send, name):
self.__send = send
self.__name = name
def __getattr__(self, name):
return _Method(self.__send, "%s.%s" % (self.__name, name))
def __call__(self, *args):
return self.__send(self.__name, args)
class HTTPDigestTransport(xmlrpc_client.Transport):
"""
Transport that uses requests to support Digest authentication.
"""
def __init__(self, scheme, digest_auth, username, password, session, *args, **kwargs):
self.__scheme = scheme
self.__session = session
self.__digest_auth = digest_auth
self.__username = username
self.__password = password
self.verbose = 0
xmlrpc_client.Transport.__init__(self, *args, **kwargs) # old style class
def request(self, host, handler, request_body, verbose=False):
return self.single_request(host, handler, request_body, verbose)
def single_request(self, host, handler, request_body, verbose=0):
url = urljoin('{0}://{1}'.format(self.__scheme, host), handler)
auth = self.get_auth()
response = self.send_request(url, auth, request_body)
# if status code is 401, it means we used the wrong auth method
if response.status_code == 401:
logger.warning(
'{} auth failed. Retrying with {}. Please change your config.',
'Digest' if self.__digest_auth else 'Basic',
'Basic' if self.__digest_auth else 'Digest',
)
self.__digest_auth = not self.__digest_auth
auth = self.get_auth()
response = self.send_request(url, auth, request_body)
response.raise_for_status()
return self.parse_response(response)
def get_auth(self):
if self.__digest_auth:
return HTTPDigestAuth(self.__username, self.__password)
return HTTPBasicAuth(self.__username, self.__password)
def send_request(self, url, auth, data):
return self.__session.post(url, auth=auth, data=data, raise_status=False)
def parse_response(self, response):
p, u = self.getparser()
if self.verbose:
logger.info('body: {!r}', response)
p.feed(response.content)
p.close()
return u.close()
def encode_netstring(input):
return str(len(input)).encode() + b':' + input + b','
def encode_header(key, value):
return key + b'\x00' + value + b'\x00'
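# Illustrative values (not from the original source): the SCGI header block is
# built from NUL-delimited key/value pairs and then length-prefixed as a netstring:
#   encode_header(b'SCGI', b'1')        -> b'SCGI\x001\x00'
#   encode_netstring(b'SCGI\x001\x00')  -> b'7:SCGI\x001\x00,'
# The XML-RPC request body is appended after the trailing comma.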
class SCGITransport(xmlrpc_client.Transport):
"""
    Public domain SCGITransport implementation from:
https://github.com/JohnDoee/autotorrent/blob/develop/autotorrent/scgitransport.py
"""
def __init__(self, *args, **kwargs):
self.socket_path = kwargs.pop('socket_path', '')
xmlrpc_client.Transport.__init__(self, *args, **kwargs)
def single_request(self, host, handler, request_body, verbose=False):
self.verbose = verbose
if self.socket_path:
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect(self.socket_path)
else:
host, port = host.split(':')
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, int(port)))
request = encode_header(b'CONTENT_LENGTH', str(len(request_body)).encode())
request += encode_header(b'SCGI', b'1')
request += encode_header(b'REQUEST_METHOD', b'POST')
request += encode_header(b'REQUEST_URI', handler.encode())
request = encode_netstring(request)
request += request_body
s.send(request)
response = b''
while True:
r = s.recv(1024)
if not r:
break
response += r
response_body = BytesIO(b'\r\n\r\n'.join(response.split(b'\r\n\r\n')[1:]))
return self.parse_response(response_body)
if not hasattr(xmlrpc_client.Transport, 'single_request'):
SCGITransport.request = SCGITransport.single_request
def create_proxy(url):
parsed = urlsplit(url)
if not parsed.scheme:
path = parsed.path
return xmlrpc_client.ServerProxy('http://1', transport=SCGITransport(socket_path=path))
if parsed.scheme == 'scgi':
url = 'http://%s' % parsed.netloc
return xmlrpc_client.ServerProxy(url, transport=SCGITransport())
logger.debug('Creating Normal XMLRPC Proxy with url {!r}', url)
return xmlrpc_client.ServerProxy(url)
class RTorrent:
""" rTorrent API client """
default_fields = (
'hash',
'name',
'up_total',
'down_total',
'down_rate',
'is_open',
'is_active',
'custom1',
'custom2',
'custom3',
'custom4',
'custom5',
'state',
'complete',
'bytes_done',
'down.rate',
'left_bytes',
'ratio',
'base_path',
'load_date',
'timestamp_finished',
)
required_fields = ('hash', 'name', 'base_path')
    def __init__(self, uri, username=None, password=None, digest_auth=None, session=None):
"""
New connection to rTorrent
:param uri: RTorrent URL. Supports both http(s) and scgi
:param username: Username for basic auth over http(s)
:param password: Password for basic auth over http(s)
"""
self.uri = uri
self.username = username
self.password = password
self.digest_auth = digest_auth
self._version = None
parsed_uri = urlparse(uri)
if self.username and self.password and parsed_uri.scheme not in ['http', 'https']:
raise OSError('Username and password only supported on http(s)')
# Determine the proxy server
if parsed_uri.scheme in ['http', 'https']:
sp = xmlrpc_client.ServerProxy
elif parsed_uri.scheme == 'scgi':
sp = create_proxy
elif parsed_uri.scheme == '' and parsed_uri.path:
self.uri = parsed_uri.path
sp = create_proxy
else:
raise OSError('Unsupported scheme %s for uri %s' % (parsed_uri.scheme, self.uri))
# Use a special transport if http(s)
if parsed_uri.scheme in ['http', 'https']:
self._server = sp(
self.uri,
transport=HTTPDigestTransport(
parsed_uri.scheme, self.digest_auth, self.username, self.password, session
),
)
else:
self._server = sp(self.uri)
def _clean_fields(self, fields, reverse=False):
if not fields:
fields = list(self.default_fields)
if reverse:
for field in ['up.total', 'down.total', 'down.rate', 'timestamp.finished']:
if field in fields:
fields[fields.index(field)] = field.replace('.', '_')
return fields
for required_field in self.required_fields:
if required_field not in fields:
fields.insert(0, required_field)
for field in ['up_total', 'down_total', 'down_rate', 'timestamp_finished']:
if field in fields:
fields[fields.index(field)] = field.replace('_', '.')
return fields
def load(self, raw_torrent, fields=None, start=False, mkdir=True):
if fields is None:
fields = {}
# First param is empty 'target'
params = ['', xmlrpc_client.Binary(raw_torrent)]
# Additional fields to set
for key, val in fields.items():
# Values must be escaped if within params
# TODO: What are the escaping requirements? re.escape works differently on python 3.7+
params.append('d.%s.set=%s' % (key, re.escape(str(val))))
if mkdir and 'directory' in fields:
result = self._server.execute.throw('', 'mkdir', '-p', fields['directory'])
if result != 0:
raise xmlrpc_client.Error('Failed creating directory %s' % fields['directory'])
# by default rtorrent won't allow calls over 512kb in size.
xmlrpc_size = (
len(xmlrpc_client.dumps(tuple(params), 'raw_start')) + 71680
) # Add 70kb for buffer
if xmlrpc_size > 524288:
prev_size = self._server.network.xmlrpc.size_limit()
self._server.network.xmlrpc.size_limit.set('', xmlrpc_size)
# Call load method and return the response
if start:
result = self._server.load.raw_start(*params)
else:
result = self._server.load.raw(*params)
if xmlrpc_size > 524288:
self._server.network.xmlrpc.size_limit.set('', prev_size)
return result
def get_directory(self):
return self._server.get_directory()
def torrent(self, info_hash, fields=None):
""" Get the details of a torrent """
if not fields:
fields = list(self.default_fields)
fields = self._clean_fields(fields)
multi_call = xmlrpc_client.MultiCall(self._server)
for field in fields:
method_name = 'd.%s' % field
getattr(multi_call, method_name)(info_hash)
resp = multi_call()
# TODO: Maybe we should return a named tuple or a Torrent class?
return dict(list(zip(self._clean_fields(fields, reverse=True), [val for val in resp])))
def torrents(self, view='main', fields=None):
if not fields:
fields = list(self.default_fields)
fields = self._clean_fields(fields)
params = ['d.%s=' % field for field in fields]
params.insert(0, view)
resp = self._server.d.multicall2('', params)
# Response is formatted as a list of lists, with just the values
return [dict(list(zip(self._clean_fields(fields, reverse=True), val))) for val in resp]
def update(self, info_hash, fields):
multi_call = xmlrpc_client.MultiCall(self._server)
for key, val in fields.items():
method_name = 'd.%s.set' % key
getattr(multi_call, method_name)(info_hash, val)
return multi_call()[0]
def delete(self, info_hash):
return self._server.d.erase(info_hash)
def stop(self, info_hash):
self._server.d.stop(info_hash)
return self._server.d.close(info_hash)
def start(self, info_hash):
return self._server.d.start(info_hash)
def move(self, info_hash, dst_path):
self.stop(info_hash)
torrent = self.torrent(info_hash, fields=['base_path'])
try:
logger.verbose('Creating destination directory `{}`', dst_path)
self._server.execute.throw('', 'mkdir', '-p', dst_path)
except xmlrpc_client.Error:
raise xmlrpc_client.Error("unable to create folder %s" % dst_path)
self._server.execute.throw('', 'mv', '-u', torrent['base_path'], dst_path)
self._server.d.set_directory(info_hash, dst_path)
self.start(info_hash)
class RTorrentPluginBase:
priority_map = {'high': 3, 'medium': 2, 'low': 1, 'off': 0}
def _build_options(self, config, entry, entry_first=True):
options = {}
for opt_key in (
'path',
'message',
'priority',
'custom1',
'custom2',
'custom3',
'custom4',
'custom5',
):
# Values do not merge config with task
# Task takes priority then config is used
entry_value = entry.get(opt_key)
config_value = config.get(opt_key)
if entry_first:
if entry_value:
options[opt_key] = entry.render(entry_value)
elif config_value:
options[opt_key] = entry.render(config_value)
else:
if config_value:
options[opt_key] = entry.render(config_value)
elif entry_value:
options[opt_key] = entry.render(entry_value)
# Convert priority from string to int
priority = options.get('priority')
if priority and priority in self.priority_map:
options['priority'] = self.priority_map[priority]
# Map Flexget path to directory in rTorrent
if options.get('path'):
options['directory'] = options['path']
del options['path']
if 'directory' in options:
options['directory'] = pathscrub(options['directory'])
return options
class RTorrentOutputPlugin(RTorrentPluginBase):
schema = {
'type': 'object',
'properties': {
# connection info
'uri': {'type': 'string'},
'username': {'type': 'string'},
'password': {'type': 'string'},
'digest_auth': {'type': 'boolean', 'default': False},
'start': {'type': 'boolean', 'default': True},
'mkdir': {'type': 'boolean', 'default': True},
            'action': {'type': 'string', 'enum': ['update', 'delete', 'add'], 'default': 'add'},
# properties to set on rtorrent download object
'message': {'type': 'string'},
'priority': {'type': 'string'},
'path': {'type': 'string'},
'custom1': {'type': 'string'},
'custom2': {'type': 'string'},
'custom3': {'type': 'string'},
'custom4': {'type': 'string'},
'custom5': {'type': 'string'},
'fast_resume': {'type': 'boolean', 'default': False},
},
'required': ['uri'],
'additionalProperties': False,
}
def _verify_load(self, client, info_hash):
ex = xmlrpc_client.Error()
for _ in range(0, 5):
try:
return client.torrent(info_hash, fields=['hash'])
except xmlrpc_client.Error as e:
ex = e
sleep(0.5)
raise ex
@plugin.priority(120)
def on_task_download(self, task, config):
# If the download plugin is not enabled, we need to call it to get
# our temp .torrent files
if config['action'] == 'add' and 'download' not in task.config:
download = plugin.get('download', self)
download.get_temp_files(task, handle_magnets=True, fail_html=True)
@plugin.priority(135)
def on_task_output(self, task, config):
client = RTorrent(
os.path.expanduser(config['uri']),
username=config.get('username'),
password=config.get('password'),
digest_auth=config['digest_auth'],
session=task.requests,
)
try:
for entry in task.accepted:
if config['action'] == 'add':
if task.options.test:
logger.info('Would add {} to rTorrent', entry['url'])
continue
try:
options = self._build_options(config, entry)
except RenderError as e:
entry.fail("failed to render properties %s" % str(e))
continue
# fast_resume is not really an rtorrent option so it's not in _build_options
fast_resume = entry.get('fast_resume', config['fast_resume'])
self.add_entry(
client,
entry,
options,
start=config['start'],
mkdir=config['mkdir'],
fast_resume=fast_resume,
)
info_hash = entry.get('torrent_info_hash')
if not info_hash:
entry.fail('Failed to %s as no info_hash found' % config['action'])
continue
if config['action'] == 'delete':
if task.options.test:
logger.info(
'Would delete {} ({}) from rTorrent',
entry['title'],
entry['torrent_info_hash'],
)
continue
self.delete_entry(client, entry)
if config['action'] == 'update':
if task.options.test:
logger.info(
'Would update {} ({}) in rTorrent',
entry['title'],
entry['torrent_info_hash'],
)
continue
self.update_entry(client, entry, config)
except OSError as e:
raise plugin.PluginError("Couldn't connect to rTorrent: %s" % str(e))
def delete_entry(self, client, entry):
try:
client.delete(entry['torrent_info_hash'])
logger.verbose(
'Deleted {} ({}) in rtorrent ', entry['title'], entry['torrent_info_hash']
)
except xmlrpc_client.Error as e:
entry.fail('Failed to delete: %s' % str(e))
return
def update_entry(self, client, entry, config):
info_hash = entry['torrent_info_hash']
# First check if it already exists
try:
existing = client.torrent(info_hash, fields=['base_path'])
except xmlrpc_client.Error:
existing = False
# Build options but make config values override entry values
try:
options = self._build_options(config, entry, entry_first=False)
except RenderError as e:
entry.fail("failed to render properties %s" % str(e))
return
if existing and 'directory' in options:
# Check if changing to another directory which requires a move
if options['directory'] != existing['base_path'] and options[
'directory'
] != os.path.dirname(existing['base_path']):
try:
logger.verbose(
"Path is changing, moving files from '{}' to '{}'",
existing['base_path'],
options['directory'],
)
client.move(info_hash, options['directory'])
except xmlrpc_client.Error as e:
entry.fail('Failed moving torrent: %s' % str(e))
return
# Remove directory from update otherwise rTorrent will append the title to the directory path
if 'directory' in options:
del options['directory']
try:
client.update(info_hash, options)
logger.verbose('Updated {} ({}) in rtorrent ', entry['title'], info_hash)
except xmlrpc_client.Error as e:
entry.fail('Failed to update: %s' % str(e))
return
def add_entry(self, client, entry, options, start=True, mkdir=False, fast_resume=False):
if 'torrent_info_hash' not in entry:
entry.fail('missing torrent_info_hash')
return
if entry['url'].startswith('magnet:'):
torrent_raw = 'd10:magnet-uri%d:%se' % (len(entry['url']), entry['url'])
torrent_raw = torrent_raw.encode('ascii')
else:
# Check that file is downloaded
if 'file' not in entry:
raise plugin.PluginError('Temporary download file is missing from entry')
# Verify the temp file exists
if not os.path.exists(entry['file']):
raise plugin.PluginError('Temporary download file is missing from disk')
# Verify valid torrent file
if not is_torrent_file(entry['file']):
entry.fail("Downloaded temp file '%s' is not a torrent file" % entry['file'])
return
# Modify the torrent with resume data if needed
if fast_resume:
base = options.get('directory')
if not base:
base = client.get_directory()
piece_size = entry['torrent'].piece_size
chunks = int((entry['torrent'].size + piece_size - 1) / piece_size)
files = []
for f in entry['torrent'].get_filelist():
relative_file_path = os.path.join(f['path'], f['name'])
if entry['torrent'].is_multi_file:
relative_file_path = os.path.join(
entry['torrent'].name, relative_file_path
)
file_path = os.path.join(base, relative_file_path)
# TODO should it simply add the torrent anyway?
if not os.path.exists(file_path) and not os.path.isfile(file_path):
entry.fail('%s does not exist. Cannot add fast resume data.' % file_path)
return
# cannot bencode floats, so we need to coerce to int
mtime = int(os.path.getmtime(file_path))
# priority 0 should be "don't download"
files.append({'priority': 0, 'mtime': mtime})
entry['torrent'].set_libtorrent_resume(chunks, files)
# Since we modified the torrent, we need to write it to entry['file'] again
with open(entry['file'], 'wb+') as f:
f.write(entry['torrent'].encode())
try:
with open(entry['file'], 'rb') as f:
torrent_raw = f.read()
except OSError as e:
entry.fail('Failed to add to rTorrent %s' % str(e))
return
try:
Torrent(torrent_raw)
except SyntaxError as e:
entry.fail('Strange, unable to decode torrent, raise a BUG: %s' % str(e))
return
# First check if it already exists
try:
if client.torrent(entry['torrent_info_hash']):
logger.warning("Torrent {} already exists, won't add", entry['title'])
return
except xmlrpc_client.Error:
# No existing found
pass
try:
resp = client.load(torrent_raw, fields=options, start=start, mkdir=mkdir)
if resp != 0:
entry.fail('Failed to add to rTorrent invalid return value %s' % resp)
except xmlrpc_client.Error as e:
logger.exception(e)
entry.fail('Failed to add to rTorrent %s' % str(e))
return
# Verify the torrent loaded
try:
self._verify_load(client, entry['torrent_info_hash'])
logger.info('{} added to rtorrent', entry['title'])
except xmlrpc_client.Error as e:
logger.warning('Failed to verify torrent {} loaded: {}', entry['title'], str(e))
def on_task_learn(self, task, config):
""" Make sure all temp files are cleaned up when entries are learned """
# If download plugin is enabled, it will handle cleanup.
if 'download' not in task.config:
download = plugin.get('download', self)
download.cleanup_temp_files(task)
on_task_abort = on_task_learn
class RTorrentInputPlugin(RTorrentPluginBase):
schema = {
'type': 'object',
'properties': {
'uri': {'type': 'string'},
'username': {'type': 'string'},
'password': {'type': 'string'},
'digest_auth': {'type': 'boolean', 'default': False},
'view': {'type': 'string', 'default': 'main'},
'fields': one_or_more({'type': 'string', 'enum': list(RTorrent.default_fields)}),
},
'required': ['uri'],
'additionalProperties': False,
}
def on_task_input(self, task, config):
client = RTorrent(
os.path.expanduser(config['uri']),
username=config.get('username'),
            password=config.get('password'),
digest_auth=config['digest_auth'],
session=task.requests,
)
fields = config.get('fields')
try:
torrents = client.torrents(config['view'], fields=fields)
except (OSError, xmlrpc_client.Error) as e:
task.abort('Could not get torrents (%s): %s' % (config['view'], e))
return
entries = []
for torrent in torrents:
entry = Entry(
title=torrent['name'],
url='%s/%s' % (os.path.expanduser(config['uri']), torrent['hash']),
path=torrent['base_path'],
torrent_info_hash=torrent['hash'],
)
for attr, value in torrent.items():
entry[attr] = value
if 'timestamp_finished' in entry:
entry['timestamp_finished'] = datetime.fromtimestamp(entry['timestamp_finished'])
entries.append(entry)
return entries
@event('plugin.register')
def register_plugin():
plugin.register(RTorrentOutputPlugin, 'rtorrent', api_ver=2)
plugin.register(RTorrentInputPlugin, 'from_rtorrent', api_ver=2)
```
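`create_proxy()` above picks a transport from the parsed scheme: a bare path becomes UNIX-socket SCGI, `scgi://` becomes TCP SCGI, and anything else falls back to a normal XML-RPC proxy. A quick illustration with `urlsplit` (example URIs only):

```python
from urllib.parse import urlsplit

for uri in ('https://user@host/RPC2', 'scgi://localhost:5000', '/run/rtorrent/rpc.socket'):
    parsed = urlsplit(uri)
    print(uri, '->', parsed.scheme or 'no scheme (treated as a UNIX socket path)')
```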
#### File: plugins/filter/content_filter.py
```python
from fnmatch import fnmatch
from loguru import logger
from flexget import plugin
from flexget.config_schema import one_or_more
from flexget.event import event
logger = logger.bind(name='content_filter')
class FilterContentFilter:
"""
Rejects entries based on the filenames in the content. Torrent files only right now.
Example::
content_filter:
require:
- '*.avi'
- '*.mkv'
"""
schema = {
'type': 'object',
'properties': {
'min_files': {'type': 'integer'},
'max_files': {'type': 'integer'},
# These two properties allow a string or list of strings
'require': one_or_more({'type': 'string'}),
'require_all': one_or_more({'type': 'string'}),
'reject': one_or_more({'type': 'string'}),
'require_mainfile': {'type': 'boolean', 'default': False},
'strict': {'type': 'boolean', 'default': False},
},
'additionalProperties': False,
}
def prepare_config(self, config):
for key in ['require', 'require_all', 'reject']:
if key in config:
if isinstance(config[key], str):
config[key] = [config[key]]
return config
def process_entry(self, task, entry, config):
"""
Process an entry and reject it if it doesn't pass filter.
:param task: Task entry belongs to.
:param entry: Entry to process
:return: True, if entry was rejected.
"""
if 'content_files' in entry:
files = entry['content_files']
logger.debug('{} files: {}', entry['title'], files)
def matching_mask(files, masks):
"""Returns matching mask if any files match any of the masks, false otherwise"""
for file in files:
for mask in masks:
if fnmatch(file, mask):
return mask
return False
# Avoid confusion by printing a reject message to info log, as
# download plugin has already printed a downloading message.
if config.get('require'):
if not matching_mask(files, config['require']):
logger.info(
'Entry {} does not have any of the required filetypes, rejecting',
entry['title'],
)
entry.reject('does not have any of the required filetypes', remember=True)
return True
if config.get('require_all'):
# Make sure each mask matches at least one of the contained files
if not all(
any(fnmatch(file, mask) for file in files) for mask in config['require_all']
):
logger.info(
'Entry {} does not have all of the required filetypes, rejecting',
entry['title'],
)
entry.reject('does not have all of the required filetypes', remember=True)
return True
if config.get('reject'):
mask = matching_mask(files, config['reject'])
if mask:
logger.info('Entry {} has banned file {}, rejecting', entry['title'], mask)
entry.reject('has banned file %s' % mask, remember=True)
return True
if config.get('require_mainfile') and len(files) > 1:
best = None
for f in entry['torrent'].get_filelist():
if not best or f['size'] > best:
best = f['size']
if (100 * float(best) / float(entry['torrent'].size)) < 90:
logger.info('Entry {} does not have a main file, rejecting', entry['title'])
entry.reject('does not have a main file', remember=True)
return True
if config.get('min_files'):
if len(files) < config['min_files']:
logger.info(
f'Entry {entry["title"]} has {len(files)} files. Minimum is {config["min_files"]}. Rejecting.'
)
entry.reject(f'Has less than {config["min_files"]} files', remember=True)
return True
if config.get('max_files'):
if len(files) > config['max_files']:
logger.info(
f'Entry {entry["title"]} has {len(files)} files. Maximum is {config["max_files"]}. Rejecting.'
)
entry.reject(f'Has more than {config["max_files"]} files', remember=True)
return True
@plugin.priority(150)
def on_task_modify(self, task, config):
if task.options.test or task.options.learn:
logger.info(
'Plugin is partially disabled with --test and --learn '
'because content filename information may not be available'
)
# return
config = self.prepare_config(config)
for entry in task.accepted:
if self.process_entry(task, entry, config):
task.rerun(plugin='content_filter')
elif 'content_files' not in entry and config.get('strict'):
entry.reject('no content files parsed for entry', remember=True)
task.rerun(plugin='content_filter')
@event('plugin.register')
def register_plugin():
plugin.register(FilterContentFilter, 'content_filter', api_ver=2)
```
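The nested `matching_mask` helper is plain `fnmatch` globbing; an illustrative standalone check with made-up file names:

```python
from fnmatch import fnmatch

def matching_mask(files, masks):
    """Returns the first mask any file matches, False otherwise."""
    for file in files:
        for mask in masks:
            if fnmatch(file, mask):
                return mask
    return False

files = ['Show.S01E01.mkv', 'Show.S01E01.sample.avi', 'readme.nfo']
assert matching_mask(files, ['*.rar', '*.mkv']) == '*.mkv'  # a `require`/`reject` hit
assert matching_mask(files, ['*.iso']) is False             # no match
```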
#### File: plugins/filter/exists.py
```python
import platform
from pathlib import Path
from loguru import logger
from flexget import plugin
from flexget.config_schema import one_or_more
from flexget.event import event
logger = logger.bind(name='exists')
class FilterExists:
"""
Reject entries that already exist in given path.
Example::
exists: /storage/movies/
"""
schema = one_or_more({'type': 'string', 'format': 'path'})
def prepare_config(self, config):
# If only a single path is passed turn it into a 1 element list
if isinstance(config, str):
config = [config]
return config
@plugin.priority(-1)
def on_task_filter(self, task, config):
if not task.accepted:
logger.debug('No accepted entries, not scanning for existing.')
return
logger.verbose('Scanning path(s) for existing files.')
config = self.prepare_config(config)
filenames = {}
for folder in config:
folder = Path(folder).expanduser()
if not folder.exists():
raise plugin.PluginWarning('Path %s does not exist' % folder, logger)
for p in folder.rglob('*'):
if p.is_file():
key = p.name
# windows file system is not case sensitive
if platform.system() == 'Windows':
key = key.lower()
filenames[key] = p
for entry in task.accepted:
# priority is: filename, location (filename only), title
name = Path(entry.get('filename', entry.get('location', entry['title']))).name
if platform.system() == 'Windows':
name = name.lower()
if name in filenames:
logger.debug('Found {} in {}', name, filenames[name])
entry.reject('exists in %s' % filenames[name])
@event('plugin.register')
def register_plugin():
plugin.register(FilterExists, 'exists', api_ver=2)
```
#### File: plugins/generic/cron_env.py
```python
import sys
from loguru import logger
from flexget.event import event
from flexget.utils.log import log_once
from flexget.utils.simple_persistence import SimplePersistence
__author__ = 'paranoidi'
logger = logger.bind(name='cron_env')
@event('manager.execute.started')
def check_env(manager, options):
persistence = SimplePersistence(plugin='cron_env')
encoding = sys.getfilesystemencoding()
if options.cron:
if 'terminal_encoding' in persistence:
terminal_encoding = persistence['terminal_encoding']
if terminal_encoding.lower() != encoding.lower():
logger.warning(
'Your cron environment has different filesystem encoding ({}) compared to your terminal environment ({}).',
encoding,
terminal_encoding,
)
if encoding == 'ANSI_X3.4-1968':
logger.warning(
'Your current cron environment results filesystem encoding ANSI_X3.4-1968 '
'which supports only ASCII letters in filenames.'
)
else:
                log_once('Good! Your crontab environment seems to be the same as your terminal.')
else:
logger.info('Please run FlexGet manually once for environment verification purposes.')
else:
logger.debug('Encoding {} stored', encoding)
persistence['terminal_encoding'] = encoding
```
#### File: plugins/generic/db_analyze.py
```python
from loguru import logger
from flexget.event import event
logger = logger.bind(name='db_analyze')
# Run after the cleanup is actually finished
@event('manager.db_cleanup', 0)
def on_cleanup(manager, session):
logger.info('Running ANALYZE on database to improve performance.')
session.execute('ANALYZE')
```
#### File: plugins/generic/log_start.py
```python
import os
from argparse import SUPPRESS
from loguru import logger
from flexget import options
from flexget.event import event
logger = logger.bind(name='log_start')
@event('manager.startup')
def log_on_start(manager):
if manager.options.log_start:
logger.info('FlexGet started (PID: {})', os.getpid())
@event('manager.shutdown')
def log_on_shutdown(manager):
if manager.options.log_start:
logger.info('FlexGet stopped (PID: {})', os.getpid())
@event('options.register')
def register_options():
options.get_parser().add_argument('--log-start', action='store_true', help=SUPPRESS)
```
#### File: plugins/generic/urlfix.py
```python
from loguru import logger
from flexget import plugin
from flexget.event import event
from flexget.utils.log import log_once
logger = logger.bind(name='urlfix')
class UrlFix:
"""
Automatically fix broken urls.
"""
schema = {'type': 'boolean'}
@plugin.priority(plugin.PRIORITY_LAST)
def on_task_input(self, task, config):
if config is False:
return
        for entry in task.entries:
            if '&amp;' in entry['url']:
                log_once(
                    'Corrected `%s` url (replaced &amp; with &)' % entry['title'], logger=logger
                )
                entry['url'] = entry['url'].replace('&amp;', '&')
@event('plugin.register')
def register_plugin():
plugin.register(UrlFix, 'urlfix', builtin=True, api_ver=2)
```
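For illustration, the corrected replacement simply un-escapes HTML-encoded ampersands left over in feed URLs:

```python
url = 'https://example.com/rss?user=abc&amp;passkey=def'
assert url.replace('&amp;', '&') == 'https://example.com/rss?user=abc&passkey=def'
```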
#### File: plugins/input/filmweb_watchlist.py
```python
from loguru import logger
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.utils.cached_input import cached
try:
from filmweb.exceptions import RequestFailed
from filmweb.filmweb import Filmweb as FilmwebAPI
from filmweb.items import LoggedUser
except ImportError:
# Errors are handled later
pass
logger = logger.bind(name='filmweb_watchlist')
def translate_type(type):
return {'shows': 'serial', 'movies': 'film'}[type]
class FilmwebWatchlist:
    """Creates an entry for each movie in your Filmweb list."""
schema = {
'type': 'object',
'properties': {
'login': {'type': 'string', 'description': 'Can be username or email address'},
            'password': {'type': 'string'},
'type': {'type': 'string', 'enum': ['shows', 'movies'], 'default': 'movies'},
'min_star': {
'type': 'integer',
'default': 0,
'description': 'Items will be processed with at least this level of "How much I want to see"',
},
},
'additionalProperties': False,
'required': ['login', 'password'],
}
def on_task_start(self, task, config):
"""Raise a DependencyError if our dependencies aren't available"""
try:
from filmweb.filmweb import Filmweb as FilmwebAPI # noqa
except ImportError as e:
logger.debug('Error importing pyfilmweb: {}', e)
raise plugin.DependencyError(
'filmweb_watchlist',
'pyfilmweb',
'pyfilmweb==0.1.1.1 module required. ImportError: %s' % e,
logger,
)
@cached('filmweb_watchlist', persist='2 hours')
def on_task_input(self, task, config):
type = translate_type(config['type'])
logger.verbose('Retrieving filmweb watch list for user: {}', config['login'])
fw = FilmwebAPI()
logger.verbose('Logging as {}', config['login'])
try:
fw.login(str(config['login']), str(config['password']))
except RequestFailed as error:
raise plugin.PluginError('Authentication request failed, reason %s' % str(error))
user = LoggedUser(fw)
try:
watch_list = user.get_want_to_see()
except RequestFailed as error:
raise plugin.PluginError('Fetching watch list failed, reason %s' % str(error))
logger.verbose('Filmweb list contains {} items', len(watch_list))
entries = []
for item in watch_list:
if item['level'] < config['min_star']:
continue
if item['film'].type != type:
continue
item_info = item['film'].get_info()
entry = Entry()
entry['title'] = item_info['name_org'] or item_info['name']
entry['title'] += ' (%s)' % item_info['year']
entry['year'] = item_info['year']
entry['url'] = item['film'].url
entry['filmweb_type'] = item_info['type']
entry['filmweb_id'] = item['film'].uid
logger.debug('Created entry {}', entry)
entries.append(entry)
return entries
@event('plugin.register')
def register_plugin():
plugin.register(FilmwebWatchlist, 'filmweb_watchlist', api_ver=2)
```
#### File: plugins/input/from_task.py
```python
from loguru import logger
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.task import Task
logger = logger.bind(name='from_task')
class FromTask(object):
"""An input plugin which returns accepted entries from another task."""
schema = {'type': 'string'}
def on_task_input(self, task, config):
target_task_name = config
subtask_name = '{}>{}'.format(task.name, target_task_name)
subtask_config = task.manager.config['tasks'].get(target_task_name, {})
# TODO: This seen disabling is sorta hacky, is there a better way?
subtask_config.setdefault('seen', False)
input_task = Task(
task.manager,
subtask_name,
config=subtask_config,
# TODO: Do we want to pass other options through?
# TODO: Manual plugin semantics and allow_manual are confusing. Make it less confusing somehow?
options={'allow_manual': True, 'tasks': [subtask_name]},
output=task.output,
session_id=task.session_id,
priority=task.priority,
)
logger.verbose('Running task `{}` as subtask.', target_task_name)
input_task.execute()
logger.verbose('Finished running subtask `{}`.', target_task_name)
# Create fresh entries to reset state and strip association to old task
return [Entry(e) for e in input_task.accepted]
@event('plugin.register')
def register_plugin():
plugin.register(FromTask, 'from_task', api_ver=2)
```
#### File: plugins/input/from_telegram.py
```python
import re
from loguru import logger
from requests.exceptions import HTTPError, RequestException
from flexget.config_schema import one_or_more
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
logger = logger.bind(name='from_telegram')
_TELEGRAM_API_URL = "https://api.telegram.org/"
class TelegramInput:
"""
    Parses any message from Telegram and fills entry fields using regular expressions
Example:
token: <token>
only_new: yes
whitelist:
- username: <my_username>
- group: <my_group>
- fullname:
first: <my_first_name>
sur: <my_surname>
entry:
<field>: <regexp to match value>
Note: If not declared, title will be the message
"""
schema = {
'type': 'object',
'properties': {
'token': {'type': 'string'},
'types': one_or_more({'type': 'string', 'enum': ['private', 'group']}),
'whitelist': {
'type': 'array',
'minItems': 1,
'items': {
'oneOf': [
{
'type': 'object',
'properties': {'username': {'type': 'string'}},
'required': ['username'],
'additionalProperties': False,
},
{
'type': 'object',
'properties': {
'fullname': {
'type': 'object',
'properties': {
'first': {'type': 'string'},
'sur': {'type': 'string'},
},
'required': ['first', 'sur'],
'additionalProperties': False,
}
},
'required': ['fullname'],
'additionalProperties': False,
},
{
'type': 'object',
'properties': {'group': {'type': 'string'}},
'required': ['group'],
'additionalProperties': False,
},
]
},
},
'only_new': {'type': 'boolean', 'default': True},
'entry': {
'type': 'object',
'properties': {
'url': {'type': 'string', 'format': 'regex'},
'title': {'type': 'string', 'format': 'regex'},
},
'additionalProperties': {'type': 'string', 'format': 'regex'},
},
},
'required': ['token'],
        'additionalProperties': False,
}
@plugin.internet(logger)
def on_task_input(self, task, config):
# Load The Configs
token = config['token']
only_new = config['only_new']
entry_config = config.get('entry')
whitelist = config.get('whitelist', [])
types = config.get('types', ['private', 'group'])
# Get Last Checked ID
persistence_name = f"{token}_update_id"
update_id = task.simple_persistence.get(persistence_name)
# Get only new messages
params = {}
if update_id and only_new:
update_id += 1
params['offset'] = update_id
# The Target URL
url = f"{_TELEGRAM_API_URL}bot{token}/getUpdates"
# Get Telegram Updates
try:
response = task.requests.get(url, timeout=60, raise_status=True, params=params).json()
except HTTPError as e:
raise plugin.PluginError(f"Error getting telegram update: {e}")
        # We have an error
if not response['ok']:
raise plugin.PluginError(
f"Telegram updater returned error {response['error_code']}: {response['description']}"
)
# Get All New Messages
messages = response['result']
entries = []
for message in messages:
# This is the ID
update_id = message['update_id']
# Update the last ID for the Bot
logger.debug("Last Update set to {}", update_id)
task.simple_persistence[persistence_name] = update_id
# We Don't care if it's not a message or no text
if (
'message' not in message
or 'text' not in message['message']
or 'chat' not in message['message']
or 'type' not in message['message']['chat']
):
                logger.debug("Invalid message discarded: {}", message)
continue
            logger.debug("Incoming message: {}", message)
# Check Types
if types and message['message']['chat']['type'] not in types:
logger.debug("Ignoring message because of invalid type {}", message)
continue
# Create Base Entry
text = message['message']['text']
entry = Entry()
entry['title'] = text
# We need a url, so we add a dummy
entry['url'] = f"http://localhost?update_id={str(update_id)}"
# Store the message if we need to use it in other plugins
entry['telegram_message'] = message['message']
# Check From
message_from = message['message']['from']
message_chat = message['message']['chat']
if whitelist:
for check in whitelist:
if 'username' in check and check['username'] == message_from['username']:
logger.debug("WhiteListing: Username {}", message_from['username'])
break
elif (
'fullname' in check
and check['fullname']['first'] == message_from['first_name']
and check['fullname']['sur'] == message_from['last_name']
):
logger.debug(
"WhiteListing: Full Name {} {}",
message_from['first_name'],
message_from['last_name'],
)
break
elif 'group' in check:
if (
message_chat['type'] == 'group'
and message_chat['title'] == check['group']
):
logger.debug("WhiteListing: Group {}", message_chat['title'])
break
else:
logger.debug("Ignoring message because of no whitelist match {}", message)
continue
# Process the entry config
accept = True
if entry_config:
for field, regexp in entry_config.items():
match = re.search(regexp, text)
if match:
try:
# Add field to entry
entry[field] = match.group(1)
except IndexError:
logger.error(
'Regex for field `{}` must contain a capture group', field
)
raise plugin.PluginError(
'Your from_telegram plugin config contains errors, please correct them.'
)
else:
logger.debug('Ignored entry, not match on field {}: {}', field, entry)
accept = False
break
# Append the entry
if accept:
entries.append(entry)
logger.debug('Added entry {}', entry)
return entries
@event('plugin.register')
def register_plugin():
plugin.register(TelegramInput, 'from_telegram', api_ver=2)
```
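Each regexp under `entry` must contain a capture group, since the plugin stores `match.group(1)`. An illustrative check with a made-up message:

```python
import re

text = 'New release: https://example.com/file.torrent'

# With a capture group, the first group becomes the entry field value.
match = re.search(r'(https?://\S+)', text)
assert match.group(1) == 'https://example.com/file.torrent'

# Without a capture group, group(1) raises IndexError -- the error the plugin reports.
try:
    re.search(r'https?://\S+', text).group(1)
except IndexError:
    pass
```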
#### File: plugins/modify/manipulate.py
```python
import re
from loguru import logger
from flexget import plugin
from flexget.event import event
logger = logger.bind(name='manipulate')
class Manipulate:
r"""
Usage:
manipulate:
- <destination field>:
[find_all]: <boolean>
[phase]: <phase>
[from]: <source field>
[extract]: <regexp>
[separator]: <text>
[replace]:
regexp: <regexp>
format: <regexp>
[remove]: <boolean>
Example:
manipulate:
- title:
extract: \[\d\d\d\d\](.*)
"""
schema = {
'type': 'array',
'items': {
'type': 'object',
'additionalProperties': {
'type': 'object',
'properties': {
'phase': {'enum': ['metainfo', 'filter', 'modify']},
'from': {'type': 'string'},
'extract': {'type': 'string', 'format': 'regex'},
'separator': {'type': 'string'},
'remove': {'type': 'boolean'},
'find_all': {'type': 'boolean'},
'replace': {
'type': 'object',
'properties': {
'regexp': {'type': 'string', 'format': 'regex'},
'format': {'type': 'string'},
},
'required': ['regexp', 'format'],
'additionalProperties': False,
},
},
'additionalProperties': False,
},
},
}
def on_task_start(self, task, config):
"""
Separates the config into a dict with a list of jobs per phase.
Allows us to skip phases without any jobs in them.
"""
self.phase_jobs = {'filter': [], 'metainfo': [], 'modify': []}
for item in config:
for item_config in item.values():
# Get the phase specified for this item, or use default of metainfo
phase = item_config.get('phase', 'metainfo')
self.phase_jobs[phase].append(item)
@plugin.priority(plugin.PRIORITY_FIRST)
def on_task_metainfo(self, task, config):
if not self.phase_jobs['metainfo']:
# return if no jobs for this phase
return
modified = sum(self.process(entry, self.phase_jobs['metainfo']) for entry in task.entries)
logger.verbose('Modified {} entries.', modified)
@plugin.priority(plugin.PRIORITY_FIRST)
def on_task_filter(self, task, config):
if not self.phase_jobs['filter']:
# return if no jobs for this phase
return
modified = sum(
self.process(entry, self.phase_jobs['filter'])
for entry in task.entries
)
logger.verbose('Modified {} entries.', modified)
@plugin.priority(plugin.PRIORITY_FIRST)
def on_task_modify(self, task, config):
if not self.phase_jobs['modify']:
# return if no jobs for this phase
return
modified = sum(
self.process(entry, self.phase_jobs['modify'])
for entry in task.entries
)
logger.verbose('Modified {} entries.', modified)
def process(self, entry, jobs):
"""Process given jobs from config for an entry.
:param entry: Entry to modify
:param jobs: Config items to run on this entry
:return: True if any fields were modified
"""
modified = False
for item in jobs:
for field, config in item.items():
from_field = field
if 'from' in config:
from_field = config['from']
field_value = entry.get(from_field)
logger.debug(
'field: `{}` from_field: `{}` field_value: `{}`',
field,
from_field,
field_value,
)
if config.get('remove'):
if field in entry:
del entry[field]
modified = True
continue
if 'extract' in config:
if not field_value:
logger.warning('Cannot extract, field `{}` is not present', from_field)
continue
if config.get('find_all'):
match = re.findall(config['extract'], field_value, re.I | re.U)
logger.debug('all matches: {}', match)
field_value = config.get('separator', ' ').join(match).strip()
logger.debug('field `{}` after extract: `{}`', field, field_value)
else:
match = re.search(config['extract'], field_value, re.I | re.U)
if match:
groups = [x for x in match.groups() if x is not None]
logger.debug('groups: {}', groups)
field_value = config.get('separator', ' ').join(groups).strip()
logger.debug('field `{}` after extract: `{}`', field, field_value)
if 'replace' in config:
if not field_value:
logger.warning('Cannot replace, field `{}` is not present', from_field)
continue
replace_config = config['replace']
regexp = re.compile(replace_config['regexp'], flags=re.I | re.U)
field_value = regexp.sub(replace_config['format'], field_value).strip()
logger.debug('field `{}` after replace: `{}`', field, field_value)
if from_field != field or entry[field] != field_value:
logger.verbose('Field `{}` is now `{}`', field, field_value)
modified = True
entry[field] = field_value
return modified
@event('plugin.register')
def register_plugin():
plugin.register(Manipulate, 'manipulate', api_ver=2)
```
#### File: plugins/output/sns.py
```python
import json
from loguru import logger
from flexget import plugin
from flexget.event import event
logger = logger.bind(name='output.sns')
DEFAULT_TEMPLATE_VALUE = json.dumps(
{
'entry': {
'title': '{{title}}',
'url': '{{url}}',
'original_url': '{{original_url}}',
'series': '{{series_name}}',
'series_id': '{{series_id}}',
},
'task': '{{task}}',
}
)
class SNSNotification:
"""
Emits SNS notifications of entries
Optionally writes the torrent itself to S3
Example configuration::
sns:
[aws_access_key_id: <AWS ACCESS KEY ID>] (will be taken from AWS_ACCESS_KEY_ID environment if not provided)
[aws_secret_access_key: <AWS SECRET ACCESS KEY>] (will be taken from AWS_SECRET_ACCESS_KEY environment if
not provided)
[profile_name: <AWS PROFILE NAME>] (If provided, use this profile name instead of the default.)
aws_region: <REGION>
sns_topic_arn: <SNS ARN>
[sns_notification_template: <TEMPLATE>] (defaults to DEFAULT_TEMPLATE_VALUE)
"""
schema = {
'type': 'object',
'properties': {
'sns_topic_arn': {'type': 'string'},
'sns_notification_template': {'type': 'string', 'default': DEFAULT_TEMPLATE_VALUE},
'aws_access_key_id': {'type': 'string'},
'aws_secret_access_key': {'type': 'string'},
'aws_region': {'type': 'string'},
'profile_name': {'type': 'string'},
},
'required': ['sns_topic_arn', 'aws_region'],
'additionalProperties': False,
}
def on_task_start(self, task, config):
# verify that we actually support Boto 3
try:
import boto3 # noqa
except ImportError as e:
logger.debug('Error importing boto3: {}', e)
raise plugin.DependencyError(
"sns", "boto3", "Boto3 module required. ImportError: %s" % e
)
# this has to run near the end of the plugin chain, because we
# should notify after all other outputs.
@plugin.priority(0)
def on_task_output(self, task, config):
sender = SNSNotificationEmitter(config)
sender.send_notifications(task)
class SNSNotificationEmitter:
def __init__(self, config):
self.config = config
import boto3
self.boto3 = boto3
self.sns_notification_template = self.config.get(
'sns_notification_template', DEFAULT_TEMPLATE_VALUE
)
def build_session(self):
self.session = self.boto3.Session(
aws_access_key_id=self.config.get('aws_access_key_id', None),
aws_secret_access_key=self.config.get('aws_secret_access_key', None),
profile_name=self.config.get('profile_name', None),
region_name=self.config['aws_region'],
)
def get_topic(self):
self.build_session()
sns = self.session.resource('sns')
topic = sns.Topic(self.config['sns_topic_arn'])
return topic
def send_notifications(self, task):
topic = self.get_topic()
for entry in task.accepted:
message = entry.render(self.sns_notification_template)
if task.options.test:
logger.info(
'SNS publication: region={}, arn={}', self.config['aws_region'], topic.arn
)
logger.info('Message: {}', message)
continue
try:
response = topic.publish(Message=message)
except Exception as e:
logger.error('Error publishing {}: {}', entry['title'], e)
continue
else:
logger.debug('Published {}: {}', entry, response)
@event('plugin.register')
def register_sns_plugin():
plugin.register(SNSNotification, 'sns', api_ver=2)
```
#### File: tests/api_tests/test_etag.py
```python
from flexget.components.managed_lists.lists.movie_list.api import ObjectsContainer as OC
from flexget.utils import json
class TestETAG:
config = 'tasks: {}'
def test_etag(self, api_client, schema_match):
# Test ETag creation and usage
# Create movie lists
list_1 = {'name': 'list_1'}
list_2 = {'name': 'list_2'}
# Create lists
rsp = api_client.json_post('/movie_list/', data=json.dumps(list_1))
assert rsp.status_code == 201, 'Response code is %s' % rsp.status_code
rsp = api_client.json_post('/movie_list/', data=json.dumps(list_2))
assert rsp.status_code == 201, 'Response code is %s' % rsp.status_code
# Get ETag
rsp = api_client.get('/movie_list/')
assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code
etag = rsp.headers.get('etag')
assert etag is not None
# Test If-None-Match
header = {'If-None-Match': etag}
rsp = api_client.head('/movie_list/', headers=header)
assert rsp.status_code == 304, 'Response code is %s' % rsp.status_code
header = {'If-None-Match': etag}
rsp = api_client.get('/movie_list/', headers=header)
assert rsp.status_code == 304, 'Response code is %s' % rsp.status_code
data = rsp.get_data(as_text=True)
assert data == ''
header = {'If-None-Match': '*'}
rsp = api_client.head('/movie_list/', headers=header)
assert rsp.status_code == 304, 'Response code is %s' % rsp.status_code
# Test If-Match
header = {'If-Match': 'not_etag'}
rsp = api_client.head('/movie_list/', headers=header)
assert rsp.status_code == 412, 'Response code is %s' % rsp.status_code
# Change data
list_3 = {'name': 'list_3'}
rsp = api_client.json_post('/movie_list/', data=json.dumps(list_3))
assert rsp.status_code == 201, 'Response code is %s' % rsp.status_code
header = {'If-None-Match': etag}
rsp = api_client.get('/movie_list/', headers=header)
assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code
data = json.loads(rsp.get_data(as_text=True))
errors = schema_match(OC.return_lists, data)
assert not errors
# Verify all 3 lists are received as payload
assert len(data) == 3
```
#### File: flexget/tests/test_cookies.py
```python
import pytest
class TestCookies:
config = """
tasks:
test_cookies:
text:
url: http://httpbin.org/cookies
entry:
title: '\"title\": \"(.*)\"'
url: '\"url\": \"(.*)\"'
cookies: cookies.txt
"""
@pytest.mark.online()
def test_cookies(self, request, execute_task):
task = execute_task('test_cookies', options={'nocache': True})
assert task.find_entry(title='blah', url='aoeu'), 'Entry should have been created.'
```
#### File: flexget/tests/test_imdb_parser.py
```python
import pytest
from flexget.components.imdb.utils import ImdbParser
@pytest.mark.online
class TestImdbParser:
def test_parsed_data(self):
parser = ImdbParser()
parser.parse('tt0114814')
assert parser.actors == {
'nm0000592': '<NAME>',
'nm0261452': '<NAME>',
'nm0000751': '<NAME>',
'nm0000286': '<NAME>',
'nm0000445': '<NAME>',
'nm0800339': '<NAME>',
'nm0002064': '<NAME>',
'nm0001590': '<NAME>',
'nm0000321': '<NAME>',
'nm0790436': '<NAME>',
'nm0000228': '<NAME>',
'nm0001629': '<NAME>',
'nm0107808': '<NAME>',
'nm0001125': '<NAME>',
'nm0000860': '<NAME>',
}, 'Actors not parsed correctly'
assert parser.directors == {'nm0001741': '<NAME>'}, 'Directors not parsed correctly'
print(parser.genres)
assert len(set(parser.genres).intersection(['crime', 'mystery', 'thriller'])) == len(
['crime', 'mystery', 'thriller']
), 'Genres not parsed correctly'
assert parser.imdb_id == 'tt0114814', 'ID not parsed correctly'
assert (
len(set(parser.languages).intersection(['english', 'hungarian', 'spanish', 'french']))
== 4
), 'Languages not parsed correctly'
assert parser.mpaa_rating == 'R', 'Rating not parsed correctly'
assert parser.name == 'The Usual Suspects', 'Name not parsed correctly'
assert parser.photo, 'Photo not parsed correctly'
assert parser.plot_outline == (
'Following a truck hijack in New York, five conmen are arrested and brought together for questioning. '
'As none of them are guilty, they plan a revenge operation against the police. The operation goes well, '
'but then the influence of a legendary mastermind criminal called <NAME>\xf6ze is felt. It becomes '
'clear that each one of them has wronged S\xf6ze at some point and must pay back now. The payback job '
'leaves 27 men dead in a boat explosion, but the real question arises now: Who actually is <NAME>\xf6ze?'
), 'Plot outline not parsed correctly'
assert 8.0 < parser.score < 9.0, 'Score not parsed correctly'
assert parser.url == 'https://www.imdb.com/title/tt0114814/', 'URL not parsed correctly'
assert 400000 < parser.votes < 1000000, 'Votes not parsed correctly'
assert parser.year == 1995, 'Year not parsed correctly'
expected_keywords = {
'criminal',
'suspect',
'criminal mastermind',
'dirty cop',
'burying a body',
}
assert len(expected_keywords.intersection(parser.plot_keywords)) == len(
expected_keywords
), 'Parsed plot keywords missing items from the expected result'
assert len(expected_keywords) == len(
parser.plot_keywords
), 'Parsed plot keyword count does not match expected.'
def test_no_plot(self):
# Make sure parser doesn't crash for movies with no plot
parser = ImdbParser()
parser.parse('tt1300562')
assert parser.name == 'Goodbye Mothers'
# There is no plot
assert not parser.plot_outline
def test_no_year(self):
# Make sure parser doesn't crash for movies with no year
parser = ImdbParser()
parser.parse('tt3303790')
assert parser.name == 'Master of None'
# There is no year
assert not parser.year
def test_plot_with_links(self):
"""Make sure plot doesn't terminate at the first link. GitHub #756"""
parser = ImdbParser()
parser.parse('tt2503944')
assert parser.plot_outline == (
"<NAME> (<NAME>) had it all - and lost it. A two-star Michelin "
"rockstar with the bad habits to match, the former enfant terrible of the Paris "
"restaurant scene did everything different every time out, and only ever cared "
"about the thrill of creating explosions of taste. To land his own kitchen and "
"that third elusive Michelin star though, he'll need the best of the best on "
"his side, including the beautiful Helene (<NAME>)."
)
```
#### File: flexget/tests/test_list_interface.py
```python
class TestListInterface:
config = """
templates:
global:
disable: [seen]
tasks:
list_get:
entry_list: test_list
list_1_get:
entry_list: list 1
list_2_get:
entry_list: list 2
test_list_add:
mock:
- {title: 'title 1', url: "http://mock.url/file1.torrent"}
- {title: 'title 2', url: "http://mock.url/file2.torrent"}
accept_all: yes
list_add:
- entry_list: test_list
list_1_add:
mock:
- {title: 'title 1', url: "http://mock.url/file1.torrent"}
- {title: 'title 2', url: "http://mock.url/file2.torrent"}
accept_all: yes
list_add:
- entry_list: list 1
list_2_add:
mock:
- {title: 'title 3', url: "http://mock.url/file3.torrent"}
accept_all: yes
list_add:
- entry_list: list 2
test_multiple_list_add:
mock:
- {title: 'title 1', url: "http://mock.url/file1.torrent"}
- {title: 'title 2', url: "http://mock.url/file2.torrent"}
accept_all: yes
list_add:
- entry_list: list 1
- entry_list: list 2
test_list_accept_with_remove:
mock:
- {title: 'title 1', url: "http://mock.url/file1.torrent"}
- {title: 'title 2', url: "http://mock.url/file2.torrent"}
- {title: 'title 3', url: "http://mock.url/file3.torrent"}
list_match:
from:
- entry_list: test_list
test_list_accept_without_remove:
mock:
- {title: 'title 1', url: "http://mock.url/file1.torrent"}
- {title: 'title 2', url: "http://mock.url/file2.torrent"}
- {title: 'title 3', url: "http://mock.url/file3.torrent"}
list_match:
from:
- entry_list: test_list
remove_on_match: no
test_multiple_list_accept_with_remove:
mock:
- {title: 'title 1', url: "http://mock.url/file1.torrent"}
- {title: 'title 2', url: "http://mock.url/file2.torrent"}
- {title: 'title 3', url: "http://mock.url/file3.torrent"}
list_match:
from:
- entry_list: list 1
- entry_list: list 2
test_multiple_list_accept_without_remove:
mock:
- {title: 'title 1', url: "http://mock.url/file1.torrent"}
- {title: 'title 2', url: "http://mock.url/file2.torrent"}
- {title: 'title 3', url: "http://mock.url/file3.torrent"}
list_match:
from:
- entry_list: list 1
- entry_list: list 2
remove_on_match: no
test_list_remove:
mock:
- {title: 'title 1', url: "http://mock.url/file1.torrent"}
accept_all: yes
list_remove:
- entry_list: test_list
test_list_reject:
mock:
- {title: 'title 1', url: "http://mock.url/file1.torrent"}
- {title: 'title 3', url: "http://mock.url/file3.torrent"}
list_match:
from:
- entry_list: test_list
action: reject
add_for_list_queue:
mock:
- {title: 'The 5th Wave', url: "", imdb_id: "tt2304933"}
- {title: 'Drumline', url: "", imdb_id: "tt0303933"}
accept_all: yes
list_add:
- movie_list: test_list_queue
test_list_queue:
mock:
- {title: 'Drumline 2002 1080p BluRay DTS-HD MA 5 1 x264-FuzerHD',
url: "http://mock.url/Drumline 2002 1080p BluRay DTS-HD MA 5 1 x264-FuzerHD.torrent",
imdb_id: "tt0303933"}
- {title: 'Drumline 2002 720p BluRay DTS-HD MA 5 1 x264-FuzerHD',
url: "http://mock.url/Drumline 2002 720p BluRay DTS-HD MA 5 1 x264-FuzerHD.torrent",
imdb_id: "tt0303933"}
- {title: 'Drumline 2002 DVDRip x264-FuzerHD',
url: "http://mock.url/Drumline 2002 DVDRip x264-FuzerHD.torrent",
imdb_id: "tt0303933"}
list_match:
from:
- movie_list: test_list_queue
single_match: yes
get_for_list_queue:
movie_list: test_list_queue
test_list_clear_start:
entry_list: test_list
list_clear:
what:
- entry_list: test_list
test_list_clear_exit:
entry_list: test_list
list_clear:
what:
- entry_list: test_list
phase: exit
test_list_clear_input:
entry_list: test_list
list_clear:
what:
- entry_list: test_list
phase: input
test_list_add_with_attribute:
mock:
- {title: 'title 1', url: "http://mock.url/file1.torrent", attribute_name: "some data"}
accept_all: yes
list_add:
- entry_list: test_list
test_entries_attributes_merge:
mock:
- {title: 'title 1', url: "http://mock.url/file1.torrent"}
list_match:
from:
- entry_list: test_list
"""
def test_list_add(self, execute_task):
task = execute_task('test_list_add')
assert len(task.entries) == 2
task = execute_task('list_get')
assert len(task.entries) == 2
def test_multiple_list_add(self, execute_task):
task = execute_task('test_multiple_list_add')
assert len(task.entries) == 2
task = execute_task('list_1_get')
assert len(task.entries) == 2
task = execute_task('list_2_get')
assert len(task.entries) == 2
def test_list_accept_with_remove(self, execute_task):
task = execute_task('test_list_add')
assert len(task.entries) == 2
task = execute_task('list_get')
assert len(task.entries) == 2
task = execute_task('test_list_accept_with_remove')
assert len(task.all_entries) == 3
assert len(task.accepted) == 2
task = execute_task('list_get')
assert len(task.entries) == 0
def test_list_accept_without_remove(self, execute_task):
task = execute_task('test_list_add')
assert len(task.entries) == 2
task = execute_task('list_get')
assert len(task.entries) == 2
task = execute_task('test_list_accept_without_remove')
assert len(task.all_entries) == 3
assert len(task.accepted) == 2
task = execute_task('list_get')
assert len(task.entries) == 2
def test_multiple_list_accept_with_remove(self, execute_task):
task = execute_task('list_1_add')
assert len(task.entries) == 2
task = execute_task('list_2_add')
assert len(task.entries) == 1
task = execute_task('list_1_get')
assert len(task.entries) == 2
task = execute_task('list_2_get')
assert len(task.entries) == 1
task = execute_task('test_multiple_list_accept_with_remove')
assert len(task.accepted) == 3
task = execute_task('list_1_get')
assert len(task.entries) == 0
task = execute_task('list_2_get')
assert len(task.entries) == 0
def test_multiple_list_accept_without_remove(self, execute_task):
task = execute_task('list_1_add')
assert len(task.entries) == 2
task = execute_task('list_2_add')
assert len(task.entries) == 1
task = execute_task('list_1_get')
assert len(task.entries) == 2
task = execute_task('list_2_get')
assert len(task.entries) == 1
task = execute_task('test_multiple_list_accept_without_remove')
assert len(task.accepted) == 3
task = execute_task('list_1_get')
assert len(task.entries) == 2
task = execute_task('list_2_get')
assert len(task.entries) == 1
def test_list_remove(self, execute_task):
task = execute_task('test_list_add')
assert len(task.entries) == 2
task = execute_task('list_get')
assert len(task.entries) == 2
task = execute_task('test_list_remove')
assert len(task.accepted) == 1
task = execute_task('list_get')
assert len(task.entries) == 1
def test_list_reject(self, execute_task):
task = execute_task('test_list_add')
assert len(task.entries) == 2
task = execute_task('list_get')
assert len(task.entries) == 2
task = execute_task('test_list_reject')
assert len(task.rejected) == 1
def test_list_queue(self, execute_task):
# List queue test is based on movie_list rather than entry_list, since
# entry_list matching is much stricter and does not suit this scenario
task = execute_task('add_for_list_queue')
assert len(task.entries) == 2
task = execute_task('test_list_queue')
assert len(task.accepted) == 1
assert task.find_entry(title="Drumline 2002 1080p BluRay DTS-HD MA 5 1 x264-FuzerHD")
task = execute_task('get_for_list_queue')
assert len(task.entries) == 1
def test_list_clear_start(self, execute_task):
task = execute_task('test_list_add')
assert len(task.entries) == 2
task = execute_task('test_list_clear_start')
assert len(task.entries) == 0
def test_list_clear_exit(self, execute_task):
task = execute_task('test_list_add')
assert len(task.entries) == 2
task = execute_task('test_list_clear_exit')
assert len(task.entries) == 2
task = execute_task('list_get')
assert len(task.entries) == 0
def test_list_clear_input(self, execute_task):
task = execute_task('test_list_add')
assert len(task.entries) == 2
task = execute_task('test_list_clear_input')
assert len(task.entries) == 0
def test_entries_attributes_merge(self, execute_task):
task = execute_task('test_list_add_with_attribute')
assert len(task.entries) == 1
task = execute_task('test_entries_attributes_merge')
assert len(task.all_entries) == 1
assert len(task.accepted) == 1
entry = task.find_entry(title="title 1")
assert entry
assert entry['attribute_name'] == 'some data'
```
#### File: flexget/tests/test_radarr_list.py
```python
import pytest
from flexget.components.managed_lists.lists.radarr_list import RadarrAPIService
RADARR_API_KEY = '65e246ce581a426781e1a8645f0a1f2c'
RADARR_BASE_URL = 'http://127.0.0.1'
RADARR_PORT = 7878
# Load up a radarr container and put VCR in record mode to record.
# NOTE: You'll need to reset radarr between runs, otherwise the generated tags will have a different id. You'll also need to set up a root folder
# docker run -d --name=radarr-tmp -p 7878:7878 linuxserver/radarr:nightly
@pytest.mark.online
class TestRadarrListActions:
config = """
templates:
global:
disable: [seen]
tasks:
clear_and_add_to_radarr_list:
list_clear:
what:
- radarr_list:
base_url: %(RADARR_BASE_URL)s
api_key: %(RADARR_API_KEY)s
port: %(RADARR_PORT)s
mock:
- { title: 'Despicable Me 2 (2013)', imdb_id: 'tt1690953', tmdb_id: 93456 }
- { title: 'Sinister 2 (2015)', imdb_id: 'tt2752772', tmdb_id: 283445 }
- { title: 'Crimson Peak (2015)', imdb_id: 'tt2554274', tmdb_id: 201085 }
- { title: 'Deadpool (2016)', imdb_id: 'tt1431045', tmdb_id: 293660 }
accept_all: yes
list_add:
- radarr_list:
base_url: %(RADARR_BASE_URL)s
api_key: %(RADARR_API_KEY)s
port: %(RADARR_PORT)s
clear_and_add_to_radarr_with_tags:
list_clear:
what:
- radarr_list:
base_url: %(RADARR_BASE_URL)s
api_key: %(RADARR_API_KEY)s
port: %(RADARR_PORT)s
mock:
- { title: 'Deadpool (2016)', imdb_id: 'tt1431045', tmdb_id: 293660 }
accept_all: yes
list_add:
- radarr_list:
base_url: %(RADARR_BASE_URL)s
api_key: %(RADARR_API_KEY)s
port: %(RADARR_PORT)s
tags: ["movies", "othertag"]
radarr_list_as_input_plugin:
radarr_list:
base_url: %(RADARR_BASE_URL)s
api_key: %(RADARR_API_KEY)s
port: %(RADARR_PORT)s
include_data: True
accept_all: yes
remove_from_radarr_list:
mock:
- { title: "Ocean\'s Twelve (2004)", imdb_id: 'tt0349903', tmdb_id: 163 }
- { title: 'Sinister 2 (2015)', imdb_id: 'tt2752772', tmdb_id: 283445 }
accept_all: yes
list_remove:
- radarr_list:
base_url: %(RADARR_BASE_URL)s
api_key: %(RADARR_API_KEY)s
port: %(RADARR_PORT)s
match_radarr_list:
mock:
- { title: 'Despicable.Me.2.2013.1080p.BluRay.x264-FlexGet', imdb_id: 'tt1690953', tmdb_id: 93456 }
- { title: 'Sinister.2.2015.720p.BluRay.x264-FlexGet', imdb_id: 'tt2752772', tmdb_id: 283445 }
- { title: 'Crimson.Peak.2015.720p.BluRay.x264-FlexGet', imdb_id: 'tt2554274', tmdb_id: 201085 }
- { title: 'Deadpool.2016.1080p.BluRay.x264-FlexGet', imdb_id: 'tt1431045', tmdb_id: 293660 }
- { title: 'Kung.Fu.Panda.3.2016.720p.BluRay.x264-FlexGet', imdb_id: 'tt2267968', tmdb_id: 140300 }
list_match:
from:
- radarr_list:
base_url: %(RADARR_BASE_URL)s
api_key: %(RADARR_API_KEY)s
port: %(RADARR_PORT)s
""" % {
'RADARR_API_KEY': RADARR_API_KEY,
'RADARR_BASE_URL': RADARR_BASE_URL,
'RADARR_PORT': RADARR_PORT,
}
def test_radarr_list_tags(self, execute_task, manager):
radarr = RadarrAPIService(RADARR_API_KEY, RADARR_BASE_URL, RADARR_PORT)
tag_by_id = radarr.add_tag('tag_by_id')["id"]
manager.config['tasks']['clear_and_add_to_radarr_with_tags']['list_add'][0]['radarr_list'][
'tags'
].append(tag_by_id)
execute_task('clear_and_add_to_radarr_with_tags')
tags = {t["label"].lower(): t["id"] for t in radarr.get_tags()}
for movie in radarr.get_movies():
assert sorted(movie['tags']) == sorted(
[tag_by_id, tags.get("movies"), tags.get("othertag")]
)
# TODO: each action should be its own test case
def test_radarr_list_actions(self, execute_task):
# Begin by clearing and then adding a bunch of movies
task = execute_task('clear_and_add_to_radarr_list')
# By using the list as the input we verify that the
# movies added above are returned to us
task = execute_task('radarr_list_as_input_plugin')
assert task.find_entry(
movie_name='Despicable Me 2'
), "movie should have been present in the list but it wasn't"
assert task.find_entry(
movie_name='Crimson Peak'
), "movie should have been present in the list but it wasn't"
assert task.find_entry(
movie_name='Deadpool'
), "movie should have been present in the list but it wasn't"
assert task.find_entry(
movie_name='Sinister 2'
), "movie should have been present in the list but it wasn't"
# Now we will attempt to remove one existing (Sinister 2) and one
# non-existing movie which should not affect anything at all
task = execute_task('remove_from_radarr_list')
# And to verify the list we fetch the list again
# Sinister 2 should now be missing
task = execute_task('radarr_list_as_input_plugin')
assert task.find_entry(
movie_name='Despicable Me 2'
), "movie should have been present in the list but it wasn't"
assert task.find_entry(
movie_name='Crimson Peak'
), "movie should have been present in the list but it wasn't"
assert task.find_entry(
movie_name='Deadpool'
), "movie should have been present in the list but it wasn't"
assert not task.find_entry(
movie_name='Sinister 2'
), "movie should not be present in the list but it was"
# Now we will try to match a bunch of input entries with
# the list. Two of the movies should not have been matched.
task = execute_task('match_radarr_list')
assert task.find_entry(
'accepted', title='Despicable.Me.2.2013.1080p.BluRay.x264-FlexGet'
), "movie should have been matched but it wasn't"
assert task.find_entry(
'accepted', title='Crimson.Peak.2015.720p.BluRay.x264-FlexGet'
), "movie should have been matched but it wasn't"
assert task.find_entry(
'accepted', title='Deadpool.2016.1080p.BluRay.x264-FlexGet'
), "movie should have been matched but it wasn't"
assert task.find_entry(
'undecided', title='Sinister.2.2015.720p.BluRay.x264-FlexGet'
), "movie should not have been matched but it was"
assert task.find_entry(
'undecided', title='Kung.Fu.Panda.3.2016.720p.BluRay.x264-FlexGet'
), "movie should not have been matched but it was"
# list_match should have removed all the matched movies
# so no movies should remain
task = execute_task('radarr_list_as_input_plugin')
assert len(task.all_entries) == 0, "there should be no movies left in the list"
```
#### File: flexget/tests/test_seen.py
```python
class TestFilterSeen:
config = """
templates:
global:
accept_all: true
tasks:
test:
mock:
- {title: 'Seen title 1', url: 'http://localhost/seen1'}
test2:
mock:
- {title: 'Seen title 2', url: 'http://localhost/seen1'} # duplicate by url
- {title: 'Seen title 1', url: 'http://localhost/seen2'} # duplicate by title
- {title: 'Seen title 3', url: 'http://localhost/seen3'} # new
test_number:
mock:
- {title: 'New title 1', url: 'http://localhost/new1', imdb_score: 5}
- {title: 'New title 2', url: 'http://localhost/new2', imdb_score: 5}
test_learn:
mock:
- title: learned entry
accept_all: yes
mock_output: yes
"""
def test_seen(self, execute_task):
task = execute_task('test')
assert task.find_entry(title='Seen title 1'), 'Test entry missing'
# run again, should filter
task.execute()
assert not task.find_entry(title='Seen title 1'), 'Seen test entry remains'
# execute another task
task = execute_task('test2')
# should not contain entries whose fields were seen in the previous task
assert not task.find_entry(
title='Seen title 1'
), 'Seen test entry 1 remains in second task'
assert not task.find_entry(
title='Seen title 2'
), 'Seen test entry 2 remains in second task'
# new item in task should exist
assert task.find_entry(title='Seen title 3'), 'Unseen test entry 3 not in second task'
# test that we don't reject based on non-string fields (i.e. the same imdb_score seen before)
task = execute_task('test_number')
assert task.find_entry(title='New title 1') and task.find_entry(
title='New title 2'
), 'Item should not have been rejected because of number field'
def test_learn(self, execute_task):
task = execute_task('test_learn', options={'learn': True})
assert len(task.accepted) == 1, 'entry should have been accepted'
assert not task.mock_output, 'Entry should not have been output with --learn'
task = execute_task('test_learn')
assert len(task.rejected) == 1, 'Seen plugin should have rejected on second run'
class TestSeenLocal:
config = """
templates:
global:
accept_all: yes
tasks:
global seen 1:
mock:
- title: item 1
local seen:
seen: local
mock:
- title: item 1
- title: item 2
global seen 2:
mock:
- title: item 1
- title: item 2
local seen 2:
seen:
local: yes
mock:
- title: item 1
- title: item 2
"""
def test_local(self, execute_task):
task = execute_task('global seen 1')
# global seen 1 task should not affect seen in the local seen task
task = execute_task('local seen')
assert task.find_entry('accepted', title='item 1'), 'item 1 should be accepted first run'
# seen should still work normally within the local seen task
task = execute_task('local seen')
assert task.find_entry('rejected', title='item 1'), 'item 1 should be seen on second run'
# local seen task should not affect global seen 2 task, but global seen 1 should
task = execute_task('global seen 2')
assert task.find_entry('rejected', title='item 1'), 'item 1 should be seen'
assert task.find_entry('accepted', title='item 2'), 'item 2 should be accepted'
def test_local_dict_config(self, execute_task):
task = execute_task('local seen 2')
assert task.find_entry('accepted', title='item 1'), 'item 1 should be accepted'
assert task.find_entry('accepted', title='item 2'), 'item 2 should be accepted'
task = execute_task('global seen 2')
assert task.find_entry('accepted', title='item 1'), 'item 1 should be accepted'
assert task.find_entry('accepted', title='item 2'), 'item 2 should be accepted'
task = execute_task('local seen 2')
assert task.find_entry('rejected', title='item 1'), 'item 1 should be seen'
assert task.find_entry('rejected', title='item 2'), 'item 2 should be seen'
class TestFilterSeenMovies:
config = """
tasks:
test_1:
mock:
- {title: 'Seen movie title 1', url: 'http://localhost/seen_movie1', imdb_id: 'tt0103064', tmdb_id: 123}
- {title: 'Seen movie title 2', url: 'http://localhost/seen_movie2', imdb_id: 'tt0103064'}
accept_all: yes
seen_movies: loose
test_2:
mock:
- {title: 'Seen movie title 3', url: 'http://localhost/seen_movie3', imdb_id: 'tt0103064'}
- {title: 'Seen movie title 4', url: 'http://localhost/seen_movie4', imdb_id: 'tt0103064'}
- {title: 'Seen movie title 5', url: 'http://localhost/seen_movie5', imdb_id: 'tt0231264'}
- {title: 'Seen movie title 6', url: 'http://localhost/seen_movie6', tmdb_id: 123}
- {title: 'Seen movie title 13', url: 'http://localhost/seen_movie13', imdb_id: 'tt9901062'}
seen_movies: loose
strict:
mock:
- {title: 'Seen movie title 7', url: 'http://localhost/seen_movie7', imdb_id: 'tt0134532'}
- {title: 'Seen movie title 8', url: 'http://localhost/seen_movie8', imdb_id: 'tt0103066'}
- {title: 'Seen movie title 9', url: 'http://localhost/seen_movie9', tmdb_id: 456}
- {title: 'Seen movie title 10', url: 'http://localhost/seen_movie10'}
seen_movies: strict
local:
mock:
- {title: 'Seen movie title 11', url: 'http://localhost/seen_movie11', imdb_id: 'tt0103064', tmdb_id: 123}
- {title: 'Seen movie title 12', url: 'http://localhost/seen_movie12', imdb_id: 'tt9901062'}
accept_all: yes
seen_movies:
scope: local
"""
def test_seen_movies(self, execute_task):
task = execute_task('test_1')
assert not (
task.find_entry(title='Seen movie title 1')
and task.find_entry(title='Seen movie title 2')
), 'Movie accepted twice in one run'
# execute again
task.execute()
assert not task.find_entry(
title='Seen movie title 1'
), 'Test movie entry 1 should be rejected in second execution'
assert not task.find_entry(
title='Seen movie title 2'
), 'Test movie entry 2 should be rejected in second execution'
# execute another task
task = execute_task('test_2')
# should not contain entries whose fields were seen in the previous task
assert not task.find_entry(title='Seen movie title 3'), 'seen movie 3 exists'
assert not task.find_entry(title='Seen movie title 4'), 'seen movie 4 exists'
assert not task.find_entry(title='Seen movie title 6'), 'seen movie 6 exists (tmdb_id)'
assert task.find_entry(title='Seen movie title 5'), 'unseen movie 5 doesn\'t exist'
def test_seen_movies_strict(self, execute_task):
task = execute_task('strict')
assert len(task.rejected) == 1, 'Too many movies were rejected'
assert not task.find_entry(
title='Seen movie title 10'
), 'strict should not have passed movie 10'
def test_seen_movies_local(self, execute_task):
task = execute_task('local')
assert task.find_entry(
'accepted', title='Seen movie title 11'
), 'local should have passed movie 11'
# execute again
task.execute()
msg = 'Test movie entry 12 should be rejected in second execution'
assert task.find_entry('rejected', title='Seen movie title 12'), msg
# test a global scope after
task = execute_task('test_2')
msg = 'Changing scope should not have rejected Seen movie title 13'
assert not task.find_entry('rejected', title='Seen movie title 13'), msg
```
#### File: flexget/tests/test_serialization.py
```python
import datetime
import pytest
from flexget import entry
from flexget.utils import qualities, serialization
@entry.register_lazy_lookup('lazy function')
def lazy_func(entry):
entry['lazyfield'] = 'value a'
class TestSerialization:
def test_entry_serialization(self):
entry1 = entry.Entry(
{
'title': 'blah',
'url': 'http://blah',
'listfield': ['a', 'b', 1, 2],
'dictfield': {'a': 1, 'b': 2},
'intfield': 5,
'floatfield': 5.5,
'datefield': datetime.date(1999, 9, 9),
'datetimefield': datetime.datetime(1999, 9, 9, 9, 9),
'qualityfield': qualities.Quality('720p hdtv'),
'nestedlist': [qualities.Quality('1080p')],
'nesteddict': {'a': datetime.date(1999, 9, 9)},
}
)
entry1.add_lazy_fields('lazy function', ['lazyfield'])
assert entry1.is_lazy('lazyfield')
serialized = serialization.dumps(entry1)
print(serialized)
entry2 = serialization.loads(serialized)
# Use the underlying dict, so we compare all fields
assert entry2.is_lazy('lazyfield')
assert dict(entry1) == dict(entry2)
assert entry2['lazyfield'] == 'value a'
def test_builtin_serialization(self):
# Also test these things nest properly
value = {
'a': 'aoeu',
'b': [1, 2, 3.5],
'c': (1, datetime.datetime(2019, 12, 12, 12, 12)),
'd': {'a', 1, datetime.date(2019, 11, 11)},
}
out = serialization.dumps(value)
backin = serialization.loads(out)
assert backin == value
def test_unserializable(self):
# Hide an unserializable object as deep as we can in supported collections
value = ['a', ('b', {'c': {'d', object()}})]
with pytest.raises(TypeError):
serialization.serialize(value)
with pytest.raises(TypeError):
serialization.dumps(value)
```
#### File: Flexget/flexget/tray_icon.py
```python
import logging
import webbrowser
from functools import partial, wraps
from pathlib import Path
from typing import List, Optional, Callable
from loguru import logger
from flexget import __version__
logger = logger.bind(name='tray_icon')
try:
# If we are running outside of a graphical environment, these imports will fail
from PIL import Image
from pystray import Icon, Menu, MenuItem
_import_success = True
except Exception as e:
logger.debug('Could not load tray icon: {}', e)
_import_success = False
def check_if_tray_is_active(f):
@wraps(f)
def wrapped(self, *args, **kwargs):
if not self.active:
return
return f(self, *args, **kwargs)
return wrapped
image_path = Path(__file__).parent / 'resources' / 'flexget.png'
class TrayIcon:
def __init__(self, path_to_image: Path = image_path):
# Silence PIL noisy logging
logging.getLogger('PIL.PngImagePlugin').setLevel(logging.INFO)
logging.getLogger('PIL.Image').setLevel(logging.INFO)
self.path_to_image: Path = path_to_image
self.icon: Optional['Icon'] = None
self._menu: Optional['Menu'] = None
self.menu_items: List['MenuItem'] = []
self.active: bool = _import_success
self.running: bool = False
self.add_core_menu_items()
@check_if_tray_is_active
def add_menu_item(
self,
text: str = None,
action: Callable = None,
menu_item: 'MenuItem' = None,
index: int = None,
**kwargs,
):
"""
Add a menu item by passing its text and function, or pass a created MenuItem. Force position by sending index
"""
if not any(v for v in (menu_item, text)):
raise ValueError(f"Either 'text' or 'menu_item' are required")
menu_item = menu_item or MenuItem(text=text, action=action, **kwargs)
if index is not None:
self.menu_items.insert(index, menu_item)
else:
self.menu_items.append(menu_item)
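# Illustrative sketch (hypothetical callback, not part of the original module):
# the module-level `tray_icon` instance defined at the bottom of this file can
# be extended with custom entries like so.
#
#     tray_icon.add_menu_item(text='Open log', action=lambda: print('clicked'))
#     tray_icon.add_menu_separator()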
@check_if_tray_is_active
def add_menu_separator(self, index: int = None):
self.add_menu_item(menu_item=Menu.SEPARATOR, index=index)
def add_core_menu_items(self):
open_web = partial(webbrowser.open)
self.add_menu_item(text=f'Flexget {__version__}', enabled=False)
self.add_menu_separator()
self.add_menu_item(text='Homepage', action=partial(open_web, 'https://flexget.com/'))
self.add_menu_item(text='Forum', action=partial(open_web, 'https://discuss.flexget.com/'))
@property
def menu(self) -> 'Menu':
# This is lazy loaded since we'd like to delay the menu build until the tray is requested to run
if not self._menu:
self._menu = Menu(*self.menu_items)
return self._menu
@check_if_tray_is_active
def run(self):
"""Run the tray icon. Must be run from the main thread and is blocking"""
try:
logger.verbose('Starting tray icon')
self.icon = Icon('Flexget', Image.open(self.path_to_image), menu=self.menu)
self.running = True
self.icon.run()
except Exception as e:
logger.warning('Could not run tray icon: {}', e)
self.running = False
@check_if_tray_is_active
def stop(self):
if not self.running:
return
logger.verbose('Stopping tray icon')
self.icon.stop()
self.running = False
tray_icon = TrayIcon()
```
#### File: flexget/utils/bittorrent.py
```python
import binascii
import re
from contextlib import suppress
from typing import Dict, Union, Any, Callable, Match, Generator, Iterator, List
from loguru import logger
logger = logger.bind(name='torrent')
# Magic indicator used to quickly recognize torrent files
TORRENT_RE = re.compile(br'^d\d{1,3}:')
# List of all standard keys in a metafile
# See http://packages.python.org/pyrocore/apidocs/pyrocore.util.metafile-module.html#METAFILE_STD_KEYS
METAFILE_STD_KEYS = [
i.split('.')
for i in (
"announce",
"announce-list", # BEP-0012
"comment",
"created by",
"creation date",
"encoding",
"info",
"info.length",
"info.name",
"info.piece length",
"info.pieces",
"info.private",
"info.files",
"info.files.length",
"info.files.path",
)
]
def clean_meta(
meta: Dict[str, Any], including_info: bool = False, log_func: Callable[..., None] = None
):
"""Clean meta dict. Optionally log changes using the given logger.
See also http://packages.python.org/pyrocore/apidocs/pyrocore.util.metafile-pysrc.html#clean_meta
@param log_func: If given, a callable accepting a string message.
@return: Set of keys removed from C{meta}.
"""
modified = set()
for key in list(meta.keys()):
if [key] not in METAFILE_STD_KEYS:
if log_func:
log_func("Removing key %r..." % (key,))
del meta[key]
modified.add(key)
if including_info:
for key in list(meta["info"].keys()):
if ["info", key] not in METAFILE_STD_KEYS:
if log_func:
log_func("Removing key %r..." % ("info." + key,))
del meta["info"][key]
modified.add("info." + key)
for idx, entry in enumerate(meta["info"].get("files", [])):
for key in list(entry.keys()):
if ["info", "files", key] not in METAFILE_STD_KEYS:
if log_func:
log_func("Removing key %r from file #%d..." % (key, idx + 1))
del entry[key]
modified.add("info.files." + key)
return modified
def is_torrent_file(metafilepath: str) -> bool:
"""Check whether a file looks like a metafile by peeking into its content.
Note that this doesn't ensure that the file is a complete and valid torrent,
it just allows fast filtering of candidate files.
@param metafilepath: Path to the file to check, must have read permissions for it.
@return: True if there is a high probability this is a metafile.
"""
with open(metafilepath, 'rb') as f:
data = f.read(200)
magic_marker = bool(TORRENT_RE.match(data))
if not magic_marker:
logger.trace(
"{} doesn't seem to be a torrent, got `{}` (hex)", metafilepath, binascii.hexlify(data)
)
return bool(magic_marker)
def tokenize(
text: bytes,
match=re.compile(
br'([idel])|(\d+):|(-?\d+)'
).match, # type: Callable[[bytes, int], Match[bytes]]
) -> Generator[bytes, None, None]:
i = 0
while i < len(text):
m = match(text, i)
s = m.group(m.lastindex)
i = m.end()
if m.lastindex == 2:
yield b's'
yield text[i : i + int(s)]
i += int(s)
else:
yield s
def decode_item(src_iter: Iterator[bytes], token: bytes) -> Union[bytes, str, int, list, dict]:
data: Union[bytes, str, int, list, dict]
if token == b'i':
# integer: "i" value "e"
data = int(next(src_iter))
if next(src_iter) != b'e':
raise ValueError
elif token == b's':
# string: "s" value (virtual tokens)
data = next(src_iter)
# Strings in torrent file are defined as utf-8 encoded
with suppress(UnicodeDecodeError):
# The pieces field is a byte string, and should be left as such.
data = data.decode('utf-8')
elif token in (b'l', b'd'):
# container: "l"(list) or "d"(dict), values "e"
data = []
tok = next(src_iter)
while tok != b'e':
data.append(decode_item(src_iter, tok))
tok = next(src_iter)
if token == b'd':
data = dict(list(zip(data[0::2], data[1::2])))
else:
raise ValueError
return data
def bdecode(text: bytes) -> Dict[str, Any]:
try:
src_iter = tokenize(text)
data = decode_item(src_iter, next(src_iter))
for _ in src_iter: # look for more tokens
raise SyntaxError("trailing junk")
except (AttributeError, ValueError, StopIteration, TypeError) as e:
raise SyntaxError(f"syntax error: {e}") from e
return data
# encoding implementation by d0b
def encode_string(data: str) -> bytes:
return encode_bytes(data.encode('utf-8'))
def encode_bytes(data: bytes) -> bytes:
return str(len(data)).encode() + b':' + data
def encode_integer(data: int) -> bytes:
return b'i' + str(data).encode() + b'e'
def encode_list(data: list) -> bytes:
encoded = b'l'
for item in data:
encoded += bencode(item)
encoded += b'e'
return encoded
def encode_dictionary(data: dict) -> bytes:
encoded = b'd'
items = list(data.items())
items.sort()
for (key, value) in items:
encoded += bencode(key)
encoded += bencode(value)
encoded += b'e'
return encoded
def bencode(data: Union[bytes, str, int, list, dict]) -> bytes:
if isinstance(data, bytes):
return encode_bytes(data)
if isinstance(data, str):
return encode_string(data)
if isinstance(data, int):
return encode_integer(data)
if isinstance(data, list):
return encode_list(data)
if isinstance(data, dict):
return encode_dictionary(data)
raise TypeError(f'Unknown type for bencode: {type(data)}')
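# Illustrative round-trip sketch (not part of the original module): bencode and
# bdecode should invert each other for plain dict/list/str/int values.
#
#     meta = {'announce': 'http://tracker.example/announce',
#             'info': {'name': 'example', 'length': 1}}
#     assert bdecode(bencode(meta)) == meta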
class Torrent:
"""Represents a torrent"""
# string type used for keys, if this ever changes, stuff like "x in y"
# gets broken unless you coerce to this type
KEY_TYPE = str
@classmethod
def from_file(cls, filename: str) -> 'Torrent':
"""Create torrent from file on disk."""
with open(filename, 'rb') as handle:
return cls(handle.read())
def __init__(self, content: bytes) -> None:
"""Accepts torrent file as string"""
# Make sure there is no trailing whitespace. see #1592
content = content.strip()
# decoded torrent structure
self.content = bdecode(content)
self.modified = False
def __repr__(self) -> str:
return "%s(%s, %s)" % (
self.__class__.__name__,
", ".join(
"%s=%r" % (key, self.content["info"].get(key))
for key in ("name", "length", "private")
),
", ".join("%s=%r" % (key, self.content.get(key)) for key in ("announce", "comment")),
)
def get_filelist(self) -> List[Dict[str, Union[str, int]]]:
"""Return array containing fileinfo dictionaries (name, length, path)"""
files = []
if 'length' in self.content['info']:
# single file torrent
if 'name.utf-8' in self.content['info']:
name = self.content['info']['name.utf-8']
else:
name = self.content['info']['name']
t = {'name': name, 'size': self.content['info']['length'], 'path': ''}
files.append(t)
else:
# multifile torrent
for item in self.content['info']['files']:
if 'path.utf-8' in item:
path = item['path.utf-8']
else:
path = item['path']
t = {'path': '/'.join(path[:-1]), 'name': path[-1], 'size': item['length']}
files.append(t)
# Decode strings
for item in files:
for field in ('name', 'path'):
# These should already be decoded if they were utf-8, if not we can try some other stuff
if not isinstance(item[field], str):
try:
item[field] = item[field].decode(self.content.get('encoding', 'cp1252'))
except UnicodeError:
# Broken beyond anything reasonable
fallback = item[field].decode('utf-8', 'replace').replace('\ufffd', '_')
logger.warning(
'{}={!r} field in torrent {!r} is wrongly encoded, falling back to `{}`',
field,
item[field],
self.content['info']['name'],
fallback,
)
item[field] = fallback
return files
@property
def is_multi_file(self) -> bool:
"""Return True if the torrent is a multi-file torrent"""
return 'files' in self.content['info']
@property
def name(self) -> str:
"""Return name of the torrent"""
return self.content['info'].get('name', '')
@property
def size(self) -> int:
"""Return total size of the torrent"""
size = 0
# single file torrent
if 'length' in self.content['info']:
size = int(self.content['info']['length'])
else:
# multifile torrent
for item in self.content['info']['files']:
size += int(item['length'])
return size
@property
def private(self) -> Union[int, bool]:
return self.content['info'].get('private', False)
@property
def trackers(self) -> List[str]:
"""
:returns: List of trackers, supports single-tracker and multi-tracker implementations
"""
trackers = []
# the spec says, if announce-list present use ONLY that
# funny iteration because of nesting, ie:
# [ [ tracker1, tracker2 ], [backup1] ]
for tl in self.content.get('announce-list', []):
for t in tl:
trackers.append(t)
if self.content.get('announce') not in trackers:
trackers.append(self.content.get('announce'))
return trackers
@property
def info_hash(self) -> str:
"""Return Torrent info hash"""
import hashlib
sha1_hash = hashlib.sha1()
info_data = encode_dictionary(self.content['info'])
sha1_hash.update(info_data)
return str(sha1_hash.hexdigest().upper())
@property
def comment(self) -> str:
return self.content['comment']
@comment.setter
def comment(self, comment: str) -> None:
self.content['comment'] = comment
self.modified = True
@property
def piece_size(self) -> int:
return int(self.content['info']['piece length'])
@property
def libtorrent_resume(self) -> dict:
return self.content.get('libtorrent_resume', {})
def set_libtorrent_resume(self, chunks, files) -> None:
self.content['libtorrent_resume'] = {}
self.content['libtorrent_resume']['bitfield'] = chunks
self.content['libtorrent_resume']['files'] = files
self.modified = True
def remove_multitracker(self, tracker: str) -> None:
"""Removes passed multi-tracker from this torrent"""
for tl in self.content.get('announce-list', [])[:]:
with suppress(AttributeError, ValueError):
tl.remove(tracker)
self.modified = True
# if no trackers left in list, remove whole list
if not tl:
self.content['announce-list'].remove(tl)
def add_multitracker(self, tracker: str) -> None:
"""Appends multi-tracker to this torrent"""
self.content.setdefault('announce-list', [])
self.content['announce-list'].append([tracker])
self.modified = True
def __str__(self) -> str:
return f'<Torrent instance. Files: {self.get_filelist()}>'
def encode(self) -> bytes:
return bencode(self.content)
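# Minimal usage sketch for the Torrent class (hypothetical file path, not part
# of the original module):
#
#     torrent = Torrent.from_file('/tmp/example.torrent')
#     print(torrent.name, torrent.size, torrent.info_hash)
#     torrent.add_multitracker('http://backup.example/announce')
#     data = torrent.encode()  # re-serialized content, including the new tracker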
```
#### File: flexget/utils/soup.py
```python
import warnings
from typing import IO, Union
from bs4 import BeautifulSoup
from html5lib.constants import DataLossWarning
warnings.simplefilter('ignore', DataLossWarning)
def get_soup(obj: Union[str, IO, bytes], parser: str = 'html5lib') -> BeautifulSoup:
return BeautifulSoup(obj, parser)
```
|
{
"source": "jeremiah1066/PylertAlertManager",
"score": 3
}
|
#### File: PylertAlertManager/alertmanager/alertmanager.py
```python
from requests.compat import urljoin
from requests import HTTPError
import requests
import logging
import json
import maya
from box import Box, BoxKeyError
from .alert_objects import Alert, Silence
class AlertManager(object):
"""
Implements and interface to the Alert Manager API.
Alert Manager comes packaged with Prometheus and is used for alert
management. This class aims to create an interface that simplifies
interactions with the Alert Manager API. It also provides a simple means of
introducing alerts into Alert Manager that do not originate from
Prometheus.
"""
SUCCESS_STATUSES = ['success']
ERROR_STATUSES = ['error']
def __init__(self, host, port=9093, req_obj=None):
"""
Init method.
Parameters
----------
host : str
This is the Alert Manager instance we wish to connect to.
port : int
(Default value = 9093)
This is the port we wish to use to connect to our
Alert Manager instance.
req_obj : request object
(Default value = None)
The req object would typically be a requests.Session() object.
"""
self.hostname = host
self.port = port
self._req_obj = req_obj
@property
def request_session(self):
"""
Return a requests session used to make HTTP requests.
This property is intended to be called by the _make_request method
so we are always working with a requests.Session, allowing good
customization for end users.
Returns
-------
_req_obj : requests.Session()
This is our default requests.Session() object. Can be overridden
during instantiation by specifying the req_obj parameter.
"""
if not self._req_obj:
self._req_obj = requests.Session()
return self._req_obj
def _check_response(self, req):
"""
Raise an error if our responses are not what we expect.
This is a protected method that should only be used by methods making
API calls. The intention is to check out responses for a successful
HTTP status code along with a successful return from the Alert Manager
API.
Parameters
----------
req : requests.Response
This is the response object we want to verify.
Returns
-------
boolean
Return True if response check is successful.
Raises
------
ValueError
Raise a value error if the 'status' key of our response is in
our list of error statuses from Alert Manager.
HTTPError
Raise an http error if our response objects status_code attribute
is not in requests.codes.ok (basically not a 200).
"""
if (req.status_code == requests.codes.ok
and req.json()['status'] in self.SUCCESS_STATUSES):
return True
elif (req.status_code == requests.codes.ok
and req.json()['status'] in self.ERROR_STATUSES):
raise ValueError('{} ==> {}'.format(req.json()['errorType'],
req.json()['error']))
else:
raise HTTPError('{} ==> {}'.format(req.status_code, req.text))
def _make_request(self, method="GET", route="/", **kwargs):
"""
Make our HTTP request and return a requests.Response object.
This is a protected method that simplifies use of the
requests.request method. Here we can override the HTTP verb we utilize
and ensure any keyword arguments are passed as well. Ultimately this
method returns a requests.Response object.
Parameters
----------
method : str
(Default value = "GET")
This is our HTTP verb.
route : str
(Default value = "/")
This is the url we are making our request to.
**kwargs : dict
Arbitrary keyword arguments.
Returns
-------
r : requests.Response
Return the response from our API call.
"""
_host = "{}:{}".format(self.hostname, self.port)
route = urljoin(_host, route)
r = self.request_session.request(method, route, **kwargs)
return r
def get_alerts(self, **kwargs):
"""
Get a list of all alerts currently in Alert Manager.
This method returns a list of all firing alerts from our Alert Manager
instance.
Parameters
----------
**kwargs : dict
Arbitrary keyword arguments. These kwargs can be used to specify
filters to limit the return of our list of alerts to alerts that
match our filter.
Returns
-------
list
Return a list of Alert objects from our Alert Manager instance.
"""
route = "/alertmanager/api/v1/alerts"
self._validate_get_alert_kwargs(**kwargs)
if kwargs.get('filter'):
kwargs['filter'] = self._handle_filters(kwargs['filter'])
r = self._make_request("GET", route, params=kwargs)
if self._check_response(r):
return [Alert(alert) for alert in r.json()['data']]
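# Illustrative call (hypothetical host, not part of the original source):
#
#     am = AlertManager('http://alertmanager.example.com')
#     critical = am.get_alerts(filter={'severity': 'critical'})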
def _validate_get_alert_kwargs(self, **kwargs):
"""
Check kwargs for validity.
This is a protected method and should not be used outside of the
get_alerts method. Here we verify that the kwargs we pass to filter our
returned alerts are sane and contain keys Alert Manager knows about.
Parameters
----------
**kwargs : dict
Arbitrary keyword arguments. These kwargs are used to specify
filters to limit the return of our list of alerts to alerts that
match our filter.
Raises
------
KeyError
If a key in our kwargs doesn't match our list of valid_keys,
we raise a key error. We prevent filter keys that Alert Manager
doesn't understand from being passed in a request.
"""
valid_keys = ['filter', 'silenced', 'inhibited']
for key in kwargs.keys():
if key not in valid_keys:
raise KeyError('invalid get parameter {}'.format(key))
def _validate_get_silence_kwargs(self, **kwargs):
"""
Check kwargs for validity.
This is a protected method and should not be used outside of the
get_silences method. Here we verify that the kwargs we pass to filter our
returned silences are sane and contain keys Alert Manager knows about.
Parameters
----------
**kwargs : dict
Arbitrary keyword arguments. These kwargs are used to specify
filters to limit the return of our list of silences to silences that
match our filter.
Raises
------
KeyError
If a key in our kwargs doesn't match our list of valid_keys,
we raise a key error. We prevent filter keys that Alert Manager
doesn't understand from being passed in a request.
"""
valid_keys = ['filter']
for key in kwargs.keys():
if key not in valid_keys:
raise KeyError('invalid get parameter {}'.format(key))
def _handle_filters(self, filter_dict):
"""
Construct and return a filter.
This is a protected method and should not be used outside of the public
get_alerts method. This method works to ensure the structure of our
filter string is something that Alert Manager can understand.
Parameters
----------
filter_dict : dict
A dict where the keys represent the label on which we wish to
filter and the value that key should have.
Returns
-------
str
Returns a filter string to be passed along with our get_alerts
method call.
"""
if not isinstance(filter_dict, dict):
raise TypeError('get_alerts() and get_silences() filter must be dict')
filter_list = list()
starter_string = '{}="{}"'
for key, value in filter_dict.items():
string = starter_string.format(key, value)
filter_list.append(string)
final_filter_string = ','.join(filter_list)
return '{{{}}}'.format(final_filter_string)
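# For example (illustrative values), {'alertname': 'HighCPU', 'job': 'node'} is
# turned into the string '{alertname="HighCPU",job="node"}' before being sent
# as the `filter` query parameter; label order follows the dict's ordering.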
def post_alerts(self, *alert):
"""
Post alerts to Alert Manager.
This method is straightforward and its name describes what it does.
We use this method to post alerts to Alert Manager.
Parameters
----------
*alert : list of alerts or single alert
This is either a list of Alert objects, dictionaries or a single
Alert object or dictionary to be posted as an alert to
Alert Manager.
Returns
-------
Alert
Return the response from Alert Manager as an Alert object.
"""
payload = list()
for obj in alert:
if isinstance(obj, Alert):
payload.append(obj.validate_and_dump())
else:
converted = Alert.from_dict(obj)
payload.append(converted.validate_and_dump())
route = "/alertmanager/api/v1/alerts"
r = self._make_request("POST", route, json=payload)
if self._check_response(r):
return Alert.from_dict(r.json())
def get_status(self):
"""
Return the status of our Alert Manager instance.
This method returns a great deal of valuable information about our
Alert Manager's current configuration.
Returns
-------
Alert
Return the response from Alert Manager as an Alert object.
"""
route = "/alertmanager/api/v1/status"
r = self._make_request("GET", route)
if self._check_response(r):
return Alert.from_dict(r.json())
def get_receivers(self):
"""
Return a list of available receivers from our Alert Manager instance.
Receivers from an alert manager perspective are notification
integrations. Notifications can be sent from Alert Manager to any of
The listed receivers. Per Alert Manager documentation, no new
receivers are going to be added, so further integrations should be
managed via the webhook receiver:
https://prometheus.io/docs/alerting/configuration/
Returns
-------
Alert
Return the response from Alert Manager as an Alert object.
"""
route = "/alertmanager/api/v1/receivers"
r = self._make_request("GET", route)
if self._check_response(r):
return Alert.from_dict(r.json())
def get_alert_groups(self):
"""
Return alerts grouped by label keys.
Another method to return our alerts.
Return
------
Alert
Return the response from Alert Manager as an Alert object.
"""
route = "/alertmanager/api/v1/alerts/groups"
r = self._make_request("GET", route)
if self._check_response(r):
return Alert.from_dict(r.json())
def get_silence(self, id=None):
"""
Return a list of alert silences.
Alert Manager allows alerts to be silenced. This call will return a
list of all silences that have been created on our Alert Manager
instance.
Parameters
----------
id : str
(Default value = None)
This is the ID of the silence we want returned.
Returns
-------
Alert
Return the response from Alert Manager as an Alert object. In this
case a list of silences.
"""
route = "/alertmanager/api/v1/silences"
if id:
route = urljoin(route, id)
r = self._make_request("GET", route)
if self._check_response(r):
return Alert.from_dict(r.json())
def get_silences(self, **kwargs):
"""
Get a list of all silences currently in Alert Manager.
This method returns a list of all silences from our Alert Manager
instance.
Parameters
----------
**kwargs : dict
Arbitrary keyword arguments. These kwargs can be used to specify
filters to limit the return of our list of silences to silences that
match our filter.
Returns
-------
list
Return a list of Silence objects from our Alert Manager instance.
"""
route = "/alertmanager/api/v1/silences"
self._validate_get_silence_kwargs(**kwargs)
if kwargs.get('filter'):
kwargs['filter'] = self._handle_filters(kwargs['filter'])
r = self._make_request("GET", route, params=kwargs)
if self._check_response(r):
return [Alert(alert) for alert in r.json()['data']]
def post_silence(self, silence):
"""
Create a silence.
This method can be utilized to silence alerts based on a matches found
by alert manager specified in the matchers parameter. Minimum structure
for a matcher is as follows:
{'matchers':
[
{
'name': 'label',
'value': 'label_value'
}
],
'endsAt': 'silence end_time'
}
Parameters
----------
silence : Silence or dict
The silence to create, containing matchers (keys/values to match an
alert) and an end time.
Returns
-------
Alert
Return the response from Alert Manager as an Alert object.
"""
if isinstance(silence, Silence):
silence = silence.validate_and_dump()
else:
silence = Silence.from_dict(silence)
silence = silence.validate_and_dump()
route = "/alertmanager/api/v1/silences"
r = self._make_request("POST", route, json=silence)
if self._check_response(r):
return Alert.from_dict(r.json())
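# Illustrative call (hypothetical values, not part of the original source; the
# exact set of required fields depends on the Silence schema in alert_objects):
#
#     am.post_silence({
#         'matchers': [{'name': 'alertname', 'value': 'HighCPU'}],
#         'endsAt': '2020-01-01T02:00:00Z',
#     })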
def delete_silence(self, silence_id):
"""
Delete a silence.
This method allows us to specify a silence_id and delete it from
Alert Manager.
Parameters
----------
silence_id : str
This is the ID of the silence returned by Alert Manager.
Returns
-------
Alert
Return the response from Alert Manager as an Alert object.
"""
route = "/alertmanager/api/v1/silence/"
route = urljoin(route, silence_id)
r = self._make_request("DELETE", route)
if self._check_response(r):
return Alert.from_dict(r.json())
```
|