code (string, 501 to 5.19M chars) | package (string, 2 to 81 chars) | path (string, 9 to 304 chars) | filename (string, 4 to 145 chars)
---|---|---|---|
import sys,os
from os import path
sys.path.append((os.path.dirname(path.dirname(__file__))))
from ctypes import *
import time
from commFunction import global_result,getip
params = [1, 16000, 16, 99, 1, 16000, 16, 99, 1, 1, 0.2, 1, 0, 1, 0, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1] # for WB 16K sample rate
params = [1, 16000, 16, 99, 1, 16000, 16, 99, 1, 1, 0.2, 1, 0, 1, 0, -1, -1, -1, -1, -1, -1, 0, 2, 0, 3, 0, 0, 0, 0, 0]
connect_flag = False
stop_flag = False
stop_flag1 = False
VQTdll = cdll.LoadLibrary(sys.prefix + '/VQTDll.dll')
def VQTLogfunc(msg):
# print('VQTLogfunc : Start')
c_s = msg.decode()
# cs = str(c_s,'utf-8')
cs = str(c_s)
print(cs)
global connect_flag
global stop_flag1
if 'Connected' in cs or 'Message sent' in cs:
connect_flag = True
stop_flag1 = True
def VQTMSGfunc(msg):
c_s = string_at(msg)
cs = str(c_s, 'utf-8')
print(cs)
global stop_flag
if 'VQT PAMS/PSQM Test Failed' in cs:
mosFailedFlag = True
else:
if 'POLQA:' in cs:
global_result.mosResult['mos'] = cs.split('POLQA:')[1].strip()
if 'Speech Level Gain:' in cs:
global_result.mosResult['Speech Level Gain'] = cs.split('Speech Level Gain:')[1].strip()
if 'Noise Level Gain:' in cs:
global_result.mosResult['Noise Level Gain'] = cs.split('Noise Level Gain:')[1].strip()
def startvqt(srcFile=None, degFile=None, samplerate=48000):
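# NOTE: only the last of the three params lists assigned below takes effect; the
# first two assignments are leftovers and are overwritten immediately.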
params = [1, 0, 16, 99, 1, 0, 16, 99, 1, 1, 0.2, 1, 0, 1, 0, 2]
params = [1, 16000, 16, 99, 1, 16000, 16, 99, 1, 1, 0.2, 1, 0, 1, 0, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1]
params = [1, 16000, 16, 99, 1, 16000, 16, 99, 1, 1, 0.2, 1, 0, 1, 0, -1, -1, -1, -1, -1, -1, 0, 2, 0, 3, 0, 0, 0, 0,
0]
params[1], params[5] = samplerate, samplerate
params = (c_float * len(params))(*params)
if connect_flag == False:
vip = getip()
try:
VQTdll.ConnectPort(c_char_p(vip.encode('utf_8')), 6666)
while connect_flag == False:
pass
except Exception as e:
print(str(e))
VQTdll.RunVQTPAMSPSQM(0, 0, c_char_p(srcFile.encode('utf_8')), c_char_p(degFile.encode('utf_8')), 1, params)
time.sleep(5)
pass
def vqtDisConnect():
VQTdll.Disconnect()
callback_type = CFUNCTYPE(None, c_char_p)
callback_login = callback_type(VQTLogfunc)
VQTdll.SetVQTLogMessage(callback_login)
# TODO: move this initialization into an init function
callback_type1 = CFUNCTYPE(None, c_wchar_p)
callback = callback_type1(VQTMSGfunc)
VQTdll.SetVQTRecvDBResponse(callback)
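# Illustrative usage sketch (assumptions: VQTDll.dll is present under sys.prefix and a
# VQT server is reachable on port 6666 at the address returned by getip(); the file
# names are placeholders, not part of the library):
#
#   startvqt(srcFile=r'ref_16k.pcm', degFile=r'deg_16k.pcm', samplerate=16000)
#   print(global_result.mosResult.get('mos'))                # filled in by VQTMSGfunc
#   print(global_result.mosResult.get('Speech Level Gain'))
#   vqtDisConnect()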
def test_polqa(file_name, count):
f = file_name
mode = ['_music', '_speech']
for m in mode:
bps = ['20kbps', '28kbps', '32kbps', '40kbps', '48kbps', '64kbps']
for b in bps:
for i in range(count):
input = 'E:\\weiwei\\data_for_opus\\input\\' + f + str(i) + '.wav'
output = 'E:\\weiwei\\data_for_opus\\output\\' + f + '_comp5_' + b + m + str(i) + '.wav'
# print(input, output)
startvqt(srcFile=input, degFile=output, samplerate=48000)
print(output, global_result.mosResult['mos'])
if __name__ == '__main__':
input = r'E:\01_MOS_AUTO_TESTER\testSrcFiles\Speech_16000\female\femalePolqaWB.pcm'
output = r'E:\02_POLQA_RESULT\testDstFiles\NERTC_honor8x_iphone11_V4.3.0\L\Speech_16000\NONE\female\femalePolqaWB\femalePolqaWB_20210601201611_O_ManualTest_My Phone2_20210601201611_p_0.0.pcm'
output2 = r'E:\02_POLQA_RESULT\testDstFiles\NERTC_honor8x_iphone11_V4.3.0\L\Speech_16000\NONE\female\femalePolqaWB\femalePolqaWB_20210601201541_O_ManualTest_My Phone2_20210601201541_p_4.19.pcm'
output3 = r'E:\02_POLQA_RESULT\testDstFiles\123.pcm'
startvqt(srcFile=input, degFile=output3,
samplerate=16000)
print(global_result.mosResult['mos'])
VQTdll.Disconnect()
| AlgorithmLib | /AlgorithmLib-4.0.3.tar.gz/AlgorithmLib-4.0.3/algorithmLib/POLQA/POLQA.py | POLQA.py |
import copy
import sys,os
from os import path
sys.path.append(os.path.dirname(path.dirname(__file__)))
sys.path.append(path.dirname(__file__))
from commFunction import getip,log_time,global_result,sftp_connect,sftp_put,sftp_disconnect,get_rms
import shutil
from socketClient import SocketClient
import numpy as np
def polqa_client_test(src,test,samplerate,mode=0):
curip = getip()
curtime = log_time()
curpath = str(curip) + '_'+str(curtime)
os.mkdir(curpath)
if mode == 1:
f = open(test, "rb")
curaudiodata = np.fromfile(f, dtype=np.int16)
curaudiodata = curaudiodata.astype(np.float64)
currms = get_rms(curaudiodata)
adjustlevel = -26 - currms
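# normalize the test file to a -26 dB RMS target: a gain of adjustlevel dB corresponds
# to the linear scale factor 10**(adjustlevel/20) applied below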
factor = 10 ** (adjustlevel / 20)
urframe = curaudiodata * factor
urframe = urframe.astype(np.int16)
urframe.tofile(test)
f.close()
curdata = global_result.get_data()
curdata['module'] = 'clientA'
curdata['method'] = 'requestA'
curdata['samplerate'] = samplerate
curdata['token'] = curpath
curdata['srcFile'] = os.path.basename(src)
curdata['testFile'] = os.path.basename(test)
#ssh
shutil.copy(src,curpath)
shutil.copy(test, curpath)
dstpath = '/home/netease/polqa'
# stfp
client,sftp = sftp_connect(global_result.username,global_result.password,global_result.HOST,port=global_result.sftpPort)
sftp_put(sftp,curpath, dstpath)
sftp_disconnect(client)
shutil.rmtree(curpath)
# get result
socket = SocketClient(global_result.machost,global_result.PORT)
try:
result = socket.sender(curdata)
print(result['result'])
except:
socket.close()
return None
return result['result']
if __name__ == '__main__':
import platform
print(platform.uname())
print(sys.platform)
polqa_client_test(src='src.pcm',test='test.pcm',samplerate=48000,mode=0)
exit(0)
# client, sftp = sftp_connect(username, password, serverIP, port=port)
# sftp_get(sftp, '/home/netease/polqa_result/' + '192.168.1.3_2021-08-18-17-52-35' + '.ress', 'result')
src = r'D:\0\speech_cn_minus_13.pcm'
test1 = r'D:\0\mixDstFile_minus_13.pcm'
test2 = r'D:\0\mixDstFile_minus_13_-6.pcm'
refout = r'E:\AIVAD\OTHER\ref_out0_01.pcm'
file = r'E:\AIVAD\OTHER\reverse0_01.pcm'
info = polqa_client_test(src, test1, 48000)
info = polqa_client_test(src, test2, 48000)
exit(0)
path = r'E:\02_POLQA_RESULT\testDstFiles\NERTC_iphone11_honor8x_V4.3.0\L\Speech_48000\NONE\female\femalePOLQASWB'
src = r'E:\01_MOS_AUTO_TESTER\testSrcFiles\Speech_48000\female\femalePOLQASWB.pcm'
filelist = []
f = open('time.txt','w')
get_file_path(path,filelist,[])
print(filelist)
#print(exists_remote('10.219.33.45','/home/netease/polqa_result/455.ss'))
for a in range (1000):
for file in filelist:
print('*****************************************************')
print('current testfile name is {0}, srcfile name is {1}'.format(file,src))
begin_time = datetime.datetime.now()
info = polqa_client_test(src, file,48000)
end_time = datetime.datetime.now()
duration = end_time - begin_time
print('time duration is {}'.format(duration))
f.writelines('*****************************************************\n')
f.writelines('file name is {}\n'.format(file))
f.writelines('result is {}\n'.format(str(info)))
f.writelines('time duration is {}\n'.format(duration))
f.close()
#vqtDisConnect
# for a in range(100):
# polqa_client_test(srcfile,testfile)
| AlgorithmLib | /AlgorithmLib-4.0.3.tar.gz/AlgorithmLib-4.0.3/algorithmLib/POLQA/polqa_client.py | polqa_client.py |
import ctypes,platform
import librosa
from ctypes import *
from formatConvert import pcm2wav
import sys
sys.path.append('../')
from commFunction import get_data_array,get_rms
from PCC.Pearson_CC import get_max_cc_by_dll
from scipy import signal
import numpy as np
def get_my_dll():
"""
:return:
"""
mydll = None
cur_paltform = platform.platform().split('-')[0]
if cur_paltform == 'Windows':
mydll = ctypes.windll.LoadLibrary(sys.prefix + '/pcc.dll')
if cur_paltform == 'macOS':
mydll = CDLL(sys.prefix + '/pcc.dylib')
if cur_paltform == 'Linux':
mydll = CDLL(sys.prefix + '/pcc.so')
return mydll
def cal_fullref_echo(reffile, testfile):
""""""
echoThreshold = 0.5
target_fs = 8000
framehLenth = int(0.2 * target_fs)
frameshift = int(0.1 * target_fs)
searchRange = int(1.8 * target_fs)
lowf = 100
hif = 7000
#use_section = [[16.,18.74],[19.09,21.76],[22.13,24.64],[25.11,27.85],[28.32,31]]
use_section = [[1.62, 3.84], [4.33, 6.8], [7.28, 10.2], [10.56, 13.23], [13.635, 16.57]]
refdata,fs1,ch = get_data_array(reffile)
testdata,fs2,ch = get_data_array(testfile)
refdata = band_pass_filter(lowf,hif,refdata,fs1)
testdata = band_pass_filter(lowf,hif,testdata,fs2)
refdata = librosa.resample(refdata.astype(np.float32), orig_sr=fs1 ,target_sr=target_fs)
testdata = librosa.resample(testdata.astype(np.float32), orig_sr=fs2 ,target_sr=target_fs)
suspectItems = []
for subsection in use_section:
startpoint = int(target_fs * subsection[0])
endpoint = int(target_fs * subsection[1])
caltimes = (endpoint - startpoint-framehLenth) // frameshift
for a in range(caltimes):
relstart = startpoint+frameshift * a
currefdata = refdata[relstart:relstart + framehLenth]
curtestdata = testdata[relstart:relstart + framehLenth+searchRange]
currefrms = get_rms(currefdata)
curitem = [currefdata, curtestdata, relstart / target_fs,currefrms,relstart]
suspectItems.append(curitem)
for suspectitem in suspectItems:
maxCoin, startpot = get_max_cc_by_dll(suspectitem[0], suspectitem[1], get_my_dll(), 3)
refeng = suspectitem[3]
testeng = get_rms(suspectitem[1][startpot:startpot+len((suspectitem[0]))])
echoTime = (suspectitem[-1]+startpot)/target_fs
srcTime = suspectitem[2]
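# decision rule: report an echo when the peak normalized cross-correlation exceeds the
# threshold and both the reference frame and the matched test segment carry real energy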
if maxCoin > echoThreshold and refeng > -45 and testeng > -35:
print('An echo was detected at the {}-second mark of the file with a magnitude of {} dB.'.format(echoTime,
testeng))
return True
return False
def band_pass_filter(lowfre,hifre,data,fs):
f1 = lowfre # lower cutoff frequency of the band-pass filter
f2 = hifre # upper cutoff frequency of the band-pass filter
Wn = [f1 / (fs / 2), f2 / (fs / 2)]
b, a = signal.butter(4, Wn, btype='band') # 4th-order Butterworth band-pass filter
# apply zero-phase filtering to the signal
filtered_data = signal.filtfilt(b, a, data)
return filtered_data
# plot the signal before and after filtering
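# Minimal usage sketch (the file name is a placeholder, not part of the library):
#
#   data, fs, ch = get_data_array('some_recording.wav')
#   filtered = band_pass_filter(100, 7000, data, fs)   # 100-7000 Hz, 4th-order Butterworth, zero-phase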
if __name__ == '__main__':
print('>>>>>>>>>>>>>')
ref = 'src_bak.wav'
test = 'pc_1.wav'
cal_fullref_echo(pcm2wav(ref),pcm2wav(test))
print('>>>>>>>>>>>>>')
ref = 'src_bak.wav'
test = 'pc_8.wav'
cal_fullref_echo(pcm2wav(ref),pcm2wav(test))
print('>>>>>>>>>>>>>')
# ref = 'src_bak.wav'
# test = '3.wav'
# cal_fullref_echo(pcm2wav(ref),pcm2wav(test))
# print('>>>>>>>>>>>>>')
# ref = 'src_bak.wav'
# test = '4.wav'
# cal_fullref_echo(pcm2wav(ref), pcm2wav(test))
# print('>>>>>>>>>>>>>')
pass
| AlgorithmLib | /AlgorithmLib-4.0.3.tar.gz/AlgorithmLib-4.0.3/algorithmLib/AEC_EVALUATION/FR_ECHO_DETECT.py | FR_ECHO_DETECT.py |
import sys,os
from os import path
sys.path.append('../')
from ctypes import *
from commFunction import emxArray_real_T,get_data_of_ctypes_
import ctypes
import platform
# * -------------------------------------------------------------------------
# * Arguments : const emxArray_real_T *sig_mic
# * const emxArray_real_T *sig_far
# * const emxArray_real_T *sig_ref
# * double fs_mic
# * double fs_far
# * double type
# * double *ERLE
# * double *output_std
# * double *residual_avgdB
# * double *err
# * Return Type : void
# */
# void ERLE_estimation(const emxArray_real_T *sig_mic, const emxArray_real_T
# *sig_far, const emxArray_real_T *sig_ref, double fs_mic,
# double fs_far, double type, double *ERLE, double
# *output_std, double *residual_avgdB, double *err)
def cal_erle(micFile = None,testFile =None, refFile =None,targetType=0):
"""
% type- input signal type:
% 0:Chinese
% 1:English
% 2:Single Digit
% 3:Music
Parameters
----------
inFile
output
refFile
targetType
Returns
-------
"""
instruct,insamplerate,_ = get_data_of_ctypes_(micFile,True)
teststruct,outsamplerate,_ = get_data_of_ctypes_(testFile,True)
refstruct, refsamplerate, _ = get_data_of_ctypes_(refFile,True)
# if refsamplerate != testsamplerate :
# raise TypeError('Different format of ref and test files!')
mydll = None
cur_paltform = platform.platform().split('-')[0]
if cur_paltform == 'Windows':
mydll = ctypes.windll.LoadLibrary(sys.prefix + '/ERLE_estimation.dll')
if cur_paltform == 'macOS':
mydll = CDLL(sys.prefix + '/ERLE_estimation.dylib')
if cur_paltform == 'Linux':
mydll = CDLL(sys.prefix + '/ERLE_estimation.so')
mydll.ERLE_estimation.argtypes = [POINTER(emxArray_real_T),POINTER(emxArray_real_T),POINTER(emxArray_real_T),c_double,c_double,c_double,POINTER(c_double),POINTER(c_double),POINTER(c_double),POINTER(c_double)]
data_format = c_double*11
gain_table = data_format()
DR = data_format()
ERLE,output_std,err,residual_avgdB = c_double(0.0),c_double(0.0),c_double(0.0),c_double(0.0)
mydll.ERLE_estimation(byref(instruct),byref(teststruct),byref(refstruct),c_double(insamplerate),c_double(outsamplerate),c_double(targetType),byref(ERLE),byref(output_std),byref(residual_avgdB),byref(err))
print(err.value)
print(ERLE.value,output_std.value,residual_avgdB.value)
#if err.value == 0.0:
return ERLE.value,output_std.value,residual_avgdB.value
# else:
# return None,None,None
if __name__ == '__main__':
import platform
print(platform.platform().split('-')[0])
# micfile = r'C:\Users\vcloud_avl\Documents\我的POPO\0\stdRefFile.wav'
# test = r'C:\Users\vcloud_avl\Documents\我的POPO\0\mixDstFile.wav'
# ref = R'C:\Users\vcloud_avl\Documents\我的POPO\0\ref_cn.wav'
micfile = r'D:\MARTIN\audiotestalgorithm-master\algorithmLib\AEC_EVALUATION\agoraTestCase_03_None_None\agora_near\0\stdRefFile.wav'
test = r'D:\MARTIN\audiotestalgorithm-master\algorithmLib\AEC_EVALUATION\agoraTestCase_03_None_None\agora_near\0\mixDstFile.wav'
ref = r'D:\MARTIN\audiotestalgorithm-master\algorithmLib\AEC_EVALUATION\agoraTestCase_03_None_None\agora_near\0\ref_cn.wav'
ERLE,output_std,residual_avgdB = cal_erle(micFile=micfile,testFile=test,refFile=ref,targetType=0)
print('ERLE:{},output_std:{},residual_avgdB:{}'.format(ERLE,output_std,residual_avgdB))
| AlgorithmLib | /AlgorithmLib-4.0.3.tar.gz/AlgorithmLib-4.0.3/algorithmLib/AEC_EVALUATION/ERLE_ETSIMATION.py | ERLE_ETSIMATION.py |
import sys,os
from os import path
sys.path.append('../')
from ctypes import *
from commFunction import emxArray_real_T,get_data_of_ctypes_,write_ctypes_data_2_file_,get_none_data_of_ctypes_
import ctypes,platform
def MATCH_AEC(refFile=None, testFile=None, caliFile=None,outFile=None,targetType=0):
"""
% type- input signal type:
% 0:Chinese
% 1:English
% 2:Single Digit
% 3:Music
"""
refstruct,refsamplerate,reflen = get_data_of_ctypes_(refFile)
teststruct,testsamplerate,testlen = get_data_of_ctypes_(testFile)
calistruct,calisamplerate,_ = get_data_of_ctypes_(caliFile)
outlen = max(reflen, testlen)
outStruct = get_none_data_of_ctypes_(outlen)
if refsamplerate != testsamplerate or testsamplerate!= calisamplerate:
raise TypeError('Different format of ref and test files!')
mydll = None
cur_paltform = platform.platform().split('-')[0]
if cur_paltform == 'Windows':
mydll = ctypes.windll.LoadLibrary(sys.prefix + '/matchsig_aec.dll')
if cur_paltform == 'macOS':
mydll = CDLL(sys.prefix + '/matchsig_aec.dylib')
if cur_paltform == 'Linux':
mydll = CDLL(sys.prefix + '/matchsig_aec.so')
mydll.matchsig_aec.argtypes = [POINTER(emxArray_real_T),POINTER(emxArray_real_T),POINTER(emxArray_real_T),c_double,c_double,POINTER(emxArray_real_T),POINTER(c_double)]
err,fs_out,type = c_double(0.0),c_double(refsamplerate),c_double(targetType)
mydll.matchsig_aec(byref(teststruct),byref(refstruct),byref(calistruct),refsamplerate,type,byref(outStruct),byref(err))
if err.value == 0.0:
if outFile is not None:
write_ctypes_data_2_file_(outFile, outStruct,refsamplerate)
return True
else:
return False
if __name__ == '__main__':
path = r'D:\AudioPublicWork\3a_auto_test_porject\3a_auto_test_porject\08_TestDstFiles\sdk_zego_vivo_y3hf_music_V_shengbo_compare\aec\Speech\TestCase_01_None_None\near_cn'
ref = path +'\\' +'far_cn.wav'
test = path +'\\' + 'mixDstFile.wav'
cali = path +'\\' + 'mixDstFile.wav'
outFile = r'C:\Users\vcloud_avl\Downloads\Speech\TestCase_01_None_None\near_cn\target.wav'
delay = MATCH_AEC(refFile=ref,testFile=test,caliFile=cali,outFile=outFile,targetType=0)
print(delay)
pass
| AlgorithmLib | /AlgorithmLib-4.0.3.tar.gz/AlgorithmLib-4.0.3/algorithmLib/AEC_EVALUATION/MATCH_AEC.py | MATCH_AEC.py |
import sys,os
from ctypes import *
import ctypes
import numpy as np
# * -------------------------------------------------------------------------
# * Arguments : const emxArray_real_T *ref_in
# * const emxArray_real_T *sig_in
# * double fs
# * double *delay
# * double *err
# * Return Type : void
# */
# void delay_estimation_15s(const emxArray_real_T *ref_in, const emxArray_real_T
# *sig_in, double fs, double *delay, double *err)
def cal_PCC(refData, testData,calSize,mydll):
"""
"""
refstruct = np.ascontiguousarray(refData, dtype=c_double).ctypes.data_as(ctypes.POINTER(c_double))
teststruct = np.ascontiguousarray(testData, dtype=c_double).ctypes.data_as(ctypes.POINTER(c_double))
mydll.Pearson_Correlation.argtypes = [POINTER(c_double),POINTER(c_double),c_long,POINTER(c_double)]
pcc= c_double(0.0)
mydll.Pearson_Correlation(refstruct,teststruct,c_long(calSize),byref(pcc))
return pcc.value
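# For reference: Pearson_Correlation in the DLL computes the Pearson correlation
# coefficient over the first calSize samples, so its result should match
# np.corrcoef(refData[:calSize], testData[:calSize])[0, 1]; the __main__ block below
# benchmarks the two against each other.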
def get_max_cc_by_dll(refData, testData,mydll,step):
"""
(double *x, double *y,long xLenth,long yLenth,long step,double *maxcc,long *startPoint)
"""
refstruct = np.ascontiguousarray(refData, dtype=c_double).ctypes.data_as(ctypes.POINTER(c_double))
teststruct = np.ascontiguousarray(testData, dtype=c_double).ctypes.data_as(ctypes.POINTER(c_double))
mydll.get_max_pcc.argtypes = [POINTER(c_double),POINTER(c_double),c_long,c_long,c_long,POINTER(c_double),POINTER(c_long)]
max_cc,start_point = c_double(0.0),c_long(0)
mydll.get_max_pcc(refstruct,teststruct,c_long(len(refData)),c_long(len(testData)),c_long(step),byref(max_cc),byref(start_point))
return max_cc.value,start_point.value
if __name__ == '__main__':
ref = r'C:\Users\vcloud_avl\Documents\我的POPO\src.wav'
test = r'C:\Users\vcloud_avl\Documents\我的POPO\test.wav'
from commFunction import get_data_array_double
import platform,time
newdata,fs,ch = get_data_array_double(ref)
refdata,fs,ch = get_data_array_double(test)
newdata = newdata[:10000]
refdata = refdata[:10000]
mydll = None
cur_paltform = platform.platform().split('-')[0]
if cur_paltform == 'Windows':
mydll = ctypes.windll.LoadLibrary(sys.prefix + '/pcc.dll')
if cur_paltform == 'macOS':
mydll = CDLL(sys.prefix + '/pcc.dylib')
if cur_paltform == 'Linux':
mydll = CDLL(sys.prefix + '/pcc.so')
#mydll.Pearson_Correlation.argtypes = [POINTER(c_double), POINTER(c_double), c_long, POINTER(c_double)]
# newdata = np.arange(0,20000,2)
# refdata = np.arange(10000,30000,2)
# print(len(newdata),len(refdata))
# print(time.time())
# print(np.corrcoef(refdata,newdata))
# print(time.time())
# print(cal_PCC(refdata,newdata,10000,mydll))
# print(time.time())
suma,sumb = 0,0
for e in range(10000):
a = time.time()
res = np.corrcoef(refdata, newdata)
print(res)
b = time.time()
suma += b - a
c = time.time()
res = cal_PCC(refdata, newdata, 10000, mydll)
print(res)
d = time.time()
sumb += d - c
print(suma/10000)
print(sumb/10000)
pass
| AlgorithmLib | /AlgorithmLib-4.0.3.tar.gz/AlgorithmLib-4.0.3/algorithmLib/PCC/Pearson_CC.py | Pearson_CC.py |
import sys,os
from os import path
sys.path.append(os.path.dirname(path.dirname(__file__)))
from ctypes import *
from commFunction import emxArray_real_T,get_data_of_ctypes_
import ctypes
import platform
# void SNR_transient(const emxArray_real_T *ref, const emxArray_real_T *ref_noise,
# const emxArray_real_T *sig, double fs, double *SNR, double
# *noise_dB, double *err)
# void attackrelease_estimation(const emxArray_real_T *ref, const emxArray_real_T *
# sig, double fs_ref, double fs_sig, double *time_attack, double *time_release,
# double *err)
def cal_attack_release(refFile=None, testFile=None):
"""
"""
refstruct,refsamplerate,_ = get_data_of_ctypes_(refFile,True)
teststruct,testsamplerate,_ = get_data_of_ctypes_(testFile,True)
if refsamplerate != testsamplerate :
raise TypeError('Different format of ref and test files!')
mydll = None
cur_paltform = platform.platform().split('-')[0]
if cur_paltform == 'Windows':
mydll = ctypes.windll.LoadLibrary(sys.prefix + '/attackrelease.dll')
if cur_paltform == 'macOS':
mydll = CDLL(sys.prefix + '/attackrelease.dylib')
if cur_paltform == 'Linux':  # platform.platform() reports 'Linux-...' with a capital L
mydll = CDLL(sys.prefix + '/attackrelease.so')
mydll.attackrelease_estimation.argtypes = [POINTER(emxArray_real_T),POINTER(emxArray_real_T),c_double,c_double, POINTER(c_double),POINTER(c_double),POINTER(c_double)]
time_attack,time_release,err = c_double(0.0),c_double(0.0),c_double(0.0)
mydll.attackrelease_estimation(byref(refstruct),byref(teststruct),c_double(refsamplerate),c_double(refsamplerate),byref(time_attack),byref(time_release),byref(err))
if err.value == 0.0:
return time_attack.value,time_release.value
else:
return None
if __name__ == '__main__':
file = r'C:\Users\vcloud_avl\Documents\我的POPO\0\speech_attackrelease.wav'
test = r'C:\Users\vcloud_avl\Documents\我的POPO\0\speech_attackrelease.wav'
print(cal_attack_release(refFile=file,testFile=test))
pass
| AlgorithmLib | /AlgorithmLib-4.0.3.tar.gz/AlgorithmLib-4.0.3/algorithmLib/AGC_EVALUATION/CAL_ATTACK_RELEASE.py | CAL_ATTACK_RELEASE.py |
import sys,os
from os import path
sys.path.append(os.path.dirname(path.dirname(__file__)))
from ctypes import *
from commFunction import emxArray_real_T,get_data_of_ctypes_
import ctypes
# void gaintable_estimation(const emxArray_real_T *ref, const emxArray_real_T *sig,
# double fs_ref, double fs_sig, double type, double gain_table[11], double DR[11],
# double *limiter, double *err)
def cal_gain_table(refFile=None, testFile=None,targetType=0):
'''
Parameters
----------
refFile
testFile
targetType 0:speech,1:music
Returns
-------
'''
refstruct,refsamplerate,_ = get_data_of_ctypes_(refFile,True)
teststruct,testsamplerate,_ = get_data_of_ctypes_(testFile,True)
if refsamplerate != testsamplerate :
raise TypeError('Different format of ref and test files!')
import platform
mydll = None
cur_paltform = platform.platform().split('-')[0]
if cur_paltform == 'Windows':
mydll = ctypes.windll.LoadLibrary(sys.prefix + '/gaintable.dll')
if cur_paltform == 'macOS':
mydll = CDLL(sys.prefix + '/gaintable.dylib')
if cur_paltform == 'Linux':
mydll = CDLL(sys.prefix + '/gaintable.so')
mydll.gaintable_estimation.argtypes = [POINTER(emxArray_real_T),POINTER(emxArray_real_T),c_double,c_double,c_double,POINTER(c_double),POINTER(c_double),POINTER(c_double),POINTER(c_double)]
data_format = c_double*11
gain_table = data_format()
DR = data_format()
limiter,err = c_double(0.0),c_double(0.0)
mydll.gaintable_estimation(byref(refstruct),byref(teststruct),c_double(refsamplerate),c_double(refsamplerate),c_double(targetType),gain_table,DR,byref(limiter),byref(err))
if err.value == 0.0:
return limiter.value,gain_table,DR
else:
return None
if __name__ == '__main__':
file = r'C:\Users\vcloud_avl\Documents\我的POPO\0\speech_gaintable.wav'
test = r'C:\Users\vcloud_avl\Documents\我的POPO\0\speech_gaintable.wav'
lim,gain_table,DR = cal_gain_table(refFile=file,testFile=test,targetType=0)
print(lim,gain_table[0],DR[2])
print(gain_table,DR)
for a in gain_table:
print(a)
for a in DR:
print(a)
pass
| AlgorithmLib | /AlgorithmLib-4.0.3.tar.gz/AlgorithmLib-4.0.3/algorithmLib/AGC_EVALUATION/CAL_GAIN_TABLE.py | CAL_GAIN_TABLE.py |
import sys,os
from os import path
sys.path.append(os.path.dirname(path.dirname(__file__)))
from commFunction import get_rms,make_out_file,get_ave_rms,get_peak_rms
import numpy as np
from SNR_ESTIMATION.MATCH_SIG import match_sig
from timeAligment.time_align import cal_fine_delay_of_specific_section,cal_fine_delay
from commFunction import get_data_array
import scipy.signal as sg
speechSection = [12, 15]
noiseSection = [0, 10]
FRAME_LEN = 9600
frame_shift = 4800
def get_maxima(values:np.ndarray):
"""极大值"""
max_index = sg.argrelmax(values)[0]
return max_index,values[max_index]
def get_minima(values:np.ndarray):
"""极小值"""
min_index = sg.argrelmin(values)[0]
return min_index,values[min_index]
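# e.g. get_maxima(np.array([0., 2., 1., 3., 0.])) -> (array([1, 3]), array([2., 3.])),
# since scipy.signal.argrelmax/argrelmin return the indices of strict local extrema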
def get_data_pairs(srcFile=None,testFile=None):
"""
Parameters
----------
srcFile
testFile
Returns
-------
"""
#samples = match_sig(refFile=srcFile, testFile=testFile)
samples = cal_fine_delay_of_specific_section(srcFile, testFile, speech_section=[[12.3,14.5]], targetfs=8000, outfile=None)
if samples is None:
return None
dataSrc, fs, chn = get_data_array(srcFile)
dataTest, fs2, chn2 = get_data_array(testFile)
print(dataTest,dataSrc,samples)
assert fs == fs2
assert chn2 == chn
assert samples > 0
dataTest = dataTest[int(samples*fs):]
M,N = len(dataSrc),len(dataTest)
targetLen = min(M,N)
return dataSrc[:targetLen],dataTest[:targetLen],fs,chn
def cal_noise_converge(dataSrc,dataTest,fs,chn):
"""
Parameters
----------
dataSrc
dataTest
Returns
-------
"""
srcSpeechLevel = get_rms(dataSrc[fs*speechSection[0]:fs*speechSection[1]])
curSpeechLevel = get_rms(dataTest[fs*speechSection[0]:fs*speechSection[1]])
# 20*log10(V1/V2) = X, so a level difference of X dB maps to a linear gain of 10**(X/20)
gain = np.power(10,(srcSpeechLevel - curSpeechLevel)/20)
newData = dataTest.astype(np.float32) * gain
make_out_file('source.wav', dataSrc.astype(np.int16), fs, chn)
make_out_file('target.wav',newData.astype(np.int16),fs,chn)
n_sengen = len(newData) // FRAME_LEN
MAX_RMS = -120
for a in range(n_sengen):
curLevel = get_rms(newData[a*FRAME_LEN:(a+1)*FRAME_LEN])
print(MAX_RMS,curLevel)
if curLevel > MAX_RMS:
MAX_RMS = curLevel
if curLevel < MAX_RMS - 12:
break
converge = a * FRAME_LEN / fs
if converge >= noiseSection[1]:
nsLevel = 0.0
else:
nsLevel = get_ave_rms(dataSrc[int(converge * fs) :noiseSection[1]* fs]) - get_ave_rms(newData[int(converge * fs) :noiseSection[1]* fs])
return converge, nsLevel
#TODO convergence time
#TODO noise suppression amount
def cal_transient_noise_Supp_by_ref(srcFile,testFile,speechSection,musicMode=False):
"""
:return:
"""
# FRAME_LEN = 9600
# frame_shift = 4800
if musicMode:
before_point = 13
end_point = 32
else:
before_point = 13
end_point = 26.5
srcdata, fs, ch = get_data_array(srcFile)
testdata, fs, ch = get_data_array(testFile)
FRAME_LEN = fs*1
cal_level = 999
# compute the level difference between the recorded file and the source file and compensate for it
for singleSection in speechSection:
start = int(fs*singleSection[0])
end = int(fs*singleSection[1])
curPeakSrc = get_peak_rms(srcdata[start:end])
curPeakDst = get_peak_rms(testdata[start:end])
curdiv =curPeakSrc -curPeakDst
if cal_level > curdiv:
cal_level = curdiv
factor = 10 ** (cal_level / 20)
AdjustTestData = testdata * factor
AdjustTestData = AdjustTestData.astype(np.int16)
print(cal_level)
n_sengen = (len(srcdata)) // FRAME_LEN
startPoint = speechSection[0][0]
endPoint = speechSection[-1][1]
init_Level,init_cnt = 0,0
before_speech_level,before_sppech_cnt = 0,0
before_speech_floor = 0
after_speech_level, after_sppech_cnt = 0, 0
in_speech_level,in_speech_cnt = 0,0
between_speech_level,bwtween_speech_cnt = 0,0
convert_index = 0
std_sample_list = []
for a in range(n_sengen):
cursrcLevel = get_rms(srcdata[a * FRAME_LEN:a * FRAME_LEN + FRAME_LEN])
curdstLevel = get_rms(AdjustTestData[a * FRAME_LEN:a * FRAME_LEN + FRAME_LEN])
curdstlevel_real = get_rms(testdata[a * FRAME_LEN:a * FRAME_LEN + FRAME_LEN])
curdiv = curdstLevel - cursrcLevel
if (a * FRAME_LEN) > before_point * fs and (a * FRAME_LEN) + FRAME_LEN < startPoint*fs:
std_sample_list.append(curdstlevel_real)
if (a * FRAME_LEN) > endPoint * fs and (a * FRAME_LEN) + FRAME_LEN < end_point*fs:
std_sample_list.append(curdstlevel_real)
if a >= 0 and a <= 2:
init_Level += curdiv
init_cnt += 1
if (a * FRAME_LEN) > (startPoint-1) * fs and (a * FRAME_LEN) + FRAME_LEN < startPoint*fs:
before_speech_level += curdiv
before_speech_floor += curdstlevel_real
before_sppech_cnt += 1
if (a * FRAME_LEN) > (endPoint) * fs:
after_speech_level += curdiv
after_sppech_cnt += 1
for scnt in speechSection:
if (a * FRAME_LEN) > scnt[0] * fs and (a * FRAME_LEN) + FRAME_LEN < scnt[1] * fs:
in_speech_level += curdiv
in_speech_cnt += 1
if len(speechSection) > 1:
for i,scnt in enumerate(speechSection):
if i == 0:
continue
if (a * FRAME_LEN) > speechSection[i-1][1] * fs and (a * FRAME_LEN) + FRAME_LEN < \
speechSection[i][0] * fs:
between_speech_level += curdiv
bwtween_speech_cnt += 1
init_Level = init_Level/init_cnt
before_speech_level = before_speech_level/before_sppech_cnt
after_speech_level = after_speech_level/after_sppech_cnt
in_speech_level = in_speech_level/in_speech_cnt
before_speech_floor = before_speech_floor/before_sppech_cnt
if bwtween_speech_cnt != 0:
between_speech_level = between_speech_level/bwtween_speech_cnt
else:
between_speech_level = None
for a in range(n_sengen):
cursrcLevel = get_rms(srcdata[a * FRAME_LEN:a * FRAME_LEN + FRAME_LEN])
curdstLevel = get_rms(AdjustTestData[a * FRAME_LEN:a * FRAME_LEN + FRAME_LEN])
curdiv = curdstLevel - cursrcLevel
print(curdiv)
if a >= 1 and curdiv < init_Level - 6:
break
converage = (a * FRAME_LEN)/fs
print(before_speech_floor)
print(converage)
if init_Level - before_speech_level < 3:
if before_speech_floor < -65:
converage = 0.0
post_std = np.std(std_sample_list, ddof=1)
noise_floor = sum(std_sample_list)/len(std_sample_list)
print(init_Level,before_speech_level,after_speech_level,in_speech_level,between_speech_level,before_speech_floor,converage,post_std,noise_floor)
return converage, before_speech_level, after_speech_level, in_speech_level, between_speech_level, post_std,noise_floor
def cal_noise_Supp_by_ref(srcFile,testFile,speechSection,musicMode=False):
"""
:return:
"""
FRAME_LEN = 9600
frame_shift = 4800
if musicMode:
before_point = 13
end_point = 32
else:
before_point = 13
end_point = 26.5
srcdata, fs, ch = get_data_array(srcFile)
testdata, fs, ch = get_data_array(testFile)
cal_level = 999
# compute the level difference between the recorded file and the source file and compensate for it
for singleSection in speechSection:
start = int(fs*singleSection[0])
end = int(fs*singleSection[1])
curPeakSrc = get_peak_rms(srcdata[start:end])
curPeakDst = get_peak_rms(testdata[start:end])
curdiv =curPeakSrc -curPeakDst
if cal_level > curdiv:
cal_level = curdiv
factor = 10 ** (cal_level / 20)
AdjustTestData = testdata * factor
AdjustTestData = AdjustTestData.astype(np.int16)
print(cal_level)
n_sengen = (len(srcdata) - FRAME_LEN) // frame_shift
startPoint = speechSection[0][0]
endPoint = speechSection[-1][1]
init_Level,init_cnt = 0,0
before_speech_level,before_sppech_cnt = 0,0
before_speech_floor = 0
after_speech_level, after_sppech_cnt = 0, 0
in_speech_level,in_speech_cnt = 0,0
between_speech_level,bwtween_speech_cnt = 0,0
convert_index = 0
std_sample_list = []
for a in range(n_sengen):
cursrcLevel = get_rms(srcdata[a * frame_shift:a * frame_shift + FRAME_LEN])
curdstLevel = get_rms(AdjustTestData[a * frame_shift:a * frame_shift + FRAME_LEN])
curdstlevel_real = get_rms(testdata[a * frame_shift:a * frame_shift + FRAME_LEN])
curdiv = curdstLevel - cursrcLevel
if (a * frame_shift) > before_point * fs and (a * frame_shift) + FRAME_LEN < startPoint*fs:
std_sample_list.append(curdstlevel_real)
if (a * frame_shift) > endPoint * fs and (a * frame_shift) + FRAME_LEN < end_point*fs:
std_sample_list.append(curdstlevel_real)
if a >= 1 and a <= 3:
init_Level += curdiv
init_cnt += 1
if (a * frame_shift) > (startPoint-1) * fs and (a * frame_shift) + FRAME_LEN < startPoint*fs:
before_speech_level += curdiv
before_speech_floor += curdstlevel_real
before_sppech_cnt += 1
if (a * frame_shift) > (endPoint) * fs:
after_speech_level += curdiv
after_sppech_cnt += 1
for scnt in speechSection:
if (a * frame_shift) > scnt[0] * fs and (a * frame_shift) + FRAME_LEN < scnt[1] * fs:
in_speech_level += curdiv
in_speech_cnt += 1
if len(speechSection) > 1:
for i,scnt in enumerate(speechSection):
if i == 0:
continue
if (a * frame_shift) > speechSection[i-1][1] * fs and (a * frame_shift) + FRAME_LEN < \
speechSection[i][0] * fs:
between_speech_level += curdiv
bwtween_speech_cnt += 1
init_Level = init_Level/init_cnt
before_speech_level = before_speech_level/before_sppech_cnt
after_speech_level = after_speech_level/after_sppech_cnt
in_speech_level = in_speech_level/in_speech_cnt
before_speech_floor = before_speech_floor/before_sppech_cnt
if bwtween_speech_cnt != 0:
between_speech_level = between_speech_level/bwtween_speech_cnt
else:
between_speech_level = None
for a in range(n_sengen):
cursrcLevel = get_rms(srcdata[a * frame_shift:a * frame_shift + FRAME_LEN])
curdstLevel = get_rms(AdjustTestData[a * frame_shift:a * frame_shift + FRAME_LEN])
curdiv = curdstLevel - cursrcLevel
print(curdiv)
if a >= 1 and curdiv < init_Level - 6:
break
converage = (a * frame_shift)/fs
print(before_speech_floor)
print(converage)
if init_Level - before_speech_level < 3:
if before_speech_floor < -65:
converage = 0.0
post_std = np.std(std_sample_list, ddof=1)
noise_floor = sum(std_sample_list)/len(std_sample_list)
print(init_Level,before_speech_level,after_speech_level,in_speech_level,between_speech_level,before_speech_floor,converage,post_std,noise_floor)
return converage, before_speech_level, after_speech_level, in_speech_level, between_speech_level, post_std,noise_floor
def cal_noise_Supp(srcFile,testFile,nslabmode=False,start=0.2,end=15.8,noiseType='None'):
"""
Parameters
----------
data
Returns
-------
"""
nosieVariable = {'bubble': 4, 'car': 4.5, 'restaurant': 7,'white':3,'traffic':4,'metro':3.5,'None':4}
if nslabmode:
#determine the calculation boundaries
dataSrc, fs, chn = get_data_array(testFile)
overallLen = len(dataSrc)
lowTmp,upperTmp = 0,overallLen
if start is None or start < 0.1:
dataFloor = dataSrc[0:int(0.1*fs)]
Floor = get_rms(dataFloor)
else:
# compute the source noise floor
lowTmp = int(start * fs)
dataFloor = dataSrc[0:lowTmp]
Floor = get_rms(dataFloor)
if end is None:
dataDegrad = dataSrc[overallLen-fs:overallLen]
else:
upperTmp = int(end*fs)
dataDegrad = dataSrc[int((end-2)*fs):upperTmp]
Degrad = get_rms(dataDegrad)
# compute per-frame RMS and find the maximum
dataSrc = dataSrc[lowTmp:upperTmp]
datanew = dataSrc.astype(np.float32)
n_sengen = (len(datanew)-FRAME_LEN)//frame_shift
MAX_RMS,maxindex,MIN_RMS,minindex = -120,0,0,0
index = 0
x,y = [],[]
for a in range(n_sengen):
index += 1
curLevel = get_rms(datanew[a * frame_shift:a * frame_shift + FRAME_LEN])
if curLevel > MAX_RMS:
MAX_RMS = curLevel
maxindex = index
x.append(index*frame_shift/fs)
y.append(curLevel)
# find the first inflection point (level drops below the maximum by more than half the expected noise variability plus 3 dB)
for i,curlel in enumerate(y):
if i < maxindex:
continue
else:
if curlel < MAX_RMS - nosieVariable[noiseType]/2-3:
break
firindex = i
firstconvertime = (i) * frame_shift / fs
#compute the prior noise statistics (std over roughly the last two seconds of frames)
lastindex = (len(datanew) - 2 * fs)/frame_shift
post = y[int(lastindex):]
pre_std = np.std(post, ddof=1)
#find the minimum level
index = 0
for a in range(n_sengen):
index += 1
curLevel = get_rms(datanew[a * frame_shift:a * frame_shift + FRAME_LEN])
if curLevel < MIN_RMS and index > firindex:
MIN_RMS = curLevel
minindex = index
# search the local minima
minimadex,minmavalue = get_minima(np.array(y))
for a in range (len(minimadex)):
if minmavalue[a] < MIN_RMS + 2 and minimadex[a] < minindex:
MIN_RMS = minmavalue[a]
minindex = minimadex[a]
break
#find the second inflection point
revers = y[::-1]
for i,curlel in enumerate(revers):
if i < len(y)-minindex:
continue
if curlel > MIN_RMS + 2*pre_std:
break
secondConvertime = (len(y)-i) * frame_shift / fs
#compute the posterior noise
postdata = y[int(len(y)-i):]
post_std = np.std(postdata, ddof=1)
post_Degrad = get_rms(datanew[int(secondConvertime*fs):])
noise_src = MAX_RMS - nosieVariable[noiseType] / 2
post_src = get_rms(datanew[:int(firstconvertime*fs)])
# print('firstconvertime is {}'.format(firstconvertime))
# print('secondConvertime is {}'.format(secondConvertime))
# print('prestd is {}'.format(pre_std))
# print('poststd is {}'.format(post_std))
# print('noise src is {}'.format(noise_src))
# print('post noise src is {}'.format(post_src))
# print('noise floor is {}'.format(Floor))
# print('noise Degrad is {}'.format(Degrad))
# print('post noise Degrad is {}'.format(post_Degrad))
# print('ns gain is {}'.format(post_src-post_Degrad))
# import matplotlib.pyplot as plt
# plt.plot(x,y)
# plt.show()
return firstconvertime,secondConvertime,Floor,post_src,post_Degrad,post_std
else:
result = get_data_pairs(srcFile=srcFile, testFile=testFile)
if result is not None:
srcdata, dstdata, fs, chn = result
return cal_noise_converge(srcdata,dstdata,fs,chn)
else:
return result
if __name__ == '__main__':
src = 'car_noise_speech.wav'
dst = 'speech_cn.wav'
dst2 = 'mixDstFile3.wav'
# dur = cal_noise_Supp(src,dst2,nslabmode=True)
speech_cn = r'music_piano.wav'
testfile = r'mixDstFile.wav'
mixfile = r'mixFile.wav'
# delay = cal_fine_delay(reffile=speech_cn,testfile=testfile,outfile='out.wav')
# dur = cal_noise_Supp_by_ref(srcFile=mixfile,testFile='out.wav',speechSection=[[17.32, 28.856]])
# dur = cal_noise_Supp_by_ref(srcFile='mixFile.wav', testFile='mixDstFile_noise_rematch.wav',
# speechSection=[[17.32, 28.856]],musicMode=True)
# filepath = r'D:/MARTIN/归档/'
# from commFunction import get_file_path
# noiseList = []
# get_file_path(filepath,noiseList,[])
# print(noiseList)
# for subfile in noiseList:
# curdata,fs,ch=get_data_array(subfile)
# curbase = os.path.basename(subfile)[:-4]
# overallarray = np.array([])
# for index in range(27):
# overallarray = np.concatenate((overallarray,curdata))
# outfile = "noise4speech/" + curbase + '_-20_20s.wav'
# make_out_file(outfile,overallarray,fs,ch)
#
# factor = 10 ** (-5 / 20)
# overallarray = overallarray * factor
# outfile = "noise4speech/" + curbase + '_-25_20s.wav'
# make_out_file(outfile,overallarray,fs,ch)
#
# overallarray = overallarray * factor
# outfile = "noise4speech/" + curbase + '_-30_20s.wav'
# make_out_file(outfile,overallarray,fs,ch)
#
# overallarray = np.array([])
# for index in range(33):
# overallarray = np.concatenate((overallarray,curdata))
# outfile = 'noise4music/' +curbase + '_-20_20s.wav'
# make_out_file(outfile,overallarray,fs,ch)
#
# factor = 10 ** (-5 / 20)
# overallarray = overallarray * factor
# outfile = "noise4music/" + curbase + '_-25_20s.wav'
# make_out_file(outfile, overallarray, fs, ch)
#
# overallarray = overallarray * factor
# outfile = "noise4music/" + curbase + '_-30_20s.wav'
# make_out_file(outfile, overallarray, fs, ch)
pass
| AlgorithmLib | /AlgorithmLib-4.0.3.tar.gz/AlgorithmLib-4.0.3/algorithmLib/Noise_Suppression/noiseFuction.py | noiseFuction.py |
import sys
import os
import time
from os import path
sys.path.append(os.path.dirname(path.dirname(__file__)))
from formatConvert.wav_pcm import wav2pcm,pcm2wav
from G160.G160 import cal_g160
from P563.P563 import cal_563_mos
from PESQ.PESQ import cal_pesq
from POLQA.polqa_client import polqa_client_test
from SDR.SDR import cal_sdr
from STI.cal_sti import cal_sti
from STOI.STOI import cal_stoi
from PEAQ.PEAQ import cal_peaq
from resample.resampler import resample,restruct
from timeAligment.time_align import cal_fine_delay,cal_fine_delay_of_specific_section
import os
import wave
import numpy as np
from ctypes import *
from SNR_ESTIMATION.MATCH_SIG import match_sig
from SNR_ESTIMATION.SNR_MUSIC import cal_snr_music
from SNR_ESTIMATION.SNR_TRANSIENT import cal_snr_transient
from AGC_EVALUATION.CAL_GAIN_TABLE import cal_gain_table
from AGC_EVALUATION.CAL_ATTACK_RELEASE import cal_attack_release
from AGC_EVALUATION.CAL_MUSIC_STABILITY import cal_music_stablility
from AGC_EVALUATION.CAL_DELAY import cal_DELAY
from AEC_EVALUATION.MATCH_AEC import MATCH_AEC
from AEC_EVALUATION.ERLE_ETSIMATION import cal_erle
from AEC_MOS.aecmos import cal_aec_mos
from MOS_INFER.run_predict import cal_mos_infer
from FUNCTION.audioFunction import isSlience,audioFormat,get_rms_level,get_effective_spectral,cal_pitch,cal_EQ
from Noise_Suppression.noiseFuction import cal_noise_Supp
from CLIPPING_DETECTION.audio_clip_detection import cal_clip_index
from AEC_EVALUATION.FR_ECHO_DETECT import cal_fullref_echo
allMetrics = ['G160','P563','POLQA','PESQ','STOI','STI','PEAQ','SDR',
'SII','LOUDNESS','MUSIC','TRANSIENT','MATCH','GAINTABLE',
'ATTACKRELEASE','MUSICSTA','AGCDELAY','SLIENCE','FORMAT',
'MATCHAEC','ERLE','AECMOS','AIMOS','TRMS','ARMS','NOISE','CLIP','DELAY','ECHO','SPEC','PITCH','EQ','MATCH2','MATCH3']
class computeAudioQuality():
def __init__(self,**kwargs):
"""
:param kwargs:
"""
#print(**kwargs)
self.__parse_para(**kwargs)
self.__chcek_valid()
pass
def __parse_para(self,**kwargs):
"""
:param kwargs:
:return:
"""
self.mertic = kwargs['metrics']
self.testFile = kwargs['testFile']
self.refFile = kwargs['refFile']
self.micFile = kwargs['micFile']
self.cleFile = kwargs['cleFile']
self.noiseFile = kwargs['noiseFile']
self.caliFile = kwargs['aecCaliFile']
self.outFile = kwargs['outFile']
self.samplerate = kwargs['samplerate']
self.bitwidth = kwargs['bitwidth']
self.channel = kwargs['channel']
self.refOffset = kwargs['refOffset']
self.testOffset = kwargs['refOffset']
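# NOTE: this reuses kwargs['refOffset']; if a separate test-side offset is expected,
# kwargs['testOffset'] is probably the intended key.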
self.maxComNLevel = kwargs['maxComNLevel']
self.speechPauseLevel = kwargs['speechPauseLevel']
self.audioType = kwargs["audioType"]
self.aecStartPoint = kwargs['aecStartPoint']
self.aecScenario = kwargs['aecScenario']
self.aecTargetType = kwargs["aecTargetType"]
self.rmsCalsection = kwargs["rmsCalsection"]
self.polqaMode = kwargs["polqaMode"]
self.pitchLogMode = kwargs["pitchLogMode"]
self.fineDelaySection = kwargs["fineDelaySection"]
#maxComNLevel=c_double(-48.0),speechPauseLevel=c_double(-35.0)
def __chcek_valid(self):
"""
:return:
"""
if self.mertic not in allMetrics:
raise ValueError('metric must be one of ' + str(allMetrics))
def __check_format(self,curWav):
"""
:param curWav:
:return:
"""
curType = os.path.splitext(curWav)[-1]
if curType !='.wav':
return self.channel,self.bitwidth,self.samplerate
wavf = wave.open(curWav,'rb')
curChannel = wavf.getnchannels()
cursamWidth = wavf.getsampwidth()
cursamplerate = wavf.getframerate()
wavf.close()
if curChannel != 1:
raise ValueError('wrong type of channel' + curWav)
if cursamWidth != 2:
raise ValueError('wrong type of samWidth' + curWav)
return curChannel,cursamWidth,cursamplerate
def __double_end_check(self):
"""
:return:
"""
if self.refFile is None or self.testFile is None:
raise EOFError('lack of inputfiles!')
if self.__check_format(self.testFile) != self.__check_format(self.refFile):
raise TypeError('there are different parametre in inputfiles!')
def __data_convert(self,ref,test):
"""
:return:
"""
with open(wav2pcm(ref), 'rb') as ref:
pcmdata = ref.read()
with open(wav2pcm(test), 'rb') as ref:
indata = ref.read()
ref = np.frombuffer(pcmdata, dtype=np.int16)
ins = np.frombuffer(indata, dtype=np.int16)
lenth = min(len(ref),len(ins))
return ref[:lenth],ins[:lenth]
def G160(self):
"""
:return:
# G160: no sample-rate restriction
# WAV/PCM input
"""
if self.cleFile is None or self.refFile is None or self.testFile is None:
raise EOFError('lack of inputfiles!')
if self.__check_format(self.testFile) != self.__check_format(self.refFile) or \
self.__check_format(self.testFile) != self.__check_format(self.cleFile):
raise TypeError('there are different parametre in inputfiles!')
return cal_g160(pcm2wav(self.cleFile,sample_rate=self.samplerate),pcm2wav(self.refFile,sample_rate=self.samplerate),pcm2wav(self.testFile,sample_rate=self.samplerate),self.refOffset,self.testOffset,maxComNLevel=self.maxComNLevel,speechPauseLevel=self.speechPauseLevel)
def P563(self):
"""
# P.563: PCM input, 8 kHz
# • Sampling frequency: 8000 Hz
# If higher frequencies are used for recording, a separate down-sampling by using a high
# quality flat low pass filter has to be applied. Lower sampling frequencies are not allowed.
# • Amplitude resolution: 16 bit linear PCM
# • Minimum active speech in file: 3.0 s
# • Maximum signal length: 20.0 s
# • Minimum speech activity ratio: 25%
# • Maximum speech activity ratio: 75%
:return:
"""
if self.testFile is None:
raise EOFError('lack of inputfiles!')
curCH,curBwidth,curSR = self.__check_format(self.testFile)
#TODO handle the sample rate
if curSR != 8000:
print('file will be resampled to 8k!')
finalName = wav2pcm(resample(pcm2wav(self.testFile,sample_rate=self.samplerate),8000))
return cal_563_mos(finalName)
def POLQA(self):
"""
#POLQA: narrowband mode 8 kHz, super-wideband mode 48 kHz
# PCM input
:return:
"""
self.__double_end_check()
curCH,curBwidth,curSR = self.__check_format(self.testFile)
result = polqa_client_test(wav2pcm(self.refFile),wav2pcm(self.testFile),curSR,mode=self.polqaMode)
time.sleep(2)
return result
def PESQ(self):
"""
# PESQ: narrowband mode 8 kHz, wideband mode 16 kHz
# data-block (array) input
:return:
"""
self.__double_end_check()
curCH,curBwidth,curSR = self.__check_format(self.testFile)
if curSR < 16000:
print('file will be resampled to 8k!')
finalrefName = wav2pcm(resample(pcm2wav(self.refFile, curSR), 8000))
finaltestName = wav2pcm(resample(pcm2wav(self.testFile, curSR), 8000))
return cal_pesq(finalrefName, finaltestName, 8000)
else:
print('file will be resampled to 16k!')
finalrefName = wav2pcm(resample(pcm2wav(self.refFile, sample_rate=curSR), 16000))
finaltestName = wav2pcm(resample(pcm2wav(self.testFile, sample_rate=curSR), 16000))
return cal_pesq(finalrefName,finaltestName,16000)
def STOI(self):
"""
#STOI
#data-block (array) input
#sample rate 16000
:return:
"""
self.__double_end_check()
ref, ins = self.__data_convert(wav2pcm(resample(pcm2wav(self.refFile),16000)),wav2pcm(resample(pcm2wav(self.testFile),16000)))
result = cal_stoi(ref,ins,sr=16000)
return result
pass
def STI(self):
"""
#sti
#WAV input, sample-rate independent
:return:
"""
self.__double_end_check()
return cal_sti(pcm2wav(self.refFile,sample_rate=self.samplerate),pcm2wav(self.testFile,sample_rate=self.samplerate))
pass
def SII(self):
"""
Returns
-------
"""
pass
def PEAQ(self):
"""
# WAV input
:return:
"""
self.__double_end_check()
curCH,curBwidth,curSR = self.__check_format(self.testFile)
if curSR not in [8000,16000]:
#TODO handle the sample rate
pass
#TODO compute PEAQ
return cal_peaq(pcm2wav(self.refFile,sample_rate=self.samplerate),pcm2wav(self.testFile,sample_rate=self.samplerate))
pass
def SDR(self):
"""
#SDR
#data-block (array) input, sample-rate independent
:return:
"""
self.__double_end_check()
ref, ins = self.__data_convert(wav2pcm(resample(pcm2wav(self.refFile),16000)),wav2pcm(resample(pcm2wav(self.testFile),16000)))
result = cal_sdr(ref,ins,sr=16000)
return result
pass
def MUSIC(self):
"""
# MUSIC SNR
# no sample-rate restriction
# WAV/PCM input
:return:
"""
self.__double_end_check()
return cal_snr_music(refFile=pcm2wav(self.refFile,sample_rate=self.samplerate),testFile=pcm2wav(self.testFile,sample_rate=self.samplerate))
def TRANSIENT(self):
"""
# Transient noise SNR
# no sample-rate restriction
# WAV/PCM input
:return:
"""
if self.cleFile is None or self.testFile is None or self.noiseFile is None:
raise EOFError('lack of inputfiles!')
if self.__check_format(self.cleFile) != self.__check_format(self.testFile) or \
self.__check_format(self.testFile) != self.__check_format(self.noiseFile):
raise TypeError('there are different parametre in inputfiles!')
return cal_snr_transient(pcm2wav(self.cleFile,sample_rate=self.samplerate),pcm2wav(self.noiseFile,sample_rate=self.samplerate),pcm2wav(self.testFile,sample_rate=self.samplerate))
def MATCH(self):
"""
# MATCH SIG
# no sample-rate restriction
# optionally writes an output file
# WAV/PCM input
:return:
"""
self.__double_end_check()
return match_sig(pcm2wav(self.refFile,sample_rate=self.samplerate), pcm2wav(self.testFile,sample_rate=self.samplerate), self.outFile,self.audioType)
def MATCH2(self):
"""
"""
self.__double_end_check()
return cal_fine_delay(pcm2wav(self.refFile,sample_rate=self.samplerate), pcm2wav(self.testFile,sample_rate=self.samplerate), outfile=self.outFile)
def MATCH3(self):
"""
"""
self.__double_end_check()
return cal_fine_delay_of_specific_section(pcm2wav(self.refFile,sample_rate=self.samplerate), pcm2wav(self.testFile,sample_rate=self.samplerate), outfile=self.outFile,speech_section=self.fineDelaySection)
def LOUDNESS(self):
"""
Returns
-------
"""
pass
def __cal_sii__(self):
'''
Returns
-------
'''
#return cal_sii()
pass
def GAINTABLE(self):
"""
AGC PARA 1
compute the AGC gain table
:return:
"""
self.__double_end_check()
return cal_gain_table(refFile=pcm2wav(self.refFile, sample_rate=self.samplerate),
testFile=pcm2wav(self.testFile, sample_rate=self.samplerate),targetType=self.audioType)
def ATTACKRELEASE(self):
"""
AGC PARA 2
compute the AGC attack/release times
:return:
"""
self.__double_end_check()
return cal_attack_release(refFile=pcm2wav(self.refFile, sample_rate=self.samplerate),
testFile=pcm2wav(self.testFile, sample_rate=self.samplerate))
def MUSICSTA(self):
"""
AGC PARA 3
compute the stability of a music signal
:return:
"""
self.__double_end_check()
return cal_music_stablility(refFile=pcm2wav(self.refFile, sample_rate=self.samplerate),
testFile=pcm2wav(self.testFile, sample_rate=self.samplerate))
def AGCDELAY(self):
"""
AGC PARA 4
compute the file delay
:return:
"""
self.__double_end_check()
return cal_DELAY(refFile=pcm2wav(self.refFile, sample_rate=self.samplerate),
testFile=pcm2wav(self.testFile, sample_rate=self.samplerate))
def AECMOS(self):
"""
Returns
-------
"""
if self.refFile is None or self.micFile is None or self.testFile is None:
raise EOFError('lack of inputfiles!')
if self.__check_format(self.refFile) != self.__check_format(self.micFile) or \
self.__check_format(self.micFile) != self.__check_format(self.testFile):
raise TypeError('there are different parametre in inputfiles!')
return cal_aec_mos(pcm2wav(self.refFile,sample_rate=self.samplerate),pcm2wav(self.micFile,sample_rate=self.samplerate),pcm2wav(self.testFile,sample_rate=self.samplerate),scenario=self.aecScenario,startPoint=self.aecStartPoint,SAMPLE_RATE=self.samplerate)
def AIMOS(self):
"""
Returns
-------
"""
if self.testFile is None:
raise EOFError('lack of inputfiles!')
finalName = pcm2wav(self.testFile,sample_rate=self.samplerate)
return cal_mos_infer(finalName)
def ERLE(self):
"""
Returns
-------
"""
if self.refFile is None or self.micFile is None or self.testFile is None:
raise EOFError('lack of inputfiles!')
if self.__check_format(self.refFile) != self.__check_format(self.micFile) or \
self.__check_format(self.micFile) != self.__check_format(self.testFile):
raise TypeError('there are different parametre in inputfiles!')
return cal_erle(refFile=pcm2wav(self.refFile,sample_rate=self.samplerate),micFile=pcm2wav(self.micFile,sample_rate=self.samplerate),testFile=pcm2wav(self.testFile,sample_rate=self.samplerate),targetType=self.aecTargetType)
def MATCHAEC(self):
"""
Returns
-------
"""
if self.caliFile is None or self.refFile is None or self.testFile is None:
raise EOFError('lack of inputfiles!')
if self.__check_format(self.caliFile) != self.__check_format(self.refFile) or \
self.__check_format(self.caliFile) != self.__check_format(self.testFile):
raise TypeError('there are different parametre in inputfiles!')
return MATCH_AEC(pcm2wav(self.refFile,sample_rate=self.samplerate),pcm2wav(self.testFile,sample_rate=self.samplerate),pcm2wav(self.caliFile,sample_rate=self.samplerate),self.outFile,targetType=self.aecTargetType)
def SLIENCE(self):
"""
Returns
-------
"""
if self.testFile is None:
raise EOFError('lack of inputfiles!')
return isSlience(self.testFile,sample_rate=self.samplerate,bits=self.bitwidth,channels=self.channel,section=self.rmsCalsection)
def FORMAT(self):
"""
Returns
-------
"""
if self.testFile is None:
raise EOFError('lack of inputfiles!')
return audioFormat(self.testFile)
def TRMS(self):
"""
Returns
-------
# (wavFileName=None,rmsMode='total',startTime=0,endTime=1):
"""
if self.testFile is None:
raise EOFError('lack of inputfiles!')
return get_rms_level(wavFileName=pcm2wav(self.testFile,sample_rate=self.samplerate),rmsMode='total',section=self.rmsCalsection)
def ARMS(self):
"""
Returns
-------
"""
if self.testFile is None:
raise EOFError('lack of inputfiles!')
return get_rms_level(wavFileName=pcm2wav(self.testFile,sample_rate=self.samplerate),rmsMode='average',section=self.rmsCalsection)
def NOISE(self):
"""
Returns
-------
"""
self.__double_end_check()
return cal_noise_Supp(pcm2wav(self.refFile, sample_rate=self.samplerate),
pcm2wav(self.testFile, sample_rate=self.samplerate))
def CLIP(self):
"""
Returns
-------
"""
return cal_clip_index(pcm2wav(self.testFile, sample_rate=self.samplerate))
def ECHO(self):
"""
Returns
-------
"""
self.__double_end_check()
return cal_fullref_echo(pcm2wav(self.refFile, sample_rate=self.samplerate),
pcm2wav(self.testFile, sample_rate=self.samplerate))
def SPEC(self):
"""
Returns
-------
"""
if self.testFile is None:
raise EOFError('lack of inputfiles!')
return get_effective_spectral(pcm2wav(self.testFile, sample_rate=self.samplerate))
def PITCH(self):
"""
Returns
-------
"""
self.__double_end_check()
return cal_pitch(pcm2wav(self.refFile, sample_rate=self.samplerate),
pcm2wav(self.testFile, sample_rate=self.samplerate),pitchlogMode=self.pitchLogMode)
def EQ(self):
"""
Returns
-------
"""
self.__double_end_check()
return cal_EQ(pcm2wav(self.refFile, sample_rate=self.samplerate),
pcm2wav(self.testFile, sample_rate=self.samplerate))
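# Illustrative sketch (not part of the library; all values are placeholders): the class
# expects every keyword read in __parse_para, and the metric method is then looked up by name:
#
#   kwargs = dict(metrics='PESQ', testFile='test.wav', refFile='ref.wav', micFile=None,
#                 cleFile=None, noiseFile=None, aecCaliFile=None, outFile=None,
#                 samplerate=16000, bitwidth=16, channel=1, refOffset=0,
#                 maxComNLevel=-48.0, speechPauseLevel=-35.0, audioType=0,
#                 aecStartPoint=0, aecScenario=0, aecTargetType=0, rmsCalsection=None,
#                 polqaMode=0, pitchLogMode=0, fineDelaySection=None)
#   cq = computeAudioQuality(**kwargs)
#   score = getattr(cq, kwargs['metrics'])()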
| AlgorithmLib | /AlgorithmLib-4.0.3.tar.gz/AlgorithmLib-4.0.3/algorithmLib/computeAudioQuality/mainProcess.py | mainProcess.py |
import os
import multiprocessing
import copy
import math
import librosa as lb
import numpy as np
import pandas as pd; pd.options.mode.chained_assignment = None
import matplotlib.pyplot as plt
from tqdm import tqdm
from scipy.stats import pearsonr, spearmanr
from scipy.optimize import minimize
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_packed_sequence
from torch.nn.utils.rnn import pack_padded_sequence
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
import sys,os
from os import path
#%% Models
#
class NISQA(nn.Module):
'''
NISQA: The main speech quality model without speech quality dimension
estimation (MOS only). The module loads the submodules for framewise
modelling (e.g. CNN), time-dependency modelling (e.g. Self-Attention
or LSTM), and pooling (e.g. max-pooling or attention-pooling)
'''
def __init__(self,
ms_seg_length=15,
ms_n_mels=48,
cnn_model='adapt',
cnn_c_out_1=16,
cnn_c_out_2=32,
cnn_c_out_3=64,
cnn_kernel_size=3,
cnn_dropout=0.2,
cnn_pool_1=[24,7],
cnn_pool_2=[12,5],
cnn_pool_3=[6,3],
cnn_fc_out_h=None,
td='self_att',
td_sa_d_model=64,
td_sa_nhead=1,
td_sa_pos_enc=None,
td_sa_num_layers=2,
td_sa_h=64,
td_sa_dropout=0.1,
td_lstm_h=128,
td_lstm_num_layers=1,
td_lstm_dropout=0,
td_lstm_bidirectional=True,
td_2='skip',
td_2_sa_d_model=None,
td_2_sa_nhead=None,
td_2_sa_pos_enc=None,
td_2_sa_num_layers=None,
td_2_sa_h=None,
td_2_sa_dropout=None,
td_2_lstm_h=None,
td_2_lstm_num_layers=None,
td_2_lstm_dropout=None,
td_2_lstm_bidirectional=None,
pool='att',
pool_att_h=128,
pool_att_dropout=0.1,
):
super().__init__()
self.name = 'NISQA'
self.cnn = Framewise(
cnn_model,
ms_seg_length=ms_seg_length,
ms_n_mels=ms_n_mels,
c_out_1=cnn_c_out_1,
c_out_2=cnn_c_out_2,
c_out_3=cnn_c_out_3,
kernel_size=cnn_kernel_size,
dropout=cnn_dropout,
pool_1=cnn_pool_1,
pool_2=cnn_pool_2,
pool_3=cnn_pool_3,
fc_out_h=cnn_fc_out_h,
)
self.time_dependency = TimeDependency(
input_size=self.cnn.model.fan_out,
td=td,
sa_d_model=td_sa_d_model,
sa_nhead=td_sa_nhead,
sa_pos_enc=td_sa_pos_enc,
sa_num_layers=td_sa_num_layers,
sa_h=td_sa_h,
sa_dropout=td_sa_dropout,
lstm_h=td_lstm_h,
lstm_num_layers=td_lstm_num_layers,
lstm_dropout=td_lstm_dropout,
lstm_bidirectional=td_lstm_bidirectional
)
self.time_dependency_2 = TimeDependency(
input_size=self.time_dependency.fan_out,
td=td_2,
sa_d_model=td_2_sa_d_model,
sa_nhead=td_2_sa_nhead,
sa_pos_enc=td_2_sa_pos_enc,
sa_num_layers=td_2_sa_num_layers,
sa_h=td_2_sa_h,
sa_dropout=td_2_sa_dropout,
lstm_h=td_2_lstm_h,
lstm_num_layers=td_2_lstm_num_layers,
lstm_dropout=td_2_lstm_dropout,
lstm_bidirectional=td_2_lstm_bidirectional
)
self.pool = Pooling(
self.time_dependency_2.fan_out,
output_size=1,
pool=pool,
att_h=pool_att_h,
att_dropout=pool_att_dropout,
)
def forward(self, x, n_wins):
x = self.cnn(x, n_wins)
x, n_wins = self.time_dependency(x, n_wins)
x, n_wins = self.time_dependency_2(x, n_wins)
x = self.pool(x, n_wins)
return x
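# forward pass: per-frame mel-spectrogram segments -> framewise CNN features ->
# time-dependency model(s) (self-attention or LSTM) -> attention/max pooling to a single MOS estimate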
#
class NISQA_MULTITASK(nn.Module):
'''
NISQA_MULTITASK: The main speech quality model with speech quality dimension
estimation (MOS, Noisiness, Coloration, Discontinuity, and Loudness).
The module loads the submodules for framewise modelling (e.g. CNN),
time-dependency modelling (e.g. Self-Attention or LSTM), and pooling
(e.g. max-pooling or attention-pooling)
'''
def __init__(self,
ms_seg_length=15,
ms_n_mels=48,
cnn_model='adapt',
cnn_c_out_1=16,
cnn_c_out_2=32,
cnn_c_out_3=64,
cnn_kernel_size=3,
cnn_dropout=0.2,
cnn_pool_1=[24, 7],
cnn_pool_2=[12, 5],
cnn_pool_3=[6, 3],
cnn_fc_out_h=None,
td='self_att',
td_sa_d_model=64,
td_sa_nhead=1,
td_sa_pos_enc=None,
td_sa_num_layers=2,
td_sa_h=64,
td_sa_dropout=0.1,
td_lstm_h=128,
td_lstm_num_layers=1,
td_lstm_dropout=0,
td_lstm_bidirectional=True,
td_2='skip',
td_2_sa_d_model=None,
td_2_sa_nhead=None,
td_2_sa_pos_enc=None,
td_2_sa_num_layers=None,
td_2_sa_h=None,
td_2_sa_dropout=None,
td_2_lstm_h=None,
td_2_lstm_num_layers=None,
td_2_lstm_dropout=None,
td_2_lstm_bidirectional=None,
pool='att',
pool_att_h=128,
pool_att_dropout=0.1,
):
super().__init__()
self.name = 'NISQA_MULTITASK'
self.cnn = Framewise(
cnn_model,
ms_seg_length=ms_seg_length,
ms_n_mels=ms_n_mels,
c_out_1=cnn_c_out_1,
c_out_2=cnn_c_out_2,
c_out_3=cnn_c_out_3,
kernel_size=cnn_kernel_size,
dropout=cnn_dropout,
pool_1=cnn_pool_1,
pool_2=cnn_pool_2,
pool_3=cnn_pool_3,
fc_out_h=cnn_fc_out_h,
)
self.time_dependency = TimeDependency(
input_size=self.cnn.model.fan_out,
td=td,
sa_d_model=td_sa_d_model,
sa_nhead=td_sa_nhead,
sa_pos_enc=td_sa_pos_enc,
sa_num_layers=td_sa_num_layers,
sa_h=td_sa_h,
sa_dropout=td_sa_dropout,
lstm_h=td_lstm_h,
lstm_num_layers=td_lstm_num_layers,
lstm_dropout=td_lstm_dropout,
lstm_bidirectional=td_lstm_bidirectional
)
self.time_dependency_2 = TimeDependency(
input_size=self.time_dependency.fan_out,
td=td_2,
sa_d_model=td_2_sa_d_model,
sa_nhead=td_2_sa_nhead,
sa_pos_enc=td_2_sa_pos_enc,
sa_num_layers=td_2_sa_num_layers,
sa_h=td_2_sa_h,
sa_dropout=td_2_sa_dropout,
lstm_h=td_2_lstm_h,
lstm_num_layers=td_2_lstm_num_layers,
lstm_dropout=td_2_lstm_dropout,
lstm_bidirectional=td_2_lstm_bidirectional
)
pool = Pooling(
            self.time_dependency_2.fan_out,
output_size=1,
pool=pool,
att_h=pool_att_h,
att_dropout=pool_att_dropout,
)
self.pool_layers = self._get_clones(pool, 2)
def _get_clones(self, module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def forward(self, x, n_wins):
x = self.cnn(x, n_wins)
x, n_wins = self.time_dependency(x, n_wins)
x, n_wins = self.time_dependency_2(x, n_wins)
out = [mod(x, n_wins) for mod in self.pool_layers]
out = torch.cat(out, dim=1)
return out
#
class NISQA_DIM(nn.Module):
'''
NISQA_DIM: The main speech quality model with speech quality dimension
estimation (MOS, Noisiness, Coloration, Discontinuity, and Loudness).
The module loads the submodules for framewise modelling (e.g. CNN),
time-dependency modelling (e.g. Self-Attention or LSTM), and pooling
(e.g. max-pooling or attention-pooling)
'''
def __init__(self,
ms_seg_length=15,
ms_n_mels=48,
cnn_model='adapt',
cnn_c_out_1=16,
cnn_c_out_2=32,
cnn_c_out_3=64,
cnn_kernel_size=3,
cnn_dropout=0.2,
cnn_pool_1=[24,7],
cnn_pool_2=[12,5],
cnn_pool_3=[6,3],
cnn_fc_out_h=None,
td='self_att',
td_sa_d_model=64,
td_sa_nhead=1,
td_sa_pos_enc=None,
td_sa_num_layers=2,
td_sa_h=64,
td_sa_dropout=0.1,
td_lstm_h=128,
td_lstm_num_layers=1,
td_lstm_dropout=0,
td_lstm_bidirectional=True,
td_2='skip',
td_2_sa_d_model=None,
td_2_sa_nhead=None,
td_2_sa_pos_enc=None,
td_2_sa_num_layers=None,
td_2_sa_h=None,
td_2_sa_dropout=None,
td_2_lstm_h=None,
td_2_lstm_num_layers=None,
td_2_lstm_dropout=None,
td_2_lstm_bidirectional=None,
pool='att',
pool_att_h=128,
pool_att_dropout=0.1,
):
super().__init__()
self.name = 'NISQA_DIM'
self.cnn = Framewise(
cnn_model,
ms_seg_length=ms_seg_length,
ms_n_mels=ms_n_mels,
c_out_1=cnn_c_out_1,
c_out_2=cnn_c_out_2,
c_out_3=cnn_c_out_3,
kernel_size=cnn_kernel_size,
dropout=cnn_dropout,
pool_1=cnn_pool_1,
pool_2=cnn_pool_2,
pool_3=cnn_pool_3,
fc_out_h=cnn_fc_out_h,
)
self.time_dependency = TimeDependency(
input_size=self.cnn.model.fan_out,
td=td,
sa_d_model=td_sa_d_model,
sa_nhead=td_sa_nhead,
sa_pos_enc=td_sa_pos_enc,
sa_num_layers=td_sa_num_layers,
sa_h=td_sa_h,
sa_dropout=td_sa_dropout,
lstm_h=td_lstm_h,
lstm_num_layers=td_lstm_num_layers,
lstm_dropout=td_lstm_dropout,
lstm_bidirectional=td_lstm_bidirectional
)
self.time_dependency_2 = TimeDependency(
input_size=self.time_dependency.fan_out,
td=td_2,
sa_d_model=td_2_sa_d_model,
sa_nhead=td_2_sa_nhead,
sa_pos_enc=td_2_sa_pos_enc,
sa_num_layers=td_2_sa_num_layers,
sa_h=td_2_sa_h,
sa_dropout=td_2_sa_dropout,
lstm_h=td_2_lstm_h,
lstm_num_layers=td_2_lstm_num_layers,
lstm_dropout=td_2_lstm_dropout,
lstm_bidirectional=td_2_lstm_bidirectional
)
pool = Pooling(
            self.time_dependency_2.fan_out,
output_size=1,
pool=pool,
att_h=pool_att_h,
att_dropout=pool_att_dropout,
)
self.pool_layers = self._get_clones(pool, 5)
def _get_clones(self, module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def forward(self, x, n_wins):
x = self.cnn(x, n_wins)
x, n_wins = self.time_dependency(x, n_wins)
x, n_wins = self.time_dependency_2(x, n_wins)
out = [mod(x, n_wins) for mod in self.pool_layers]
out = torch.cat(out, dim=1)
return out
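# --- Usage sketch (added, not part of the original NISQA code) ---------------
# The dimension model returns one column per pooling head (5 in total: overall
# MOS plus four quality dimensions; predict_dim() below shows the column order
# used when writing the predictions back to the dataframe).
def _example_nisqa_dim_forward():
    model = NISQA_DIM()
    model.eval()
    x = torch.rand(2, 100, 1, 48, 15)
    n_wins = torch.tensor([100, 80])
    with torch.no_grad():
        out = model(x, n_wins)  # -> shape (2, 5)
    return out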
#
class NISQA_DE(nn.Module):
'''
NISQA: The main speech quality model for double-ended prediction.
The module loads the submodules for framewise modelling (e.g. CNN),
time-dependency modelling (e.g. Self-Attention or LSTM), time-alignment,
feature fusion and pooling (e.g. max-pooling or attention-pooling)
'''
def __init__(self,
ms_seg_length=15,
ms_n_mels=48,
cnn_model='adapt',
cnn_c_out_1=16,
cnn_c_out_2=32,
cnn_c_out_3=64,
cnn_kernel_size=3,
cnn_dropout=0.2,
cnn_pool_1=[24,7],
cnn_pool_2=[12,5],
cnn_pool_3=[6,3],
cnn_fc_out_h=None,
td='self_att',
td_sa_d_model=64,
td_sa_nhead=1,
td_sa_pos_enc=None,
td_sa_num_layers=2,
td_sa_h=64,
td_sa_dropout=0.1,
td_lstm_h=128,
td_lstm_num_layers=1,
td_lstm_dropout=0,
td_lstm_bidirectional=True,
td_2='skip',
td_2_sa_d_model=None,
td_2_sa_nhead=None,
td_2_sa_pos_enc=None,
td_2_sa_num_layers=None,
td_2_sa_h=None,
td_2_sa_dropout=None,
td_2_lstm_h=None,
td_2_lstm_num_layers=None,
td_2_lstm_dropout=None,
td_2_lstm_bidirectional=None,
pool='att',
pool_att_h=128,
pool_att_dropout=0.1,
de_align = 'dot',
de_align_apply = 'hard',
de_fuse_dim = None,
de_fuse = True,
):
super().__init__()
self.name = 'NISQA_DE'
self.cnn = Framewise(
cnn_model,
ms_seg_length=ms_seg_length,
ms_n_mels=ms_n_mels,
c_out_1=cnn_c_out_1,
c_out_2=cnn_c_out_2,
c_out_3=cnn_c_out_3,
kernel_size=cnn_kernel_size,
dropout=cnn_dropout,
pool_1=cnn_pool_1,
pool_2=cnn_pool_2,
pool_3=cnn_pool_3,
fc_out_h=cnn_fc_out_h,
)
self.time_dependency = TimeDependency(
input_size=self.cnn.model.fan_out,
td=td,
sa_d_model=td_sa_d_model,
sa_nhead=td_sa_nhead,
sa_pos_enc=td_sa_pos_enc,
sa_num_layers=td_sa_num_layers,
sa_h=td_sa_h,
sa_dropout=td_sa_dropout,
lstm_h=td_lstm_h,
lstm_num_layers=td_lstm_num_layers,
lstm_dropout=td_lstm_dropout,
lstm_bidirectional=td_lstm_bidirectional
)
self.align = Alignment(
de_align,
de_align_apply,
q_dim=self.time_dependency.fan_out,
y_dim=self.time_dependency.fan_out,
)
self.fuse = Fusion(
in_feat=self.time_dependency.fan_out,
fuse_dim=de_fuse_dim,
fuse=de_fuse,
)
self.time_dependency_2 = TimeDependency(
input_size=self.fuse.fan_out,
td=td_2,
sa_d_model=td_2_sa_d_model,
sa_nhead=td_2_sa_nhead,
sa_pos_enc=td_2_sa_pos_enc,
sa_num_layers=td_2_sa_num_layers,
sa_h=td_2_sa_h,
sa_dropout=td_2_sa_dropout,
lstm_h=td_2_lstm_h,
lstm_num_layers=td_2_lstm_num_layers,
lstm_dropout=td_2_lstm_dropout,
lstm_bidirectional=td_2_lstm_bidirectional
)
self.pool = Pooling(
self.time_dependency_2.fan_out,
output_size=1,
pool=pool,
att_h=pool_att_h,
att_dropout=pool_att_dropout,
)
def _split_ref_deg(self, x, n_wins):
(x, y) = torch.chunk(x, 2, dim=2)
(n_wins_x, n_wins_y) = torch.chunk(n_wins, 2, dim=1)
n_wins_x = n_wins_x.view(-1)
n_wins_y = n_wins_y.view(-1)
return x, y, n_wins_x, n_wins_y
def forward(self, x, n_wins):
x, y, n_wins_x, n_wins_y = self._split_ref_deg(x, n_wins)
x = self.cnn(x, n_wins_x)
y = self.cnn(y, n_wins_y)
x, n_wins_x = self.time_dependency(x, n_wins_x)
y, n_wins_y = self.time_dependency(y, n_wins_y)
y = self.align(x, y, n_wins_y)
x = self.fuse(x, y)
x, n_wins_x = self.time_dependency_2(x, n_wins_x)
x = self.pool(x, n_wins_x)
return x
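# --- Usage sketch (added, not part of the original NISQA code) ---------------
# Double-ended model: the two signals' Mel-spec segments are stacked along the
# channel axis and split again in _split_ref_deg(), while n_wins carries one
# segment count per signal. de_fuse is set to 'x/y/-' explicitly here because
# the default value is not one of the modes handled by the Fusion module. The
# channel order (degraded vs. reference) is an assumption of this sketch.
def _example_nisqa_de_forward():
    model = NISQA_DE(de_fuse='x/y/-')
    model.eval()
    x = torch.rand(2, 100, 2, 48, 15)             # two signals stacked on the channel axis
    n_wins = torch.tensor([[100, 90], [80, 80]])  # (batch, 2) segment counts
    with torch.no_grad():
        mos = model(x, n_wins)  # -> shape (2, 1)
    return mos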
#%% Framewise
class Framewise(nn.Module):
'''
    Framewise: The main framewise module. It loads either a CNN or a feed-forward
    network for framewise modelling of the Mel-spec segments. This module can
    also be skipped by loading the SkipCNN module. Three CNN modules are
    available: AdaptCNN with adaptive maxpooling, the StandardCNN module, and
    the residual ResCNN module. However, they could also be replaced with new
    modules, such as PyTorch implementations of ResNet or AlexNet.
'''
def __init__(
self,
cnn_model,
ms_seg_length=15,
ms_n_mels=48,
c_out_1=16,
c_out_2=32,
c_out_3=64,
kernel_size=3,
dropout=0.2,
pool_1=[24,7],
pool_2=[12,5],
pool_3=[6,3],
fc_out_h=None,
):
super().__init__()
if cnn_model=='adapt':
self.model = AdaptCNN(
input_channels=1,
c_out_1=c_out_1,
c_out_2=c_out_2,
c_out_3=c_out_3,
kernel_size=kernel_size,
dropout=dropout,
pool_1=pool_1,
pool_2=pool_2,
pool_3=pool_3,
fc_out_h=fc_out_h,
)
elif cnn_model=='standard':
assert ms_n_mels == 48, "ms_n_mels is {} and should be 48, use adaptive model or change ms_n_mels".format(ms_n_mels)
            assert ms_seg_length == 15, "ms_seg_length is {} and should be 15, use adaptive model or change ms_seg_length".format(ms_seg_length)
            assert ((kernel_size == 3) or (kernel_size == (3,3))), "cnn_kernel_size is {} and should be 3, use adaptive model or change cnn_kernel_size".format(kernel_size)
self.model = StandardCNN(
input_channels=1,
c_out_1=c_out_1,
c_out_2=c_out_2,
c_out_3=c_out_3,
kernel_size=kernel_size,
dropout=dropout,
fc_out_h=fc_out_h,
)
elif cnn_model=='dff':
self.model = DFF(ms_seg_length, ms_n_mels, dropout, fc_out_h)
elif (cnn_model is None) or (cnn_model=='skip'):
self.model = SkipCNN(ms_seg_length, ms_n_mels, fc_out_h)
elif cnn_model == 'res':
self.model = ResCNN(input_channels=1,
c_out_1=c_out_1,
c_out_2=c_out_2,
c_out_3=c_out_3,
kernel_size=kernel_size,
dropout=dropout,
pool_1=pool_1,
pool_2=pool_2,
pool_3=pool_3,
fc_out_h=fc_out_h,)
else:
            raise NotImplementedError('Framewise model not available')
def forward(self, x, n_wins):
(bs, length, channels, height, width) = x.shape
x_packed = pack_padded_sequence(
x,
n_wins.cpu(),
batch_first=True,
enforce_sorted=False
)
x = self.model(x_packed.data)
x = x_packed._replace(data=x)
x, _ = pad_packed_sequence(
x,
batch_first=True,
padding_value=0.0,
total_length=n_wins.max())
return x
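# --- Shape sketch (added, not part of the original NISQA code) ---------------
# Framewise flattens the padded batch of Mel-spec segments into a packed
# sequence, applies the CNN to every segment independently, and restores the
# (batch, time) layout, so the output is (batch, max_n_wins, fan_out).
def _example_framewise_shapes():
    fw = Framewise('adapt', ms_seg_length=15, ms_n_mels=48)
    fw.eval()
    x = torch.rand(2, 100, 1, 48, 15)
    n_wins = torch.tensor([100, 80])
    with torch.no_grad():
        feats = fw(x, n_wins)  # -> shape (2, 100, fw.model.fan_out)
    return feats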
class SkipCNN(nn.Module):
'''
SkipCNN: Can be used to skip the framewise modelling stage and directly
apply an LSTM or Self-Attention network.
'''
def __init__(
self,
cnn_seg_length,
ms_n_mels,
fc_out_h
):
super().__init__()
self.name = 'SkipCNN'
self.cnn_seg_length = cnn_seg_length
self.ms_n_mels = ms_n_mels
self.fan_in = cnn_seg_length*ms_n_mels
self.bn = nn.BatchNorm2d( 1 )
if fc_out_h is not None:
self.linear = nn.Linear(self.fan_in, fc_out_h)
self.fan_out = fc_out_h
else:
self.linear = nn.Identity()
self.fan_out = self.fan_in
def forward(self, x):
x = self.bn(x)
x = x.view(-1, self.fan_in)
x = self.linear(x)
return x
class DFF(nn.Module):
'''
    DFF: Deep Feed-Forward network that was used as a baseline framewise model
    for comparison with the CNN.
'''
def __init__(self,
cnn_seg_length,
ms_n_mels,
dropout,
fc_out_h=4096,
):
super().__init__()
self.name = 'DFF'
self.dropout_rate = dropout
self.fc_out_h = fc_out_h
self.fan_out = fc_out_h
self.cnn_seg_length = cnn_seg_length
self.ms_n_mels = ms_n_mels
self.fan_in = cnn_seg_length*ms_n_mels
self.lin1 = nn.Linear(self.fan_in, self.fc_out_h)
self.lin2 = nn.Linear(self.fc_out_h, self.fc_out_h)
self.lin3 = nn.Linear(self.fc_out_h, self.fc_out_h)
self.lin4 = nn.Linear(self.fc_out_h, self.fc_out_h)
self.bn1 = nn.BatchNorm2d(1)
self.bn2 = nn.BatchNorm1d( self.fc_out_h )
self.bn3 = nn.BatchNorm1d( self.fc_out_h )
self.bn4 = nn.BatchNorm1d( self.fc_out_h )
self.bn5 = nn.BatchNorm1d( self.fc_out_h )
self.dropout = nn.Dropout(p=dropout)
def forward(self, x):
x = self.bn1(x)
x = x.view(-1, self.fan_in)
x = F.relu( self.bn2( self.lin1(x) ) )
x = self.dropout(x)
x = F.relu( self.bn3( self.lin2(x) ) )
x = self.dropout(x)
x = F.relu( self.bn4( self.lin3(x) ) )
x = self.dropout(x)
x = F.relu( self.bn5( self.lin4(x) ) )
return x
class AdaptCNN(nn.Module):
'''
AdaptCNN: CNN with adaptive maxpooling that can be used as framewise model.
Overall, it has six convolutional layers. This CNN module is more flexible
than the StandardCNN that requires a fixed input dimension of 48x15.
'''
def __init__(self,
input_channels,
c_out_1,
c_out_2,
c_out_3,
kernel_size,
dropout,
pool_1,
pool_2,
pool_3,
fc_out_h=20,
):
super().__init__()
self.name = 'CNN_adapt'
self.input_channels = input_channels
self.c_out_1 = c_out_1
self.c_out_2 = c_out_2
self.c_out_3 = c_out_3
self.kernel_size = kernel_size
self.pool_1 = pool_1
self.pool_2 = pool_2
self.pool_3 = pool_3
self.dropout_rate = dropout
self.fc_out_h = fc_out_h
self.dropout = nn.Dropout2d(p=self.dropout_rate)
if isinstance(self.kernel_size, int):
self.kernel_size = (self.kernel_size, self.kernel_size)
# Set kernel width of last conv layer to last pool width to
# downsample width to one.
self.kernel_size_last = (self.kernel_size[0], self.pool_3[1])
# kernel_size[1]=1 can be used for seg_length=1 -> corresponds to
# 1D conv layer, no width padding needed.
if self.kernel_size[1] == 1:
self.cnn_pad = (1,0)
else:
self.cnn_pad = (1,1)
self.conv1 = nn.Conv2d(
self.input_channels,
self.c_out_1,
self.kernel_size,
padding = self.cnn_pad)
self.bn1 = nn.BatchNorm2d( self.conv1.out_channels )
self.conv2 = nn.Conv2d(
self.conv1.out_channels,
self.c_out_2,
self.kernel_size,
padding = self.cnn_pad)
self.bn2 = nn.BatchNorm2d( self.conv2.out_channels )
self.conv3 = nn.Conv2d(
self.conv2.out_channels,
self.c_out_3,
self.kernel_size,
padding = self.cnn_pad)
self.bn3 = nn.BatchNorm2d( self.conv3.out_channels )
self.conv4 = nn.Conv2d(
self.conv3.out_channels,
self.c_out_3,
self.kernel_size,
padding = self.cnn_pad)
self.bn4 = nn.BatchNorm2d( self.conv4.out_channels )
self.conv5 = nn.Conv2d(
self.conv4.out_channels,
self.c_out_3,
self.kernel_size,
padding = self.cnn_pad)
self.bn5 = nn.BatchNorm2d( self.conv5.out_channels )
self.conv6 = nn.Conv2d(
self.conv5.out_channels,
self.c_out_3,
self.kernel_size_last,
padding = (1,0))
self.bn6 = nn.BatchNorm2d( self.conv6.out_channels )
if self.fc_out_h:
self.fc = nn.Linear(self.conv6.out_channels * self.pool_3[0], self.fc_out_h)
self.fan_out = self.fc_out_h
else:
self.fan_out = (self.conv6.out_channels * self.pool_3[0])
def forward(self, x):
x = F.relu( self.bn1( self.conv1(x) ) )
x = F.adaptive_max_pool2d(x, output_size=(self.pool_1))
x = F.relu( self.bn2( self.conv2(x) ) )
x = F.adaptive_max_pool2d(x, output_size=(self.pool_2))
x = self.dropout(x)
x = F.relu( self.bn3( self.conv3(x) ) )
x = self.dropout(x)
x = F.relu( self.bn4( self.conv4(x) ) )
x = F.adaptive_max_pool2d(x, output_size=(self.pool_3))
x = self.dropout(x)
x = F.relu( self.bn5( self.conv5(x) ) )
x = self.dropout(x)
x = F.relu( self.bn6( self.conv6(x) ) )
x = x.view(-1, self.conv6.out_channels * self.pool_3[0])
if self.fc_out_h:
x = self.fc( x )
return x
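# --- Shape sketch (added, not part of the original NISQA code) ---------------
# With the default pooling sizes the six conv layers reduce each 1x48x15
# Mel-spec segment to c_out_3 * pool_3[0] features, i.e. 64 * 6 = 384 per
# segment when fc_out_h is None.
def _example_adapt_cnn_shapes():
    cnn = AdaptCNN(
        input_channels=1, c_out_1=16, c_out_2=32, c_out_3=64,
        kernel_size=3, dropout=0.2,
        pool_1=[24, 7], pool_2=[12, 5], pool_3=[6, 3],
        fc_out_h=None,
    )
    cnn.eval()
    segs = torch.rand(8, 1, 48, 15)  # 8 Mel-spec segments
    with torch.no_grad():
        feats = cnn(segs)            # -> shape (8, 384)
    return feats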
class StandardCNN(nn.Module):
'''
StandardCNN: CNN with fixed maxpooling that can be used as framewise model.
Overall, it has six convolutional layers. This CNN module requires a fixed
input dimension of 48x15.
'''
def __init__(
self,
input_channels,
c_out_1,
c_out_2,
c_out_3,
kernel_size,
dropout,
fc_out_h=None
):
super().__init__()
self.name = 'CNN_standard'
self.input_channels = input_channels
self.c_out_1 = c_out_1
self.c_out_2 = c_out_2
self.c_out_3 = c_out_3
self.kernel_size = kernel_size
self.pool_size = 2
self.dropout_rate = dropout
self.fc_out_h = fc_out_h
self.output_width = 2 # input width 15 pooled 3 times
self.output_height = 6 # input height 48 pooled 3 times
self.dropout = nn.Dropout2d(p=self.dropout_rate)
self.pool_first = nn.MaxPool2d(
self.pool_size,
stride = self.pool_size,
padding = (0,1))
self.pool = nn.MaxPool2d(
self.pool_size,
stride = self.pool_size,
padding = 0)
self.conv1 = nn.Conv2d(
self.input_channels,
self.c_out_1,
self.kernel_size,
padding = 1)
self.bn1 = nn.BatchNorm2d( self.conv1.out_channels )
self.conv2 = nn.Conv2d(
self.conv1.out_channels,
self.c_out_2,
self.kernel_size,
padding = 1)
self.bn2 = nn.BatchNorm2d( self.conv2.out_channels )
self.conv3 = nn.Conv2d(
self.conv2.out_channels,
self.c_out_3,
self.kernel_size,
padding = 1)
self.bn3 = nn.BatchNorm2d( self.conv3.out_channels )
self.conv4 = nn.Conv2d(
self.conv3.out_channels,
self.c_out_3,
self.kernel_size,
padding = 1)
self.bn4 = nn.BatchNorm2d( self.conv4.out_channels )
self.conv5 = nn.Conv2d(
self.conv4.out_channels,
self.c_out_3,
self.kernel_size,
padding = 1)
self.bn5 = nn.BatchNorm2d( self.conv5.out_channels )
self.conv6 = nn.Conv2d(
self.conv5.out_channels,
self.c_out_3,
self.kernel_size,
padding = 1)
self.bn6 = nn.BatchNorm2d( self.conv6.out_channels )
if self.fc_out_h:
self.fc_out = nn.Linear(self.conv6.out_channels * self.output_height * self.output_width, self.fc_out_h)
self.fan_out = self.fc_out_h
else:
self.fan_out = (self.conv6.out_channels * self.output_height * self.output_width)
def forward(self, x):
x = F.relu( self.bn1( self.conv1(x) ) )
x = self.pool_first( x )
x = F.relu( self.bn2( self.conv2(x) ) )
x = self.pool( x )
x = self.dropout(x)
x = F.relu( self.bn3( self.conv3(x) ) )
x = self.dropout(x)
x = F.relu( self.bn4( self.conv4(x) ) )
x = self.pool( x )
x = self.dropout(x)
x = F.relu( self.bn5( self.conv5(x) ) )
x = self.dropout(x)
x = F.relu( self.bn6( self.conv6(x) ) )
x = x.view(-1, self.conv6.out_channels * self.output_height * self.output_width)
if self.fc_out_h:
x = self.fc_out( x )
return x
class ResBlock(nn.Module):
def __init__(self, in_channels: int, out_channels: int, leaky_relu_slope=0.01):
super().__init__()
self.downsample = in_channels != out_channels
        # BN / LReLU before the conv layers - see Figure 1b in the paper
        # (the MaxPool downsampling step is commented out below)
self.pre_conv = nn.Sequential(
nn.BatchNorm2d(num_features=in_channels),
nn.LeakyReLU(leaky_relu_slope, inplace=True),
# nn.MaxPool2d(kernel_size=(1, 2)), # apply downsampling on the y axis only
)
# conv layers
self.conv = nn.Sequential(
nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(out_channels),
nn.LeakyReLU(leaky_relu_slope, inplace=True),
nn.Conv2d(out_channels, out_channels, 3, padding=1, bias=False),
)
# 1 x 1 convolution layer to match the feature dimensions
self.conv1by1 = None
if self.downsample:
self.conv1by1 = nn.Conv2d(in_channels, out_channels, 1, bias=False)
def forward(self, x):
x = self.pre_conv(x)
if self.downsample:
x = self.conv(x) + self.conv1by1(x)
else:
x = self.conv(x) + x
return x
class ResCNN(nn.Module):
def __init__(
self,
input_channels,
c_out_1,
c_out_2,
c_out_3,
kernel_size,
dropout,
pool_1,
pool_2,
pool_3,
fc_out_h=20,
):
super().__init__()
self.name = 'CNN_res'
self.input_channels = input_channels
self.c_out_1 = c_out_1
self.c_out_2 = c_out_2
self.c_out_3 = c_out_3
self.kernel_size = kernel_size
self.pool_1 = pool_1
self.pool_2 = pool_2
self.pool_3 = pool_3
self.dropout_rate = dropout
self.fc_out_h = fc_out_h
self.conv_block = nn.Sequential(
nn.Conv2d(in_channels=input_channels, out_channels=c_out_1, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(num_features=c_out_1),
nn.LeakyReLU(0.01, inplace=True),
nn.Conv2d(c_out_1, c_out_1, 3, padding=1, bias=False),
)
# res blocks
self.res_block1 = ResBlock(in_channels=c_out_1, out_channels=c_out_2)
self.res_block2 = ResBlock(in_channels=c_out_2, out_channels=c_out_3)
self.res_block3 = ResBlock(in_channels=c_out_3, out_channels=c_out_3)
self.post_conv_block = nn.Sequential(
nn.Conv2d(in_channels=c_out_3, out_channels=c_out_3, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(num_features=c_out_3),
nn.LeakyReLU(0.01, inplace=True),
nn.Conv2d(c_out_3, c_out_3, 3, padding=(1,0), bias=False),
)
if self.fc_out_h:
self.fc = nn.Linear(c_out_3 * self.pool_3[0], self.fc_out_h)
self.fan_out = self.fc_out_h
else:
self.fan_out = (c_out_3 * self.pool_3[0])
def forward(self, x):
x = self.conv_block(x)
x = self.res_block1(x)
x = F.adaptive_max_pool2d(x, output_size=(self.pool_1))
x = self.res_block2(x)
x = F.adaptive_max_pool2d(x, output_size=(self.pool_2))
x = self.res_block3(x)
x = F.adaptive_max_pool2d(x, output_size=(self.pool_3))
x = self.post_conv_block(x)
x = x.view(-1, self.c_out_3 * self.pool_3[0])
if self.fc_out_h:
x = self.fc(x)
return x
#%% Time Dependency
class TimeDependency(nn.Module):
'''
TimeDependency: The main time-dependency module. It loads either an LSTM
or self-attention network for time-dependency modelling of the framewise
features. This module can also be skipped.
'''
def __init__(self,
input_size,
td='self_att',
sa_d_model=512,
sa_nhead=8,
sa_pos_enc=None,
sa_num_layers=6,
sa_h=2048,
sa_dropout=0.1,
lstm_h=128,
lstm_num_layers=1,
lstm_dropout=0,
lstm_bidirectional=True,
):
super().__init__()
if td=='self_att':
self.model = SelfAttention(
input_size=input_size,
d_model=sa_d_model,
nhead=sa_nhead,
pos_enc=sa_pos_enc,
num_layers=sa_num_layers,
sa_h=sa_h,
dropout=sa_dropout,
activation="relu"
)
self.fan_out = sa_d_model
elif td=='lstm':
self.model = LSTM(
input_size,
lstm_h=lstm_h,
num_layers=lstm_num_layers,
dropout=lstm_dropout,
bidirectional=lstm_bidirectional,
)
self.fan_out = self.model.fan_out
elif (td is None) or (td=='skip'):
self.model = self._skip
self.fan_out = input_size
else:
raise NotImplementedError('Time dependency option not available')
def _skip(self, x, n_wins):
return x, n_wins
def forward(self, x, n_wins):
x, n_wins = self.model(x, n_wins)
return x, n_wins
class LSTM(nn.Module):
'''
LSTM: The main LSTM module that can be used as a time-dependency model.
'''
def __init__(self,
input_size,
lstm_h=128,
num_layers=1,
dropout=0.1,
bidirectional=True
):
super().__init__()
self.lstm = nn.LSTM(
input_size = input_size,
hidden_size = lstm_h,
num_layers = num_layers,
dropout = dropout,
batch_first = True,
bidirectional = bidirectional
)
if bidirectional:
num_directions = 2
else:
num_directions = 1
self.fan_out = num_directions*lstm_h
def forward(self, x, n_wins):
x = pack_padded_sequence(
x,
n_wins.cpu(),
batch_first=True,
enforce_sorted=False
)
self.lstm.flatten_parameters()
x = self.lstm(x)[0]
x, _ = pad_packed_sequence(
x,
batch_first=True,
padding_value=0.0,
total_length=n_wins.max())
return x, n_wins
class SelfAttention(nn.Module):
'''
SelfAttention: The main SelfAttention module that can be used as a
time-dependency model.
'''
def __init__(self,
input_size,
d_model=512,
nhead=8,
pool_size=3,
pos_enc=None,
num_layers=6,
sa_h=2048,
dropout=0.1,
activation="relu"
):
super().__init__()
encoder_layer = SelfAttentionLayer(d_model, nhead, pool_size, sa_h, dropout, activation)
self.norm1 = nn.LayerNorm(d_model)
self.linear = nn.Linear(input_size, d_model)
self.layers = self._get_clones(encoder_layer, num_layers)
self.num_layers = num_layers
self.d_model = d_model
self.nhead = nhead
if pos_enc:
self.pos_encoder = PositionalEncoding(d_model, dropout)
else:
self.pos_encoder = nn.Identity()
self._reset_parameters()
def _get_clones(self, module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def forward(self, src, n_wins=None):
src = self.linear(src)
output = src.transpose(1,0)
output = self.norm1(output)
output = self.pos_encoder(output)
for mod in self.layers:
output, n_wins = mod(output, n_wins=n_wins)
return output.transpose(1,0), n_wins
class SelfAttentionLayer(nn.Module):
'''
SelfAttentionLayer: The SelfAttentionLayer that is used by the
SelfAttention module.
'''
def __init__(self, d_model, nhead, pool_size=1, sa_h=2048, dropout=0.1, activation="relu"):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.linear1 = nn.Linear(d_model, sa_h)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(sa_h, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = self._get_activation_fn(activation)
def _get_activation_fn(self, activation):
if activation == "relu":
return F.relu
elif activation == "gelu":
return F.gelu
def forward(self, src, n_wins=None):
if n_wins is not None:
mask = ~((torch.arange(src.shape[0])[None, :]).to(src.device) < n_wins[:, None].to(torch.long).to(src.device))
else:
mask = None
src2 = self.self_attn(src, src, src, key_padding_mask=mask)[0]
src = src + self.dropout1(src2)
src = self.norm1(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.dropout2(src2)
src = self.norm2(src)
return src, n_wins
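# --- Mask sketch (added, not part of the original NISQA code) ----------------
# Inside SelfAttentionLayer the input is (time, batch, d_model) and the
# key_padding_mask marks padded time steps with True so that attention ignores
# them. A small standalone reconstruction of the same mask expression:
def _example_key_padding_mask():
    n_wins = torch.tensor([3, 5])
    time_steps = 5
    mask = ~((torch.arange(time_steps)[None, :]) < n_wins[:, None].to(torch.long))
    # mask == tensor([[False, False, False,  True,  True],
    #                 [False, False, False, False, False]])
    return mask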
class PositionalEncoding(nn.Module):
'''
PositionalEncoding: PositionalEncoding taken from the PyTorch Transformer
tutorial. Can be applied to the SelfAttention module. However, it did not
improve the results in previous experiments.
'''
def __init__(self, d_model, dropout=0.1, max_len=3000):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0).transpose(0, 1)
self.register_buffer('pe', pe)
def forward(self, x):
x = x + self.pe[:x.size(0), :]
return self.dropout(x)
#%% Pooling
class Pooling(nn.Module):
'''
Pooling: Main Pooling module. It can load either attention-pooling, average
pooling, maxpooling, or last-step pooling. In case of bidirectional LSTMs
last-step-bi pooling should be used instead of last-step pooling.
'''
def __init__(self,
d_input,
output_size=1,
pool='att',
att_h=None,
att_dropout=0,
):
super().__init__()
if pool=='att':
if att_h is None:
self.model = PoolAtt(d_input, output_size)
else:
self.model = PoolAttFF(d_input, output_size, h=att_h, dropout=att_dropout)
elif pool=='last_step_bi':
self.model = PoolLastStepBi(d_input, output_size)
elif pool=='last_step':
self.model = PoolLastStep(d_input, output_size)
elif pool=='max':
self.model = PoolMax(d_input, output_size)
elif pool=='avg':
self.model = PoolAvg(d_input, output_size)
else:
raise NotImplementedError('Pool option not available')
def forward(self, x, n_wins):
return self.model(x, n_wins)
class PoolLastStepBi(nn.Module):
'''
PoolLastStepBi: last step pooling for the case of bidirectional LSTM
'''
def __init__(self, input_size, output_size):
super().__init__()
self.linear = nn.Linear(input_size, output_size)
def forward(self, x, n_wins=None):
x = x.view(x.shape[0], n_wins.max(), 2, x.shape[-1]//2)
x = torch.cat(
(x[torch.arange(x.shape[0]), n_wins.type(torch.long)-1, 0, :],
x[:,0,1,:]),
dim=1
)
x = self.linear(x)
return x
class PoolLastStep(nn.Module):
'''
PoolLastStep: last step pooling can be applied to any one-directional
sequence.
'''
def __init__(self, input_size, output_size):
super().__init__()
self.linear = nn.Linear(input_size, output_size)
def forward(self, x, n_wins=None):
x = x[torch.arange(x.shape[0]), n_wins.type(torch.long)-1]
x = self.linear(x)
return x
class PoolAtt(torch.nn.Module):
'''
PoolAtt: Attention-Pooling module.
'''
def __init__(self, d_input, output_size):
super().__init__()
self.linear1 = nn.Linear(d_input, 1)
self.linear2 = nn.Linear(d_input, output_size)
def forward(self, x, n_wins):
att = self.linear1(x)
att = att.transpose(2,1)
mask = torch.arange(att.shape[2])[None, :] < n_wins[:, None].to('cpu').to(torch.long)
att[~mask.unsqueeze(1)] = float("-Inf")
att = F.softmax(att, dim=2)
x = torch.bmm(att, x)
x = x.squeeze(1)
x = self.linear2(x)
return x
class PoolAttFF(torch.nn.Module):
'''
    PoolAttFF: Attention-Pooling module with additional feed-forward network.
'''
def __init__(self, d_input, output_size, h, dropout=0.1):
super().__init__()
self.linear1 = nn.Linear(d_input, h)
self.linear2 = nn.Linear(h, 1)
self.linear3 = nn.Linear(d_input, output_size)
self.activation = F.relu
self.dropout = nn.Dropout(dropout)
def forward(self, x, n_wins):
att = self.linear2(self.dropout(self.activation(self.linear1(x))))
att = att.transpose(2,1)
mask = torch.arange(att.shape[2])[None, :] < n_wins[:, None].to('cpu').to(torch.long)
att[~mask.unsqueeze(1)] = float("-Inf")
att = F.softmax(att, dim=2)
x = torch.bmm(att, x)
x = x.squeeze(1)
x = self.linear3(x)
return x
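# --- Usage sketch (added, not part of the original NISQA code) ---------------
# Attention-pooling collapses the time axis into a single weighted average
# before the final linear layer; padded steps are masked with -Inf so they
# receive zero attention weight.
def _example_pool_att_ff():
    pool = PoolAttFF(d_input=64, output_size=1, h=128, dropout=0.1)
    pool.eval()
    x = torch.rand(2, 10, 64)
    n_wins = torch.tensor([10, 7])
    with torch.no_grad():
        out = pool(x, n_wins)  # -> shape (2, 1)
    return out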
class PoolAvg(torch.nn.Module):
'''
    PoolAvg: Average pooling that considers masked time-steps.
'''
def __init__(self, d_input, output_size):
super().__init__()
self.linear = nn.Linear(d_input, output_size)
def forward(self, x, n_wins):
mask = torch.arange(x.shape[1])[None, :] < n_wins[:, None].to('cpu').to(torch.long)
mask = ~mask.unsqueeze(2).to(x.device)
x.masked_fill_(mask, 0)
x = torch.div(x.sum(1), n_wins.unsqueeze(1))
x = self.linear(x)
return x
class PoolMax(torch.nn.Module):
'''
    PoolMax: Max-pooling that considers masked time-steps.
'''
def __init__(self, d_input, output_size):
super().__init__()
self.linear = nn.Linear(d_input, output_size)
def forward(self, x, n_wins):
mask = torch.arange(x.shape[1])[None, :] < n_wins[:, None].to('cpu').to(torch.long)
mask = ~mask.unsqueeze(2).to(x.device)
x.masked_fill_(mask, float("-Inf"))
x = x.max(1)[0]
x = self.linear(x)
return x
#%% Alignment
class Alignment(torch.nn.Module):
'''
Alignment: Alignment module for the double-ended NISQA_DE model. It
supports five different alignment mechanisms.
'''
def __init__(self,
att_method,
apply_att_method,
q_dim=None,
y_dim=None,
):
super().__init__()
# Attention method --------------------------------------------------------
if att_method=='bahd':
self.att = AttBahdanau(
q_dim=q_dim,
y_dim=y_dim)
elif att_method=='luong':
self.att = AttLuong(
q_dim=q_dim,
y_dim=y_dim)
elif att_method=='dot':
self.att = AttDot()
elif att_method=='cosine':
self.att = AttCosine()
elif att_method=='distance':
self.att = AttDistance()
elif (att_method=='none') or (att_method is None):
self.att = None
else:
raise NotImplementedError
# Apply method ----------------------------------------------------------
if apply_att_method=='soft':
self.apply_att = ApplySoftAttention()
elif apply_att_method=='hard':
self.apply_att = ApplyHardAttention()
else:
raise NotImplementedError
def _mask_attention(self, att, y, n_wins):
mask = torch.arange(att.shape[2])[None, :] < n_wins[:, None].to('cpu').to(torch.long)
mask = mask.unsqueeze(1).expand_as(att)
att[~mask] = float("-Inf")
def forward(self, query, y, n_wins_y):
if self.att is not None:
att_score, sim = self.att(query, y)
self._mask_attention(att_score, y, n_wins_y)
att_score = F.softmax(att_score, dim=2)
y = self.apply_att(y, att_score)
return y
class AttDot(torch.nn.Module):
'''
AttDot: Dot attention that can be used by the Alignment module.
'''
def __init__(self):
super().__init__()
def forward(self, query, y):
att = torch.bmm(query, y.transpose(2,1))
sim = att.max(2)[0].unsqueeze(1)
return att, sim
class AttCosine(torch.nn.Module):
'''
AttCosine: Cosine attention that can be used by the Alignment module.
'''
def __init__(self):
super().__init__()
self.pdist = nn.CosineSimilarity(dim=3)
def forward(self, query, y):
att = self.pdist(query.unsqueeze(2), y.unsqueeze(1))
sim = att.max(2)[0].unsqueeze(1)
return att, sim
class AttDistance(torch.nn.Module):
'''
AttDistance: Distance attention that can be used by the Alignment module.
'''
def __init__(self, dist_norm=1, weight_norm=1):
super().__init__()
self.dist_norm = dist_norm
self.weight_norm = weight_norm
def forward(self, query, y):
att = (query.unsqueeze(1)-y.unsqueeze(2)).abs().pow(self.dist_norm)
att = att.mean(dim=3).pow(self.weight_norm)
att = - att.transpose(2,1)
sim = att.max(2)[0].unsqueeze(1)
return att, sim
class AttBahdanau(torch.nn.Module):
'''
AttBahdanau: Attention according to Bahdanau that can be used by the
Alignment module.
'''
def __init__(self, q_dim, y_dim, att_dim=128):
super().__init__()
self.q_dim = q_dim
self.y_dim = y_dim
self.att_dim = att_dim
self.Wq = nn.Linear(self.q_dim, self.att_dim)
self.Wy = nn.Linear(self.y_dim, self.att_dim)
self.v = nn.Linear(self.att_dim, 1)
def forward(self, query, y):
att = torch.tanh( self.Wq(query).unsqueeze(1) + self.Wy(y).unsqueeze(2) )
att = self.v(att).squeeze(3).transpose(2,1)
sim = att.max(2)[0].unsqueeze(1)
return att, sim
class AttLuong(torch.nn.Module):
'''
AttLuong: Attention according to Luong that can be used by the
Alignment module.
'''
def __init__(self, q_dim, y_dim):
super().__init__()
self.q_dim = q_dim
self.y_dim = y_dim
self.W = nn.Linear(self.y_dim, self.q_dim)
def forward(self, query, y):
att = torch.bmm(query, self.W(y).transpose(2,1))
sim = att.max(2)[0].unsqueeze(1)
return att, sim
class ApplyHardAttention(torch.nn.Module):
'''
ApplyHardAttention: Apply hard attention for the purpose of time-alignment.
'''
def __init__(self):
super().__init__()
def forward(self, y, att):
self.idx = att.argmax(2)
y = y[torch.arange(y.shape[0]).unsqueeze(-1), self.idx]
return y
class ApplySoftAttention(torch.nn.Module):
'''
ApplySoftAttention: Apply soft attention for the purpose of time-alignment.
'''
def __init__(self):
super().__init__()
def forward(self, y, att):
y = torch.bmm(att, y)
return y
class Fusion(torch.nn.Module):
'''
    Fusion: Used by the double-ended NISQA_DE model to fuse the degraded and
    reference features.
'''
def __init__(self, fuse_dim=None, in_feat=None, fuse=None):
super().__init__()
self.fuse_dim = fuse_dim
self.fuse = fuse
if self.fuse=='x/y/-':
self.fan_out = 3*in_feat
elif self.fuse=='+/-':
self.fan_out = 2*in_feat
elif self.fuse=='x/y':
self.fan_out = 2*in_feat
else:
raise NotImplementedError
if self.fuse_dim:
self.lin_fusion = nn.Linear(self.fan_out, self.fuse_dim)
self.fan_out = fuse_dim
def forward(self, x, y):
if self.fuse=='x/y/-':
x = torch.cat((x, y, x-y), 2)
elif self.fuse=='+/-':
x = torch.cat((x+y, x-y), 2)
elif self.fuse=='x/y':
x = torch.cat((x, y), 2)
else:
raise NotImplementedError
if self.fuse_dim:
x = self.lin_fusion(x)
return x
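# --- Usage sketch (added, not part of the original NISQA code) ---------------
# Example of the 'x/y/-' fusion mode: the degraded features, the aligned
# reference features, and their difference are concatenated along the feature
# axis, so fan_out is 3 * in_feat (optionally projected down to fuse_dim).
def _example_fusion():
    fusion = Fusion(in_feat=64, fuse_dim=None, fuse='x/y/-')
    x = torch.rand(2, 10, 64)
    y = torch.rand(2, 10, 64)
    fused = fusion(x, y)  # -> shape (2, 10, 192)
    return fused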
#%% Evaluation
#
def predict_mos(model, ds, bs, dev, num_workers=0):
'''
predict_mos: predicts MOS of the given dataset with given model. Used for
NISQA and NISQA_DE model.
'''
dl = DataLoader(ds,
batch_size=bs,
shuffle=False,
drop_last=False,
pin_memory=False,
num_workers=num_workers)
model.to(dev)
model.eval()
with torch.no_grad():
import time
start = time.time()
y_hat_list = [ [model(xb.to(dev), n_wins.to(dev)).cpu().numpy(), yb.cpu().numpy()] for xb, yb, (idx, n_wins), yb_std, yb_votes in dl]
end = time.time()
proc_time = end - start
print('total processing time: %.3f sec' % proc_time )
yy = np.concatenate( y_hat_list, axis=1 )
y_hat = yy[0,:,0].reshape(-1,1)
y = yy[1,:,0].reshape(-1,1)
ds.df['mos_pred'] = y_hat.astype(dtype=float)
return y_hat, y
#
def predict_mos_multitask(model, ds, bs, dev, num_workers=0):
'''
predict_mos_multitask: predicts MOS and MOS std of the given dataset with given model.
'''
dl = DataLoader(ds,
batch_size=bs,
shuffle=False,
drop_last=False,
pin_memory=False,
num_workers=num_workers)
model.to(dev)
model.eval()
with torch.no_grad():
import time
start = time.time()
y_hat_list = [ [model(xb.to(dev), n_wins.to(dev)).cpu().numpy(), np.concatenate((yb, yb_std), axis=1)] for xb, yb, (idx, n_wins), yb_std, yb_votes in dl]
end = time.time()
proc_time = end - start
print('total processing time: %.3f sec' % proc_time )
yy = np.concatenate( y_hat_list, axis=1 )
y_hat = yy[0,:,:]
y = yy[1,:,:]
ds.df['mos_pred'] = y_hat[:,0].reshape(-1,1)
ds.df['std_pred'] = y_hat[:,1].reshape(-1,1)
return y_hat, y
#
def predict_mos_multifeature(model, ds, bs, dev, num_workers=0):
'''
    predict_mos_multifeature: predicts MOS of the given dataset with the given
    model, using Mel-spec features concatenated with SSL features.
'''
dl = DataLoader(ds,
batch_size=bs,
shuffle=False,
drop_last=False,
pin_memory=False,
num_workers=num_workers)
model.to(dev)
model.eval()
with torch.no_grad():
y_hat_list = [ [model(torch.cat((xb, ssl), -2).to(dev), n_wins.to(dev)).cpu().numpy(), yb.cpu().numpy()] for xb, ssl, yb, (idx, n_wins), yb_std, yb_votes in dl]
yy = np.concatenate( y_hat_list, axis=1 )
y_hat = yy[0,:,0].reshape(-1,1)
y = yy[1,:,0].reshape(-1,1)
ds.df['mos_pred'] = y_hat.astype(dtype=float)
return y_hat, y
#
def predict_mos_multiresolution(model_1, model_2, model_3, ds, bs, dev, num_workers=0):
'''
    predict_mos_multiresolution: predicts MOS of the given dataset by averaging
    the predictions of three models that use Mel-specs with different resolutions.
'''
dl = DataLoader(ds,
batch_size=bs,
shuffle=False,
drop_last=False,
pin_memory=False,
num_workers=num_workers)
model_1.to(dev)
model_2.to(dev)
model_3.to(dev)
model_1.eval()
model_2.eval()
model_3.eval()
with torch.no_grad():
y_hat_list = [ [(model_1(xb1.to(dev), n_wins1.to(dev)).cpu().numpy() + model_2(xb2.to(dev), n_wins2.to(dev)).cpu().numpy() + model_3(xb3.to(dev), n_wins3.to(dev)).cpu().numpy())/3, yb.cpu().numpy()] for xb1, xb2, xb3, yb, (idx, n_wins1, n_wins2, n_wins3), yb_std, yb_votes in dl]
yy = np.concatenate( y_hat_list, axis=1 )
y_hat = yy[0,:,0].reshape(-1,1)
y = yy[1,:,0].reshape(-1,1)
ds.df['mos_pred'] = y_hat.astype(dtype=float)
return y_hat, y
#
def predict_mos_multiscale(model_1, model_2, model_3, ds, bs, dev, num_workers=0):
'''
    predict_mos_multiscale: predicts MOS of the given dataset by combining the
    predictions of up to three models, depending on the sample rate of each file.
'''
dl = DataLoader(ds,
batch_size=bs,
shuffle=False,
drop_last=False,
pin_memory=False,
num_workers=num_workers)
model_1.to(dev)
model_2.to(dev)
model_3.to(dev)
model_1.eval()
model_2.eval()
model_3.eval()
y_hat_list = []
with torch.no_grad():
for sr, xb1, xb2, xb3, yb, (idx, n_wins1, n_wins2, n_wins3), yb_std, yb_votes in dl:
if sr == 48000 or sr == 44100:
y_hat_list.append([(model_1(xb1.to(dev), n_wins1.to(dev)).cpu().numpy() + model_2(xb2.to(dev), n_wins2.to(dev)).cpu().numpy() + model_3(xb3.to(dev), n_wins3.to(dev)).cpu().numpy())/3, yb.cpu().numpy()])
elif sr == 16000 or sr == 32000:
y_hat_list.append([(model_2(xb2.to(dev), n_wins2.to(dev)).cpu().numpy() + model_3(xb3.to(dev), n_wins3.to(dev)).cpu().numpy())/2, yb.cpu().numpy()])
else:
y_hat_list.append([(model_3(xb3.to(dev), n_wins3.to(dev)).cpu().numpy()), yb.cpu().numpy()])
yy = np.concatenate( y_hat_list, axis=1 )
y_hat = yy[0,:,0].reshape(-1,1)
y = yy[1,:,0].reshape(-1,1)
ds.df['mos_pred'] = y_hat.astype(dtype=float)
return y_hat, y
#
def predict_dim(model, ds, bs, dev, num_workers=0): #
'''
predict_dim: predicts MOS and dimensions of the given dataset with given
model. Used for NISQA_DIM model.
'''
dl = DataLoader(ds,
batch_size=bs,
shuffle=False,
drop_last=False,
pin_memory=False,
num_workers=num_workers)
model.to(dev)
model.eval()
with torch.no_grad():
y_hat_list = [ [model(xb.to(dev), n_wins.to(dev)).cpu().numpy(), yb.cpu().numpy()] for xb, yb, (idx, n_wins) in dl]
yy = np.concatenate( y_hat_list, axis=1 )
y_hat = yy[0,:,:]
y = yy[1,:,:]
ds.df['mos_pred'] = y_hat[:,0].reshape(-1,1)
ds.df['noi_pred'] = y_hat[:,1].reshape(-1,1)
ds.df['dis_pred'] = y_hat[:,2].reshape(-1,1)
ds.df['col_pred'] = y_hat[:,3].reshape(-1,1)
ds.df['loud_pred'] = y_hat[:,4].reshape(-1,1)
return y_hat, y
def is_const(x):
if np.linalg.norm(x - np.mean(x)) < 1e-13 * np.abs(np.mean(x)):
return True
elif np.all(x==x[0]):
return True
else:
return False
#
def calc_eval_metrics(y, y_hat, y_hat_map=None, d=None, ci=None):
'''
    Calculate Pearson's and Spearman's correlation, RMSE, mapped RMSE, and mapped RMSE*.
See ITU-T P.1401 for details on RMSE*.
'''
r = {
'pcc': np.nan,
'srcc': np.nan,
'rmse': np.nan,
'rmse_map': np.nan,
'rmse_star_map': np.nan,
}
if is_const(y_hat) or any(np.isnan(y)):
r['pcc'] = np.nan
else:
r['pcc'] = pearsonr(y, y_hat)[0]
if is_const(y_hat) or any(np.isnan(y)):
r['srcc'] = np.nan
else:
r['srcc'] = spearmanr(y, y_hat)[0]
r['rmse'] = calc_rmse(y, y_hat)
if y_hat_map is not None:
r['rmse_map'] = calc_rmse(y, y_hat_map, d=d)
if ci is not None:
r['rmse_star_map'] = calc_rmse_star(y, y_hat_map, ci, d)[0]
return r
def calc_rmse(y_true, y_pred, d=0):
if d==0:
rmse = np.sqrt(np.mean(np.square(y_true-y_pred)))
else:
N = y_true.shape[0]
if (N-d)<1:
rmse = np.nan
else:
rmse = np.sqrt( 1/(N-d) * np.sum( np.square(y_true-y_pred) ) ) # Eq (7-29) P.1401
return rmse
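# --- Worked example (added, not part of the original evaluation code) --------
# With d degrees of freedom removed, calc_rmse implements Eq. (7-29) of
# ITU-T P.1401: sqrt( 1/(N-d) * sum((y_true - y_pred)^2) ); d=0 gives the
# plain RMSE. The values below are made up for illustration.
def _example_calc_rmse():
    y_true = np.array([3.0, 4.0, 2.5, 3.5])
    y_pred = np.array([3.2, 3.7, 2.4, 3.9])
    rmse = calc_rmse(y_true, y_pred)            # plain RMSE, d = 0
    rmse_map = calc_rmse(y_true, y_pred, d=1)   # e.g. after a first-order mapping
    return rmse, rmse_map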
def calc_rmse_star(mos_sub, mos_obj, ci, d):
N = mos_sub.shape[0]
error = mos_sub-mos_obj
if np.isnan(ci).any():
p_error = np.nan
rmse_star = np.nan
else:
p_error = (abs(error)-ci).clip(min=0) # Eq (7-27) P.1401
if (N-d)<1:
rmse_star = np.nan
else:
rmse_star = np.sqrt( 1/(N-d) * sum(p_error**2) ) # Eq (7-29) P.1401
return rmse_star, p_error, error
def calc_mapped(x, b):
N = x.shape[0]
order = b.shape[0]-1
A = np.zeros([N,order+1])
for i in range(order+1):
A[:,i] = x**(i)
return A @ b
def fit_first_order(y_con, y_con_hat):
A = np.vstack([np.ones(len(y_con_hat)), y_con_hat]).T
b = np.linalg.lstsq(A, y_con, rcond=None)[0]
return b
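# --- Worked example (added, not part of the original evaluation code) --------
# fit_first_order() returns the polynomial coefficients b (intercept first),
# which calc_mapped() then applies to the predictions. The values below are
# made up for illustration.
def _example_first_order_mapping():
    y = np.array([2.0, 3.0, 4.0, 4.5])        # subjective MOS
    y_hat = np.array([2.2, 2.9, 4.2, 4.4])    # predicted MOS
    b = fit_first_order(y, y_hat)             # least-squares fit, b.shape == (2,)
    y_hat_map = calc_mapped(y_hat, b)         # mapped predictions
    return b, y_hat_map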
def fit_second_order(y_con, y_con_hat):
A = np.vstack([np.ones(len(y_con_hat)), y_con_hat, y_con_hat**2]).T
b = np.linalg.lstsq(A, y_con, rcond=None)[0]
return b
def fit_third_order(y_con, y_con_hat):
A = np.vstack([np.ones(len(y_con_hat)), y_con_hat, y_con_hat**2, y_con_hat**3]).T
b = np.linalg.lstsq(A, y_con, rcond=None)[0]
p = np.poly1d(np.flipud(b))
p2 = np.polyder(p)
rr = np.roots(p2)
r = rr[np.imag(rr)==0]
monotonic = all( np.logical_or(r>max(y_con_hat),r<min(y_con_hat)) )
if monotonic==False:
print('Not monotonic!!!')
return b
def fit_monotonic_third_order(
dfile_db,
dcon_db=None,
pred=None,
target_mos=None,
target_ci=None,
mapping=None):
'''
    Fits a third-order polynomial constrained to be monotonically increasing.
    This function may not return an optimal fit.
'''
y = dfile_db[target_mos].to_numpy()
y_hat = dfile_db[pred].to_numpy()
if dcon_db is None:
if target_ci in dfile_db:
ci = dfile_db[target_ci].to_numpy()
else:
ci = 0
else:
y_con = dcon_db[target_mos].to_numpy()
if target_ci in dcon_db:
ci = dcon_db[target_ci].to_numpy()
else:
ci = 0
x = y_hat
y_hat_min = min(y_hat) - 0.01
y_hat_max = max(y_hat) + 0.01
def polynomial(p, x):
return p[0]+p[1]*x+p[2]*x**2+p[3]*x**3
def constraint_2nd_der(p):
return 2*p[2]+6*p[3]*x
def constraint_1st_der(p):
x = np.arange(y_hat_min, y_hat_max, 0.1)
return p[1]+2*p[2]*x+3*p[3]*x**2
def objective_con(p):
x_map = polynomial(p, x)
dfile_db['x_map'] = x_map
x_map_con = dfile_db.groupby('con').mean().x_map.to_numpy()
err = x_map_con-y_con
if mapping=='pError':
p_err = (abs(err)-ci).clip(min=0)
return (p_err**2).sum()
elif mapping=='error':
return (err**2).sum()
else:
raise NotImplementedError
def objective_file(p):
x_map = polynomial(p, x)
err = x_map-y
if mapping=='pError':
p_err = (abs(err)-ci).clip(min=0)
return (p_err**2).sum()
elif mapping=='error':
return (err**2).sum()
else:
raise NotImplementedError
cons = dict(type='ineq', fun=constraint_1st_der)
if dcon_db is None:
res = minimize(
objective_file,
x0=np.array([0., 1., 0., 0.]),
method='SLSQP',
constraints=cons,
)
else:
res = minimize(
objective_con,
x0=np.array([0., 1., 0., 0.]),
method='SLSQP',
constraints=cons,
)
b = res.x
return b
def calc_mapping(
dfile_db,
mapping=None,
dcon_db=None,
target_mos=None,
target_ci=None,
pred=None,
):
'''
Computes mapping between subjective and predicted MOS.
'''
if dcon_db is not None:
y = dcon_db[target_mos].to_numpy()
y_hat = dfile_db.groupby('con').mean().get(pred).to_numpy()
else:
y = dfile_db[target_mos].to_numpy()
y_hat = dfile_db[pred].to_numpy()
if mapping==None:
b = np.array([0,1,0,0])
d_map = 0
elif mapping=='first_order':
b = fit_first_order(y, y_hat)
d_map = 1
elif mapping=='second_order':
b = fit_second_order(y, y_hat)
d_map = 3
elif mapping=='third_order_not_monotonic':
b = fit_third_order(y, y_hat)
d_map = 4
elif mapping=='third_order':
b = fit_monotonic_third_order(
dfile_db,
dcon_db=dcon_db,
pred=pred,
target_mos=target_mos,
target_ci=target_ci,
mapping='error',
)
d_map = 4
else:
raise NotImplementedError
return b, d_map
def eval_results(
df,
dcon=None,
target_mos = 'mos',
target_ci = 'mos_ci',
pred = 'mos_pred',
mapping = None,
do_print = False,
do_plot = False
):
'''
Evaluates a trained model on given dataset.
'''
# Loop through databases
db_results_df = []
df['y_hat_map'] = np.nan
for db_name in df.database.astype("category").cat.categories:
df_db = df.loc[df.database==db_name]
if dcon is not None:
dcon_db = dcon.loc[dcon.database==db_name]
else:
dcon_db = None
# per file -----------------------------------------------------------
y = df_db[target_mos].to_numpy()
if np.isnan(y).any():
r = {'pcc': np.nan,'srcc': np.nan,'rmse': np.nan, 'rmse_map': np.nan}
else:
y_hat = df_db[pred].to_numpy()
b, d = calc_mapping(
df_db,
mapping=mapping,
target_mos=target_mos,
target_ci=target_ci,
pred=pred
)
y_hat_map = calc_mapped(y_hat, b)
r = calc_eval_metrics(y, y_hat, y_hat_map=y_hat_map, d=d)
r.pop('rmse_star_map')
r = {f'{k}_file': v for k, v in r.items()}
# per con ------------------------------------------------------------
r_con = {'pcc': np.nan,'srcc': np.nan,'rmse': np.nan, 'rmse_map': np.nan}
if (dcon_db is not None) and ('con' in df_db):
y_con = dcon_db[target_mos].to_numpy()
y_con_hat = df_db.groupby('con').mean().get(pred).to_numpy()
if not np.isnan(y_con).any():
if target_ci in dcon_db:
ci_con = dcon_db[target_ci].to_numpy()
else:
ci_con = None
b_con, d = calc_mapping(
df_db,
dcon_db=dcon_db,
mapping=mapping,
target_mos=target_mos,
target_ci=target_ci,
pred=pred
)
df_db['y_hat_map'] = calc_mapped(y_hat, b_con)
df['y_hat_map'].loc[df.database==db_name] = df_db['y_hat_map']
y_con_hat_map = df_db.groupby('con').mean().get('y_hat_map').to_numpy()
r_con = calc_eval_metrics(y_con, y_con_hat, y_hat_map=y_con_hat_map, d=d, ci=ci_con)
r_con = {f'{k}_con': v for k, v in r_con.items()}
r = {**r, **r_con}
# ---------------------------------------------------------------------
db_results_df.append({'database': db_name, **r})
# Plot ------------------------------------------------------------------
if do_plot and (not np.isnan(y).any()):
xx = np.arange(0, 6, 0.01)
yy = calc_mapped(xx, b)
plt.figure(figsize=(3.0, 3.0), dpi=300)
plt.clf()
plt.plot(y_hat, y, 'o', label='Original data', markersize=2)
plt.plot([0, 5], [0, 5], 'gray')
plt.plot(xx, yy, 'r', label='Fitted line')
plt.axis([1, 5, 1, 5])
plt.gca().set_aspect('equal', adjustable='box')
plt.grid(True)
plt.xticks(np.arange(1, 6))
plt.yticks(np.arange(1, 6))
plt.title(db_name + ' per file')
plt.ylabel('Subjective ' + target_mos.upper())
plt.xlabel('Predicted ' + target_mos.upper())
# plt.savefig('corr_diagram_fr_' + db_name + '.pdf', dpi=300, bbox_inches="tight")
plt.show()
if (dcon_db is not None) and ('con' in df_db):
xx = np.arange(0, 6, 0.01)
yy = calc_mapped(xx, b_con)
plt.figure(figsize=(3.0, 3.0), dpi=300)
plt.clf()
plt.plot(y_con_hat, y_con, 'o', label='Original data', markersize=3)
plt.plot([0, 5], [0, 5], 'gray')
plt.plot(xx, yy, 'r', label='Fitted line')
plt.axis([1, 5, 1, 5])
plt.gca().set_aspect('equal', adjustable='box')
plt.grid(True)
plt.xticks(np.arange(1, 6))
plt.yticks(np.arange(1, 6))
plt.title(db_name + ' per con')
plt.ylabel('Sub ' + target_mos.upper())
plt.xlabel('Pred ' + target_mos.upper())
# plt.savefig(db_name + '.pdf', dpi=300, bbox_inches="tight")
plt.show()
# Print ------------------------------------------------------------------
if do_print and (not np.isnan(y).any()):
if (dcon_db is not None) and ('con' in df_db):
print('%-30s pcc_file: %0.2f, rmse_map_file: %0.2f, pcc_con: %0.2f, rmse_map_con: %0.2f, '
% (db_name+':', r['pcc_file'], r['rmse_map_file'], r['pcc_con'], r['rmse_map_con']))
else:
print('%-30s pcc_file: %0.2f, srcc_file: %0.2f, rmse_map_file: %0.2f'
% (db_name+':', r['pcc_file'], r['srcc_file'], r['rmse_map_file']))
# Save individual database results in DataFrame
db_results_df = pd.DataFrame(db_results_df)
r_average = {}
r_average['pcc_mean_file'] = db_results_df.pcc_file.mean()
r_average['srcc_mean_file'] = db_results_df.srcc_file.mean()
r_average['rmse_mean_file'] = db_results_df.rmse_file.mean()
r_average['rmse_map_mean_file'] = db_results_df.rmse_map_file.mean()
if dcon_db is not None:
r_average['pcc_mean_con'] = db_results_df.pcc_con.mean()
r_average['rmse_mean_con'] = db_results_df.rmse_con.mean()
r_average['rmse_map_mean_con'] = db_results_df.rmse_map_con.mean()
r_average['rmse_star_map_mean_con'] = db_results_df.rmse_star_map_con.mean()
else:
r_average['pcc_mean_con'] = np.nan
r_average['rmse_mean_con'] = np.nan
r_average['rmse_map_mean_con'] = np.nan
r_average['rmse_star_map_mean_con'] = np.nan
# Get overall per file results
y = df[target_mos].to_numpy()
y_hat = df[pred].to_numpy()
r_total_file = calc_eval_metrics(y, y_hat)
r_total_file = {'pcc_all': r_total_file['pcc'], 'srcc_all': r_total_file['srcc'], 'rmse_all': r_total_file['rmse'],}
overall_results = {
**r_total_file,
**r_average
}
return db_results_df, overall_results
#%% Loss
class biasLoss(object):
'''
Bias loss class.
Calculates loss while considering database bias.
'''
def __init__(self, db, anchor_db=None, mapping='first_order', min_r=0.7, loss_weight=0.0, do_print=True):
self.db = db
self.mapping = mapping
self.min_r = min_r
self.anchor_db = anchor_db
self.loss_weight = loss_weight
self.do_print = do_print
self.b = np.zeros((len(db),4))
self.b[:,1] = 1
self.do_update = False
self.apply_bias_loss = True
if (self.min_r is None) or (self.mapping is None):
self.apply_bias_loss = False
def get_loss(self, yb, yb_hat, idx):
if self.apply_bias_loss:
b = torch.tensor(self.b, dtype=torch.float).to(yb_hat.device)
b = b[idx,:]
yb_hat_map = (b[:,0]+b[:,1]*yb_hat[:,0]+b[:,2]*yb_hat[:,0]**2+b[:,3]*yb_hat[:,0]**3).view(-1,1)
loss_bias = self._nan_mse(yb_hat_map, yb)
loss_normal = self._nan_mse(yb_hat, yb)
loss = loss_bias + self.loss_weight * loss_normal
else:
loss = self._nan_mse(yb_hat, yb)
return loss
def update_bias(self, y, y_hat):
if self.apply_bias_loss:
y_hat = y_hat.reshape(-1)
y = y.reshape(-1)
if not self.do_update:
r = pearsonr(y[~np.isnan(y)], y_hat[~np.isnan(y)])[0]
if self.do_print:
                    print('--> bias update: min_r {:0.2f}, pcc {:0.2f}'.format(self.min_r, r))
if r>self.min_r:
self.do_update = True
if self.do_update:
if self.do_print:
print('--> bias updated')
for db_name in self.db.unique():
db_idx = (self.db==db_name).to_numpy().nonzero()
y_hat_db = y_hat[db_idx]
y_db = y[db_idx]
if not np.isnan(y_db).any():
if self.mapping=='first_order':
b_db = self._calc_bias_first_order(y_hat_db, y_db)
else:
raise NotImplementedError
if not db_name==self.anchor_db:
self.b[db_idx,:len(b_db)] = b_db
def _calc_bias_first_order(self, y_hat, y):
A = np.vstack([np.ones(len(y_hat)), y_hat]).T
btmp = np.linalg.lstsq(A, y, rcond=None)[0]
b = np.zeros((4))
b[0:2] = btmp
return b
def _nan_mse(self, y, y_hat):
err = (y-y_hat).view(-1)
idx_not_nan = ~torch.isnan(err)
nan_err = err[idx_not_nan]
return torch.mean(nan_err**2)
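# --- Usage sketch (added, not part of the original training code) ------------
# Typical loop structure: compute the loss with the per-database bias applied,
# then periodically re-estimate the first-order bias from the pooled
# predictions once the correlation threshold min_r is reached. The pandas
# Series "db" (one database name per training sample) and the numeric values
# are assumptions of this sketch.
def _example_bias_loss():
    db = pd.Series(['db_a', 'db_a', 'db_b', 'db_b'])
    criterion = biasLoss(db, mapping='first_order', min_r=0.7, do_print=False)
    yb = torch.tensor([[3.0], [4.0], [2.5], [3.5]])       # subjective MOS
    yb_hat = torch.tensor([[3.1], [3.8], [2.7], [3.4]])   # model predictions
    idx = torch.tensor([0, 1, 2, 3])                      # dataset indices of the batch
    loss = criterion.get_loss(yb, yb_hat, idx)
    criterion.update_bias(yb.numpy().reshape(-1), yb_hat.numpy().reshape(-1))
    return loss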
#%% Early stopping
class earlyStopper(object):
'''
Early stopping class.
    Training is stopped if neither the RMSE nor Pearson's correlation
    improves within "patience" epochs.
'''
def __init__(self, patience):
self.best_rmse = 1e10
self.best_r_p = -1e10
self.cnt = -1
self.patience = patience
self.best = False
def step(self, r):
self.best = False
if r['pcc_mean_file'] > self.best_r_p:
self.best_r_p = r['pcc_mean_file']
self.cnt = -1
if r['rmse_map_mean_file'] < self.best_rmse:
self.best_rmse = r['rmse_map_mean_file']
self.cnt = -1
self.best = True
self.cnt += 1
if self.cnt >= self.patience:
stop_early = True
return stop_early
else:
stop_early = False
return stop_early
class earlyStopper_multitask(object):
'''
Early stopping class for multi-task model.
    Training is stopped if neither the RMSE nor Pearson's correlation
    improves within "patience" epochs.
'''
def __init__(self, patience):
self.best_rmse = 1e10
self.best_rmse_std = 1e10
self.best_r_p = -1e10
self.best_r_p_std = -1e10
self.cnt = -1
self.patience = patience
self.best = False
def step(self, r):
self.best = False
if r['pcc_mean_file'] > self.best_r_p:
self.best_r_p = r['pcc_mean_file']
self.cnt = -1
if r['pcc_mean_file_std'] > self.best_r_p_std:
self.best_r_p_std = r['pcc_mean_file_std']
self.cnt = -1
if r['rmse_map_mean_file'] < self.best_rmse:
self.best_rmse = r['rmse_map_mean_file']
self.cnt = -1
self.best = True
if r['rmse_map_mean_file_std'] < self.best_rmse_std:
self.best_rmse_std = r['rmse_map_mean_file_std']
self.cnt = -1
self.cnt += 1
if self.cnt >= self.patience:
stop_early = True
return stop_early
else:
stop_early = False
return stop_early
class earlyStopper_dim(object):
'''
Early stopping class for dimension model.
    Training is stopped if neither the RMSE nor Pearson's correlation
    improves within "patience" epochs.
'''
def __init__(self, patience):
self.best_rmse = 1e10
self.best_rmse_noi = 1e10
self.best_rmse_col = 1e10
self.best_rmse_dis = 1e10
self.best_rmse_loud = 1e10
self.best_r_p = -1e10
self.best_r_p_noi = -1e10
self.best_r_p_col = -1e10
self.best_r_p_dis = -1e10
self.best_r_p_loud = -1e10
self.cnt = -1
self.patience = patience
self.best = False
def step(self, r):
self.best = False
if r['pcc_mean_file'] > self.best_r_p:
self.best_r_p = r['pcc_mean_file']
self.cnt = -1
if r['pcc_mean_file_noi'] > self.best_r_p_noi:
self.best_r_p_noi = r['pcc_mean_file_noi']
self.cnt = -1
if r['pcc_mean_file_col'] > self.best_r_p_col:
self.best_r_p_col = r['pcc_mean_file_col']
self.cnt = -1
if r['pcc_mean_file_dis'] > self.best_r_p_dis:
self.best_r_p_dis = r['pcc_mean_file_dis']
self.cnt = -1
if r['pcc_mean_file_loud'] > self.best_r_p_loud:
self.best_r_p_loud = r['pcc_mean_file_loud']
self.cnt = -1
if r['rmse_map_mean_file'] < self.best_rmse:
self.best_rmse = r['rmse_map_mean_file']
self.cnt = -1
self.best = True
if r['rmse_map_mean_file_noi'] < self.best_rmse_noi:
self.best_rmse_noi = r['rmse_map_mean_file_noi']
self.cnt = -1
if r['rmse_map_mean_file_col'] < self.best_rmse_col:
self.best_rmse_col = r['rmse_map_mean_file_col']
self.cnt = -1
if r['rmse_map_mean_file_dis'] < self.best_rmse_dis:
self.best_rmse_dis = r['rmse_map_mean_file_dis']
self.cnt = -1
if r['rmse_map_mean_file_loud'] < self.best_rmse_loud:
self.best_rmse_loud = r['rmse_map_mean_file_loud']
self.cnt = -1
self.cnt += 1
if self.cnt >= self.patience:
stop_early = True
return stop_early
else:
stop_early = False
return stop_early
def get_lr(optimizer):
'''
Get current learning rate from Pytorch optimizer.
'''
for param_group in optimizer.param_groups:
return param_group['lr']
#%% Dataset
#
class SpeechQualityDataset(Dataset):
'''
Dataset for Speech Quality Model.
'''
def __init__(
self,
df,
df_con=None,
data_dir='',
folder_column='',
filename_column='filename',
mos_column='MOS',
seg_length=15,
max_length=None,
to_memory=False,
to_memory_workers=0,
transform=None,
seg_hop_length=1,
ms_n_fft = 1024,
ms_hop_length = 80,
ms_win_length = 170,
ms_n_mels=32,
ms_sr=48e3,
ms_fmax=16e3,
ms_channel=None,
double_ended=False,
filename_column_ref=None,
dim=False,
mos_std_column=None,
votes_column=None,
task_type = 0,
):
self.df = df
self.df_con = df_con
self.data_dir = data_dir
self.folder_column = folder_column
self.filename_column = filename_column
self.filename_column_ref = filename_column_ref
self.mos_column = mos_column
self.seg_length = seg_length
self.seg_hop_length = seg_hop_length
self.max_length = max_length
self.transform = transform
        self.to_memory_workers = 0  # NOTE: the to_memory_workers argument is ignored here; specs are loaded in the main process
self.ms_n_fft = ms_n_fft
self.ms_hop_length = ms_hop_length
self.ms_win_length = ms_win_length
self.ms_n_mels = ms_n_mels
self.ms_sr = ms_sr
self.ms_fmax = ms_fmax
self.ms_channel = ms_channel
self.double_ended = double_ended
self.dim = dim
self.mos_std_column = mos_std_column
self.votes_column = votes_column
self.task_type = task_type
        # Specs are always preloaded to memory here; _to_memory() sets
        # self.to_memory to True once loading is finished.
        self.to_memory = False
        self._to_memory()
def _to_memory_multi_helper(self, idx):
return [self._load_spec(i) for i in idx]
def _to_memory(self):
if self.to_memory_workers==0:
if self.task_type == 0:
self.mem_list = [self._load_spec(idx) for idx in tqdm(range(len(self)))]
elif self.task_type == 1:
self.mem_list = [self._load_spec_multi_task(idx) for idx in tqdm(range(len(self)))]
elif self.task_type == 2:
self.mem_list = [self._load_spec_multi_feature(idx) for idx in tqdm(range(len(self)))]
elif self.task_type == 3:
self.mem_list = [self._load_spec_multi_resolution(idx) for idx in tqdm(range(len(self)))]
elif self.task_type == 4:
self.mem_list = [self._load_spec_multi_scale(idx) for idx in tqdm(range(len(self)))]
else:
buffer_size = 128
idx = np.arange(len(self))
n_bufs = int(len(idx)/buffer_size)
idx = idx[:buffer_size*n_bufs].reshape(-1,buffer_size).tolist() + idx[buffer_size*n_bufs:].reshape(1,-1).tolist()
pool = multiprocessing.Pool(processes=self.to_memory_workers)
mem_list = []
for out in tqdm(pool.imap(self._to_memory_multi_helper, idx), total=len(idx)):
mem_list = mem_list + out
self.mem_list = mem_list
pool.terminate()
pool.join()
self.to_memory=True
def _load_spec(self, index):
# Load spec
file_path = os.path.join(self.data_dir, self.df[self.filename_column].iloc[index])
feature_path = file_path[:-4] + '_mel80_center.npy'
try:
spec = np.load(feature_path)
except:
spec = get_librosa_melspec(
file_path,
sr = self.ms_sr,
n_fft=self.ms_n_fft,
hop_length=self.ms_hop_length,
win_length=self.ms_win_length,
n_mels=self.ms_n_mels,
fmax=self.ms_fmax,
ms_channel=self.ms_channel
)
np.save(feature_path, spec)
return spec
def _load_spec_multi_task(self, index):
# Load spec
file_path = os.path.join(self.data_dir, self.df[self.filename_column].iloc[index])
feature_path = file_path[:-4] + '_mel48.npy'
try:
spec = np.load(feature_path)
except:
spec = get_librosa_melspec(
file_path,
sr = self.ms_sr,
n_fft=self.ms_n_fft,
hop_length=self.ms_hop_length,
win_length=self.ms_win_length,
n_mels=self.ms_n_mels,
fmax=self.ms_fmax,
ms_channel=self.ms_channel
)
np.save(feature_path, spec)
return spec
def _load_spec_multi_feature(self, index):
# Load spec
file_path = os.path.join(self.data_dir, self.df[self.filename_column].iloc[index])
feature_path = file_path[:-4] + '_mel80.npy'
try:
spec = np.load(feature_path)
except:
source_sr = sf.info(file_path).samplerate  # native sample rate, needed for the 40 ms FFT window below
spec = get_librosa_melspec(
file_path,
sr=None,
n_fft=int(source_sr*0.04),
hop_length=0.02,
win_length=0.04,
n_mels=80,
fmax=20000,
ms_channel=None,
)
np.save(feature_path, spec)
feature_path = file_path[:-4] + '_xlsr16k.npy'
try:
features = np.load(feature_path)
except:
signal, source_sr = sf.read(file_path)
sr = 16000
singal_16k = lb.resample(signal, orig_sr=source_sr, target_sr=sr)
F = np.reshape(singal_16k,(1,singal_16k.shape[0]))
F = torch.from_numpy(F).float().to("cpu")
features = self.model_ssl(F, features_only=True, mask=False)['x']
features = features.detach().numpy()
features = features.squeeze(0)
features = features.transpose(1,0)
np.save(feature_path, features)
frames = min(features.shape[1], spec.shape[1])
features = features[:,0:frames]
spec = spec[:,0:frames]
assert features.shape[1] == spec.shape[1], "ssl feature frames not equal to spectrum feature frames"
# import pdb; pdb.set_trace()
return spec, features
def _load_spec_multi_resolution(self, index):
# Load spec
file_path = os.path.join(self.data_dir, self.df[self.filename_column].iloc[index])
feature_path = file_path[:-4] + '_mr_2_10.npy'
try:
spec1 = np.load(feature_path)
except:
spec1 = get_librosa_melspec(
file_path,
sr=None,
n_fft=4096,
hop_length=0.002,
win_length=0.010,
n_mels=80,
fmax=20000,
ms_channel=None,
)
np.save(feature_path, spec1)
feature_path = file_path[:-4] + '_mr_5_25.npy'
try:
spec2 = np.load(feature_path)
except:
spec2 = get_librosa_melspec(
file_path,
sr=None,
n_fft=4096,
hop_length=0.005,
win_length=0.025,
n_mels=80,
fmax=20000,
ms_channel=None,
)
np.save(feature_path, spec2)
feature_path = file_path[:-4] + '_mr_10_50.npy'
try:
spec3 = np.load(feature_path)
except:
spec3 = get_librosa_melspec(
file_path,
sr=None,
n_fft=4096,
hop_length=0.010,
win_length=0.050,
n_mels=80,
fmax=20000,
ms_channel=None,
)
np.save(feature_path, spec3)
# import pdb; pdb.set_trace()
return spec1, spec2, spec3
def _load_spec_multi_scale(self, index):
# Load spec
file_path = os.path.join(self.data_dir, self.df[self.filename_column].iloc[index])
feature_path = file_path[:-4] + '_sr.npy'
sr = np.load(feature_path)
if sr == 48000 or sr == 44100:
feature_path = file_path[:-4] + '_ms_48k.npy'
try:
spec1 = np.load(feature_path)
except:
spec1 = get_librosa_melspec(
file_path,
sr=48000,
n_fft=4096,
hop_length=0.01,
win_length=0.02,
n_mels=80,
fmax=20000,
ms_channel=None,
)
np.save(feature_path, spec1)
feature_path = file_path[:-4] + '_ms_16k.npy'
try:
spec2 = np.load(feature_path)
except:
spec2 = get_librosa_melspec(
file_path,
sr=16000,
n_fft=4096,
hop_length=0.01,
win_length=0.02,
n_mels=80,
fmax=7200,
ms_channel=None,
)
np.save(feature_path, spec2)
feature_path = file_path[:-4] + '_ms_8k.npy'
try:
spec3 = np.load(feature_path)
except:
spec3 = get_librosa_melspec(
file_path,
sr=8000,
n_fft=4096,
hop_length=0.01,
win_length=0.02,
n_mels=80,
fmax=3600,
ms_channel=None,
)
np.save(feature_path, spec3)
if sr == 16000 or sr == 32000:
spec1 = sr
feature_path = file_path[:-4] + '_ms_16k.npy'
try:
spec2 = np.load(feature_path)
except:
spec2 = get_librosa_melspec(
file_path,
sr=16000,
n_fft=4096,
hop_length=0.01,
win_length=0.02,
n_mels=80,
fmax=7200,
ms_channel=None,
)
np.save(feature_path, spec2)
feature_path = file_path[:-4] + '_ms_8k.npy'
try:
spec3 = np.load(feature_path)
except:
spec3 = get_librosa_melspec(
file_path,
sr=8000,
n_fft=4096,
hop_length=0.01,
win_length=0.02,
n_mels=80,
fmax=3600,
ms_channel=None,
)
np.save(feature_path, spec3)
if sr == 8000:
spec1 = sr
spec2 = sr
feature_path = file_path[:-4] + '_ms_8k.npy'
try:
spec3 = np.load(feature_path)
except:
spec3 = get_librosa_melspec(
file_path,
sr=8000,
n_fft=4096,
hop_length=0.01,
win_length=0.02,
n_mels=80,
fmax=3600,
ms_channel=None,
)
np.save(feature_path, spec3)
# import pdb; pdb.set_trace()
return sr, spec1, spec2, spec3
def __getitem__(self, index):
assert isinstance(index, int), 'index must be integer (no slice)'
# import pdb; pdb.set_trace()
if self.to_memory:
data = self.mem_list[index]
else:
if self.task_type == 0:
data = self._load_spec(index)
elif self.task_type == 1:
data = self._load_spec_multi_task(index)
elif self.task_type == 2:
data = self._load_spec_multi_feature(index)
elif self.task_type == 3:
data = self._load_spec_multi_resolution(index)
elif self.task_type == 4:
data = self._load_spec_multi_scale(index)
# Segment specs
file_path = os.path.join(self.data_dir, self.df[self.filename_column].iloc[index])
# file_path = './test/' + self.df['db'].iloc[index] + '/' + self.df['deg_wav'].iloc[index]
if self.seg_length is not None:
# added 20220307
if self.task_type == 0:
spec = data
x_spec_seg, n_wins = segment_specs(file_path,
spec,
self.seg_length,
self.seg_hop_length,
self.max_length)
elif self.task_type == 1:
spec = data
x_spec_seg, n_wins = segment_specs(file_path,
spec,
self.seg_length,
self.seg_hop_length,
self.max_length)
elif self.task_type == 2:
spec, ssl = data
x_spec_seg, n_wins = segment_specs(file_path,
spec,
self.seg_length,
self.seg_hop_length,
self.max_length)
x_spec_seg_ssl, n_wins = segment_specs(file_path,
ssl,
self.seg_length,
self.seg_hop_length,
self.max_length)
elif self.task_type == 3:
spec1 ,spec2, spec3 = data
x_spec_seg_1, n_wins_1 = segment_specs(file_path,
spec1,
self.seg_length,
self.seg_hop_length,
self.max_length)
x_spec_seg_2, n_wins_2 = segment_specs(file_path,
spec2,
self.seg_length,
self.seg_hop_length,
self.max_length)
x_spec_seg_3, n_wins_3 = segment_specs(file_path,
spec3,
self.seg_length,
self.seg_hop_length,
self.max_length)
elif self.task_type == 4:
sr, spec1 ,spec2, spec3 = data
if sr == 48000 or sr == 44100:
x_spec_seg_1, n_wins_1 = segment_specs(file_path,
spec1,
self.seg_length,
self.seg_hop_length,
self.max_length)
x_spec_seg_2, n_wins_2 = segment_specs(file_path,
spec2,
self.seg_length,
self.seg_hop_length,
self.max_length)
x_spec_seg_3, n_wins_3 = segment_specs(file_path,
spec3,
self.seg_length,
self.seg_hop_length,
self.max_length)
elif sr == 16000 or sr == 32000:
x_spec_seg_2, n_wins_2 = segment_specs(file_path,
spec2,
self.seg_length,
self.seg_hop_length,
self.max_length)
x_spec_seg_3, n_wins_3 = segment_specs(file_path,
spec3,
self.seg_length,
self.seg_hop_length,
self.max_length)
x_spec_seg_1, n_wins_1 = np.zeros(x_spec_seg_2.shape), np.zeros(n_wins_3.shape, dtype=n_wins_3.dtype)
elif sr == 8000:
x_spec_seg_3, n_wins_3 = segment_specs(file_path,
spec3,
self.seg_length,
self.seg_hop_length,
self.max_length)
x_spec_seg_1, n_wins_1 = np.zeros(x_spec_seg_3.shape), np.zeros(n_wins_3.shape, dtype=n_wins_3.dtype)
x_spec_seg_2, n_wins_2 = np.zeros(x_spec_seg_3.shape), np.zeros(n_wins_3.shape, dtype=n_wins_3.dtype)
else:
x_spec_seg = spec
n_wins = spec.shape[1]
if self.max_length is not None:
x_padded = np.zeros((x_spec_seg.shape[0], self.max_length))
x_padded[:,:n_wins] = x_spec_seg
x_spec_seg = np.expand_dims(x_padded.transpose(1,0), axis=(1, 3))
if not torch.is_tensor(x_spec_seg):
x_spec_seg = torch.tensor(x_spec_seg, dtype=torch.float)
# Get MOS (apply NaN in case of prediction only mode)
if self.dim:
if self.mos_column=='predict_only':
y = np.full((5,1), np.nan).reshape(-1).astype('float32')
else:
y_mos = self.df['mos'].iloc[index].reshape(-1).astype('float32')
y_noi = self.df['noi'].iloc[index].reshape(-1).astype('float32')
y_dis = self.df['dis'].iloc[index].reshape(-1).astype('float32')
y_col = self.df['col'].iloc[index].reshape(-1).astype('float32')
y_loud = self.df['loud'].iloc[index].reshape(-1).astype('float32')
y = np.concatenate((y_mos, y_noi, y_dis, y_col, y_loud), axis=0)
else:
if self.mos_column=='predict_only':
y = np.full(1, np.nan).reshape(-1).astype('float32')
y1 = np.full(1, np.nan).reshape(-1).astype('float32')
y2 = np.full(1, np.nan).reshape(-1).astype('float32')
else:
y = self.df[self.mos_column].iloc[index].reshape(-1).astype('float32')
y1 = self.df[self.mos_std_column].iloc[index].reshape(-1).astype('float32')
y2 = self.df[self.votes_column].iloc[index].reshape(-1).astype('int16')
# import pdb; pdb.set_trace()
if self.task_type == 0 or self.task_type == 1 :
return x_spec_seg, y, (index, n_wins), y1, y2
elif self.task_type == 2:
return x_spec_seg, x_spec_seg_ssl, y, (index, n_wins), y1, y2
elif self.task_type == 3:
return x_spec_seg_1, x_spec_seg_2, x_spec_seg_3, y, (index, n_wins_1, n_wins_2, n_wins_3), y1, y2
elif self.task_type == 4 :
if not torch.is_tensor(sr):
sr = torch.tensor(sr)
if not torch.is_tensor(x_spec_seg_1):
x_spec_seg_1 = torch.tensor(x_spec_seg_1, dtype=torch.float32)
if not torch.is_tensor(x_spec_seg_2):
x_spec_seg_2 = torch.tensor(x_spec_seg_2, dtype=torch.float32)
return sr, x_spec_seg_1, x_spec_seg_2, x_spec_seg_3, y, (index, n_wins_1, n_wins_2, n_wins_3), y1, y2
def __len__(self):
return len(self.df)
#%% Spectrograms
def segment_specs(file_path, x, seg_length, seg_hop=1, max_length=None):
'''
Segment a spectrogram into "seg_length" wide spectrogram segments.
Instead of using only the frequency bin of the current time step,
the neighboring bins are included as input to the CNN. For example
for a seg_length of 7, the previous 3 and the following 3 frequency
bins are included.
A spectrogram with input size [H x W] will be segmented to:
[W-(seg_length-1) x C x H x seg_length], where W is the width of the
original mel-spec (corresponding to the length of the speech signal),
H is the height of the mel-spec (corresponding to the number of mel bands),
C is the number of CNN input Channels (always one in our case).
'''
if seg_length % 2 == 0:
raise ValueError('seg_length must be odd! (seg_length={})'.format(seg_length))
if not torch.is_tensor(x):
x = torch.tensor(x)
n_wins = x.shape[1]-(seg_length-1)
# broadcast magic to segment melspec
idx1 = torch.arange(seg_length)
idx2 = torch.arange(n_wins)
idx3 = idx1.unsqueeze(0) + idx2.unsqueeze(1)
x = x.transpose(1,0)[idx3,:].unsqueeze(1).transpose(3,2)
if seg_hop>1:
x = x[::seg_hop,:]
n_wins = int(np.ceil(n_wins/seg_hop))
if max_length is not None:
if max_length < n_wins:
raise ValueError('n_wins {} > max_length {} --- {}. Increase max window length ms_max_segments!'.format(n_wins, max_length, file_path))
x_padded = torch.zeros((max_length, x.shape[1], x.shape[2], x.shape[3]))
x_padded[:n_wins,:] = x
x = x_padded
return x, np.array(n_wins)
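# --- Illustrative sketch (not part of the original module) ---
# segment_specs() turns an [n_mels x n_frames] spectrogram into overlapping CNN input
# windows of width seg_length. A quick shape check on random data:
def _example_segment_specs():
    dummy_spec = np.random.randn(48, 200)   # 48 mel bands, 200 time frames
    x, n_wins = segment_specs('dummy.wav', dummy_spec, seg_length=15)
    # x has shape [186, 1, 48, 15] and n_wins == 200 - (15 - 1) == 186
    return x.shape, int(n_wins)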
def get_librosa_melspec(
file_path,
sr=48e3,
n_fft=1024,
hop_length=80,
win_length=170,
n_mels=32,
fmax=16e3,
ms_channel=None,
):
'''
Calculate mel-spectrograms with Librosa.
'''
# Calc spec
try:
if ms_channel is not None:
y, sr = lb.load(file_path, sr=sr, mono=False)
if len(y.shape)>1:
y = y[ms_channel, :]
else:
y, sr = lb.load(file_path, sr=sr)
except:
raise ValueError('Could not load file {}'.format(file_path))
hop_length = int(sr * hop_length)
win_length = int(sr * win_length)
S = lb.feature.melspectrogram(
y=y,
sr=sr,
S=None,
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
window='hann',
center=True,
pad_mode='reflect',
power=1.0,
n_mels=n_mels,
fmin=0.0,
fmax=fmax,
htk=False,
norm='slaney',
)
spec = lb.core.amplitude_to_db(S, ref=1.0, amin=1e-4, top_db=80.0)
return spec
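# --- Illustrative sketch (not part of the original module; 'example.wav' is a placeholder path) ---
# hop_length and win_length are given in seconds here and converted to samples inside
# the function (0.01 -> 10 ms hop, 0.02 -> 20 ms window at 48 kHz).
def _example_melspec():
    spec = get_librosa_melspec(
        'example.wav',
        sr=48000,
        n_fft=4096,
        hop_length=0.01,
        win_length=0.02,
        n_mels=80,
        fmax=20000,
    )
    return spec.shape   # (80, n_frames), dB-scaled mel spectrogram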
|
AlgorithmLib
|
/AlgorithmLib-4.0.3.tar.gz/AlgorithmLib-4.0.3/algorithmLib/MOS_INFER/NISQA_lib.py
|
NISQA_lib.py
|
import os
import datetime
import pandas as pd; pd.options.mode.chained_assignment=None
import torch
import torch.nn as nn
from MOS_INFER import NISQA_lib as NL
import sys,os
from os import path
class nisqaModel(object):
'''
nisqaModel: Main class that loads the model and the datasets. Contains
the training loop, prediction, and evaluation function.
'''
def __init__(self, args):
self.args = args
self.runinfos = {}
self._getDevice()
self._loadModel()
self._loadDatasets()
self.args['now'] = datetime.datetime.today()
def predict(self):
print('---> Predicting ...')
if self.args['tr_parallel']:
self.model = nn.DataParallel(self.model)
if self.args['dim']==True:
y_val_hat, y_val = NL.predict_dim(
self.model,
self.ds_val,
self.args['tr_bs_val'],
self.dev,
num_workers=self.args['tr_num_workers'])
else:
if self.args['task_type'] == 0:
y_val_hat, y_val = NL.predict_mos(
self.model,
self.ds_val,
self.args['tr_bs_val'],
self.dev,
num_workers=self.args['tr_num_workers'])
elif self.args['task_type'] == 1:
y_val_hat, y_val = NL.predict_mos_multitask(
self.model,
self.ds_val,
self.args['tr_bs_val'],
self.dev,
num_workers=self.args['tr_num_workers'])
elif self.args['task_type'] == 2:
y_val_hat, y_val = NL.predict_mos_multifeature(
self.model,
self.ds_val,
self.args['tr_bs_val'],
self.dev,
num_workers=self.args['tr_num_workers'])
elif self.args['task_type'] == 3:
y_val_hat, y_val = NL.predict_mos_multiresolution(
self.model_1,
self.model_2,
self.model_3,
self.ds_val,
self.args['tr_bs_val'],
self.dev,
num_workers=self.args['tr_num_workers'])
elif self.args['task_type'] == 4:
y_val_hat, y_val = NL.predict_mos_multiscale(
self.model_1,
self.model_2,
self.model_3,
self.ds_val,
self.args['tr_bs_val'],
self.dev,
num_workers=self.args['tr_num_workers'])
# import pdb; pdb.set_trace()
if self.args['output_dir']:
self.ds_val.df['model'] = self.args['name']
self.ds_val.df.to_csv(
os.path.join(self.args['output_dir'], 'test.csv'),
index=False)
# print(self.ds_val.df.to_string(index=False))
if self.args['task_type'] == 1:
r_mos = NL.calc_eval_metrics(y_val[:,0].squeeze(), y_val_hat[:,0].squeeze())
r_std = NL.calc_eval_metrics(y_val[:,1].squeeze(), y_val_hat[:,1].squeeze())
print('mos')
print(r_mos)
print('std')
print(r_std)
else:
r = NL.calc_eval_metrics(y_val.squeeze(), y_val_hat.squeeze())
print(r)
return self.ds_val.df
def _loadDatasets(self):
if self.args['mode']=='predict_file':
self._loadDatasetsFile()
elif self.args['mode']=='predict_dir':
self._loadDatasetsFolder()
elif self.args['mode']=='predict_csv':
self._loadDatasetsCSVpredict()
elif self.args['mode']=='main':
self._loadDatasetsCSV()
else:
raise NotImplementedError('mode not available')
def _loadDatasetsFile(self):
data_dir = os.path.dirname(self.args['deg'])
file_name = os.path.basename(self.args['deg'])
df_val = pd.DataFrame([file_name], columns=['deg'])
# creating Datasets ---------------------------------------------------
self.ds_val = NL.SpeechQualityDataset(
df_val,
df_con=None,
data_dir = data_dir,
filename_column = 'deg',
mos_column = 'predict_only',
seg_length = self.args['ms_seg_length'],
max_length = self.args['ms_max_segments'],
to_memory = None,
to_memory_workers = None,
seg_hop_length = self.args['ms_seg_hop_length'],
transform = None,
ms_n_fft = self.args['ms_n_fft'],
ms_hop_length = self.args['ms_hop_length'],
ms_win_length = self.args['ms_win_length'],
ms_n_mels = self.args['ms_n_mels'],
ms_sr = self.args['ms_sr'],
ms_fmax = self.args['ms_fmax'],
ms_channel = self.args['ms_channel'],
double_ended = self.args['double_ended'],
dim = self.args['dim'],
filename_column_ref = None,
mos_std_column = 'mos_std',
votes_column = 'votes',
task_type = self.args['task_type'],
)
def _loadModel(self):
'''
Loads the Pytorch models with given input arguments.
'''
# if True overwrite input arguments from pretrained model
# import pdb; pdb.set_trace()
if self.args['pretrained_model']:
# if os.path.isabs(self.args['pretrained_model']):
# model_path = os.path.join(self.args['pretrained_model'])
# else:
# model_path = os.path.join(os.getcwd(), self.args['pretrained_model'])
#model_path = os.path.join(sys.prefix, self.args['pretrained_model'])
model_path = sys.prefix + '//'+ self.args['pretrained_model']
print(sys.prefix,model_path)
if self.args['task_type'] == 3 or self.args['task_type'] == 4:
checkpoint = torch.load(model_path[:-4] + '_1.tar', map_location=self.dev)
checkpoint_2 = torch.load(model_path[:-4] + '_2.tar', map_location=self.dev)
checkpoint_3 = torch.load(model_path[:-4] + '_3.tar', map_location=self.dev)
else:
checkpoint = torch.load(model_path, map_location=self.dev)
# update checkpoint arguments with new arguments
checkpoint['args'].update(self.args)
self.args = checkpoint['args']
if self.args['model']=='NISQA_DIM':
self.args['dim'] = True
self.args['csv_mos'] = None # column names hardcoded for dim models
else:
self.args['dim'] = False
if self.args['model']=='NISQA_DE':
self.args['double_ended'] = True
else:
self.args['double_ended'] = False
self.args['csv_ref'] = None
# Load Model
self.model_args = {
'ms_seg_length': self.args['ms_seg_length'],
'ms_n_mels': self.args['ms_n_mels'],
'cnn_model': self.args['cnn_model'],
'cnn_c_out_1': self.args['cnn_c_out_1'],
'cnn_c_out_2': self.args['cnn_c_out_2'],
'cnn_c_out_3': self.args['cnn_c_out_3'],
'cnn_kernel_size': self.args['cnn_kernel_size'],
'cnn_dropout': self.args['cnn_dropout'],
'cnn_pool_1': self.args['cnn_pool_1'],
'cnn_pool_2': self.args['cnn_pool_2'],
'cnn_pool_3': self.args['cnn_pool_3'],
'cnn_fc_out_h': self.args['cnn_fc_out_h'],
'td': self.args['td'],
'td_sa_d_model': self.args['td_sa_d_model'],
'td_sa_nhead': self.args['td_sa_nhead'],
'td_sa_pos_enc': self.args['td_sa_pos_enc'],
'td_sa_num_layers': self.args['td_sa_num_layers'],
'td_sa_h': self.args['td_sa_h'],
'td_sa_dropout': self.args['td_sa_dropout'],
'td_lstm_h': self.args['td_lstm_h'],
'td_lstm_num_layers': self.args['td_lstm_num_layers'],
'td_lstm_dropout': self.args['td_lstm_dropout'],
'td_lstm_bidirectional': self.args['td_lstm_bidirectional'],
'td_2': self.args['td_2'],
'td_2_sa_d_model': self.args['td_2_sa_d_model'],
'td_2_sa_nhead': self.args['td_2_sa_nhead'],
'td_2_sa_pos_enc': self.args['td_2_sa_pos_enc'],
'td_2_sa_num_layers': self.args['td_2_sa_num_layers'],
'td_2_sa_h': self.args['td_2_sa_h'],
'td_2_sa_dropout': self.args['td_2_sa_dropout'],
'td_2_lstm_h': self.args['td_2_lstm_h'],
'td_2_lstm_num_layers': self.args['td_2_lstm_num_layers'],
'td_2_lstm_dropout': self.args['td_2_lstm_dropout'],
'td_2_lstm_bidirectional': self.args['td_2_lstm_bidirectional'],
'pool': self.args['pool'],
'pool_att_h': self.args['pool_att_h'],
'pool_att_dropout': self.args['pool_att_dropout'],
}
if self.args['double_ended']:
self.model_args.update({
'de_align': self.args['de_align'],
'de_align_apply': self.args['de_align_apply'],
'de_fuse_dim': self.args['de_fuse_dim'],
'de_fuse': self.args['de_fuse'],
})
print('Model architecture: ' + self.args['model'])
if self.args['model']=='NISQA':
if self.args['task_type'] == 0:
self.model = NL.NISQA(**self.model_args)
elif self.args['task_type'] == 1:
self.model = NL.NISQA_MULTITASK(**self.model_args)
elif self.args['task_type'] == 2:
self.model = NL.NISQA(**self.model_args)
elif self.args['task_type'] == 3:
self.model_1 = NL.NISQA(**self.model_args)
self.model_2 = NL.NISQA(**self.model_args)
self.model_3 = NL.NISQA(**self.model_args)
elif self.args['task_type'] == 4:
self.model_1 = NL.NISQA(**self.model_args)
self.model_2 = NL.NISQA(**self.model_args)
self.model_3 = NL.NISQA(**self.model_args)
elif self.args['model']=='NISQA_DIM':
self.model = NL.NISQA_DIM(**self.model_args)
elif self.args['model']=='NISQA_DE':
self.model = NL.NISQA_DE(**self.model_args)
else:
raise NotImplementedError('Model not available')
# Load weights if pretrained model is used ------------------------------------
if self.args['pretrained_model']:
if self.args['task_type'] == 3 or self.args['task_type'] == 4:
missing_keys, unexpected_keys = self.model_1.load_state_dict(checkpoint['model_state_dict'], strict=True)
missing_keys, unexpected_keys = self.model_2.load_state_dict(checkpoint_2['model_state_dict'], strict=True)
missing_keys, unexpected_keys = self.model_3.load_state_dict(checkpoint_3['model_state_dict'], strict=True)
else:
missing_keys, unexpected_keys = self.model.load_state_dict(checkpoint['model_state_dict'], strict=True)
print('Loaded pretrained model from ' + self.args['pretrained_model'])
if missing_keys:
print('missing_keys:')
print(missing_keys)
if unexpected_keys:
print('unexpected_keys:')
print(unexpected_keys)
# para_num = sum([p.numel() for p in self.model.parameters()])
# para_size = para_num * 4 / 1024
# import pdb; pdb.set_trace()
def _getDevice(self):
'''
Train on GPU if available.
'''
if torch.cuda.is_available():
self.dev = torch.device("cuda")
else:
self.dev = torch.device("cpu")
if "tr_device" in self.args:
if self.args['tr_device']=='cpu':
self.dev = torch.device("cpu")
elif self.args['tr_device']=='cuda':
self.dev = torch.device("cuda")
print('Device: {}'.format(self.dev))
if "tr_parallel" in self.args:
if (self.dev==torch.device("cpu")) and self.args['tr_parallel']==True:
self.args['tr_parallel'] = False
print('Using CPU -> tr_parallel set to False')
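# --- Illustrative usage sketch (not part of the original module) ---
# The keys below mirror the dict lookups used above; any value not given here is
# filled in from the pretrained checkpoint via checkpoint['args'].update(...).
# The file and checkpoint paths are placeholders.
def _example_predict():
    args = {
        'mode': 'predict_file',
        'deg': 'example.wav',                     # placeholder input file
        'pretrained_model': 'weights/nisqa.tar',  # placeholder checkpoint path
        'task_type': 0,
        'tr_bs_val': 1,
        'tr_num_workers': 0,
        'tr_parallel': False,
        'output_dir': None,
        'ms_channel': None,
    }
    model = nisqaModel(args)
    return model.predict()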
|
AlgorithmLib
|
/AlgorithmLib-4.0.3.tar.gz/AlgorithmLib-4.0.3/algorithmLib/MOS_INFER/NISQA_model.py
|
NISQA_model.py
|
import ctypes,os,platform
import sys,librosa
from ctypes import *
from formatConvert import pcm2wav
import numpy as np
import sys,os
from os import path
sys.path.append('../')
from commFunction import get_data_array,make_out_file
from VAD_NN.hubconf import silero_vad
from PCC.Pearson_CC import get_max_cc_by_dll
import numpy as np
def merge_intervals(intervals, N):
new_intervals = []
for i in range(len(intervals)):
a, b = intervals[i][0], intervals[i][1]
if i == 0:
continue
if i == 1:
new_intervals.append([0, (intervals[i][0] + intervals[i - 1][1]) // 2])
else:
new_intervals.append([(intervals[i-1][0]+intervals[i-2][1])//2+1, (intervals[i-1][1]+intervals[i][0])//2])
new_intervals.append([(intervals[-2][1] + intervals[-1][0]) // 2 + 1, N - 1])
return new_intervals
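# Illustrative note (not part of the original file): merge_intervals() expands the K
# detected speech intervals into K contiguous regions that cover [0, N-1], with the
# boundaries placed halfway between neighbouring intervals, e.g.
#   merge_intervals([[10, 20], [30, 40], [50, 60]], 100) -> [[0, 25], [26, 45], [46, 99]]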
def shift_array_by_interval(arr, intervals, offsets):
"""
Reorder a numpy array according to intervals and offsets.
Parameters:
arr: one-dimensional numpy array
intervals: list of intervals in the form [[start1, end1], [start2, end2], ...]
offsets: list of offsets in the form [offset1, offset2, ...]
Returns:
the reordered array
"""
if offsets >= 0:
base_point = intervals[0] - offsets
arr1 = arr[:base_point]
arr2 = arr[intervals[0]:]
arr3 = arr[base_point:intervals[0]]
new_arr = np.concatenate((arr1, arr2, arr3))
if offsets <= 0:
base_point = intervals[0] - offsets
arr1 = arr[:intervals[0]]
arr2 = arr[intervals[0]:base_point]
arr3 = arr[intervals[0]:]
new_arr = np.concatenate((arr1, arr2, arr3))
# return the new array
return new_arr
def replace_none(arr):
"""
Replace the None elements in arr with the average of the two neighbouring elements.
Parameters:
arr: one-dimensional array containing None and numeric elements
Returns:
the array with the None elements replaced
"""
n = len(arr)
result = []
if all(x is None for x in arr):
result = None
else:
val = arr[0] if arr[0] is not None else arr[1]
for i in range(n):
if arr[i] is None:
if i == n - 1 or arr[i + 1] is None:
val = val
else:
k = i + 1
while k < n and arr[k] is None:
k += 1
if k == n:
val = val
elif k == i + 1:
val = arr[k]
else:
val = (arr[i - 1] + arr[k]) / 2
else:
val = arr[i]
result.append(val)
val = val if arr[i] is None else arr[i]
return result
def shift_array(arr, n):
"""
Move the first n elements of arr to the end.
Parameters:
arr: one-dimensional numpy array
n: the number of elements to move
Returns:
the new array with the first n elements appended at the end
"""
# slice the array into the first n elements and the remaining elements
arr1 = arr[:n]
arr2 = arr[n:]
# concatenate the two pieces
new_arr = np.concatenate((arr2, arr1))
# return the new array
return new_arr
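# Illustrative note (not part of the original file):
#   shift_array(np.array([1, 2, 3, 4, 5]), 2) -> array([3, 4, 5, 1, 2])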
def get_my_dll():
"""
:return:
"""
mydll = None
cur_paltform = platform.platform().split('-')[0]
if cur_paltform == 'Windows':
mydll = ctypes.windll.LoadLibrary(sys.prefix + '/pcc.dll')
if cur_paltform == 'macOS':
mydll = CDLL(sys.prefix + '/pcc.dylib')
if cur_paltform == 'Linux':
mydll = CDLL(sys.prefix + '/pcc.so')
return mydll
def cal_fine_delay_of_specific_section(reffile, testfile,speech_section=None,targetfs=8000,outfile=None):
""""""
if speech_section is None:
speech_section = silero_vad(reffile)
#speech_section = [[0.941, 3.712], [4.7, 7.5]]
delaysearchRange = 4
delayThreshhold = 0.3
single_frame_size = 0.5
frameshift = 0.4
ref_orgin_data,fs,ch = get_data_array(reffile)
test_orgin_data,fs,ch = get_data_array(testfile)
refdata = librosa.resample(ref_orgin_data.astype(np.float32), orig_sr=fs ,target_sr=targetfs)
testdata = librosa.resample(test_orgin_data.astype(np.float32), orig_sr=fs ,target_sr=targetfs)
maxsearchlen = len(testdata)
delay_list = []
int_intervers = [[int(x*fs) for x in inner_arr] for inner_arr in speech_section]
for point in speech_section:
startpoint,endpoint = int(point[0]*targetfs),int(point[1]*targetfs)
cal_len = endpoint - startpoint
last_start_point,last_end_point = startpoint,endpoint
caltimes = (cal_len - (single_frame_size) * targetfs) // (frameshift * targetfs)
caltimes = int(caltimes)
assert caltimes > 0
cc_list = []
for times in range(caltimes):
start = int(startpoint + times * frameshift * targetfs)
srcblock = refdata[start:start + int(single_frame_size*targetfs)]
limit = min(maxsearchlen,int(start+(single_frame_size+delaysearchRange)*targetfs))
dstbloack = testdata[start:limit]
maxCoin, startPoint = get_max_cc_by_dll(srcblock, dstbloack, get_my_dll(), 3)
if maxCoin > delayThreshhold:
cc_list.append(round((startPoint / targetfs), 8))
if len(cc_list) == 0:
curDealy = None
else:
curDealy = sum(cc_list)/len(cc_list)
delay_list.append(curDealy)
delay_list = replace_none(delay_list)
if delay_list is None:
return None
delay_list = [int(x*fs) for x in delay_list]
result = [delay_list[0]]
prev = delay_list[0]
for i in range(1, len(delay_list)):
diff = delay_list[i] - prev
result.append(diff)
prev = delay_list[i]
base = shift_array(test_orgin_data, result[0])
if len(delay_list) == 1:
if outfile is not None:
make_out_file(outfile, base, fs, ch)
return sum(delay_list) / len(delay_list) / fs
intervals = merge_intervals(int_intervers,len(test_orgin_data))
for i in range(len(result)):
if i == 0:
continue
else:
base = shift_array_by_interval(base,intervals[i],result[i])
if outfile is not None:
make_out_file(outfile, base, fs, ch)
if len(delay_list) == 0:
return None
else:
return sum(delay_list)/len(delay_list)/fs
def cal_fine_delay(reffile, testfile,targetfs=8000,outfile=None):
""""""
delaysearchRange = 4
delayThreshhold = 0.3
single_frame_size = 1
reforgindata,fs,ch = get_data_array(reffile)
testorigndata,fs,ch = get_data_array(testfile)
refdata = librosa.resample(reforgindata.astype(np.float32), orig_sr=fs ,target_sr=targetfs)
testdata = librosa.resample(testorigndata.astype(np.float32), orig_sr=fs ,target_sr=targetfs)
cal_len = min(len(refdata),len(testdata))
caltimes = (cal_len - (delaysearchRange + single_frame_size) * targetfs) // (single_frame_size * targetfs)
caltimes = int(caltimes)
assert caltimes > 0
cc_list = []
for times in range(caltimes):
start = int(times * single_frame_size * targetfs)
srcblock = refdata[start:start + single_frame_size*targetfs]
dstbloack = testdata[start:start + (single_frame_size+delaysearchRange)*targetfs]
maxCoin, startPoint = get_max_cc_by_dll(srcblock, dstbloack, get_my_dll(), 3)
print(maxCoin)
if maxCoin > delayThreshhold:
cc_list.append(round((startPoint / targetfs), 8))
if len(cc_list) == 0:
return None
else:
finaldelay = sum(cc_list)/len(cc_list)
if outfile is not None:
make_out_file(outfile,shift_array(testorigndata,int(fs*finaldelay)),fs,ch)
return finaldelay
if __name__ == '__main__':
ref = 'speech_cn.wav'
test = 'mixDstFile_minus_13.wav'
test2 = 'mixFile.wav'
#a = cal_fine_delay_of_specific_section(pcm2wav(ref),pcm2wav(test),outfile='out.wav',speech_section=([16.467,18.769],[19.6,21.2],[22,24.2]))
a = cal_fine_delay_of_specific_section(pcm2wav(ref), pcm2wav(test), outfile='out2.wav')
#b = cal_fine_delay(pcm2wav(ref),pcm2wav(test2))
#a = cal_fine_delay(pcm2wav(ref), pcm2wav(test),outfile='out.wav')
print(a)
pass
|
AlgorithmLib
|
/AlgorithmLib-4.0.3.tar.gz/AlgorithmLib-4.0.3/algorithmLib/timeAligment/time_align.py
|
time_align.py
|
import cv2
import numpy as np
from Algorithms.image_processing.utils import read_image, rgb_min_image, min_filter
class DarkPrior():
def __init__(self, epsilon=10**-8):
self.epsilon = epsilon
def dark_channel(self, image):
# output the dark channel as the image
new_image = image.copy()
# performing the 15 x 15 min filter
min_image = min_filter(new_image)
# performing the color min operation
dark_prior = rgb_min_image(min_image)
return dark_prior
def transmission_map(self, image,A,w):
#finds the transmission map for the image
image_new = np.divide(image,A).astype(float)
# finding the dark channel of the divided image
new_dark = self.dark_channel(image_new)
# Scaling and subtracting the image
transmission = 1 - w*new_dark
return transmission
def a_estimator(self, image,dark_prior):
#Used the information extracted from the dark prior
#find a value for A
image_copy = image.copy()
[row,col,dem] = image_copy.shape
dark_copy = dark_prior.copy()
# finding the number of top 0.1% values
num = np.round(row*col*0.001).astype(int)
# use argsort to get the indices (not the values) of the brightest dark-channel pixels
j = np.argsort(np.asarray(dark_copy).reshape(-1))[::-1][:num]
# getting the location of the top 0.1%
ind = np.unravel_index(j[0], dark_copy.shape)
# Performing a search for the max value in the group
max_val = image_copy[ind[0],ind[1],:]
for element in j:
ind = np.unravel_index(element, dark_copy.shape)
if sum(max_val[:]) < sum(image_copy[ind[0],ind[1],:]):
max_val[:] = image_copy[ind[0],ind[1],:]
# creating a color image of the max value
A = image_copy
A[:,:,:] = max_val[:]
return A
def Radience_cal(self, image, A, transmission_map, t_not):
#Uses information from the transmission map to remove haze from the image.
image_copy = image.copy()
transmission_map_copy = (transmission_map.copy()).astype(float)
# Taking the element-wise max of the transmission map and t_not (clamping it from below)
divisor = np.maximum(transmission_map_copy,t_not)
radience = (image.copy()).astype(float)
# Applying equation 3 to every color channel
for i in range(3):
radience[:,:,i] = np.divide(
((image_copy[:,:,i]).astype(float) - A[0,0,i]),
divisor) + A[0,0,i]
# Capping all of the out of bound values
#radience = radience - np.min(radience)
#radience = 255*(radience/np.max(radience))
radience[radience>255]=255
radience[radience<0]=0
return radience.astype('uint8')
def guided_filter(self, image,guide,diameter,epsilon):
w_size = diameter+1
# Extracting the mean of the image by blurring
meanI=cv2.blur(image,(w_size,w_size))
mean_Guide=cv2.blur(guide,(w_size,w_size))
# Extracting the auto correlation
II=image**2
corrI=cv2.blur(II,(w_size,w_size))
# Finding the correlation between image and guide
I_guide=image*guide
corrIG=cv2.blur(I_guide,(w_size,w_size))
# using the mean of the image to find the variance of each point
varI=corrI-meanI**2
covIG=corrIG-meanI*mean_Guide
#covIG normalized with a epsilon factor
a=covIG/(varI+epsilon)
#a is used to find the b
b=mean_Guide-a*meanI
meanA=cv2.blur(a,(w_size,w_size))
meanB=cv2.blur(b,(w_size,w_size))
# using the means of a and b to refine the transmission map
transmission_rate=meanA*image+meanB
# normalizing the transmission map
transmission_rate = transmission_rate/np.max(transmission_rate)
return transmission_rate
def haze_remover(self, path=None, image=None):
'''
This function is used to dehaze an image from an
image path or from a cv2 image object
'''
if path is None and image is None:
print("There is not path and image enter to the function. Please add a image or a path to the model")
return None
else:
if image is None:
image = read_image(path)
min_image = min_filter(image)
dark_prior = rgb_min_image(min_image)
A = self.a_estimator(image,dark_prior)
img_gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
Transmition_image = self.transmission_map(image,A,0.95)
refine_Transmission_image = self.guided_filter(img_gray.astype(np.float32),Transmition_image.astype(np.float32),100,self.epsilon)
refine_radience_image = self.Radience_cal(image,A,refine_Transmission_image,0.1)
self.output = {'Input':image, 'Min_Image':min_image, 'A':A ,'Gray_Image':img_gray,
'Transmition_Map':Transmition_image, 'Refine_Transmition_Map':refine_Transmission_image,
'DeHaze_Image':refine_radience_image}
return self.output
def save_image(self, path='output.jpg', key='DeHaze_Image'):
'''
Input is path/filename
Key is the file you want to save
key = [Input, Min_Image, A, Gray_Image, Transmition_Map, Refine_Transmition_Map, DeHaze_Image]
saves the image to the path
'''
cv2.imwrite(path,self.output[key])
print('file name {} has been saved'.format(path))
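# --- Illustrative usage sketch (not part of the original module; 'hazy.jpg' is a placeholder path) ---
def _example_dehaze(path='hazy.jpg'):
    dp = DarkPrior()
    out = dp.haze_remover(path=path)          # dict with the input and all intermediate images
    dp.save_image('dehazed.jpg', key='DeHaze_Image')
    return out['DeHaze_Image'].shape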
|
Algorithms-abhay
|
/Algorithms-abhay-0.0.1.tar.gz/Algorithms-abhay-0.0.1/Image_Processing/Dehazing.py
|
Dehazing.py
|
from __future__ import absolute_import
import pickle
import numpy as np
from .decision_stamp import decision_stamp
class ada_boost():
def __init__(self, n_estimators=10, clf=decision_stamp):
self.clf = clf
self.n_estimators = n_estimators
self.alpha = list()
self.scores = list()
self.weights = list()
self.weak_clf = list()
self.label_pred = list()
self.weight_error = list()
def get_params(self):
return{'n_estimators':self.n_estimators}
def set_parmas(self, **parameters):
for parameter, value in parameters.items():
setattr(self, parameter, value)
return self
def weight_cal(self, epsolan, weight, label, prediction):
label_pred = -1*np.multiply(label,prediction)
self.label_pred.append(label_pred)
# Alpha
epsolan/=sum(weight)
alpha_m = 0.5*np.log((1-epsolan)/epsolan)
# New weights
weight_new = np.multiply(weight,np.exp([float(x) * alpha_m for x in label_pred]))
self.alpha.append(alpha_m)
return weight_new
def predict(self,data):
row, _ = data.shape
sum_y_pred = np.zeros((row))
for i, clf in enumerate(self.weak_clf):
sum_y_pred += self.alpha[i]*clf.predict(data)
y_pred = np.sign(sum_y_pred)
return y_pred
def fit(self, data,label,weight= None):
row,col = data.shape
if weight is None:
labels, counts = np.unique(label, return_counts=True)
counts = 0.5*(1/np.array(counts))
new_init_weight = [counts[np.where(labels == l)[0][0]] for l in list(label)]
weight = np.array(new_init_weight)
weight = np.ones(row)/row
for i in range(self.n_estimators):
self.weights.append(weight)
curr_clf = self.clf()
curr_clf.fit(data,label,weight)
curr_pred = curr_clf.predict(data)
weight_error = sum([w*(p != l) for p, l, w in zip(curr_pred, label, weight)])
weight = self.weight_cal(weight_error,weight,label,curr_pred)
self.weak_clf.append(curr_clf)
self.scores.append(self.score(data,label))
def score(self,data,label):
pred = self.predict(data)
equality_check = lambda y1,y2: y1 == y2
total = sum(map(equality_check, label, pred))
score = total/len(list(label))
return score
def save(self, path):
model = {
"clf" : self.clf,
"n_estimators" : self.n_estimators,
"alpha" : self.alpha,
"weak_clf" : self.weak_clf,
"scores" : self.scores
}
pickle.dump(model,open(path, 'wb'))
def load(self, path):
model = pickle.load(open(path, 'rb'))
self.clf = model['clf']
self.n_estimators = model['n_estimators']
self.alpha = model['alpha']
self.scores = model['scores']
self.weak_clf = model['weak_clf']
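# --- Illustrative usage sketch (not part of the original module) ---
# Labels are assumed to be encoded as -1/+1, which is what decision_stamp expects.
def _example_adaboost():
    rng = np.random.RandomState(0)
    X = rng.randn(200, 3)
    y = np.where(X[:, 0] + X[:, 1] > 0, 1, -1)   # toy problem: sign of the sum of two features
    clf = ada_boost(n_estimators=10)
    clf.fit(X, y)
    return clf.score(X, y)                       # training accuracy of the boosted stumps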
|
Algorithms-abhay
|
/Algorithms-abhay-0.0.1.tar.gz/Algorithms-abhay-0.0.1/Machine_Learning/adaboost.py
|
adaboost.py
|
import numpy as np
class decision_stamp():
def __init__(self):
# The index of the feature used to make classification
self.feature_index = None
# The threshold value that the feature should be measured against
self.threshold = 0
# Pairity of the threshold
self.pairty = 1
self.weight_error = 0
self.weights = None
def get_params(self):
parameters = {"feature_index": self.feature_index,
"threshold":self.threshold,
"pairty":self.pairty,
"weight_error":self.weight_error}
return parameters
def fit(self, data=None, label=None, weight=None):
'''
X, y, W should all be numpy array
X shape = [N,M]
Y shape = [1,N]
W shape = [1,N]
'''
if (data is None and label is None):
print("Improper input in the function")
else:
f_star = float('inf')
[row, col] = data.shape
labels, counts = np.unique(label, return_counts=True)
if (counts[0] > counts[1]):
label = -1*label
self.pairty = -1
if weight is None:
counts = (1/labels.shape[0])*(1/np.array(counts))
new_init_weight = [counts[np.where(labels == l)[0][0]] for l in list(label)]
weight = np.array(new_init_weight)
for j in range(col):
index = data[:,j].argsort()
x_j = data[:,j][index]
y_j = label[index]
d_j = weight[index]
f_curr = sum(d_j[y_j == 1])
if f_curr<f_star:
f_star = f_curr
theta_star = x_j[0] - 1
j_star = j
for i in range(0,row-1):
f_curr -= y_j[i]*d_j[i]
if (f_curr<f_star and x_j[i]!=x_j[i+1]):
f_star = f_curr
theta_star= 0.5*((x_j[i] + x_j[i+1]))
j_star=j
self.feature_index = j_star
self.threshold = theta_star
self.weight_error = f_star
self.weights = weight
return self
def predict(self, data):
if self.pairty == -1:
prediction = (data[:,self.feature_index] > self.threshold).astype(int)
else :
prediction = (data[:,self.feature_index] <= self.threshold).astype(int)
prediction = 2*prediction - 1
return prediction
def score(self, label, prediction):
same_check = lambda y1,y2: y1 == y2
total = sum(map(same_check, label, prediction))
score = total/len(list(label))
return score
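# --- Illustrative usage sketch (not part of the original module) ---
# With parity +1 the stump predicts +1 for feature values <= threshold; on this tiny
# separable example the fitted threshold lands at 0.5 and the labels are recovered.
def _example_stump():
    X = np.array([[0.1], [0.4], [0.6], [0.9]])
    y = np.array([1, 1, -1, -1])
    stump = decision_stamp().fit(X, y)
    return stump.predict(X)   # -> array([ 1,  1, -1, -1])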
|
Algorithms-abhay
|
/Algorithms-abhay-0.0.1.tar.gz/Algorithms-abhay-0.0.1/Machine_Learning/decision_stamp.py
|
decision_stamp.py
|
## I. Installation
``` shell
# local install
pip install AliFCWeb
# install with fun
fun install --save --runtime python3 --package-type pip AliFCWeb
```
## II. Quick start
### 1. Configure fun locally
Since AliFCWeb is a third-party library you have to upload it yourself; for convenience we use fun, Alibaba Cloud's official debugging tool, to write and debug the code.
- Install fun; see the [official documentation](https://github.com/alibaba/funcraft/blob/master/docs/usage/installation-zh.md?spm=a2c4g.11186623.2.18.30a8130772dyyb&file=installation-zh.md) for instructions
- Configure the fun environment
  - Option 1: run fun config in the console and follow the prompts to set the Account ID, Access Key Id, Secret Access Key and Default Region Name
  - Option 2: create a config.yaml file under C:\Users\<current user>\.fcli with the following content (replace the values with your own configuration)
```yaml
endpoint: 'https://AccountID.RegionName.fc.aliyuncs.com'
api_version: '2016-08-15'
access_key_id: AccessKeyId
access_key_secret: SecretAccessKey
security_token: ''
debug: false
timeout: 10
retries: 3
sls_endpoint: RegionName.fc.aliyuncs.com
report: true
```
### 2. Writing HelloWorld
- Create a local test directory to hold all the demos
- Create demo01 under the test directory
- Create template.yml in the demo01 directory with the following content:
```yaml
ROSTemplateFormatVersion: '2015-09-01'
Transform: 'Aliyun::Serverless-2018-04-03'
Resources:
fcweb-demo:
Type: 'Aliyun::Serverless::Service'
Properties:
Description: '函数计算fcweb框架demo'
demo01:
Type: 'Aliyun::Serverless::Function'
Properties:
Description: 'HelloWorld'
Handler: index.handler
Runtime: python3
CodeUri: '.'
Timeout: 30
Events:
httpTrigger:
Type: HTTP
Properties:
AuthType: ANONYMOUS
Methods:
- GET
- POST
- PUT
- DELETE
```
- Create index.py in the demo01 directory with the following code:
```python
import json
import logging
from AliFCWeb import fcIndex, get, post, put, delete, ResponseEntity
@fcIndex()
def handler(environ, start_response):
pass
@get()
def testFC(data):
return ResponseEntity.ok('Hello World!')
```
- Pull in the package by running this command in the console
```shell
fun install --save --runtime python3 --package-type pip AliFCWeb
```

- Deploy by running this command in the console
```shell
fun deploy
```
- Open the Function Compute console and click Run to see the result

## III. Getting parameters
### 1. Query-string parameters
- Copy demo01 and rename it demo02
- Edit template.yml
```yaml
ROSTemplateFormatVersion: '2015-09-01'
Transform: 'Aliyun::Serverless-2018-04-03'
Resources:
fcweb-demo:
Type: 'Aliyun::Serverless::Service'
Properties:
Description: '函数计算fcweb框架demo'
demo02:
Type: 'Aliyun::Serverless::Function'
Properties:
Description: '获取地址栏参数'
Handler: index.handler
Runtime: python3
CodeUri: '.'
Timeout: 30
Events:
httpTrigger:
Type: HTTP
Properties:
AuthType: ANONYMOUS
Methods:
- GET
- POST
- PUT
- DELETE
```
- Edit index.py
```python
import json
import logging
from AliFCWeb import fcIndex, get, post, put, delete, ResponseEntity
@fcIndex(debug=True)
def handler(environ, start_response):
pass
@get()
def testFC(data):
print('前端传来的参数:')
print(data)
return ResponseEntity.ok(data)
```
- Deploy the code
```shell
fun deploy
```
- Test it by passing a few arbitrary parameters in the console


### 2. Body parameters
- Copy demo02 and rename it demo03
- Edit the template.yml file
```yaml
ROSTemplateFormatVersion: '2015-09-01'
Transform: 'Aliyun::Serverless-2018-04-03'
Resources:
fcweb-demo:
Type: 'Aliyun::Serverless::Service'
Properties:
Description: '函数计算fcweb框架demo'
demo03:
Type: 'Aliyun::Serverless::Function'
Properties:
Description: '获取body参数'
Handler: index.handler
Runtime: python3
CodeUri: '.'
Timeout: 30
Events:
httpTrigger:
Type: HTTP
Properties:
AuthType: ANONYMOUS
Methods:
- GET
- POST
- PUT
- DELETE
```
- Edit the index.py file
```python
import json
import logging
from AliFCWeb import fcIndex, get, post, put, delete, ResponseEntity
@fcIndex(debug=True)
def handler(environ, start_response):
pass
# changed to a post request
@post()
def testFC(data):
print('前端传来的参数:')
print(data)
return ResponseEntity.ok(data)
```
- Deploy the code
```shell
fun deploy
```
- Run the test


### 3. Path-template parameters
- Copy demo03 and rename it demo04
- Edit the template.yml file
```yaml
ROSTemplateFormatVersion: '2015-09-01'
Transform: 'Aliyun::Serverless-2018-04-03'
Resources:
fcweb-demo:
Type: 'Aliyun::Serverless::Service'
Properties:
Description: '函数计算fcweb框架demo'
demo04:
Type: 'Aliyun::Serverless::Function'
Properties:
Description: '获取摸板参数'
Handler: index.handler
Runtime: python3
CodeUri: '.'
Timeout: 30
Events:
httpTrigger:
Type: HTTP
Properties:
AuthType: ANONYMOUS
Methods:
- GET
- POST
- PUT
- DELETE
```
- Edit the index.py file
```python
import json
import logging
from AliFCWeb import fcIndex, get, post, put, delete, ResponseEntity
@fcIndex(debug=True)
def handler(environ, start_response):
pass
@get('/demo04/{name}')
def testFC(data):
print('前端传来的参数:')
print(data)
return ResponseEntity.ok(data)
```
- Deploy the code
```shell
fun deploy
```
- Run the test


### 4. Mixed parameters
- Copy demo04 and rename it demo05
- Edit the template.yml file
```yaml
ROSTemplateFormatVersion: '2015-09-01'
Transform: 'Aliyun::Serverless-2018-04-03'
Resources:
fcweb-demo:
Type: 'Aliyun::Serverless::Service'
Properties:
Description: '函数计算fcweb框架demo'
demo05:
Type: 'Aliyun::Serverless::Function'
Properties:
Description: '获取混合参数'
Handler: index.handler
Runtime: python3
CodeUri: '.'
Timeout: 30
Events:
httpTrigger:
Type: HTTP
Properties:
AuthType: ANONYMOUS
Methods:
- GET
- POST
- PUT
- DELETE
```
- Edit the index.py file
```python
import json
import logging
from AliFCWeb import fcIndex, get, post, put, delete, ResponseEntity
@fcIndex(debug=True)
def handler(environ, start_response):
pass
@post('/demo05/{name}')
def testFC(data):
print('前端传来的参数:')
print(data)
return ResponseEntity.ok(data)
```
- Deploy the code
```shell
fun deploy
```
- Run the test (to make the result easier to inspect, a Postman-like tool is used here)


## IV. Using the config center
To deal with configuration in Function Compute, a default config center is provided: configuration can be written in one place there and read from other functions.
### 1. Creating the config center
Creating the default config center only takes a few steps
- Create a function
- Create a config directory
- Create index.py in the config directory and decorate the handler function with the configCenter decorator
```python
from AliFCWeb import configCenter
@configCenter()
def handler(environ, start_response):
pass
```
- Create template.yml in the config directory
```yaml
ROSTemplateFormatVersion: '2015-09-01'
Transform: 'Aliyun::Serverless-2018-04-03'
Resources:
fcweb-demo:
Type: 'Aliyun::Serverless::Service'
Properties:
Description: '函数计算fcweb框架demo'
config:
Type: 'Aliyun::Serverless::Function'
Properties:
Description: '配置中心'
Handler: index.handler
Runtime: python3
CodeUri: '.'
Timeout: 30
Events:
httpTrigger:
Type: HTTP
Properties:
AuthType: ANONYMOUS
Methods:
- GET
- POST
- PUT
- DELETE
```
- Pull in the AliFCWeb package
```shell
fun install --save --runtime python3 --package-type pip AliFCWeb
```
- Add the configuration file; the config center only needs a single application.py file to hold the configuration
- Just create application.py in the config directory; the resulting structure looks like this:

### 2. Using the config center
- Add some configuration to application.py
```python
mysql = {
'url': 'localhost',
'username': 'root',
'password': 'fcweb',
'charset': 'utf-8'
}
```
- Send a request to the config center to fetch the configuration

### 3. Setting a password for the config center
- Set the value of pwd in the config center's application.py

- Test


### 4. Fetching configuration from other functions
- Copy the earlier demo05 and rename it demo06
- Change the function name in template.yml to demo06
```yaml
ROSTemplateFormatVersion: '2015-09-01'
Transform: 'Aliyun::Serverless-2018-04-03'
Resources:
fcweb-demo:
Type: 'Aliyun::Serverless::Service'
Properties:
Description: '函数计算fcweb框架demo'
demo06:
Type: 'Aliyun::Serverless::Function'
Properties:
Description: '获取配置'
Handler: index.handler
Runtime: python3
CodeUri: '.'
Timeout: 30
Events:
httpTrigger:
Type: HTTP
Properties:
AuthType: ANONYMOUS
Methods:
- GET
- POST
- PUT
- DELETE
```
- Add an application.py file
```python
conf_center = {
# config center url; may be a full path such as: https://xxx.cn-hangzhou.fc.aliyuncs.com/2016-08-15/proxy/fcweb-demo/config/
'url': '/fcweb-demo/config/',
# config center password
'pwd': 'abc123'
}
```
- Edit index.py
```python
import json
import logging
from AliFCWeb import fcIndex, get, post, put, delete, ResponseEntity
# import the helper that fetches configuration
from AliFCWeb import getConfByName
@fcIndex(debug=True)
def handler(environ, start_response):
pass
@get()
def testFC(data):
# fetch the configuration
mysqlConf = getConfByName('mysql')
return ResponseEntity.ok(mysqlConf)
```
- Check the result

### 5. Fetching several configurations at once
Above we fetched the mysql configuration with getConfByName(). That method first checks the local cache and only pulls from the config center when nothing is cached; it is convenient, but it can only fetch one configuration at a time. To fetch several configurations in one call, use getConfigFromConfCenter().
- Copy demo06 and rename it demo07
- Edit the template.yml file
```yaml
ROSTemplateFormatVersion: '2015-09-01'
Transform: 'Aliyun::Serverless-2018-04-03'
Resources:
fcweb-demo:
Type: 'Aliyun::Serverless::Service'
Properties:
Description: '函数计算fcweb框架demo'
demo07:
Type: 'Aliyun::Serverless::Function'
Properties:
Description: '批量获取配置'
Handler: index.handler
Runtime: python3
CodeUri: '.'
Timeout: 30
Events:
httpTrigger:
Type: HTTP
Properties:
AuthType: ANONYMOUS
Methods:
- GET
- POST
- PUT
- DELETE
```
- Edit the index.py file
```python
import json
import logging
from AliFCWeb import fcIndex, get, post, put, delete, ResponseEntity
from AliFCWeb import getConfByName, CONF_CENTER_NAME
# import the getConfigFromConfCenter helper
# it lives in the AliFCWeb.fcutils package
from AliFCWeb.fcutils import getConfigFromConfCenter
@fcIndex(debug=True)
def handler(environ, start_response):
pass
@get()
def testFC(data):
# first fetch the config center url
centerConfig = getConfByName(CONF_CENTER_NAME)
# fetch the configurations
myConf = getConfigFromConfCenter(centerConfig['url'], ['mysql', 'postgresql'], centerConfig['pwd'])
return ResponseEntity.ok(myConf.text)
```
- Test
  - Add the postgresql configuration to application.py in the config center (/fcweb-demo/config/)
```python
postgresql = {
'dbname': 'cofree',
'user': 'postgres',
'password': '123456',
'host': 'localhost'
}
```
- Test demo07

## V. Customizing the response format
### 1. The ResponseEntity class
We have been returning results through the ResponseEntity class, which already processes and wraps the response. You can construct a ResponseEntity directly, or obtain an instance quickly through the built-in static helpers; **some** of them are listed below
| Method | Description | Notes |
| --------------------------- | -------------- | --------- |
| ResponseEntity.ok() | Returned on success | Status code 200 |
| ResponseEntity.badRequest() | Returned on error | Status code 400 |
| ResponseEntity.notFound() | Returns 404 | |
| ResponseEntity.serverError()| Server error | Status code 500 |
| ResponseEntity.unauthorized() | Insufficient permissions | Status code 401 |
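A minimal sketch of returning different status codes from a handler (following the conventions of the demos above):
```python
@get()
def testFC(data):
    if 'name' not in data:
        return ResponseEntity.badRequest('name is required')
    return ResponseEntity.ok('Hello ' + data['name'])
```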
### 2. Custom response format
- You can change the response by editing ResponseEntity's build() method, but this is not recommended
- A better way is to subclass ResponseEntity and override build()
- Create demo08 with index.py and template.yml, and pull in the AliFCWeb library
- Create myResponseEntity.py in the demo08 directory with the following content
```python
from AliFCWeb import ResponseEntity
from AliFCWeb import getConfByName, FC_START_RESPONSE
class MyResponseEntity(ResponseEntity):
def build(self, token = None):
# set the response headers and status code
# this step is required
start_response = getConfByName(FC_START_RESPONSE)
start_response(self.statusCode, self.response_headers)
# self.res holds the data to be returned
res = 'Hello ' + self.res
return [res.encode()]
```
- Edit the index.py file
```python
import json
import logging
from AliFCWeb import fcIndex, get, post, put, delete
# import the custom ResponseEntity
from myResponseEntity import MyResponseEntity
@fcIndex(debug=True)
def handler(environ, start_response):
pass
@get()
def testFC(data):
res = MyResponseEntity('200', data.get('name', 'World'))
return res
```
- Deploy and test

## VI. Security and authorization
### 1. Login verification
The get, post, put and delete decorators all accept three parameters: login, auth and uToken
- login: verifies that the caller is logged in; requires the pyjwt package and a key configured in the config center, and the front end must send the 3RD_SESSION parameter in the request header
- auth: authorization check; pass in a function and the framework will call it with the 3RD_SESSION as its argument. Returning True means authorization succeeded, anything else means it failed
- uToken: whether to refresh the token automatically
#### 1.1 Configuring the key
Configure an RSA key pair in the config center; the keys can be generated with an online service or a local tool
Edit the config center's application.py and add the keys
```python
# public key
rsa_public_key = '''-----BEGIN PUBLIC KEY-----
MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBANyrRZamov8MEpJf70ljWNrZxa7HbUhR
mTulmdc64tQ5Cjp5iIiJefAI8l9sJdj4RGwmJVuIVZJ4inWs0QRme1MCAwEAAQ==
-----END PUBLIC KEY-----
'''
# private key
rsa_private_key = '''-----BEGIN PRIVATE KEY-----
MIIBVgIBADANBgkqhkiG9w0BAQEFAASCAUAwggE8AgEAAkEA3KtFlqai/wwSkl/v
SWNY2tnFrsdtSFGZO6WZ1zri1DkKOnmIiIl58AjyX2wl2PhEbCYlW4hVkniKdazR
BGZ7UwIDAQABAkEA05di9a65EmgIEtTshGk/lTJF7G6LalHb5abH2eo8ABMd3LOx
Uu080HisRqMP7lRDYIl+pIvbn3JD3qQEU/6mWQIhAO51C3nzF+kcuAtxf6UBAAil
D+IbajDyeVnnTqb5H9wlAiEA7Oc3LMviG91RuNAhhnSojkbHqJPHXvn6kqL9Xxra
pBcCIQDkHsTj3VM6h2bqS6I5UEOgAYi4XlGwkcbV4xqzUhDQoQIgVqLMA77gBq6u
dzbuO7yn87ggxh6dF7e1kjC3FjO856sCIQChK4bPN58V2shUFSvxyNbcjzljajC2
KWhUrhfmSsmj0g==
-----END PRIVATE KEY-----'''
```
#### 1.2 Writing a test demo
- Create demo09
- Create index.py, application.py and template.yml
- Open a console in the demo09 directory and run the following commands
```shell
fun install --save --runtime python3 --package-type pip AliFCWeb
fun install --save --runtime python3 --package-type pip pyjwt
```
- Edit the index.py file
```python
import json
import logging
from AliFCWeb import fcIndex, get, post, put, delete, ResponseEntity
from AliFCWeb import getPayloadFromHeader
_log = logging.getLogger()
@fcIndex(debug=True)
def handler(environ, start_response):
pass
@get()
def testFC(data):
password = data['password']
if password == 123456:
# issue the token
userId = data.get('id', 1)
return ResponseEntity.ok('登录成功!').setToken({'user_id': userId})
else:
return ResponseEntity.unauthorized('密码错误!')
@post(login=True)
def testPost(data):
token = getPayloadFromHeader()
_log.info(token)
return ResponseEntity.ok('操作成功')
```
#### 1.3 Simulating a login
- First simulate a login with a get request

- After a successful login the backend returns an encrypted token
- From then on, put the token in the request header (named 3RD_SESSION) on every request
- Verify the login with a post request
- Set login=True on the post decorator

- You can store useful information in the token and read it back in code with getPayloadFromHeader()
- You can also get the token with getTokenFromHeader(); both methods return exactly the same result, the difference being that getTokenFromHeader() verifies the token.
- If login=True is set, getPayloadFromHeader() is recommended because it is more efficient.
- If login=True is not set, use getTokenFromHeader() to stay safe.
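A minimal sketch of reading a value back out of the token (assuming the demo09 setup above, where the payload contains user_id):
```python
@post(login=True)
def testPost(data):
    payload = getPayloadFromHeader()
    return ResponseEntity.ok('user_id in token: {}'.format(payload['user_id']))
```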
### 2. Authorization
- Copy demo09 to demo10 and edit the template.yml file
- Edit the index file
```python
import json
import logging
from AliFCWeb import fcIndex, get, post, put, delete, ResponseEntity
_log = logging.getLogger()
@fcIndex(debug=True)
def handler(environ, start_response):
pass
def myAuth(token):
    if token['user_id'] == 1:
        return True
    return False
@post(login=True, auth=myAuth)
def testPost(data):
    return ResponseEntity.ok('操作成功')
```
- Deploy and test
- First go to demo09 and get a token whose id is 2


- Then go to demo09 and get a token whose id is 1


## VII. Using a database
- This example uses MySQL; the framework also supports PostgreSQL with exactly the same usage, you only need the matching configuration file
- Configure the mysql parameters in the config center first
- For MySQL, just use mysqlConn to obtain a connection
- For PostgreSQL, just use postgresqlConn to obtain a connection
- Pull in the corresponding driver package
```shell
# mysql
fun install --save --runtime python3 --package-type pip pymysql
# postgresql
fun install --save --runtime python3 --package-type pip psycopg2
```
- The following two classes need to be imported
```python
from AliFCWeb.fcorm import Orm, Example
```
### 1. Preparation
- This assumes the mysql configuration is already in place in the config center
```python
mysql = {
'url': '',
'username': '',
'password': '',
'database': '',
'charset': 'utf8'
}
```
- Create a user table in the database for testing
```sql
CREATE TABLE `user` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`name` varchar(255) DEFAULT NULL,
`age` int(11) DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
```
### 2. Using the Orm class
#### 2.1 Inserting data
- Create demo11 and set up template.yml and application.py
- Edit index.py as follows:
```python
import json
import logging
from AliFCWeb.fcorm import Orm, Example
from AliFCWeb import fcIndex, get, post, put, delete, mysqlConn, ResponseEntity
_log = logging.getLogger()
_conn = mysqlConn
@fcIndex(debug=True)
def handler(environ, start_response):
pass
@post()
def testFC(data):
'''
'''
orm = Orm(_conn, 'user')
userId = orm.insertData(data)
return ResponseEntity.ok('新增用户成功,你的id:{}'.format(userId))
```
- Test result

insertData() inserts data and accepts the following kinds of parameters (using the user table as an example)
| First argument | Second argument | Meaning |
| ------------------------- | ------------------------- | ----------------------------------------- |
| {'name':'Zhang San', 'age':18} | none | Insert a single row |
| ['name', 'age'] | [['Zhang San', 18], ['Li Si', 19]] | Insert several rows; the two arguments must correspond one-to-one |
| [{'name':'Zhang San', 'age':18}, {'name':'Li Si', 'age':19}] | none | Insert several rows; every row must have the same fields |
| ['name', 'age'] | {'name':'Zhang San', 'age':18} | The first argument lists the columns to insert and the second the data; extra fields are filtered out, e.g. passing {'name':'Zhang San', 'age':18, 'delete': '1'} simply drops the delete field and the database does not complain |
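As a sketch, the four call styles from the table look like this for the user table:
```python
orm = Orm(_conn, 'user')
# a single row
orm.insertData({'name': 'Zhang San', 'age': 18})
# several rows, with the column names given separately
orm.insertData(['name', 'age'], [['Zhang San', 18], ['Li Si', 19]])
# several rows as dicts with identical fields
orm.insertData([{'name': 'Zhang San', 'age': 18}, {'name': 'Li Si', 'age': 19}])
# an explicit column list filters out extra fields such as 'delete'
orm.insertData(['name', 'age'], {'name': 'Zhang San', 'age': 18, 'delete': '1'})
```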
#### 2.2 Updating data
- Copy demo11 to demo12 and adjust its template.yml
- Edit the index.py code
```python
import json
import logging
from AliFCWeb.fcorm import Orm, Example
from AliFCWeb import fcIndex, get, post, put, delete, mysqlConn, ResponseEntity
_log = logging.getLogger()
_conn = mysqlConn
@fcIndex(debug=True)
def handler(environ, start_response):
pass
@post()
def testFC(data):
orm = Orm(_conn, 'user')
userId = orm.updateByPrimaryKey({'age': 20}, 1)
user = orm.selectByPrimaeyKey(1)
return ResponseEntity.ok(user)
```
- Run the test

updateByPrimaryKey() takes two arguments
- data: the data to update
- id: the primary key of the row to update; if omitted, the key is looked up in data, so orm.updateByPrimaryKey({'age': 20, 'id':1}) has the same effect
#### 2.3 Deleting data
- Copy demo12 to demo13 and adjust its template.yml
- Edit the index.py code
```python
import json
import logging
from AliFCWeb.fcorm import Orm, Example
from AliFCWeb import fcIndex, get, post, put, delete, mysqlConn, ResponseEntity
_log = logging.getLogger()
_conn = mysqlConn
@fcIndex(debug=True)
def handler(environ, start_response):
pass
@post()
def testFC(data):
orm = Orm(_conn, 'user')
userId = orm.deleteByPrimaryKey(1)
users = orm.selectAll()
return ResponseEntity.ok(users)
```
- Run the test

deleteByPrimaryKey() takes one argument
- id: the primary key of the row to delete
selectAll() is also used here; it returns every row in the table. Note that when the returned data is a list a num field is added automatically; you can also set num yourself with ResponseEntity's setNum() method, which is very handy for paginated queries
### 3. Advanced queries with the Example class
We have already queried with selectByPrimaeyKey() and selectAll(); for more advanced queries you need the Example class
- Create a new house table in the database
```sql
CREATE TABLE `house` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`user_id` int(11) DEFAULT NULL,
`address` varchar(255) DEFAULT NULL,
`price` int(10) DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
```
- Copy demo13 and rename it demo14
- Edit demo14's template.yml file
- Edit the index.py file
```python
import json
import logging
from AliFCWeb.fcorm import Orm, Example
from AliFCWeb import fcIndex, get, post, put, delete, mysqlConn, ResponseEntity
_log = logging.getLogger()
_conn = mysqlConn
@fcIndex(debug=True)
def handler(environ, start_response):
pass
@get()
def testFC(data):
try:
res = testFC1(data)
_conn.commit()
return res
except Exception as e:
_conn.rollback()
_log.error(e)
return ResponseEntity.badRequest(e)
def testFC1(data):
# turn off auto-commit
userOrm = Orm(_conn, 'user', auto_commit=False)
houseOrm = Orm(_conn, 'house', auto_commit=False)
print('==============插入数据==============')
userId = userOrm.insertData({'name':'张三', 'age':18})
houseOrm.insertData(['user_id', 'address', 'price'],
[[userId, '成都市武侯区', 100],
[userId, '成都市高新区', 200],
[userId, '北京市东城区', 300],
[userId, '北京市西城区', 400],
[userId, '北京市朝阳区', 500]])
print('==============所有user数据==============')
users = userOrm.selectAll()
print(users)
print('==============所有house数据==============')
houses = houseOrm.selectAll()
print(houses)
print('==============所有位于成都的house数据==============')
houses = houseOrm.selectByExample(Example().andLike('address', '成都%'))
print(houses)
print('==============所有不在成都的house数据==============')
houses = houseOrm.selectByExample(Example().andNotLike('address', '成都%'))
print(houses)
print('==============所有user_id为1的house数据==============')
houses = houseOrm.selectByExample(Example().andEqualTo({'user_id': 1}))
print(houses)
print('==============连接查询==============')
userOrm.leftJoin('house', 'house.user_id=user.id')
houses = userOrm.selectByExample(Example().andEqualTo({'price': 100}))
print(houses)
print('==============清除缓存==============')
userOrm.clear()
print('==============所有售价大于200小于400的house数据==============')
houses = houseOrm.selectByExample(Example().andBetween('price', 200, 400))
print(houses)
print('==============所有北京的房子涨价10==============')
houseOrm.updateByExample({'price': '+10'}, Example().andLike('address', '北京%'))
houses = houseOrm.selectAll()
print(houses)
print('==============分页查询==============')
num, houses = houseOrm.selectPageByExample(Example().andEqualTo({'user_id': userId}), 2, 3)
print('符合条件的总条数:{}'.format(num))
print('本页数据:')
print(houses)
print('==============多值查询==============')
houses = houseOrm.selectByExample(Example().andInValues('price', [100, 200]))
print(houses)
return ResponseEntity.ok('请查看控制台日志')
```
## VIII. Custom anchors
The mysqlConn, redisConn and postgresqlConn objects used for database access are themselves anchors. An anchor's job is to load a global variable at runtime. **This comes in handy when a global variable needs data from environ or needs the config center.**
- Copy demo14 and rename it demo15
- Edit the template.yml file
- Add an anchor file mySign.py
- First import
```python
from AliFCWeb.sign import Sign
```
- Create a MySign class that inherits Sign and overrides the replace() method, for example:
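A minimal sketch of mySign.py (the body of replace() is an assumption; the README only requires that MySign inherit Sign and override replace()):
```python
from AliFCWeb.sign import Sign
class MySign(Sign):
    def replace(self):
        # Assumption: replace() returns the value that the decorated name
        # (getUrl in the demo below) resolves to when the request is handled.
        return 'https://example.com'
```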
- Edit the index.py file
```python
import json
import logging
from AliFCWeb import fcIndex, get, post, put, delete, ResponseEntity
from mySign import MySign
_log = logging.getLogger()
@fcIndex(debug=True)
def handler(environ, start_response):
pass
# 使用锚点
@MySign
def getUrl():
pass
@get()
def testFC(data):
return ResponseEntity.ok(getUrl)
```
- Test result

|
AliFCWeb
|
/AliFCWeb-1.1.1.tar.gz/AliFCWeb-1.1.1/README.md
|
README.md
|
import argparse
import math
import os
import random
from subprocess import call
import sys
from textwrap import TextWrapper
import webbrowser
import praw #Reddit API Wrapper
__version__ = "0.3.2"
USER_AGENT = 'AlienFeed v'+__version__+' by u/jw989 seen on ' \
'Github http://github.com/jawerty/AlienFeed'
#Reddit object accessed via praw
r = praw.Reddit(user_agent=USER_AGENT)
#Color codes used to make the display pretty
class TerminalColor(object):
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
SUBTEXT = '\033[90m'
INFO = '\033[96m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
color = TerminalColor()
#Tags to describe each link
class LinkType(object):
NSFW = '[NSFW]'
POST = '[POST]'
PIC = '[PIC]'
PICS = '[PICS]'
VIDEO = '[VIDEO]'
def get_link_types(link):
types = []
# When the image's link ends with image_types or comes from the imagehosts
# then it will append the link types (e.g. [NSFW], [POST], [VIDEO])
image_types = ('jpg', 'jpeg', 'gif', 'png')
image_hosts = ('imgur', 'imageshack', 'photobucket', 'beeimg')
if link.url == link.permalink:
types.append(color.INFO + LinkType.POST + color.ENDC)
elif link.url.split('.')[-1].lower() in image_types:
types.append(color.OKGREEN + LinkType.PIC + color.ENDC)
elif link.domain.split('.')[-2].lower() in image_hosts:
types.append(color.OKGREEN + LinkType.PICS + color.ENDC)
elif link.media:
types.append(color.OKGREEN + LinkType.VIDEO + color.ENDC)
if link.over_18:
types.append(color.FAIL + LinkType.NSFW + color.ENDC)
return ' '.join(types)
class _parser(argparse.ArgumentParser): # ArgumentParser subclass that prints a colored error message plus the usage help on bad arguments
def error(self, message):
sys.stderr.write(color.FAIL +
'\nAlienFeed error: %s\n' % (message + color.ENDC))
self.print_help()
sys.exit(2)
#method to display the generated links from submission_getter
def subreddit_viewer(generator):
links = submission_getter(generator, verbose=True)
# submission_getter - gathers the praw-generated submissions and, when verbose,
# builds the AlienFeed listing using the TerminalColor object 'color'
def submission_getter(generator, memo=[], verbose=False):
links = []
scores = []
subreddits = set()
for x, link in enumerate(generator):
memo.append(link.url)
if verbose:
links.append(link)
scores.append(link.score)
subreddits.add(str(link.subreddit))
if not verbose:
return memo
# aligning all of the arrows ' -> ' for the AlienFeed output
count_width = int(math.log(len(links), 10)) + 1
score_width = len(str(max(scores)))
fmt = {'arrow': ' -> '}
indent = ' ' * (count_width + len(fmt['arrow']) + score_width + 1)
try:
_, terminal_width = os.popen('stty size', 'r').read().split()
terminal_width = int(terminal_width)
except:
terminal_width = 80
wrapper = TextWrapper(subsequent_indent=indent, width=terminal_width)
for i, link in enumerate(links):
fmt['count'] = color.OKGREEN + str(i + 1).rjust(count_width)
fmt['score'] = color.WARNING + str(link.score).rjust(score_width)
fmt['title'] = color.OKBLUE + link.title
fmt['tags'] = get_link_types(link)
if len(subreddits) > 1:
fmt['title'] += color.SUBTEXT + u' ({0})'.format(link.subreddit)
wrap = wrapper.wrap(
u'{count}{arrow}{score} {title} {tags}'.format(**fmt))
for line in wrap:
print line
return memo #The generated output to be displayed to the user
# method to output color text
def print_colorized(text):
print color.HEADER, text, color.ENDC
# warning for AlienFeed
def print_warning(text, exc=None, exc_details=None):
if exc and exc_details:
print color.FAIL, exc, exc_details
print color.WARNING, text , color.ENDC
def main():
# argparse argument management
parser = _parser(description='''AlienFeed, by Jared Wright, is a
commandline application made for displaying and
interacting with recent Reddit links. I DO NOT HAVE
ANY AFFILIATION WITH REDDIT, I AM JUST A HACKER''')
parser.add_argument("-l", "--limit", type=int, default=10,
help='Limits output (default output is 10 links)')
parser.add_argument("subreddit", default='front',
help="Returns top links from subreddit 'front' "
"returns the front page")
parser.add_argument("-o", "--open", type=int,
help='Opens the link at the given position '
'in the feed (chosen by number)')
parser.add_argument("-r", "--random", action='store_true',
help='Opens a random link (must be the only '
'optional argument)')
parser.add_argument("-U", "--update", action='store_true',
help='Automatically updates AlienFeed via pip')
parser.add_argument("-v", "--version",action='store_true',
help='Displays version of AlienFeed.')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args=parser.parse_args()
subm_gen = None
# returns current version of AlienFeed
if args.version:
print "AlienFeed version: "+__version__
# random automatically opens the link, therefore -o interferes with -r
if args.open and args.random:
print_warning("You cannot use [-o OPEN] with [-r RANDOM]")
sys.exit(1)
# if random is present, it ignores the other arguments for the sake of simplicity
if args.random:
if args.limit == 10:
if args.subreddit == 'front':
front = r.get_front_page(limit=200)
links = submission_getter(front)
else:
# Gather top, new, and hot posts from the chosen subreddit to pick a random link from
top = r.get_subreddit(args.subreddit).get_top(limit=200)
new = r.get_subreddit(args.subreddit).get_new(limit=200)
hot = r.get_subreddit(args.subreddit).get_hot(limit=200)
links = submission_getter(top)
links = submission_getter(new, links)
links = submission_getter(hot, links)
try:
webbrowser.open( random.choice(links) ) #opens in default web browser
print_colorized("\nviewing a random submission\n")
except IndexError, e:
print_warning("There was an error with your input. "
"Hint: Perhaps the subreddit you chose was "
"too small to run through the program",
"IndexError:", e)
else:
print_warning("You cannot use [-l LIMIT] with [-r RANDOM] "
"(unless the limit is 10)")
sys.exit(1)
elif args.open:
try:
subr = (r.get_subreddit(args.subreddit).get_hot(limit=args.limit)
if args.subreddit != 'front' else
r.get_front_page(limit=args.limit))
links = submission_getter(subr)
webbrowser.open( links[args.open - 1] )
print '\nviewing submission\n'
except IndexError, e:
print_warning("The number you typed in was out of the feed's range"
" (try to pick a number between 1-10 or add"
" --limit {0}".format(e), "IndexError:", e)
except praw.errors.InvalidSubreddit, e:
print_warning("I'm sorry but the subreddit '{0}' does not exist; "
"try again.".format(args.subreddit),
"InvalidSubreddit:", e)
else:
if args.subreddit == 'front':
subm_gen = r.get_front_page(limit=args.limit)
print_colorized('Top {0} front page links:'.format(args.limit))
else:
subm_gen = r.get_subreddit(args.subreddit).get_hot(limit=args.limit)
print_colorized('Top {0} /r/{1} links:'.format(
args.limit, args.subreddit))
try:
if subm_gen:
subreddit_viewer(subm_gen)
except praw.errors.InvalidSubreddit, e:
print_warning("I'm sorry but the subreddit '{0}' does not exist; "
"try again.".format(args.subreddit),
"InvalidSubreddit:", e)
# When argument is added, pip will automatically run as a child process (optional)
if args.update == True:
try:
print "Upgrading AlienFeed..."
call(['pip', 'install', 'alienfeed', '--upgrade', '--quiet'])
except OSError, e:
print_warning("You cannot use -U without having pip installed.")
if __name__ == '__main__':
main()
|
AlienFeed
|
/AlienFeed-0.3.2.tar.gz/AlienFeed-0.3.2/alienfeed/alien.py
|
alien.py
|
import pygame
class Ship:
"""A class to manage the behaviour of the player's ship."""
def __init__(self, ai_settings, screen):
"""Initialize the ship and set its starting position."""
self.screen = screen
# Load the ship image and get its rect.
self.image = pygame.image.load('../images/ship.bmp')
self.image = pygame.transform.rotate(self.image, -90)
self.rect = self.image.get_rect()
self.screen_rect = screen.get_rect()
self.ai_settings = ai_settings
# Start each new ship at the left center of the screen.
self.rect.centery = self.screen_rect.centery
self.rect.left = self.screen_rect.left
# Store a decimal value for the ship's center.
self.centerx = float(self.rect.centerx)
self.centery = float(self.rect.centery)
# Movement flag
self.moving_right = False
self.moving_left = False
self.moving_up = False
self.moving_down = False
def update(self):
"""Update the ship's position based on the movement flag."""
# Update the ship's center value, not the rect.
# if self.moving_right and self.rect.right < self.screen_rect.right:
# self.centerx += self.ai_settings.ship_speed_factor
#
# if self.moving_left and self.rect.left > self.screen_rect.left:
# self.centerx -= self.ai_settings.ship_speed_factor
if self.moving_up and self.rect.top > self.screen_rect.top:
self.centery -= self.ai_settings.ship_speed_factor
if self.moving_down and self.rect.bottom < self.screen_rect.bottom:
self.centery += self.ai_settings.ship_speed_factor
# Update rect object from self.center
self.rect.centerx = self.centerx
self.rect.centery = self.centery
def blitme(self):
"""Draw the ship at its current location."""
self.screen.blit(self.image, self.rect)
def center_ship(self):
"""Center the ship on the screen."""
self.centery = self.screen_rect.centery
|
AlienInvasion-Raph692
|
/AlienInvasion_Raph692-0.1.0-py3-none-any.whl/sideways_shooter/ss_ship.py
|
ss_ship.py
|
import sys
import pygame
from ss_bullet import Bullet
from ss_target import Target
from time import sleep
def check_keydown_events(event, ss_settings, screen, stats, ship, bullets, target):
"""Responding to keypresses."""
# Move the ship to the left or right
if event.key == pygame.K_RIGHT:
ship.moving_right = True
elif event.key == pygame.K_LEFT:
ship.moving_left = True
elif event.key == pygame.K_UP:
ship.moving_up = True
elif event.key == pygame.K_DOWN:
ship.moving_down = True
elif event.key == pygame.K_SPACE:
fire_bullet(ss_settings, screen, ship, bullets)
elif event.key == pygame.K_p and not stats.game_active:
start_game(ss_settings, stats, ship, bullets, target)
elif event.key == pygame.K_q:
sys.exit()
def fire_bullet(ai_settings, screen, ship, bullets):
"""Fire a bullet if limit not reached yet."""
# Create a new bullet and add it to the bullets group.
if len(bullets) < ai_settings.bullets_allowed:
new_bullet = Bullet(ai_settings, screen, ship)
bullets.add(new_bullet)
def check_keyup_events(event, ship):
"""Responding to key releases."""
# Stop moving the ship when releasing arrow keys.
if event.key == pygame.K_RIGHT:
ship.moving_right = False
elif event.key == pygame.K_LEFT:
ship.moving_left = False
elif event.key == pygame.K_UP:
ship.moving_up = False
elif event.key == pygame.K_DOWN:
ship.moving_down = False
def check_events(ss_settings, screen, stats, ship, bullets, target, play_button):
"""Respond to key presses and mouse events."""
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
elif event.type == pygame.MOUSEBUTTONDOWN:
mouse_x, mouse_y = pygame.mouse.get_pos()
check_play_button(ss_settings, stats, play_button, ship, bullets, target, mouse_y, mouse_x)
elif event.type == pygame.KEYDOWN:
check_keydown_events(event, ss_settings, screen, stats, ship, bullets, target)
elif event.type == pygame.KEYUP:
check_keyup_events(event, ship)
def check_play_button(ss_settings, stats, play_button, ship, bullets, target, mouse_y, mouse_x):
"""Start a new game when the player clicks Play."""
button_clicked = play_button.rect.collidepoint(mouse_x, mouse_y)
if button_clicked and not stats.game_active:
# Restart game
start_game(ss_settings, stats, ship, bullets, target)
def start_game(ss_settings, stats, ship, bullets, target):
"""Start a new game."""
# Reset the game settings.
ss_settings.initialize_dynamic_settings()
# Hide the mouse cursor.
pygame.mouse.set_visible(False)
# Reset the game statistics.
stats.reset_stats()
stats.game_active = True
# Empty the list of bullets
bullets.empty()
# Center the ship and target.
target.center_target()
ship.center_ship()
def update_screen(ai_settings, screen, stats, ship, bullets, target, play_button):
"""Update images on the screen and flip to the new screen."""
# Redraw the screen during each pass through the loop.
screen.fill(ai_settings.bg_color)
# Redraw all bullets behind ship and aliens.
for bullet in bullets.sprites():
bullet.draw_bullet()
ship.blitme()
# Redraw target
target.draw_target()
# Draw the play button when the game is inactive.
if not stats.game_active:
play_button.draw_button()
# Make the most recently drawn screen visible.
pygame.display.flip()
def update_bullets(ss_settings, screen, stats, bullets, target):
"""Update position of bullets and get rid of old bullets."""
# Update bullet positions.
bullets.update()
# Get rid of bullets that have disappeared.
screen_rect = screen.get_rect()
for bullet in bullets.copy():
if bullet.rect.left >= screen_rect.right:
check_bullet_edges(stats, bullets)
# Check for target-bullet collisions
if pygame.sprite.spritecollideany(target, bullets):
target_hit(ss_settings, bullets)
def check_bullet_edges(stats, bullets):
"""Check if a bullet has missed the target."""
if stats.ships_left > 0:
# Decrement ships left
stats.ships_left -= 1
# Empty the list of bullets.
bullets.empty()
# # Pause.
# sleep(0.2)
else:
stats.game_active = False
pygame.mouse.set_visible(True)
def check_target_edges(ss_settings, target):
"""Check if the target has reached the top of bottom of the screen."""
if target.check_edges():
ss_settings.target_direction *= -1
def target_hit(ss_settings, bullets):
"""Respond to the target being hit by a bullet."""
# ToDo add scores for hitting target
# Empty the list of bullets.
bullets.empty()
# Increase speed settings.
ss_settings.increase_speed()
def update_target(ss_settings, target):
"""Check if the target is at an edge and then update its position on the screen."""
check_target_edges(ss_settings, target)
target.update()
# def create_target(ss_settings, screen):
# """Create a target place it on the center right."""
# target = Target(ss_settings, screen)
# target.center_target()
# return target
|
AlienInvasion-Raph692
|
/AlienInvasion_Raph692-0.1.0-py3-none-any.whl/sideways_shooter/ss_game_functions.py
|
ss_game_functions.py
|
import pygame.font
from pygame.sprite import Group
from ship import Ship
class ScoreBoard:
"""A class to report scoring information."""
def __init__(self, ai_settings, screen, stats):
"""Initialize score-keeping attributes."""
self.screen = screen
self.screen_rect = screen.get_rect()
self.ai_settings = ai_settings
self.stats = stats
# Font settings for scoring information.
self.text_color = (30, 30, 30)
self.font = pygame.font.SysFont(None, 48)
# Prepare the initial score images.
self.prep_images(ai_settings, screen)
def prep_images(self, ai_settings, screen):
"""Prepare the score images."""
self.prep_score()
self.prep_high_score()
self.prep_level()
self.prep_ships(ai_settings, screen)
def prep_score(self):
"""Turn the score into a rendered image."""
# Round scores and include comma separator
rounded_score = round(self.stats.score, -1)
score_str = "{:,}".format(rounded_score)
self.score_image = self.font.render(score_str, True, self.text_color, self.ai_settings.bg_color)
# Display the score at the top right of the screen.
self.score_rect = self.score_image.get_rect()
self.score_rect.right = self.screen_rect.right - 20
self.score_rect.top = 20
def prep_high_score(self):
"""Turn the high score into a rendered image."""
rounded_high_score = round(self.stats.high_score, -1)
high_score_str = "{:,}".format(rounded_high_score)
self.high_score_image = self.font.render(high_score_str, True, self.text_color, self.ai_settings.bg_color)
# Display the high score at the top center of the screen.
self.high_score_rect = self.high_score_image.get_rect()
self.high_score_rect.centerx = self.screen_rect.centerx
self.high_score_rect.top = self.screen_rect.top
def prep_level(self):
"""Turn the level into a rendered image."""
level_str = str(self.stats.level)
self.level_image = self.font.render(level_str, True, self.text_color, self.ai_settings.bg_color)
# Display the level below the score at the top right of the screen.
self.level_rect = self.level_image.get_rect()
self.level_rect.right = self.score_rect.right
self.level_rect.top = self.score_rect.bottom + 10
def prep_ships(self, ai_settings, screen):
"""Show how many ships are left."""
self.ships = Group()
for ship_number in range(self.stats.ships_left):
ship = Ship(ai_settings, screen)
ship.rect.x = 10 + ship_number * ship.rect.width
ship.rect.y = 10
self.ships.add(ship)
def show_score(self):
"""Draw score to the screen."""
self.screen.blit(self.score_image, self.score_rect)
self.screen.blit(self.high_score_image, self.high_score_rect)
self.screen.blit(self.level_image, self.level_rect)
self.ships.draw(self.screen)
|
AlienInvasion-Raph692
|
/AlienInvasion_Raph692-0.1.0-py3-none-any.whl/scripts/scoreboard.py
|
scoreboard.py
|
import pygame
from pygame.sprite import Sprite
class Ship(Sprite):
"""A class to manage the behaviour of the player's ship."""
def __init__(self, ai_settings, screen):
"""Initialize the ship and set its starting position."""
super(Ship, self).__init__()
self.screen = screen
self.ai_settings = ai_settings
# Load the ship image and get its rect.
self.image = pygame.image.load('../images/ship.bmp')
# self.image = pygame.image.load('images/dick_small.bmp') # change size to 60x60 pixels in GIMP
self.rect = self.image.get_rect()
self.screen_rect = screen.get_rect()
# Start each new ship at the bottom center of the screen.
self.rect.centerx = self.screen_rect.centerx
self.rect.bottom = self.screen_rect.bottom
# Store a decimal value for the ship's center.
self.centerx = float(self.rect.centerx)
self.centery = float(self.rect.centery)
self.bottom = float(self.rect.bottom)
# Movement flag
self.moving_right = False
self.moving_left = False
self.moving_up = False
self.moving_down = False
def update(self):
"""Update the ship's position based on the movement flag."""
# Update the ship's center value, not the rect.
if self.moving_right and self.rect.right < self.screen_rect.right:
self.centerx += self.ai_settings.ship_speed_factor
if self.moving_left and self.rect.left > self.screen_rect.left:
self.centerx -= self.ai_settings.ship_speed_factor
if self.moving_up and self.rect.top > self.screen_rect.top:
self.centery -= self.ai_settings.ship_speed_factor
if self.moving_down and self.rect.bottom < self.screen_rect.bottom:
self.centery += self.ai_settings.ship_speed_factor
# Update rect object from self.center
self.rect.centerx = self.centerx
self.rect.centery = self.centery
def blitme(self):
"""Draw the ship at its current location."""
self.screen.blit(self.image, self.rect)
def center_ship(self):
"""Center the ship on the screen."""
self.centerx = self.screen_rect.centerx
self.centery = self.screen_rect.bottom - 20
|
AlienInvasion-Raph692
|
/AlienInvasion_Raph692-0.1.0-py3-none-any.whl/scripts/ship.py
|
ship.py
|
import sys
import pygame
import json
from bullet import Bullet
from alien import Alien
from time import sleep
from star import Star
from random import randint
def check_keydown_events(event, ai_settings, screen, stats, sb, ship, aliens, bullets):
"""Responding to keypresses."""
# Move the ship to the left or right
if event.key == pygame.K_RIGHT:
ship.moving_right = True
elif event.key == pygame.K_LEFT:
ship.moving_left = True
elif event.key == pygame.K_UP:
ship.moving_up = True
elif event.key == pygame.K_DOWN:
ship.moving_down = True
elif event.key == pygame.K_SPACE:
fire_bullet(ai_settings, screen, ship, bullets)
elif event.key == pygame.K_p and not stats.game_active:
start_game(ai_settings, screen, stats, sb, ship, aliens, bullets)
elif event.key == pygame.K_q:
write_high_score(stats)
sys.exit()
def fire_bullet(ai_settings, screen, ship, bullets):
"""Fire a bullet if limit not reached yet."""
# Create a new bullet and add it to the bullets group.
if len(bullets) < ai_settings.bullets_allowed:
new_bullet = Bullet(ai_settings, screen, ship)
bullets.add(new_bullet)
def check_keyup_events(event, ship):
"""Responding to key releases."""
# Stop moving the ship when releasing arrow keys.
if event.key == pygame.K_RIGHT:
ship.moving_right = False
elif event.key == pygame.K_LEFT:
ship.moving_left = False
elif event.key == pygame.K_UP:
ship.moving_up = False
elif event.key == pygame.K_DOWN:
ship.moving_down = False
def check_events(ai_settings, screen, stats, sb, ship, aliens, bullets, play_button):
"""Respond to key presses and mouse events."""
for event in pygame.event.get():
if event.type == pygame.QUIT:
write_high_score(stats)
sys.exit()
elif event.type == pygame.MOUSEBUTTONDOWN:
mouse_x, mouse_y = pygame.mouse.get_pos()
check_play_button(ai_settings, screen, stats, sb, play_button, ship, aliens, bullets, mouse_y, mouse_x)
elif event.type == pygame.KEYDOWN:
check_keydown_events(event, ai_settings, screen, stats, sb, ship, aliens, bullets)
elif event.type == pygame.KEYUP:
check_keyup_events(event, ship)
def write_high_score(stats):
"""Writing the high score to a file."""
if stats.score >= stats.high_score:
filename = 'high_score.json'
with open(filename, 'w') as f_obj:
json.dump(stats.high_score, f_obj)
def check_play_button(ai_settings, screen, stats, sb, play_button, ship, aliens, bullets, mouse_y, mouse_x):
"""Start a new game when the player clicks Play."""
button_clicked = play_button.rect.collidepoint(mouse_x, mouse_y)
if button_clicked and not stats.game_active:
# Restart game.
start_game(ai_settings, screen, stats, sb, ship, aliens, bullets)
def start_game(ai_settings, screen, stats, sb, ship, aliens, bullets):
"""Start a new game."""
# Reset the game settings.
ai_settings.initialize_dynamic_settings()
# Hide the mouse cursor.
pygame.mouse.set_visible(False)
# Reset the game statistics.
stats.reset_stats()
stats.game_active = True
# Empty the list of bullets and aliens.
bullets.empty()
aliens.empty()
# Create a new fleet and center the ship.
create_fleet(ai_settings, screen, ship, aliens)
ship.center_ship()
# Reset the score board images.
sb.prep_images(ai_settings, screen)
def update_screen(ai_settings, screen, stats, sb, ship, aliens, bullets, play_button, stars):
"""Update images on the screen and flip to the new screen."""
# Redraw the screen during each pass through the loop.
screen.fill(ai_settings.bg_color)
# Redraw all bullets behind ship and aliens.
for bullet in bullets.sprites():
bullet.draw_bullet()
# bullet.blitme() # change bullets to drops
# Redraw ship
ship.blitme()
# Redraw fleet of aliens
aliens.draw(screen)
# Redraw group of stars
stars.draw(screen)
# Show the score
sb.show_score()
# Draw the play button when the game is inactive.
if not stats.game_active:
play_button.draw_button()
# Make the most recently drawn screen visible.
pygame.display.flip()
def check_high_score(stats, sb):
"""Check to see if there's a new high score."""
if stats.score > stats.high_score:
stats.high_score = stats.score
sb.prep_score()
def update_bullets(bullets):
"""Update position of bullets and get rid of old bullets."""
# Update bullet positions.
bullets.update()
# Get rid of bullets that have disappeared.
for bullet in bullets.copy():
if bullet.rect.bottom <= 0:
bullets.remove(bullet)
def check_bullet_alien_collisions(ai_settings, screen, stats, sb, ship, bullets, aliens):
"""Responding to bullet-alien collisions."""
# Remove any bullets and aliens that have collided
# Python returns a collisions dictionary when bullet hits alien.
collisions = pygame.sprite.groupcollide(bullets, aliens, True, True)
if collisions:
# loop through dict to award points for each alien hit.
# each value is a list of aliens hit by a single bullet (key).
for aliens_hit in collisions.values():
stats.score += ai_settings.alien_points * len(aliens_hit)
sb.prep_score()
check_high_score(stats, sb)
# Repopulate the alien fleet if it has been shot down.
if len(aliens) < 1:
# If the entire fleet is destroyed, start a new level.
start_new_level(ai_settings, screen, stats, sb, ship, aliens, bullets)
def start_new_level(ai_settings, screen, stats, sb, ship, aliens, bullets):
"""Start a new level when the entire alien fleet is destroyed."""
bullets.empty()
ai_settings.increase_speed()
# Increase level
stats.level += 1
sb.prep_level()
create_fleet(ai_settings, screen, ship, aliens)
def change_fleet_direction(ai_settings, aliens):
"""Drop the entire fleet and change the fleet's direction."""
for alien in aliens.sprites():
alien.rect.y += ai_settings.fleet_drop_speed
ai_settings.fleet_direction *= -1
def check_fleet_edges(ai_settings, aliens):
"""Respond appropriately if any aliens have reached an edge."""
for alien in aliens.sprites():
if alien.check_edges():
change_fleet_direction(ai_settings, aliens)
break
def ship_hit(ai_settings, stats, screen, sb, ship, aliens, bullets):
"""Respond to ship being hit by an alien."""
if stats.ships_left > 0:
# Decrement ships left.
stats.ships_left -= 1
sb.prep_ships(ai_settings, screen)
# Empty the list of bullets and aliens.
bullets.empty()
aliens.empty()
# Create a new fleet and center the ship.
create_fleet(ai_settings, screen, ship, aliens)
ship.center_ship()
# Pause
sleep(0.2)
else:
stats.game_active = False
pygame.mouse.set_visible(True)
def check_aliens_bottom(ai_settings, stats, screen, sb, ship, aliens, bullets):
"""Check if any aliens have reached the bottom."""
screen_rect = screen.get_rect()
for alien in aliens.sprites():
if alien.rect.bottom >= screen_rect.bottom:
ship_hit(ai_settings, stats, screen, sb, ship, aliens, bullets)
break
def update_aliens(ai_settings, stats, screen, sb, aliens, ship, bullets):
"""
Check if the fleet is at an edge,
and then update the position of all aliens in the fleet.
"""
check_fleet_edges(ai_settings, aliens)
# Check if any aliens have reached the bottom
check_aliens_bottom(ai_settings, stats, screen, sb, ship, aliens, bullets)
aliens.update()
# Look for alien-ship collisions.
if pygame.sprite.spritecollideany(ship, aliens):
ship_hit(ai_settings, stats, screen, sb, ship, aliens, bullets)
# def check_stars_edges(stars):
# """Respond appropriately when stars/raindrops disappear off the bottom of the screen."""
# for star in stars.sprites():
# star.check_bottom()
#
#
# def update_stars(stars):
# """Update the position of all stars/raindrops."""
# check_stars_edges(stars)
# stars.update()
def get_number_aliens_x(ai_settings, alien_width):
"""Determine the number of aliens that fit in a row."""
available_space_x = ai_settings.screen_width - alien_width * 2
number_aliens_x = int(available_space_x / (2 * alien_width))
return number_aliens_x
# def get_number_stars_x(ai_settings, star_width):
# """Determine the number of stars that fit in a row."""
# available_space_x = ai_settings.screen_width - 2 * star_width
# number_stars_x = int(available_space_x / (2 * star_width))
# return number_stars_x
def get_number_rows(ai_settings, ship_height, alien_height):
"""Determine the number of rows of aliens that fit on the screen."""
available_space_y = ai_settings.screen_height - 3 * alien_height - ship_height
number_rows = int(available_space_y / (2 * alien_height))
return number_rows
# def get_number_rows_star(ai_settings, star_height):
# """Determine the number of rows of stars that fit on the screen."""
# number_rows = int(ai_settings.screen_height / (2 * star_height))
# return number_rows
def create_alien(ai_settings, screen, aliens, alien_number, row_number):
"""Create an alien and place it in a row."""
alien = Alien(ai_settings, screen)
alien_width = alien.rect.width
alien.x = alien_width + alien_width * 2 * alien_number
alien.rect.x = alien.x
alien.rect.y = alien.rect.height + 2 * alien.rect.height * row_number
aliens.add(alien)
# def create_star(ai_settings, screen, stars, row_number):
# """Create a star and place it in a row."""
# star = Star(ai_settings, screen)
# star_width = star.rect.width
#
# # Introduce randomness when placing stars
# random_number = randint(-10, 10) # (-30, 30) for stars and *4 in row below
# star.x = star_width + star_width * 2 * random_number
# star.rect.x = star.x
# star.rect.y = star.rect.height + 2 * star.rect.height * row_number
# stars.add(star)
def create_fleet(ai_settings, screen, ship, aliens):
"""Create a full fleet of aliens."""
# Create an alien and find the number of aliens in a row.
alien = Alien(ai_settings, screen)
num_aliens_x = get_number_aliens_x(ai_settings, alien.rect.width)
num_rows = get_number_rows(ai_settings, ship.rect.height, alien.rect.height)
# Create the fleet of aliens.
for row_number in range(num_rows):
for alien_number in range(num_aliens_x):
create_alien(ai_settings, screen, aliens, alien_number, row_number)
# def create_stars(ai_settings, screen, stars):
# """Create a group of stars."""
# star = Star(ai_settings, screen)
# num_stars_x = get_number_stars_x(ai_settings, star.rect.width)
# num_rows = get_number_rows_star(ai_settings, star.rect.height)
#
# # Create stars
# for row_number in range(num_rows):
# for star_number in range(num_stars_x):
# create_star(ai_settings, screen, stars, row_number)
|
AlienInvasion-Raph692
|
/AlienInvasion_Raph692-0.1.0-py3-none-any.whl/scripts/game_functions.py
|
game_functions.py
|
# AlignQC
See wiki for the most up-to-date manual.
[https://github.com/jason-weirather/AlignQC/wiki](https://github.com/jason-weirather/AlignQC/wiki)
**Generate a report** on sequencing alignments to understand read alignments vs read sizes, error patterns in reads, annotations and rarefactions.
**Share your reports** with anyone who has an internet browser.
Ultimately this software should be suitable for assessing alignments from a variety of sequencing platforms and a variety of sequence types. The focus for the first version of this software is on the transcriptome analysis of third generation sequencing data outputs.
##### Report Generation Requirements
- Linux
- R
- python 2.7+
##### Report Viewing Requirements
- Mozilla Firefox or Google Chrome Browser
##### Installation (optional)
You can add the `AlignQC/bin` directory to your path if you want to call `alignqc` directly from the command line.
If you require a path for python 2.7+ other than `/usr/bin/python`, you can modify `AlignQC/bin/alignqc` to reflect this.
If you prefer to invoke AlignQC directly from python you can, e.g., `python AlignQC/bin/alignqc`
By default `Rscript` should be installed in your path; if it is not, you can specify a location during the `analysis` command with the `--rscript_path` option.
##### Fast start
The following command should be sufficient for assessing a long read alignment.
`alignqc analysis long_reads.bam -r ref_genome.fa -a ref_transcriptome.gpd -o long_reads.alignqc.xhtml`
If you don't readily have your reference genome or reference annotation available you can try the following.
`alignqc analysis long_reads.bam --no_reference --no_annotation -o long_reads.alignqc.xhtml`
## AlignQC programs
Currently AlignQC only offers the `analysis` program.
## Analysis
`alignqc analysis`
The analysis command is the most basic command for assessing an alignment. It provides reports and plots in xhtml format.
### Inputs
A complete list of optional commands for each sub command is available with the `-h` option.
`alignqc analysis -h`
This will report all required and optional inputs for the analysis command.
#### 1. BAM format alignment file
The preferred format for transcriptome analysis is GMAP output (the 'samse' format). The default output, or any output that can produce multiple alignment paths for a read, is recommended if you want the ability to assess chimeric reads.
You can convert the SAM output of GMAP into BAM format using Samtools.
http://www.htslib.org/
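For example, assuming the GMAP alignment is in a file called `long_reads.sam` (a placeholder name), a conversion along these lines should work with most versions of samtools:
`samtools view -bS long_reads.sam > long_reads.bam`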
Any properly formatted BAM file should work; however, this software has only been tested with GMAP and HISAT outputs at the moment.
http://samtools.github.io/hts-specs/SAMv1.pdf
Please note that analyzing very large HiSeq datasets has not been tested and memory requirements have not been optimized for this type of run. If you want to check error patterns of HiSeq data, downsampling is advised.
#### (optional) 2. Genome fasta file
The reference genome these sequences were aligned to, in fasta format, can allows you to assess the error rates and error patterns in the alignments.
If you choose not to use a reference genome you must explicitly specify `--no_reference`
#### (optional) 3. GenePred format annotation file
Providing an annotation file gives context such as known transcripts, exons, introns, and intergenic regions to help describe the data. It is also necessary for rarefaction curves.
If you choose not to use a reference annotation you must explicitly specify `--no_annotation`
The genePred format is described here.
http://www.healthcare.uiowa.edu/labs/au/IDP/IDP_gpd_format.asp
It is also described by UCSC as the "Gene Predictions and RefSeq Genes with Gene Names" genePred format (a small parsing sketch follows the field list below).
https://genome.ucsc.edu/FAQ/FAQformat.html#format9
- geneName
- name
- chrom
- strand
- txStart
- txEnd
- cdsStart
- cdsEnd
- exonCount
- exonStarts
- exonEnds
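The eleven tab-separated columns above can be split into a record directly. Here is a minimal sketch (not part of AlignQC) under the usual UCSC convention that exonStarts and exonEnds are comma-separated coordinate lists:
```python
# Illustrative only -- AlignQC does not ship this helper.
def parse_genepred_line(line):
    f = line.rstrip("\n").split("\t")
    return {
        'geneName': f[0], 'name': f[1], 'chrom': f[2], 'strand': f[3],
        'txStart': int(f[4]), 'txEnd': int(f[5]),
        'cdsStart': int(f[6]), 'cdsEnd': int(f[7]),
        'exonCount': int(f[8]),
        # exonStarts/exonEnds are comma-separated and may carry a trailing comma
        'exonStarts': [int(x) for x in f[9].rstrip(',').split(',')],
        'exonEnds': [int(x) for x in f[10].rstrip(',').split(',')],
    }
```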
### Outputs
At least one output format must be specified.
To view the output xhtml, the Mozilla Firefox or Google Chrome browser is recommended.
Since the recommended output type contains large URI data embedded in the xhtml page, Internet Explorer will likely not be compatible. The memory requirements of the regular output may strain some systems.
If you only want to share the visual results with others we recommend the `--portable_output` option because this version contains only the main text and png files.
If accessing the embedded data in the xhtml is a problem, you can output the data in a folder format `--output_folder`, which can provide you more convenient access.
#### (option 1) Standard xhtml output
`-o` or `--output`
The recommended output of this software is a single xhtml file that contains all the relevant files from the analysis embedded as base64 encoded URI data.
This means you can click any of the links in the document and the browser will download the data of interest (i.e. PDF format versions of a figure) from within the document.
Bed tracks compatible with the UCSC genome browser are also provided.
#### (option 2) Portable xhtml output
`--portable_output`
This output is recommended if you want to email these results or share them over a bandwidth limited connection. This format only has the png images and webpage text. Links are disabled.
#### (option 3) Output folder
`--output_folder`
Store an output folder that contains all the data and figures. An xhtml file is still available in this folder.
|
AlignQC
|
/AlignQC-2.0.5.tar.gz/AlignQC-2.0.5/README.md
|
README.md
|
import sys, argparse, re, gzip, inspect, os
from seqtools.statistics import average
def main(args):
#define read name programs
#ONT basecalls
#ont matches a uuid4
ont_prog = re.compile('^[a-f0-9]{8}-[a-f0-9]{4}-4[a-f0-9]{3}-[89ab][a-f0-9]{3}-[a-f0-9]{12}_Basecall_2D_(.*)$')
pacbio_prog = re.compile('^(m[^\/]+)\/(\d+)\/(ccs|\d+_\d+)')
inf = None
if re.search('\.gz$',args.input):
inf = gzip.open(args.input)
else:
inf = open(args.input)
unclassified = {'aligned':0,'unaligned':0}
classified = {}
pb_cell = {}
#pb_mol = set()
for line in inf:
f = line.rstrip().split("\t")
name = f[0]
m_ont = ont_prog.match(name)
m_pb = pacbio_prog.match(name)
if m_ont:
if 'ONT' not in classified:
classified['ONT'] = {}
type = m_ont.group(1)
if type not in classified['ONT']:
classified['ONT'][type] = {'aligned':0,'unaligned':0}
if f[1] != 'unaligned':
classified['ONT'][type]['aligned']+=1
else:
classified['ONT'][type]['unaligned']+=1
elif m_pb:
cell = m_pb.group(1)
mol = int(m_pb.group(2))
if cell not in pb_cell: pb_cell[cell] = {'molecules':set(),'reads':0,'molecules_aligned':set()}
pb_cell[cell]['molecules'].add(mol)
pb_cell[cell]['reads'] += 1
#pb_mol.add(mol)
if 'PacBio' not in classified:
classified['PacBio'] = {}
type = 'ccs'
if m_pb.group(3) != 'ccs': type = 'subread'
if type not in classified['PacBio']:
classified['PacBio'][type] = {'aligned':0,'unaligned':0}
if f[1] != 'unaligned':
classified['PacBio'][type]['aligned']+=1
pb_cell[cell]['molecules_aligned'].add(mol)
else:
classified['PacBio'][type]['unaligned']+=1
else:
if f[1] != 'unaligned':
unclassified['aligned']+=1
else:
unclassified['unaligned']+=1
inf.close()
# Finished reading the reads now we can make a report
of = open(args.output_base,'w')
if len(classified.keys()) > 0:
of.write("SP\n")
for classification in sorted(classified.keys()):
for subclass in sorted(classified[classification].keys()):
dat = classified[classification][subclass]
of.write("GN\t"+classification+"\t"+subclass+"\t"+str(sum(dat.values()))+"\t"+str(dat['aligned'])+"\t"+str(dat['unaligned'])+"\n")
of.write("GN\tUnclassified\t\t"+str(sum(unclassified.values()))+"\t"+str(unclassified['aligned'])+"\t"+str(unclassified['unaligned'])+"\n")
if 'PacBio' in classified:
of.write("PB\tCell Count\t"+str(len(pb_cell.keys()))+"\n")
of.write("PB\tMolecule Count\t"+str(sum([len(pb_cell[x]['molecules']) for x in pb_cell.keys()]))+"\n")
of.write("PB\tAligned Molecule Count\t"+str(sum([len(pb_cell[x]['molecules_aligned']) for x in pb_cell.keys()]))+"\n")
of.write("PB\tMax Reads Per Cell\t"+str(max([pb_cell[x]['reads'] for x in pb_cell.keys()]))+"\n")
of.write("PB\tAvg Reads Per Cell\t"+str(average([pb_cell[x]['reads'] for x in pb_cell.keys()]))+"\n")
of.write("PB\tMin Reads Per Cell\t"+str(min([pb_cell[x]['reads'] for x in pb_cell.keys()]))+"\n")
of.write("PB\tMax Molecules Per Cell\t"+str(max([len(pb_cell[x]['molecules']) for x in pb_cell.keys()]))+"\n")
of.write("PB\tAvg Molecules Per Cell\t"+str(average([len(pb_cell[x]['molecules']) for x in pb_cell.keys()]))+"\n")
of.write("PB\tMin Molecules Per Cell\t"+str(min([len(pb_cell[x]['molecules']) for x in pb_cell.keys()]))+"\n")
of.write("PB\tMax Aligned Molecules Per Cell\t"+str(max([len(pb_cell[x]['molecules_aligned']) for x in pb_cell.keys()]))+"\n")
of.write("PB\tAvg Aligned Molecules Per Cell\t"+str(average([len(pb_cell[x]['molecules_aligned']) for x in pb_cell.keys()]))+"\n")
of.write("PB\tMin Aligned Molecules Per Cell\t"+str(min([len(pb_cell[x]['molecules_aligned']) for x in pb_cell.keys()]))+"\n")
mols = [[len(pb_cell[x]['molecules_aligned']),len(pb_cell[x]['molecules'])] for x in pb_cell.keys()]
smols = sorted(mols,key=lambda x: x[0])
of.write("PB\tMolecules Per Cell Distro\t"+",".join(['/'.join([str(x[0]),str(x[1])]) for x in smols])+"\n")
of1 = open(args.output_base+'.pacbio','w')
for val in smols:
of1.write(str(val[0])+"\t"+str(val[1])+"\n")
of1.close()
of.close()
def do_args():
parser = argparse.ArgumentParser(description="",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('input',help="input lengths table")
parser.add_argument('output_base',help="output file basename")
args = parser.parse_args()
return args
def external_cmd(cmd):
cache_argv = sys.argv
sys.argv = cmd
args = do_args()
main(args)
sys.argv = cache_argv
if __name__=="__main__":
args = do_args()
main(args)
|
AlignQC
|
/AlignQC-2.0.5.tar.gz/AlignQC-2.0.5/alignqc/get_platform_report.py
|
get_platform_report.py
|
import argparse, sys, os, gzip, pickle, zlib, base64
from shutil import rmtree, copy
from multiprocessing import cpu_count, Pool, Lock
from tempfile import mkdtemp, gettempdir
from subprocess import Popen, PIPE
from seqtools.format.sam.bam.files import BAMFile
from seqtools.range import GenomicRange
import seqtools.cli.utilities.bam_bgzf_index as bam_bgzf_index
## The purpose of this script is to read through a bam alignment and record as much information as possible from it. ##
## The bam should be indexed ahead of time in our index format.
gfinished = None
gtotal = None
glock = Lock()
g_count = 0
g_sortpipe = None
def do_chunk(ilines,infile,args):
"""Takes in a the lines from the index file to work on in array form,
and the bam file name, and the arguments
returns a list of the necessary data for chimera detection ready for sorting
"""
ilines = [x.rstrip().split("\t") for x in ilines]
coord = [int(x) for x in ilines[0][2:4]]
bf = BAMFile(infile,BAMFile.Options(blockStart=coord[0],innerStart=coord[1]))
results = []
for i in range(0,len(ilines)):
flag = int(ilines[i][5])
e = bf.read_entry()
#if not e: break
value = None
if e.is_aligned():
tx = e.get_target_transcript(args.minimum_intron_size)
value = {'qrng':e.actual_original_query_range.get_range_string(),'tx':tx.get_gpd_line(),'flag':flag,'qlen':e.original_query_sequence_length,'aligned_bases':e.get_aligned_bases_count()}
results.append(e.entries.qname+"\t"+base64.b64encode(
zlib.compress(
pickle.dumps(value))))
#results.append([e.value('qname'),zlib.compress(pickle.dumps(value))])
else:
value = {'qrng':'','tx':'','flag':flag,'qlen':e.original_query_sequence_length,'aligned_bases':0}
results.append(e.entries.qname+"\t"+base64.b64encode(
zlib.compress(
pickle.dumps(value))))
#results.append([e.value('qname'),zlib.compress(pickle.dumps(value))])
return results
def process_chunk(res):
global glock
glock.acquire()
#global g_preordered
global g_sortpipe
global g_count
g_count += len(res)
for val in res:
g_sortpipe.stdin.write(val+"\n")
sys.stderr.write(str(g_count)+" \r")
glock.release()
def main(args):
bind_path = args.input+'.bgi'
if not os.path.isfile(bind_path):
bind_path = args.tempdir+'/myindex.bgi'
cmd = ["bam_bgzf_index.py",args.input,"-o",bind_path,"--threads",str(args.threads)]
bam_bgzf_index.external_cmd(cmd)
#call(cmd.split())
#parallel_thread = ''
#if args.threads > 1: parallel_thread = ' --parallel='+str(args.threads)+' '
#cmd1 = 'sort '+parallel_thread+' -k1,1 -T '+args.tempdir+'/'
if args.threads > 1:
cmd1 = ['sort','-k1,1','-T',args.tempdir+'/',
'--parallel='+str(args.threads)]
else:
cmd1 = ['sort','-k1,1','-T',args.tempdir+'/']
cmd2 = 'gzip'
global g_sortpipe
global g_count
g_count = 0
of = open(args.output,'wb')
if os.name != 'nt':
gzippipe = Popen(cmd2.split(),stdout=of,stdin=PIPE,close_fds=True)
g_sortpipe = Popen(cmd1,stdout=gzippipe.stdin,stdin=PIPE,close_fds=True)
else:
sys.stderr.write("WARNING: Windows OS detected. operating in single thread mode.\n")
if args.threads > 1: raise ValueError('Error. --threads must be 1 for windows operation')
gzippipe = Popen(cmd2,stdout=of,stdin=PIPE, shell=True)
g_sortpipe = Popen(cmd1,stdout=gzippipe.stdin,stdin=PIPE, shell=True)
inf = gzip.open(bind_path)
chunksize = args.chunk_size
buffer = []
if args.threads > 1:
p = Pool(processes=args.threads)
for line in inf:
buffer.append(line)
if len(buffer)>=chunksize:
if args.threads > 1:
p.apply_async(do_chunk,args=(buffer[:],args.input,args),callback=process_chunk)
else:
r = do_chunk(buffer[:],args.input,args)
process_chunk(r)
buffer = []
if len(buffer) > 0:
if args.threads > 1:
p.apply_async(do_chunk,args=(buffer[:],args.input,args),callback=process_chunk)
else:
r= do_chunk(buffer[:],args.input,args)
process_chunk(r)
if args.threads > 1:
p.close()
p.join()
inf.close()
sys.stderr.write("\n")
g_sortpipe.communicate()
gzippipe.communicate()
of.close()
def do_inputs():
# Setup command line inputs
parser=argparse.ArgumentParser(description="",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('input',help="BAMFILE input")
parser.add_argument('-o','--output',help="gzipped output",required=True)
parser.add_argument('--threads',type=int,default=cpu_count(),help="INT number of threads to run. Default is system cpu count")
parser.add_argument('--minimum_intron_size',type=int,default=68)
parser.add_argument('--chunk_size',type=int,default=10000,help="number of alignments to process at a time")
# Temporary working directory step 1 of 3 - Definition
group = parser.add_mutually_exclusive_group()
group.add_argument('--tempdir',default=gettempdir(),help="The temporary directory is made and destroyed here.")
group.add_argument('--specific_tempdir',help="This temporary directory will be used, but will remain after executing.")
args = parser.parse_args()
# Temporary working directory step 2 of 3 - Creation
setup_tempdir(args)
return args
def setup_tempdir(args):
if args.specific_tempdir:
if not os.path.exists(args.specific_tempdir):
os.makedirs(args.specific_tempdir.rstrip('/'))
args.tempdir = args.specific_tempdir.rstrip('/')
if not os.path.exists(args.specific_tempdir.rstrip('/')):
sys.stderr.write("ERROR: Problem creating temporary directory\n")
sys.exit()
else:
args.tempdir = mkdtemp(prefix="weirathe.",dir=args.tempdir.rstrip('/'))
if not os.path.exists(args.tempdir.rstrip('/')):
sys.stderr.write("ERROR: Problem creating temporary directory\n")
sys.exit()
if not os.path.exists(args.tempdir):
sys.stderr.write("ERROR: Problem creating temporary directory\n")
sys.exit()
return
def external_cmd(cmd):
#need to save arguments
cache_argv = sys.argv
sys.argv = cmd
args = do_inputs()
main(args)
#need to set the arguments back to what they were
sys.argv = cache_argv
return
if __name__=="__main__":
#do our inputs
args = do_inputs()
main(args)
|
AlignQC
|
/AlignQC-2.0.5.tar.gz/AlignQC-2.0.5/alignqc/bam_preprocess.py
|
bam_preprocess.py
|
import argparse, sys, os, gzip, itertools, inspect, pickle, zlib, base64
from shutil import rmtree
from multiprocessing import cpu_count, Pool, Lock
from tempfile import mkdtemp, gettempdir
from subprocess import Popen, PIPE
from seqtools.range import GenomicRangeFromString
from seqtools.range.multi import ranges_to_coverage
from seqtools.format.gpd import GPD, SortedOutputFile as SortedGPDOutputFile
from seqtools.stream import GZippedOutputFile
## The purpose of this script is to read through a bam alignment and record as much information as possible from it. ##
## The bam should be indexed ahead of time in our index format.
glock = Lock()
g_done = None
g_lines = None
best_gpd = None
chimera_gpd = None
technical_chimera_gpd = None
technical_atypical_chimera_gpd = None
gapped_gpd = None
gpd_total = None
g_lengths = None
def process_buffer_output(results):
#print results
global glock
glock.acquire()
global best_gpd
global g_lengths
global chimera_gpd
global gapped_gpd
global technical_chimera_gpd
global technical_atypical_chimera_gpd
global g_done
global g_lines
g_done += len(results['lengths'])
g_lines += results['buffer_quantity']
sys.stderr.write(str(g_lines)+" alignments "+str(g_done)+" reads \r")
for v in results['best']:
best_gpd.write(v+"\n")
for v in results['lengths']:
g_lengths.write(v+"\n")
for v in results['chimera']:
chimera_gpd.write(v+"\n")
for v in results['gapped']:
gapped_gpd.write(v+"\n")
for v in results['technical_chimera']:
technical_chimera_gpd.write(v+"\n")
for v in results['technical_atypical_chimera']:
technical_atypical_chimera_gpd.write(v+"\n")
#'original_count':original_count,'gapped_count':gapped_count,'technical_atypical_chimera_count':technical_atypical_chimera_count,'techinical_chimera_count':technical_chimera_count,'chimera_count':chimera_count}
glock.release()
def do_buffer(buffer,args):
lengths = []
best = []
chimera = []
technical_chimera = []
gapped = []
technical_atypical_chimera = []
chimera_count = 0
technical_chimera_count = 0
gapped_count = 0
technical_atypical_chimera_count = 0
original_count = 0
results = []
buffer_quantity = 0
for qname in sorted(buffer.keys()):
buffer_quantity += len(buffer[qname])
dat = [pickle.loads(zlib.decompress(base64.b64decode(x))) for x in buffer[qname]]
for i in range(0,len(dat)):
if dat[i]['aligned_bases'] > 0:
dat[i]['tx'] = GPD(dat[i]['tx'])
dat[i]['qrng'] = GenomicRangeFromString(dat[i]['qrng'])
else:
dat[i]['tx'] = None
dat[i]['qrng'] = None
unaligned = [x for x in dat if x['aligned_bases'] == 0]
aligned = [x for x in dat if x['aligned_bases'] > 0]
#now dat should be set up with anything we need
if len(aligned)==0:
lengths.append(qname+"\tunaligned\t0\t0\t"+str(unaligned[0]['qlen']))
continue
#if len([x for x in aligned if x['flag'] & 2304==0])==0:
# lengths.append(qname+"\tunaligned\t0\t0\t"+str(aligned[0]['qlen']))
# continue
if len(aligned)==1:
if dat[0]['aligned_bases'] > 0:
lengths.append(qname+"\toriginal\t"+str(dat[0]['aligned_bases'])+"\t"+str(dat[0]['aligned_bases'])+"\t"+str(dat[0]['qlen']))
best.append(dat[0]['tx'].get_gpd_line())
original_count += 1
continue
# we have multiple paths to look through
qlen = max([x['qlen'] for x in aligned])
#print aligned
#print [x['tx'].get_gene_name() for x in aligned]
best_possible = [i for i in range(0,len(aligned)) if aligned[i]['flag'] & 2304 == 0]
best_ind = 0
if len(best_possible) == 0:
## only secondary alignments to look at; keep the one with the most aligned bases
longest = 0
for i in range(0,len(aligned)):
if aligned[i]['aligned_bases'] > longest:
longest = aligned[i]['aligned_bases']
best_ind = i
else:
best_ind= best_possible[0]
best.append(dat[best_ind]['tx'].get_gpd_line())
v = check_paths(aligned,best_ind,args)
o_qlen = dat[best_ind]['qrng'].length
v_qlen = v['qlen']
if v['type'] == 'chimera':
chimera_count += 1
for p in v['path']:
chimera.append(p['tx'].get_gpd_line())
elif v['type'] == 'self-chimera':
technical_chimera_count += 1
for p in v['path']:
technical_chimera.append(p['tx'].get_gpd_line())
elif v['type'] == 'self-chimera-atypical':
technical_atypical_chimera_count += 1
for p in v['path']:
technical_atypical_chimera.append(p['tx'].get_gpd_line())
elif v['type'] == 'gapped':
gapped_count += 1
for p in v['path']:
gapped.append(p['tx'].get_gpd_line())
elif v['type'] == 'original':
original_count +=1
else:
sys.stderr.write("WARNING unaccounted for type\n")
lengths.append(qname+"\t"+v['type']+"\t"+str(o_qlen)+"\t"+str(v_qlen)+"\t"+str(max([x['qlen'] for x in aligned])))
return {'lengths':lengths,'gapped':gapped,'chimera':chimera,'best':best,'technical_chimera':technical_chimera,'technical_atypical_chimera':technical_atypical_chimera,'original_count':original_count,'gapped_count':gapped_count,'technical_atypical_chimera_count':technical_atypical_chimera_count,'technical_chimera_count':technical_chimera_count,'chimera_count':chimera_count,'buffer_quantity':buffer_quantity}
def main(args):
chunksize = args.chunk_size
inf = gzip.open(args.input)
args.output = args.output.rstrip('/')
if not os.path.exists(args.output):
os.makedirs(args.output)
buffer = {}
prev = None
global g_done
global g_lines
g_done = 0
g_lines = 0
global best_gpd
best_gpd = SortedGPDOutputFile(args.output+'/best.sorted.gpd.gz',tempdir=args.tempdir)
global g_lengths
g_lengths = GZippedOutputFile(args.output+'/lengths.txt.gz')
#cmd = "gzip"
#lof = open(args.output+'/lengths.txt.gz','w')
#plen = Popen(cmd.split(),stdout=lof,stdin=PIPE,close_fds=True)
#g_lengths = plen.stdin
global chimera_gpd
chimera_gpd = GZippedOutputFile(args.output+'/chimera.gpd.gz')
global technical_chimera_gpd
technical_chimera_gpd = GZippedOutputFile(args.output+'/technical_chimeras.gpd.gz')
global technical_atypical_chimera_gpd
technical_atypical_chimera_gpd = GZippedOutputFile(args.output+'/technical_atypical_chimeras.gpd.gz')
global gapped_gpd
gapped_gpd = GZippedOutputFile(args.output+'/gapped.gpd.gz')
global gpd_total
gpd_total = {'original_count':0,'gapped_count':0,'technical_atypical_chimera_count':0,'techinical_chimera_count':0,'chimera_count':0,'unaligned':0}
if args.threads > 1:
p = Pool(processes=args.threads)
z = 0
for line in inf:
(qname, data) = line.rstrip().split("\t")
if qname!=prev:
buftot = len(buffer.keys())
if buftot >= chunksize:
if args.threads > 1:
p.apply_async(do_buffer,args=(buffer,args),callback=process_buffer_output)
else:
r = do_buffer(buffer,args)
process_buffer_output(r)
buffer = {}
if qname not in buffer: buffer[qname] = []
buffer[qname].append(data)
prev = qname
if len(buffer.keys()) > 0:
if args.threads > 1:
p.apply_async(do_buffer,args=(buffer,args),callback=process_buffer_output)
else:
r = do_buffer(buffer,args)
process_buffer_output(r)
if args.threads > 1:
p.close()
p.join()
sys.stderr.write("\n")
best_gpd.close()
chimera_gpd.close()
technical_chimera_gpd.close()
technical_atypical_chimera_gpd.close()
gapped_gpd.close()
g_lengths.close()
#plen.communicate()
#lof.close()
inf.close()
# Temporary working directory step 3 of 3 - Cleanup
if not args.specific_tempdir:
rmtree(args.tempdir)
# path
# aligned_bases - bases aligned not counting any deletions or insertions
# indecies -
# type - original/chimera/self-chimera/gapped
# qlen - range spanned by query alignments
def check_paths(path_data,best_ind,args):
#other_inds = [x for x in range(0,len(path_data)) if x != best_ind]
possibles = get_index_sets(len(path_data))
new_best = [path_data[best_ind]]
new_bases = path_data[best_ind]['aligned_bases']
new_inds = set([best_ind])
new_type = 'original'
new_qlen = path_data[best_ind]['qrng'].length
for possible_path in possibles:
if best_ind not in possible_path: continue # only consider path sets that have our best index in it
res = evaluate_path(path_data,possible_path,best_ind,args)
if res['any']:
bases = sum([x['aligned_bases'] for x in res['path']])
if bases > new_bases:
new_best = res['path']
new_bases = bases
new_inds = set(possible_path)
qrngs = [res['path'][0]['qrng']]
for i in range(1,len(res['path'])):
if qrngs[-1].overlaps(res['path'][i]['qrng']):
qrngs[-1] = qrngs[-1].merge(res['path'][i]['qrng'])
else: qrngs.append(res['path'][i]['qrng'])
#new_qlen = sum([x.length() for x in qrngs])
new_qlen = sum([x.length for x in ranges_to_coverage(qrngs)])
if res['gapped']: new_type = 'gapped'
elif res['chimera']: new_type = 'chimera'
elif res['self-chimera']: new_type = 'self-chimera'
elif res['self-chimera-atypical']: new_type = 'self-chimera-atypical'
else:
sys.stderr.write("WARNING: Unaccounted for type\n")
return {'path':new_best, 'aligned_bases':new_bases, 'indecies':new_inds,'type':new_type,'qlen':new_qlen}
#print path_data[best_ind]
# Create a dictionary with the following information
# path: a list of alignments order by query placement
# gapped: is it a gapped alignment
# chimera: is it a fusion-like
# self-chimera: is it a + - of an overlapping target sequence
def evaluate_path(path_data,possible_path,best_ind,args):
pord = sorted([path_data[i] for i in possible_path],key=lambda x: x['qrng'].start)
best_bases = path_data[best_ind]['aligned_bases']
bases = sum([x['aligned_bases'] for x in pord])
res = {'path':pord,'gapped':False,'chimera':False,'self-chimera':False,'self-chimera-atypical':False,'any':False}
if len(path_data) <= 1: return res
if bases+bases*args.required_fractional_improvement < best_bases:
return res
for p in pord:
if p['aligned_bases'] < args.min_aligned_bases: return res
# check for query overlaps ... not a useful build
for i in range(0,len(pord)):
for j in range(i+1,len(pord)):
if args.max_query_gap:
if pord[i]['qrng'].distance(pord[j]['qrng']) > args.max_query_gap: return res
if pord[i]['qrng'].overlap_size(pord[j]['qrng']) > args.max_query_overlap:
return res
chrcount = len(set([x['tx'].range.chr for x in pord]))
# check for target overlaps ... not gapped or chimera but maybe self-chimera
for i in range(0,len(pord)):
for j in range(i+1,len(pord)):
if pord[i]['tx'].overlap_size(pord[j]['tx']) > args.max_target_overlap:
#res['gapped'] = False
#res['chimera'] = False
if pord[i]['tx'].strand != pord[j]['tx'].strand and chrcount == 1:
res['self-chimera'] = True
res['any'] = True
else:
res['self-chimera-atypical'] = True
res['any'] = True
return res
for i in range(0,len(pord)):
for j in range(i+1,len(pord)):
if args.max_target_gap:
dist = pord[i]['tx'].range.distance(pord[j]['tx'].range)
if dist > args.max_target_gap or dist == -1:
res['chimera'] = True
res['gapped'] = False
res['any'] = True
if len(pord) > 1 and not res['self-chimera'] and not res['chimera']:
res['gapped'] = True
res['any'] = True
return res
def get_index_sets(indlen):
r = []
inds = range(0,indlen)
for l in range(1,len(inds)+1):
for subset in itertools.combinations(inds,l):
r.append(subset)
return r
def do_inputs():
# Setup command line inputs
parser=argparse.ArgumentParser(description="",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('input',help="use bam_preprocess result as input")
parser.add_argument('-o','--output',help="OUTPUTDIR",required=True)
parser.add_argument('--threads',type=int,default=cpu_count(),help="INT number of threads to run. Default is system cpu count")
parser.add_argument('--minimum_intron_size',type=int,default=68)
parser.add_argument('--chunk_size',type=int,default=10000,help="number of reads to do at once")
# Arguments for finding alternate multi alignment paths
parser.add_argument('--min_aligned_bases',type=int,default=50,help="Don't consider very short alignments")
parser.add_argument('--max_query_overlap',type=int,default=10,help="Consider two alignments incompatible if greater overlap than this")
parser.add_argument('--max_target_overlap',type=int,default=10,help="Consider two alignments incompatible if greater overlap than this")
parser.add_argument('--max_target_gap',type=int,default=500000,help="Not a gapped alignment if gap is greater than this")
parser.add_argument('--max_query_gap',type=int,default=500000,help="Consider a gapped alignment incompatible if the gap is greater than this")
parser.add_argument('--required_fractional_improvement',type=float,default=0.2,help="combination path should be this much better than best single alignment")
# Temporary working directory step 1 of 3 - Definition
group = parser.add_mutually_exclusive_group()
group.add_argument('--tempdir',default=gettempdir(),help="The temporary directory is made and destroyed here.")
group.add_argument('--specific_tempdir',help="This temporary directory will be used, but will remain after executing.")
args = parser.parse_args()
# Temporary working directory step 2 of 3 - Creation
setup_tempdir(args)
return args
def setup_tempdir(args):
if args.specific_tempdir:
if not os.path.exists(args.specific_tempdir):
os.makedirs(args.specific_tempdir.rstrip('/'))
args.tempdir = args.specific_tempdir.rstrip('/')
if not os.path.exists(args.specific_tempdir.rstrip('/')):
sys.stderr.write("ERROR: Problem creating temporary directory\n")
sys.exit()
else:
args.tempdir = mkdtemp(prefix="weirathe.",dir=args.tempdir.rstrip('/'))
if not os.path.exists(args.tempdir.rstrip('/')):
sys.stderr.write("ERROR: Problem creating temporary directory\n")
sys.exit()
if not os.path.exists(args.tempdir):
sys.stderr.write("ERROR: Problem creating temporary directory\n")
sys.exit()
return
def external_cmd(cmd):
#need to save arguments
cache_argv = sys.argv
sys.argv = cmd
args = do_inputs()
main(args)
#need to set the arguments back to what they were
sys.argv = cache_argv
return
if __name__=="__main__":
#do our inputs
args = do_inputs()
main(args)
|
AlignQC
|
/AlignQC-2.0.5.tar.gz/AlignQC-2.0.5/alignqc/traverse_preprocessed.py
|
traverse_preprocessed.py
|
import sys, argparse, gzip, re, os, inspect, itertools
from multiprocessing import Pool, cpu_count
from seqtools.format.gpd import GPDStream
from seqtools.range.multi import merge_ranges, subtract_ranges, BedArrayStream, sort_ranges
from seqtools.range import GenomicRange
from seqtools.stream import MultiLocusStream
def main(args):
inf = None
chrlens = {}
chrbed = []
if re.search('\.gz$',args.chromosome_lengths):
inf = gzip.open(args.chromosome_lengths)
else:
inf = open(args.chromosome_lengths)
for line in inf:
f = line.rstrip().split("\t")
chrlens[f[0]] = int(f[1])
chrbed.append(GenomicRange(f[0],1,int(f[1])))
inf.close()
inf = None
exonbed = []
txbed = []
sys.stderr.write("Reading Exons\n")
if re.search('\.gz$',args.annotation_gpd):
inf = gzip.open(args.annotation_gpd)
else:
inf = open(args.annotation_gpd)
gs = GPDStream(inf)
for gpd in gs:
exonbed += [x.range for x in gpd.exons]
txbed.append(gpd.range)
inf.close()
sys.stderr.write("Merging "+str(len(txbed))+" transcripts\n")
txbed = merge_ranges(txbed)
sys.stderr.write(str(len(txbed))+" transcripts after merging\n")
sys.stderr.write("Finding intergenic\n")
intergenicbed = subtract_ranges(chrbed,txbed)
sys.stderr.write("Found "+str(len(intergenicbed))+" intergenic regions\n")
intergenicbp = sum([x.length for x in intergenicbed])
sys.stderr.write("Intergenic size: "+str(intergenicbp)+"\n")
sys.stderr.write("Merging "+str(len(exonbed))+" exons\n")
exonbed = merge_ranges(exonbed)
sys.stderr.write(str(len(exonbed))+" exons after merging\n")
sys.stderr.write("Finding introns\n")
intronbed = subtract_ranges(txbed,exonbed)
sys.stderr.write("Found "+str(len(intronbed))+" introns\n")
chrbp = sum([x.length for x in chrbed])
sys.stderr.write("Genome size: "+str(chrbp)+"\n")
txbp = sum([x.length for x in txbed])
sys.stderr.write("Tx size: "+str(txbp)+"\n")
exonbp = sum([x.length for x in exonbed])
sys.stderr.write("Exon size: "+str(exonbp)+"\n")
intronbp = sum([x.length for x in intronbed])
sys.stderr.write("Intron size: "+str(intronbp)+"\n")
#sys.stderr.write(str(txbp+intergenicbp)+"\n")
if args.output_beds:
if not os.path.exists(args.output_beds): os.makedirs(args.output_beds)
with open(args.output_beds+'/chrs.bed','w') as of1:
for rng in chrbed: of1.write("\t".join([str(x) for x in rng.get_bed_array()])+"\n")
with open(args.output_beds+'/exon.bed','w') as of1:
for rng in exonbed: of1.write("\t".join([str(x) for x in rng.get_bed_array()])+"\n")
with open(args.output_beds+'/intron.bed','w') as of1:
for rng in intronbed: of1.write("\t".join([str(x) for x in rng.get_bed_array()])+"\n")
with open(args.output_beds+'/intergenic.bed','w') as of1:
for rng in intergenicbed: of1.write("\t".join([str(x) for x in rng.get_bed_array()])+"\n")
with open(args.output_beds+'/tx.bed','w') as of1:
for rng in txbed: of1.write("\t".join([str(x) for x in rng.get_bed_array()])+"\n")
inf = None
if re.search('\.gz$',args.reads_gpd):
inf = gzip.open(args.reads_gpd)
else:
inf = open(args.reads_gpd)
reads = {}
gs = GPDStream(inf)
for gpd in gs:
reads[gpd.gene_name] = {}
sys.stderr.write("Checking "+str(len(reads.keys()))+" Aligned Reads\n")
#now we know all features we can annotate reads
sys.stderr.write("Read through our reads and bed entries\n")
sys.stderr.write("Annotate intron\n")
intron = annotate_gpds(args,intronbed)
intronnames = set(intron.keys())
sys.stderr.write("Annotate intergenic\n")
intergenic = annotate_gpds(args,intergenicbed)
intergenicnames = set(intergenic.keys())
sys.stderr.write("Annotate exons\n")
exons = annotate_gpds(args,exonbed)
exonnames = set(exons.keys())
allnames = exonnames|intronnames|intergenicnames
sys.stderr.write(str(len(allnames))+" reads attributed to a feature\n")
vals = set(reads.keys())-allnames
if len(vals) > 0:
sys.stderr.write("WARNING unable to ascribe annotation to "+str(len(vals))+" reads\n")
donenames = set()
of = sys.stdout
if args.output:
if re.search('\.gz$',args.output):
of = gzip.open(args.output,'w')
else:
of = open(args.output,'w')
for name in allnames:
exonfrac = 0
intronfrac = 0
intergenicfrac = 0
readlen = 0
exoncount = 0
if name in exons:
exonfrac = float(exons[name][1])/float(exons[name][0])
readlen = exons[name][0]
exoncount = exons[name][2]
if name in intron:
intronfrac = float(intron[name][1])/float(intron[name][0])
readlen = intron[name][0]
exoncount = intron[name][2]
if name in intergenic:
intergenicfrac = float(intergenic[name][1])/float(intergenic[name][0])
readlen = intergenic[name][0]
exoncount = intergenic[name][2]
vals = {'exon':exonfrac,'intron':intronfrac,'intergenic':intergenicfrac}
type = None
if exonfrac >= 0.5:
type = 'exon'
elif intronfrac >= 0.5:
type = 'intron'
elif intergenicfrac >= 0.5:
type = 'intergenic'
else:
type = sorted(vals.keys(),key=lambda x: vals[x])[-1]
if vals[type] == 0:
sys.stderr.write("WARNING trouble setting type\n")
if not type: continue
of.write(name+"\t"+type+"\t"+str(exoncount)+"\t"+str(readlen)+"\n")
of.close()
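# A small, self-contained sketch of the majority-rule call made in the loop above:
# a feature wins outright at a fraction of 0.5 or more (checked exon, then intron,
# then intergenic), otherwise the largest fraction wins. Numbers are made up.
def _example_feature_call(exonfrac, intronfrac, intergenicfrac):
  vals = {'exon':exonfrac,'intron':intronfrac,'intergenic':intergenicfrac}
  if exonfrac >= 0.5: return 'exon'
  if intronfrac >= 0.5: return 'intron'
  if intergenicfrac >= 0.5: return 'intergenic'
  return sorted(vals.keys(),key=lambda x: vals[x])[-1]
# e.g. a 1000bp read with 600bp over exons, 300bp over introns and 100bp intergenic
# gives fractions 0.6/0.3/0.1 -> 'exon'; fractions 0.4/0.35/0.25 -> 'exon' by argmax.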
def generate_locus(mls):
for es in mls:
[gpds,inbeds] = es.payload
if len(gpds) == 0 or len(inbeds) == 0:
continue
yield es
def annotate_gpds(args,inputbed):
if args.threads > 1:
p = Pool(processes=args.threads)
bas = BedArrayStream(sort_ranges(inputbed))
inf = None
if re.search('\.gz$',args.reads_gpd):
inf = gzip.open(args.reads_gpd)
else:
    inf = open(args.reads_gpd)
gs = GPDStream(inf)
mls = MultiLocusStream([gs,bas])
results = {}
# try and implement as a multiprocessing map function
csize = 100 #control how many jobs to send to one thread at a time
if args.threads > 1:
results2 = p.imap_unordered(func=annotate_inner,iterable=generate_locus(mls),chunksize=csize)
else:
results2 = itertools.imap(annotate_inner,generate_locus(mls))
for chunk in results2:
for res in chunk:
results[res[0]] = res[1:]
inf.close()
return results
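# Hedged note on the dispatch above: each locus yielded by generate_locus() goes to
# annotate_inner(), either through Pool.imap_unordered (csize loci per task) or a
# plain serial imap when only one thread is requested; both paths produce the same
# [gene_name, read_length, overlapped_bp, exon_count] records that are folded into
# the results dict keyed by read name.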
def annotate_inner(es):
results = []
[gpds,inbeds] = es.payload
for gpd in gpds:
orig = gpd.length
tot = 0
for rng1 in [x.range for x in gpd.exons]:
tot += sum([y.overlap_size(rng1) for y in inbeds])
if tot > 0:
results.append([gpd.gene_name,orig,tot,gpd.get_exon_count()])
return results
def do_inputs():
  parser = argparse.ArgumentParser(description="Assign genomic features to reads based on where the majority of the read lies. In the event of a tie prioritize exon over intron and intron over intergenic.",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('reads_gpd',help="reads gpd")
parser.add_argument('annotation_gpd',help="reference annotations gpd")
parser.add_argument('chromosome_lengths',help="reference lengths table")
parser.add_argument('--output_beds',help="save features")
parser.add_argument('-o','--output',help="output results")
parser.add_argument('--threads',default=cpu_count(),type=int,help="number of threads default cpu_count()")
args = parser.parse_args()
return args
def external_cmd(cmd):
cache_argv = sys.argv
sys.argv = cmd
args = do_inputs()
main(args)
sys.argv = cache_argv
if __name__=="__main__":
args = do_inputs()
main(args)
|
AlignQC
|
/AlignQC-2.0.5.tar.gz/AlignQC-2.0.5/alignqc/annotate_from_genomic_features.py
|
annotate_from_genomic_features.py
|
"""Get information at the locus level. Good if you have no gene annotation"""
import argparse, sys, os, re, gzip, inspect
from random import shuffle
from shutil import rmtree
from multiprocessing import cpu_count, Pool, Lock
from tempfile import mkdtemp, gettempdir
from seqtools.format.gpd import GPDStream
from seqtools.stream import LocusStream
from seqtools.structure.gene import TranscriptLoci, TranscriptLociMergeRules, TranscriptGroup
glock = Lock()
last_range = None
def main(args):
# Setup inputs
inf = sys.stdin
if args.input != '-':
if re.search('\.gz$',args.input):
inf = gzip.open(args.input)
else:
inf = open(args.input)
of = sys.stdout
# Setup outputs
if args.output:
if re.search('\.gz$',args.output):
of = gzip.open(args.output,'w')
else:
of = open(args.output,'w')
mr = TranscriptLociMergeRules('is_any_overlap')
mr.set_use_junctions(False)
if args.threads > 1:
p = Pool(processes=args.threads)
results = []
z = 0
for locus in LocusStream(GPDStream(inf)):
vals = locus.payload
if args.downsample:
if len(vals) > args.downsample:
shuffle(vals)
vals = vals[0:args.downsample]
locus.set_payload(vals)
if args.threads <= 1:
tls = Queue(do_locus(locus,mr,z,args,verbose=True))
results.append(tls)
else:
tls = p.apply_async(do_locus,args=(locus,mr,z,args,False),callback=process_output)
results.append(tls)
z += len(locus.payload)
if args.threads > 1:
p.close()
p.join()
#sys.exit()
sys.stderr.write("\n")
  sys.stderr.write("Outputting results\n")
if args.output_loci:
if re.search('\.gz$',args.output_loci):
ofl = gzip.open(args.output_loci,'w')
else:
ofl = open(args.output_loci,'w')
lnum = 0
for res in sorted([y for y in [r.get() for r in results] if y],key=lambda x: (x.chr,x.start,x.end)):
rng = res.get_range_string()
rngout = res.copy()
tls = res.payload
for tl in sorted(tls,key=lambda x: (x.range.chr,x.range.start,x.range.end)):
lnum += 1
txs = sorted(tl.get_transcripts(),key=lambda x: (x.range.chr,x.range.start,x.range.end))
tlrng = [str(x) for x in tl.range.get_bed_array()]
ofl.write("\t".join(tlrng)+"\t"+str(lnum)+"\t"+str(len(txs))+"\n")
for tx in txs:
cov = tx.payload[1]
of.write("\t".join(tlrng)+"\t"+str(lnum)+"\t"+str(len(txs))+"\t"+str(tx.payload[0])+"\t"+str(z)+"\t"+tx.gene_name+"\t"+str(cov['average_coverage'])+"\t"+str(cov['fraction_covered'])+"\t"+str(cov['mindepth'])+"\n")
if args.output_loci:
ofl.close()
inf.close()
of.close()
# Temporary working directory step 3 of 3 - Cleanup
#if not args.specific_tempdir:
# rmtree(args.tempdir)
def process_output(curr_range):
global last_range
global glock
if not curr_range: return None
if len(curr_range.payload)==0: return None
glock.acquire()
if curr_range:
if not last_range:
last_range = curr_range
sys.stderr.write("Pos: "+curr_range.get_range_string()+" \r")
elif curr_range.cmp(last_range) == 1:
last_range = curr_range
sys.stderr.write("Pos: "+last_range.get_range_string()+" \r")
glock.release()
return curr_range
def do_locus(locus,mr,curline,args,verbose=True):
txl = TranscriptLoci()
txl.set_merge_rules(mr)
z = 0
tot = len(locus.payload)
for gpd in locus.payload:
z += 1
curline+=1
gpd.set_payload([curline,None])
if verbose: sys.stderr.write("adding transcript: "+str(z)+'/'+str(tot)+" \r")
if gpd.get_exon_count() < args.min_exon_count: continue
txl.add_transcript(gpd)
if len(txl.get_transcripts()) > 0:
covs = txl.get_depth_per_transcript()
remove_list = []
for g in txl.get_transcripts():
if g.id not in covs: continue
x = covs[g.id]
g.payload[1] = x
if x['average_coverage'] < args.min_depth:
remove_list.append(g.id)
elif args.min_depth > 1 and x['fraction_covered'] < args.min_coverage_at_depth:
remove_list.append(g.id)
for tx_id in remove_list:
txl.remove_transcript(tx_id)
if verbose: sys.stderr.write("\n")
if verbose:
if txl.range:
sys.stderr.write(txl.range.get_range_string()+"\n")
#sys.stderr.write('partition locus'+"\n")
tls = txl.partition_loci(verbose=verbose)
curr_range = None
for tl in tls:
if not curr_range: curr_range = tl.range
curr_range = curr_range.merge(tl.range)
if not curr_range: return None
curr_range.set_payload(tls)
return curr_range
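# Hedged example of the depth filters applied above (numbers are made up): with
# --min_depth 2 and --min_coverage_at_depth 0.8, a transcript read whose locus
# coverage gives average_coverage 1.4 is removed outright, and one with
# average_coverage 2.5 but fraction_covered 0.6 is removed as well; with the default
# --min_depth of 1 the fraction_covered check is skipped entirely.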
# Wrap a plain value so it exposes a get() method, mirroring the AsyncResult returned by apply_async
class Queue:
def __init__(self,val):
self.val = val
def get(self):
return self.val
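# Hedged usage sketch: the single-thread path wraps the finished do_locus() result in
# Queue so the collection loop can call r.get() uniformly, matching the AsyncResult
# objects that Pool.apply_async returns on the multi-thread path.
#   single thread : results.append(Queue(do_locus(locus,mr,z,args,verbose=True)))
#   multi thread  : results.append(p.apply_async(do_locus,args=(locus,mr,z,args,False)))
#   either way    : finished = [r.get() for r in results]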
def do_inputs():
# Setup command line inputs
  parser=argparse.ArgumentParser(description="Read SORTED genepred as input. Output summaries of the loci.",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('input',help="INPUT FILE or '-' for STDIN")
parser.add_argument('-o','--output',help="OUTPUTFILE or STDOUT if not set")
parser.add_argument('--output_loci',help="Only describe the loci")
parser.add_argument('--threads',type=int,default=cpu_count(),help="INT number of threads to run. Default is system cpu count")
parser.add_argument('--downsample',type=int,help="downsample to this number as maximum locus parsed")
# Run specific arguments
parser.add_argument('--min_depth',type=float,default=1,help="Only consider reads with averge coverage this much or higher")
parser.add_argument('--min_coverage_at_depth',type=float,default=0.8,help="Only consider reads covered at 'min_depth' at this fraction or greater.")
parser.add_argument('--min_exon_count',type=float,default=1,help="Only construct loci from reads with this many or more exons")
## Temporary working directory step 1 of 3 - Definition
#group = parser.add_mutually_exclusive_group()
#group.add_argument('--tempdir',default=gettempdir(),help="The temporary directory is made and destroyed here.")
#group.add_argument('--specific_tempdir',help="This temporary directory will be used, but will remain after executing.")
args = parser.parse_args()
# Temporary working directory step 2 of 3 - Creation
#setup_tempdir(args)
return args
def setup_tempdir(args):
if args.specific_tempdir:
if not os.path.exists(args.specific_tempdir):
os.makedirs(args.specific_tempdir.rstrip('/'))
args.tempdir = args.specific_tempdir.rstrip('/')
if not os.path.exists(args.specific_tempdir.rstrip('/')):
sys.stderr.write("ERROR: Problem creating temporary directory\n")
sys.exit()
else:
args.tempdir = mkdtemp(prefix="weirathe.",dir=args.tempdir.rstrip('/'))
if not os.path.exists(args.tempdir.rstrip('/')):
sys.stderr.write("ERROR: Problem creating temporary directory\n")
sys.exit()
if not os.path.exists(args.tempdir):
sys.stderr.write("ERROR: Problem creating temporary directory\n")
sys.exit()
return
def external_cmd(cmd):
cache_argv = sys.argv
sys.argv = cmd
args = do_inputs()
main(args)
sys.argv = cache_argv
if __name__=="__main__":
#do our inputs
args = do_inputs()
main(args)
|
AlignQC
|
/AlignQC-2.0.5.tar.gz/AlignQC-2.0.5/alignqc/gpd_loci_analysis.py
|
gpd_loci_analysis.py
|
"""Measure distances of junctions to reference junctions"""
import argparse, sys, os, gzip
from shutil import rmtree
from tempfile import mkdtemp, gettempdir
from time import sleep
from multiprocessing import cpu_count, Pool
from seqtools.format.gpd import GPD, GPDStream
from seqtools.range import GenomicRange, GenomicRangeFromString
from seqtools.range.multi import sort_ranges, BedArrayStream, BedStream
from seqtools.stream import MultiLocusStream
rcnt = 0
def main(args):
# Start by reading in our reference GPD
exon_start_strings = set()
exon_end_strings = set()
inf = None
if args.reference[-3:] == '.gz':
inf = gzip.open(args.reference)
else:
inf = open(args.reference)
z = 0
for line in inf:
z += 1
if z%1000 == 0: sys.stderr.write("ref transcripts: "+str(z)+" starts: "+str(len(exon_start_strings))+" ends: "+str(len(exon_end_strings))+" \r")
gpd = GPD(line)
if gpd.get_exon_count() < 2: continue
for j in gpd.junctions:
exon_start_strings.add(j.right.get_range_string())
exon_end_strings.add(j.left.get_range_string())
inf.close()
sys.stderr.write("\n")
# Now convert each start or end to list of ranges to overlap
sys.stderr.write("finding start windows\n")
starts = get_search_ranges_from_strings(exon_start_strings,args)
sh = BedArrayStream(starts)
ofst = open(args.tempdir+'/starts.bed','w')
for v in sh:
ofst.write("\t".join([str(x) for x in v.get_bed_array()]+[v.payload.get_range_string()])+"\n")
ofst.close()
starts = None
sh = None
sys.stderr.write("finding end windows\n")
ends = get_search_ranges_from_strings(exon_end_strings,args)
eh = BedArrayStream(ends)
ofen = open(args.tempdir+'/ends.bed','w')
for v in eh:
ofen.write("\t".join([str(x) for x in v.get_bed_array()]+[v.payload.get_range_string()])+"\n")
ofen.close()
# switch to file based operations to save some memory
ends = None
eh = None
sleep(10) # give garbage collection time to flush these big memory objects
sinf = open(args.tempdir+'/starts.bed')
einf = open(args.tempdir+'/ends.bed')
sh = BedStream(sinf)
eh = BedStream(einf)
# now stream in our reads
sys.stderr.write("working through reads\n")
inf = sys.stdin
if args.input != '-':
if args.input[-3:] == '.gz': inf = gzip.open(args.input)
else: inf = open(args.input)
gh = GPDStream(inf)
mls = MultiLocusStream([gh,sh,eh])
z = 0
global rcnt
rcnt = 0
#start_distances = []
#end_distances = []
buffer = []
max_buffer = 100
tos = open(args.tempdir+'/starts.txt','w')
toe = open(args.tempdir+'/ends.txt','w')
p = Pool(processes=args.threads)
csize=10
results = p.imap_unordered(process_locus,gen_locus(mls,args),chunksize=csize)
for r in results:
if len(r[0]) > 0:
tos.write("\n".join([str(x) for x in r[0]])+"\n")
if len(r[1]) > 0:
toe.write("\n".join([str(x) for x in r[1]])+"\n")
tos.close()
toe.close()
inf.close()
sys.stderr.write("\n")
  # now we have the distances; we don't actually know if a start is a start or an end from what we have
distances = {}
sys.stderr.write("Reading start distances\n")
inf = open(args.tempdir+'/starts.txt')
for line in inf:
d = int(line.rstrip())
if d not in distances: distances[d] = 0
distances[d] += 1
inf.close()
sys.stderr.write("Reading end distances\n")
inf = open(args.tempdir+'/ends.txt')
for line in inf:
d = int(line.rstrip())
if d not in distances: distances[d] = 0
distances[d] += 1
inf.close()
# now output results
of = sys.stdout
if args.output: of = open(args.output,'w')
for d in sorted(distances.keys()):
of.write(str(d)+"\t"+str(distances[d])+"\n")
of.close()
# Temporary working directory step 3 of 3 - Cleanup
if not args.specific_tempdir:
rmtree(args.tempdir)
def gen_locus(mls,args):
global rcnt
z = 0
for es in mls:
z += 1
if z%1000 == 0: sys.stderr.write(es.get_range_string()+" locus: "+str(z)+" reads: "+str(rcnt)+" \r")
if len(es.payload[0]) == 0: continue
rcnt += len(es.payload[0])
yield [es,args]
class Queue:
def __init__(self,val):
self.val = [val]
def get(self):
    return self.val.pop(0)
def process_locus(vals):
(es,args) = vals
out_start_distances = []
out_end_distances = []
streams = es.payload
reads = streams[0]
starts = streams[1]
for i in range(0,len(starts)):
v = starts[i].payload
starts[i].set_payload(GenomicRangeFromString(v))
ends = streams[2]
for i in range(0,len(ends)):
v = ends[i].payload
ends[i].set_payload(GenomicRangeFromString(v))
#if len(starts) == 0 and len(ends) == 0: continue
for read in reads:
if read.get_exon_count() < 2: continue
# multi exon
for j in read.junctions:
ex_start = j.right
ex_end = j.left
# we can look for evidence for each
#sys.exit()
evstart = [x.payload.start-ex_start.start for x in starts if x.overlaps(ex_start) and x.payload.distance(ex_start) <= args.window]
if len(evstart) > 0:
# get the best distance
min_dist = args.window+10
num = None
for v in evstart:
if abs(v) < min_dist:
min_dist = abs(v)
num = v
if num is not None:
out_start_distances.append(num)
evend = [ex_end.end-x.payload.end for x in ends if x.overlaps(ex_end) and x.payload.distance(ex_end) <= args.window]
if len(evend) > 0:
# get the best distance
min_dist = args.window+10
num = None
for v in evend:
if abs(v) < min_dist:
min_dist = abs(v)
num = v
if num is not None:
out_end_distances.append(num)
return [out_start_distances,out_end_distances]
def get_search_ranges_from_strings(position_strings,args):
results = []
for pstr in position_strings:
rng = GenomicRangeFromString(pstr)
rngw = rng.copy()
rngw.start = max(rng.start-args.window,1)
rngw.end =rng.start+args.window
rngw.set_payload(rng)
results.append(rngw)
return sort_ranges(results)
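# Worked sketch of the window construction above (coordinates are made up): with the
# default --window of 30, a reference junction boundary at chr1:1000-1000 becomes the
# search range chr1:970-1030 carrying the original boundary as its payload, so a read
# junction that lands anywhere in that window can report its signed distance.
def _example_window(boundary_start, window=30):
  return (max(boundary_start-window,1), boundary_start+window)
# _example_window(1000) -> (970, 1030); _example_window(10) -> (1, 40), clamped at 1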
def do_inputs():
# Setup command line inputs
parser=argparse.ArgumentParser(description="Given your SORTED read mappings in GPD format, how do the junction sites differ from the nearest known reference?",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('input',help="INPUT reads GPD or '-' for STDIN")
parser.add_argument('-r','--reference',required=True,help="Reference GPD")
parser.add_argument('-o','--output',help="OUTPUTFILE or STDOUT if not set")
parser.add_argument('--threads',type=int,default=cpu_count(),help="INT number of threads to run. Default is system cpu count")
parser.add_argument('-w','--window',type=int,default=30,help="Window for how far to search for a nearby reference")
# Temporary working directory step 1 of 3 - Definition
group = parser.add_mutually_exclusive_group()
group.add_argument('--tempdir',default=gettempdir(),help="The temporary directory is made and destroyed here.")
group.add_argument('--specific_tempdir',help="This temporary directory will be used, but will remain after executing.")
args = parser.parse_args()
# Temporary working directory step 2 of 3 - Creation
setup_tempdir(args)
return args
def setup_tempdir(args):
if args.specific_tempdir:
if not os.path.exists(args.specific_tempdir):
os.makedirs(args.specific_tempdir.rstrip('/'))
args.tempdir = args.specific_tempdir.rstrip('/')
if not os.path.exists(args.specific_tempdir.rstrip('/')):
sys.stderr.write("ERROR: Problem creating temporary directory\n")
sys.exit()
else:
args.tempdir = mkdtemp(prefix="weirathe.",dir=args.tempdir.rstrip('/'))
if not os.path.exists(args.tempdir.rstrip('/')):
sys.stderr.write("ERROR: Problem creating temporary directory\n")
sys.exit()
if not os.path.exists(args.tempdir):
sys.stderr.write("ERROR: Problem creating temporary directory\n")
sys.exit()
return
def external_cmd(cmd):
cache_argv = sys.argv
sys.argv = cmd
args = do_inputs()
main(args)
sys.argv = cache_argv
if __name__=="__main__":
args = do_inputs()
main(args)
|
AlignQC
|
/AlignQC-2.0.5.tar.gz/AlignQC-2.0.5/alignqc/gpd_to_junction_variance.py
|
gpd_to_junction_variance.py
|
"""Look for bias across the 5' to 3' of annotated transcripts"""
import sys, argparse, re, gzip, os, inspect
from subprocess import PIPE, Popen
from multiprocessing import Pool, cpu_count
from tempfile import mkdtemp, gettempdir
from shutil import rmtree
from seqtools.format.gpd import GPDStream
from seqtools.range import GenomicRange, GenomicRangeFromString
from seqtools.range.multi import ranges_to_coverage
from seqtools.statistics import average
from seqtools.stream import LocusStream, MultiLocusStream
def main(args):
sort_annot(args)
sort_ref(args)
inf0 = open(args.tempdir+'/annot.sorted.txt')
anns = AnnotStream(inf0)
ls0 = LocusStream(anns)
inf1 = open(args.tempdir+'/ref.sorted.gpd')
gs1 = GPDStream(inf1)
ls1 = LocusStream(gs1)
sys.stderr.write("reading read genepred\n")
inf2 = None
if is_gzip(args.read_genepred):
inf2 = gzip.open(args.read_genepred)
else:
inf2 = open(args.read_genepred)
gs2 = GPDStream(inf2)
ls2 = LocusStream(gs2)
mls = MultiLocusStream([ls0,ls1,ls2])
sys.stderr.write("stream loci\n")
z = 0
totals = []
for l in mls:
z += 1
[l0,l1,l2] = l.payload
if args.minimum_read_count > len(l0): continue
if args.minimum_read_count > len(l2): continue
rvals = do_locus(l0,l1,l2,args)
for rval in rvals:
totals.append(rval)
    sys.stderr.write(str(z)+" "+l.get_range_string()+" "+str([len(x) for x in l.payload])+" "+str(len(totals))+" processed \r")
sys.stderr.write("\n")
#results = {}
#for i in range(1,101):
# results[str(i)] = []
read_total = 0
############
outs = []
for v in totals:
if not v: continue
bins = sorted([int(x) for x in v[0].keys()])
outs.append([0 for x in range(1,101)])
read_total+=v[1]
for i in range(1,101):
if str(i) in v[0]:
#results[str(i)].append(v[0][str(i)])
outs[-1][i-1] = v[0][str(i)]
of = sys.stdout
if args.output and re.search('\.gz',args.output):
of = gzip.open(args.output,'w')
elif args.output:
of = open(args.output,'w')
tot = len(outs)
#for i in range(1,101):
# ostr = str(i)
# tot = len(results[str(i)])
# for j in results[str(i)]:
# ostr += "\t"+str(j)
# of.write(ostr+"\n")
tnum = 0
for o in outs:
tnum += 1
of.write(str(tnum)+"\t"+"\t".join([str(x) for x in o])+"\n")
of.close()
if args.output_counts:
of = open(args.output_counts,'w')
of.write(str(tot)+"\t"+str(read_total)+"\n")
of.close()
sys.stderr.write(str(tot)+" total transcripts \t"+str(read_total)+" total reads\n")
if not args.specific_tempdir:
rmtree(args.tempdir)
def spawn_jobs(mls,args):
z = 0
for l in mls:
z += 1
[l0,l1,l2] = l.payload
if args.minimum_read_count > len(l0): continue
if args.minimum_read_count > len(l2): continue
vals = do_locus(l0,l1,l2,args)
for v in vals:
yield v
def do_locus(annots,refs,reads,args):
read_to_tx = {}
tx_to_read = {}
for a in annots:
for b in [x.get_value() for x in a.payload]:
if b['matching_exon_count'] < args.minimum_matched_exons: continue
if b['read_length'] < args.minimum_read_length: continue
read_to_tx[b['read_name']] = b['tx_name']
if b['tx_name'] not in tx_to_read: tx_to_read[b['tx_name']] = {}
tx_to_read[b['tx_name']][b['read_name']] = ''
for tx in tx_to_read.keys():
if len(tx_to_read[tx]) < args.minimum_read_count:
del tx_to_read[tx]
tx_to_ref = {}
for ref in refs:
for gpd in ref.payload:
tx = gpd.entries.name
tx_to_ref[tx] = gpd
for read in reads:
for b in read.payload:
if b.entries.name not in read_to_tx: continue
tx = read_to_tx[b.entries.name]
if tx not in tx_to_read: continue
if tx not in tx_to_ref: continue
tx_to_read[tx][b.entries.name] = b
rvals = []
for tx in tx_to_read:
try: # so bad
rvals.append(do_tx_line([tx_to_ref[tx],tx_to_read[tx].values(),args]))
except:
continue
return rvals
def sort_ref(args):
sys.stderr.write("Sorting in reference genePred\n")
if args.threads > 1:
cmd = ['sort','-S2G','-k3,3','-k5,5n','-k6,6n',
'--parallel='+str(args.threads)]
else:
cmd = ['sort','-S2G','-k3,3','-k5,5n','-k6,6n']
of = open(args.tempdir+'/ref.sorted.gpd','w')
p = Popen(cmd,stdin=PIPE,stdout=of)
refgpd = {}
if args.ref_genepred[-3:] == '.gz':
inf = gzip.open(args.ref_genepred)
else:
inf = open(args.ref_genepred)
#gs = GPDStream(inf)
z = 0
for line in inf:
z += 1
if z%1000==0: sys.stderr.write(str(z)+" \r")
#if z not in refcnt: continue
#if refcnt[z] < args.minimum_read_count: continue
p.stdin.write(line)
#refgpd[z] = gpd
p.communicate()
sys.stderr.write("\n")
inf.close()
def sort_annot(args):
sys.stderr.write("Sorting read annotations\n")
cmd = ['sort','-S2G','-k1,1','-k2,2n','-k3,3n']
cmd2 = 'cut -f 4-'
of0 = open(args.tempdir+'/annot.sorted.txt','w')
p1 = Popen(cmd2.split(),stdin=PIPE,stdout=of0)
p0 = Popen(cmd,stdin=PIPE,stdout=p1.stdin)
inf = None
if is_gzip(args.annotations):
inf = gzip.open(args.annotations)
else:
inf = open(args.annotations)
k = 0
for line in inf:
k+=1
if k%1000==0: sys.stderr.write(str(k)+" \r")
f = line.rstrip().split("\t")
r = GenomicRangeFromString(f[13])
#r.set_payload(parse_annot(f))
p0.stdin.write(r.chr+"\t"+str(r.start)+"\t"+str(r.end)+"\t"+line)
sys.stderr.write("\n")
of0.close()
p0.communicate()
p1.communicate()
inf.close()
def do_tx_line(vals):
(ref_gpd,reads,args) = vals
allbits = []
read_count = 0
outrange = reads[-1].range
for read in reads:
if not args.allow_overflowed_matches and read.range.start < ref_gpd.range.start: continue
if not args.allow_overflowed_matches and read.range.end > ref_gpd.range.end: continue
v = ref_gpd.union(read)
for e in [x.range for x in v.exons]: allbits.append(e)
read_count += 1
if len(allbits)==0: return None
if read_count < args.minimum_read_count: return None
cov = ranges_to_coverage(allbits)
#print [x.get_payload() for x in cov]
curr = 0
bps = []
for i in range(0,ref_gpd.length):
bps.append(0)
for rng1 in [x.range for x in ref_gpd.exons]:
overs = [[z[0],z[1].payload] for z in [[y.merge(rng1),y] for y in cov] if z[0]]
for ov in overs:
dist1 = ov[0].start - rng1.start+curr
dist2 = ov[0].end - rng1.start+curr
for i in range(dist1,dist2+1):
try: # so bad
bps[i]+=ov[1]
except:
continue
curr+=rng1.length
trimmedbps = bps
if args.only_covered_ends:
start = 0
finish = len(bps)-1
for i in range(0,len(bps)):
if bps[i] != 0:
start = i
break
for i in reversed(range(0,len(bps))):
if bps[i] != 0:
finish = i
break
trimmedbps = bps[start:finish+1]
exp = float(sum(trimmedbps))/float(len(trimmedbps))
if ref_gpd.strand=='-': trimmedbps = list(reversed(trimmedbps))
if len(trimmedbps) < args.minimum_read_count: return None
#bin the results
vals = {}
for dat in [[str(1+int(100*float(i)/float(len(trimmedbps)))),float(trimmedbps[i])/float(read_count)] for i in range(0,len(trimmedbps))]:
if dat[0] not in vals: vals[dat[0]] = []
vals[dat[0]].append(dat[1])
for num in vals:
vals[num] = average(vals[num])
return [vals, read_count, exp, len(trimmedbps),ref_gpd.get_exon_count(),outrange.get_range_string()]
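# Worked sketch of the percentile binning used above, with made-up numbers: for a
# 200bp transcript (len(trimmedbps) == 200) base i maps to bin 1+int(100*i/200), so
# bases 0-1 land in bin '1', bases 2-3 in bin '2', ..., bases 198-199 in bin '100';
# each bin then stores the average per-base depth divided by read_count.
def _example_bin_index(i, length):
  return str(1+int(100*float(i)/float(length)))
# _example_bin_index(0,200) -> '1'; _example_bin_index(3,200) -> '2'; _example_bin_index(199,200) -> '100'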
def is_gzip(name):
if re.search('\.gz$',name): return True
return False
def do_inputs():
parser = argparse.ArgumentParser(description="Generate a coverage per bin over the length of a molecule to observe bias in the 5' to 3' mapping of reads",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('read_genepred',help="Input genepred")
parser.add_argument('ref_genepred',help="Reference genepred")
parser.add_argument('annotations',help="Input annotations")
parser.add_argument('--threads',type=int,default=cpu_count(),help="Threads count")
parser.add_argument('--full',action='store_true',help="only consider full length matched reads")
parser.add_argument('--only_covered_ends',action='store_true',help="remove ends with zero coverage")
  parser.add_argument('--allow_overflowed_matches',action='store_true',help="by default we don't consider matches that aren't fully within the bounds of their annotated transcript.")
parser.add_argument('--minimum_read_count',type=int,default=5,help="minimum number of reads")
parser.add_argument('--minimum_read_length',type=int,default=100,help="at least this many bp")
parser.add_argument('--minimum_matched_exons',type=int,default=2,help="require reads matched at least this many exons")
parser.add_argument('-o','--output',help="write output to file")
parser.add_argument('--output_counts',help="write number of transcripts and reads used")
# Temporary working directory step 1 of 3 - Definition
group = parser.add_mutually_exclusive_group()
  group.add_argument('--tempdir',default=gettempdir(),help="The temporary directory is made and destroyed here.")
  group.add_argument('--specific_tempdir',help="This temporary directory will be used, but will remain after executing.")
args = parser.parse_args()
setup_tempdir(args)
return args
def parse_annot(f):
res={
'read_line':int(f[0]),\
'read_name':f[1],\
'gene_name':f[2],\
'tx_name':f[3],\
'type':f[4],\
'matching_exon_count':int(f[5]),\
'consecutive_exons':int(f[6]),\
'read_exons':int(f[7]),\
'tx_exons':int(f[8]),\
'overlap':int(f[9]),\
'read_length':int(f[10]),\
'tx_length':int(f[11])}
return res
class Annot:
def __init__(self,line):
f = line.rstrip().split("\t")
self.value = parse_annot(f)
self._range = GenomicRangeFromString(f[13])
@property
def range(self):
return self._range
def get_value(self):
return self.value
class AnnotStream:
def __init__(self,fh):
self.fh = fh
def read_entry(self):
line = self.fh.readline()
if not line: return False
a = Annot(line)
return a
def next(self):
r = self.read_entry()
if not r: raise StopIteration
else:
return r
def __iter__(self):
return self
def setup_tempdir(args):
if args.specific_tempdir:
if not os.path.exists(args.specific_tempdir):
os.makedirs(args.specific_tempdir.rstrip('/'))
args.tempdir = args.specific_tempdir.rstrip('/')
if not os.path.exists(args.specific_tempdir.rstrip('/')):
sys.stderr.write("ERROR: Problem creating temporary directory\n")
sys.exit()
else:
args.tempdir = mkdtemp(prefix="weirathe.",dir=args.tempdir.rstrip('/'))
if not os.path.exists(args.tempdir.rstrip('/')):
sys.stderr.write("ERROR: Problem creating temporary directory\n")
sys.exit()
if not os.path.exists(args.tempdir):
sys.stderr.write("ERROR: Problem creating temporary directory\n")
sys.exit()
return
def external_cmd(cmd):
cache_argv = sys.argv
sys.argv = cmd
args = do_inputs()
main(args)
sys.argv = cache_argv
if __name__=="__main__":
args = do_inputs()
main(args)
|
AlignQC
|
/AlignQC-2.0.5.tar.gz/AlignQC-2.0.5/alignqc/annotated_read_bias_analysis.py
|
annotated_read_bias_analysis.py
|
import argparse, sys, os, re, gzip
from shutil import rmtree, copyfile
from multiprocessing import cpu_count
from tempfile import mkdtemp, gettempdir
import dump
from seqtools.statistics import average, N50, median, standard_deviation
g_version = None
def main(args):
#do our inputs
args.output = args.output.rstrip('/')
if not os.path.exists(args.output):
os.makedirs(args.output)
all_files = []
z = 0
sys.stderr.write("Collecting information regarding available inputs\n")
for infile in args.xhtml_inputs:
z += 1
outfile = args.tempdir+'/'+str(z)+'.list'
cmd = ['dump.py',infile,'-l','-o',outfile]
dump.external_cmd(cmd)
res = {'fname':infile,'xlist':set(),'index':z}
with open(outfile) as inf:
for line in inf:
line= line.rstrip()
primary = re.match('(\S+)',line).group(1)
res['xlist'].add(primary)
m = re.match('\S+\s+\[(.+)\]',line)
if m:
alts = m.group(1).split(' ')
for alt in alts: res['xlist'].add(alt)
all_files.append(res)
sys.stderr.write("Extracting data for comparison\n")
for file in all_files:
sys.stderr.write(" Extracting for "+str(file['fname'])+" "+str(file['index'])+"\n")
if 'alignment_stats.txt' in file['xlist']:
ofile = args.tempdir+'/'+str(file['index'])+'.alignment_stats.txt'
cmd = ['dump.py',file['fname'],'-e','alignment_stats.txt','-o',ofile]
dump.external_cmd(cmd)
file['alignment_stats'] = {}
with open(ofile) as inf:
for line in inf:
f = line.rstrip().split("\t")
file['alignment_stats'][f[0]] = int(f[1])
if 'error_stats.txt' in file['xlist']:
ofile = args.tempdir+'/'+str(file['index'])+'.error_stats.txt'
cmd = ['dump.py',file['fname'],'-e','error_stats.txt','-o',ofile]
dump.external_cmd(cmd)
file['error_stats'] = {}
with open(ofile) as inf:
for line in inf:
f = line.rstrip().split("\t")
          file['error_stats'][f[0]] = int(f[1])
if 'lengths.txt.gz' in file['xlist']:
ofile = args.tempdir+'/'+str(file['index'])+'.lengths.txt.gz'
cmd = ['dump.py',file['fname'],'-e','lengths.txt.gz','-o',ofile]
dump.external_cmd(cmd)
file['lengths'] = {'average':'','median':'','N50':'','stddev':'','average_aligned':'','median_aligned':'','N50_aligned':'','stddev_aligned':''}
inf = gzip.open(ofile)
lengths = []
for line in inf:
f = line.rstrip().split("\t")
lengths.append([int(f[3]),int(f[4])])
if len(lengths) > 0:
file['lengths']['average']=average([x[1] for x in lengths])
file['lengths']['median']=median([x[1] for x in lengths])
file['lengths']['N50']=N50([x[1] for x in lengths])
if len([x[0] for x in lengths if x[0] != 0]) > 0:
file['lengths']['average_aligned']=average([x[0] for x in lengths if x[0] != 0])
file['lengths']['median_aligned']=median([x[0] for x in lengths if x[0] != 0])
file['lengths']['N50_aligned']=N50([x[0] for x in lengths if x[0] != 0])
if len(lengths) > 2:
file['lengths']['stddev']=standard_deviation([x[1] for x in lengths])
if len([x[0] for x in lengths if x[0] != 0]) > 2:
file['lengths']['stddev_aligned']=standard_deviation([x[0] for x in lengths if x[0] != 0])
# Now we can output table
ofname = args.tempdir+'/stats_table.txt'
of = open(ofname,'w')
header = 'File'+"\t"
#Basic
header += 'Reads'+"\t"
header += 'Avg_Read_Length'+"\t"
header += 'Median_Read_Length'+"\t"
header += 'N50_Read_Length'+"\t"
header += 'Stddev_Read_Length'+"\t"
header += 'Aligned_Read_Count'+"\t"
header += 'Aligned_Reads'+"\t"
header += 'Avg_Aligned_Length'+"\t"
header += 'Median_Aligned_Length'+"\t"
header += 'N50_Aligned_Length'+"\t"
header += 'Stddev_Aligned_Length'+"\t"
header += 'Chimeric_Total_Reads'+"\t"
header += 'Chimeric_Trans_Reads'+"\t"
header += 'Chimeric_Self_Reads'+"\t"
header += 'Bases'+"\t"
header += 'Bases_Aligned'+"\t"
# Error
header += 'Error_Rate'+"\t"
header += 'Mismatches'+"\t"
header += 'Deletions_Total'+"\t"
header += 'Deletions_Homopolymer'+"\t"
header += 'Insertions_Total'+"\t"
header += 'Insertions_Homopolymer'
of.write(header+"\n")
for file in all_files:
of.write(file['fname']+"\t")
basic = get_basic(file)
of.write("\t".join([str(x) for x in basic]))
of.write("\t")
error = get_error(file)
of.write("\t".join([str(x) for x in error]))
of.write("\n")
of.close()
copyfile(args.tempdir+'/stats_table.txt',args.output+'/stats_table.txt')
# Temporary working directory step 3 of 3 - Cleanup
if not args.specific_tempdir:
rmtree(args.tempdir)
# 'file' is the dict holding 'fname', 'alignment_stats', and 'error_stats'
# If error stats are present, return the array of error-rate fields to output
def get_error(file):
  error = ['' for x in range(0,6)]
if 'error_stats' not in file: return error
dat = file['error_stats']
if dat['ALIGNMENT_BASES'] > 0:
error[0] = float(dat['ANY_ERROR'])/float(dat['ALIGNMENT_BASES'])
if dat['ALIGNMENT_BASES'] > 0:
error[1] = float(dat['MISMATCHES'])/float(dat['ALIGNMENT_BASES'])
if dat['ALIGNMENT_BASES'] > 0:
error[2] = float(dat['ANY_DELETION'])/float(dat['ALIGNMENT_BASES'])
if dat['ANY_DELETION'] > 0:
error[3] = float(dat['HOMOPOLYMER_DELETION'])/float(dat['ANY_DELETION'])
if dat['ALIGNMENT_BASES'] > 0:
error[4] = float(dat['ANY_INSERTION'])/float(dat['ALIGNMENT_BASES'])
if dat['ANY_INSERTION'] > 0:
error[5] = float(dat['HOMOPOLYMER_INSERTION'])/float(dat['ANY_INSERTION'])
return error
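# Hedged example of get_error on a made-up error_stats dict (real values come from
# the extracted error_stats.txt counts):
#   dat = {'ALIGNMENT_BASES':1000000,'ANY_ERROR':120000,'MISMATCHES':50000,
#          'ANY_DELETION':40000,'HOMOPOLYMER_DELETION':10000,
#          'ANY_INSERTION':30000,'HOMOPOLYMER_INSERTION':6000}
#   -> error rate 0.12, mismatch rate 0.05, deletion rate 0.04 (0.25 homopolymer),
#      insertion rate 0.03 (0.2 homopolymer)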
# 'file' is the dict holding 'fname', 'alignment_stats', and 'error_stats'
# If alignment stats are present, return the array of basic-stat fields to output
def get_basic(file):
basic = ['' for x in range(0,16)]
if 'alignment_stats' not in file: return basic
dat = file['alignment_stats']
basic[0] = dat['TOTAL_READS']
basic[1] = file['lengths']['average']
basic[2] = file['lengths']['median']
basic[3] = file['lengths']['N50']
basic[4] = file['lengths']['stddev']
basic[5] = dat['ALIGNED_READS']
if dat['TOTAL_READS'] > 0:
basic[6] = float(dat['ALIGNED_READS'])/float(dat['TOTAL_READS'])
basic[7] = file['lengths']['average_aligned']
basic[8] = file['lengths']['median_aligned']
basic[9] = file['lengths']['N50_aligned']
basic[10] = file['lengths']['stddev_aligned']
if dat['ALIGNED_READS'] > 0:
basic[11] = float(dat['CHIMERA_ALIGN_READS'])/float(dat['ALIGNED_READS'])
if dat['CHIMERA_ALIGN_READS'] > 0:
basic[12] = float(dat['TRANSCHIMERA_ALIGN_READS'])/float(dat['CHIMERA_ALIGN_READS'])
if dat['CHIMERA_ALIGN_READS'] > 0:
basic[13] = float(dat['SELFCHIMERA_ALIGN_READS'])/float(dat['CHIMERA_ALIGN_READS'])
basic[14] = dat['TOTAL_BASES']
if dat['TOTAL_BASES'] > 0:
basic[15] = float(dat['ALIGNED_BASES'])/float(dat['TOTAL_BASES'])
return basic
def external_cmd(cmd,version=None):
#set version by input
global g_version
g_version = version
cache_argv = sys.argv
sys.argv = cmd
args = do_inputs()
main(args)
sys.argv = cache_argv
def do_inputs():
# Setup command line inputs
parser=argparse.ArgumentParser(description="",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('xhtml_inputs',nargs='+',help="xhtml analysis files")
parser.add_argument('-o','--output',required=True,help="OUTPUT directory")
parser.add_argument('--threads',type=int,default=cpu_count(),help="INT number of threads to run. Default is system cpu count")
# Temporary working directory step 1 of 3 - Definition
group = parser.add_mutually_exclusive_group()
group.add_argument('--tempdir',default=gettempdir(),help="The temporary directory is made and destroyed here.")
group.add_argument('--specific_tempdir',help="This temporary directory will be used, but will remain after executing.")
args = parser.parse_args()
# Temporary working directory step 2 of 3 - Creation
setup_tempdir(args)
return args
def setup_tempdir(args):
if args.specific_tempdir:
if not os.path.exists(args.specific_tempdir):
os.makedirs(args.specific_tempdir.rstrip('/'))
args.tempdir = args.specific_tempdir.rstrip('/')
if not os.path.exists(args.specific_tempdir.rstrip('/')):
sys.stderr.write("ERROR: Problem creating temporary directory\n")
sys.exit()
else:
args.tempdir = mkdtemp(prefix="weirathe.",dir=args.tempdir.rstrip('/'))
if not os.path.exists(args.tempdir.rstrip('/')):
sys.stderr.write("ERROR: Problem creating temporary directory\n")
sys.exit()
if not os.path.exists(args.tempdir):
sys.stderr.write("ERROR: Problem creating temporary directory\n")
sys.exit()
return
if __name__=="__main__":
args = do_inputs()
main(args)
|
AlignQC
|
/AlignQC-2.0.5.tar.gz/AlignQC-2.0.5/alignqc/compare.py
|
compare.py
|
import argparse, sys, os, gzip
from shutil import rmtree
from multiprocessing import cpu_count
from tempfile import mkdtemp, gettempdir
def main(args):
chrcovs = {}
total = {}
inf = gzip.open(args.input)
for line in inf:
f = line.rstrip().split("\t")
chr = f[0]
start = int(f[1])
finish = int(f[2])
depth = int(f[3])
if chr not in chrcovs: chrcovs[chr] = {}
if depth not in chrcovs[chr]: chrcovs[chr][depth] = 0
chrcovs[chr][depth] += finish-start
if depth not in total: total[depth] = 0
total[depth] += finish-start
inf.close()
chrlens = {}
with open(args.reflens) as inf:
for line in inf:
f = line.rstrip().split("\t")
chrlens[f[0]]=int(f[1])
total_len = sum(chrlens.values())
cov_len = sum(total.values())
  print(total_len)
  print(cov_len)
depths = sorted(total.keys())
#bases = total_len-cov_len
prev = total_len-cov_len
oflpt = gzip.open(args.output+"/line_plot_table.txt.gz",'w')
for d in depths:
oflpt.write(str(d)+"\t"+str(prev+1)+"\t"+str(total_len)+"\n")
oflpt.write(str(d)+"\t"+str(prev+total[d])+"\t"+str(total_len)+"\n")
prev = prev+total[d]
oflpt.close()
oftdt = gzip.open(args.output+"/total_distro_table.txt.gz",'w')
for d in depths:
oftdt.write(str(d)+"\t"+str(total[d])+"\t"+str(cov_len)+"\t"+str(total_len)+"\n")
oftdt.close()
ofcdt = gzip.open(args.output+"/chr_distro_table.txt.gz",'w')
for chr in sorted(chrcovs.keys()):
covered_bases = sum(chrcovs[chr].values())
for depth in sorted(chrcovs[chr].keys()):
ofcdt.write(chr + "\t" + str(depth)+"\t"+str(chrcovs[chr][depth])+"\t"+str(covered_bases)+"\t"+str(chrlens[chr])+"\n")
ofcdt.close()
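# Worked sketch of the line_plot_table rows with made-up numbers: for a 100bp genome
# with 40bp uncovered and total = {1:30, 2:20, 5:10} (cov_len 60), prev starts at 40
# and every depth gets two rows marking the span it occupies along the depth-ranked
# genome:
#   1   41  100
#   1   70  100
#   2   71  100
#   2   90  100
#   5   91  100
#   5  100  100
# i.e. bases ranked 41-70 sit at depth 1, 71-90 at depth 2, 91-100 at depth 5.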
def do_inputs():
# Setup command line inputs
parser=argparse.ArgumentParser(description="",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('input',help="depth INPUT FILE")
parser.add_argument('reflens',help="reflens INPUT FILE")
parser.add_argument('-o','--output',help="OUTPUT directory")
args = parser.parse_args()
return args
def external_cmd(cmd):
cache_argv = sys.argv
sys.argv = cmd
args = do_inputs()
main(args)
sys.argv = cache_argv
if __name__=="__main__":
#do our inputs
args = do_inputs()
main(args)
|
AlignQC
|
/AlignQC-2.0.5.tar.gz/AlignQC-2.0.5/alignqc/depth_to_coverage_report.py
|
depth_to_coverage_report.py
|
import sys, argparse, base64, re, os
def main(args):
of = sys.stdout
if args.output:
of = open(args.output,'w')
filecontents = open(args.input).read()
imgs = []
for m in re.finditer('(<\s*.*img.*>)',filecontents):
imgs.append([m.start(),m.end()])
prev = 0
newcontents = ''
for i in range(len(imgs)):
newcontents += filecontents[prev:imgs[i][0]]
newcontents += do_image_tag(filecontents[imgs[i][0]:imgs[i][1]],args)
prev = imgs[i][1]
newcontents += filecontents[prev:]
filecontents = newcontents
if args.all:
a = []
for m in re.finditer('(<a.*>)',filecontents):
a.append([m.start(),m.end()])
prev = 0
newcontents = ''
for i in range(len(a)):
newcontents += filecontents[prev:a[i][0]]
newcontents += do_a_tag(filecontents[a[i][0]:a[i][1]],args)
prev = a[i][1]
newcontents += filecontents[prev:]
filecontents = newcontents
styles = []
for m in re.finditer('(<\s*.*type.*text/css.*>)',filecontents):
styles.append([m.start(),m.end()])
prev = 0
for i in range(len(styles)):
of.write(filecontents[prev:styles[i][0]]+"\n")
of.write(do_style_sheet(filecontents[styles[i][0]:styles[i][1]],args)+"\n")
prev = styles[i][1]
of.write(filecontents[prev:]+"\n")
of.close()
def do_style_sheet(style_sheet,args):
m=re.match('^(.*)(href\s*=\s*["\'][^"\']*["\'])(.*)$',style_sheet)
if not m:
return style_sheet #cant replace for some reason
start = m.group(1)
finish = m.group(3)
src_full = m.group(2)
m = re.match('href\s*=\s*["\']([^"\']+)["\']',src_full)
if not m:
return style_sheet #cant replace for some reason
srcpathpart = m.group(1)
srcpath = os.path.dirname(args.input)+'/'+srcpathpart
if not re.search('\.css',srcpath): return style_sheet
if not os.path.isfile(srcpath): return style_sheet
disabler = "\n"
if not args.all:
disabler = "a {\n pointer-events: none;\n}\n"
return '<style type="text/css">'+disabler+"\n"+open(srcpath,'rb').read()+'</style>'
def do_a_tag(a_tag,args):
m=re.match('^(.*)(href\s*=\s*["\'][^"\']*["\'])(.*)$',a_tag)
#print m.group(2)
if not m:
return a_tag #cant replace for some reason
start = m.group(1)
finish = m.group(3)
src_full = m.group(2)
m = re.match('href\s*=\s*["\']([^"\']+)["\']',src_full)
if not m:
return a_tag #cant replace for some reason
srcpathpart = m.group(1)
srcpath = os.path.dirname(args.input)+'/'+srcpathpart
#if not re.search('\.png',srcpath): return img_tag
if not os.path.isfile(srcpath): return a_tag
#sys.stderr.write(srcpath+"\n")
#sys.exit()
encoded = base64.b64encode(open(srcpath,'rb').read())
if re.search('\.pdf$',srcpath):
return start+' href="data:application/pdf;base64,'+encoded+'" '+finish
if re.search('\.gz$',srcpath):
return start+' href="data:application/x-gzip;base64,'+encoded+'" '+finish
return start+' href="data:text/plain;base64,'+encoded+'" '+finish
def do_image_tag(img_tag,args):
m=re.match('^(.*)(src\s*=\s*["\'][^"\']*["\'])(.*)$',img_tag)
if not m:
return img_tag #cant replace for some reason
start = m.group(1)
finish = m.group(3)
src_full = m.group(2)
m = re.match('src\s*=\s*["\']([^"\']+)["\']',src_full)
if not m:
return img_tag #cant replace for some reason
srcpathpart = m.group(1)
srcpath = os.path.dirname(args.input)+'/'+srcpathpart
if not re.search('\.png',srcpath): return img_tag
if not os.path.isfile(srcpath): return img_tag
encoded = base64.b64encode(open(srcpath,'rb').read())
return start+' src="data:image/png;base64,'+encoded+'" '+finish
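# Hedged sketch of the rewrite do_image_tag performs (the file name is made up):
#   before: <img class="plot" src="plots/coverage.png" />
#   after : <img class="plot"  src="data:image/png;base64,iVBORw0KG..." />
# The relative src is resolved against os.path.dirname(args.input) and the PNG bytes
# are base64-encoded into a data URI; anything that is not an existing .png file is
# returned untouched. do_a_tag and do_style_sheet apply the same idea to href targets
# (pdf, gzip, plain text) and to linked css.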
def do_inputs():
parser = argparse.ArgumentParser(description="Put css style sheets and PNG images into html file",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('input',help="Input file")
parser.add_argument('-o','--output',help="output file")
parser.add_argument('-a','--all',action="store_true",help="replace other files as well")
args = parser.parse_args()
return args
# for calling from another python script
def external(args):
main(args)
def external_cmd(cmd):
cache_argv = sys.argv
sys.argv = cmd
args = do_inputs()
main(args)
sys.argv = cache_argv
if __name__=="__main__":
args = do_inputs()
main(args)
|
AlignQC
|
/AlignQC-2.0.5.tar.gz/AlignQC-2.0.5/alignqc/make_solo_html.py
|
make_solo_html.py
|
import argparse, sys, os, random, inspect, time, gc, gzip
from shutil import rmtree
from multiprocessing import cpu_count
from tempfile import mkdtemp, gettempdir
from subprocess import call
from seqtools.format.sam.bam.files import BAMFile
from seqtools.format.sam.bam.bamindex import BAMIndexRandomAccessPrimary as BIRAP
from seqtools.errors import ErrorProfileFactory
from seqtools.format.fasta import FASTAData
# Take the bam file as input and produce plots and a data file for context errors.
def main(args):
# make our error profile report
sys.stderr.write("Reading reference fasta\n")
if args.reference[-3:] == '.gz':
ref = FASTAData(gzip.open(args.reference).read())
else:
ref = FASTAData(open(args.reference).read())
sys.stderr.write("Reading index\n")
epf = ErrorProfileFactory()
bf = BAMFile(args.input,BAMFile.Options(reference=ref))
bind = None
if args.random:
if args.input_index:
bind = BIRAP(index_file=args.input_index,alignment_file=args.input)
else:
bind = BIRAP(index_file=args.input+'.bgi',alignment_file=args.input)
z = 0
strand = 'target'
if args.query: strand = 'query'
con = 0
while True:
#rname = random.choice(bf.index.get_names())
#print rname
#coord = bf.index.get_longest_target_alignment_coords_by_name(rname)
#print coord
coord = bind.get_random_coord()
if not coord: continue
e = bf.fetch_by_coord(coord)
if not e.is_aligned(): continue
epf.add_alignment(e)
z+=1
if z%100==1:
con = epf.get_min_context_count(strand)
sys.stderr.write(str(z)+" alignments, "+str(con)+" min context coverage\r")
if args.max_alignments <= z: break
if args.stopping_point <= con: break
else:
z = 0
strand = 'target'
if args.query: strand = 'query'
con = 0
for e in bf:
if e.is_aligned():
epf.add_alignment(e)
z+=1
if z%100==1:
con = epf.get_min_context_count(strand)
sys.stderr.write(str(z)+" alignments, "+str(con)+" min context coverage\r")
if args.max_alignments <= z: break
if args.stopping_point <= con: break
sys.stderr.write("\n")
#if bf.index:
# bf.index.destroy()
bf = None
if bind:
bind.destroy()
sys.stderr.write('working with:'+"\n")
sys.stderr.write(str(z)+" alignments, "+str(con)+" min context coverage"+"\n")
epf.write_context_error_report(args.tempdir+'/err.txt',strand)
for ofile in args.output:
cmd = [args.rscript_path,
os.path.dirname(os.path.realpath(__file__))+'/plot_base_error_context.r',
args.tempdir+'/err.txt',ofile]
if args.scale:
cmd += [str(x) for x in args.scale]
sys.stderr.write(" ".join(cmd)+"\n")
call(cmd)
sys.stderr.write("finished\n")
if args.output_raw:
of = open(args.output_raw,'w')
with open(args.tempdir+"/err.txt") as inf:
for line in inf:
of.write(line)
epf.close()
time.sleep(5)
gc.collect()
time.sleep(5)
# Temporary working directory step 3 of 3 - Cleanup
if not args.specific_tempdir:
rmtree(args.tempdir)
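# Hedged summary of the stopping rule in both sampling loops above: alignments are
# added to the ErrorProfileFactory until either --max_alignments have been scanned or
# the least-seen sequence context on the chosen strand ('target' or 'query') has been
# observed --stopping_point times, whichever happens first.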
def do_inputs():
# Setup command line inputs
parser=argparse.ArgumentParser(description="",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('input',help="INPUT FILE or '-' for STDIN")
parser.add_argument('--input_index',help="Index file for bam if other than default location")
parser.add_argument('-r','--reference',required=True,help="Reference Genome")
parser.add_argument('-o','--output',nargs='+',required=True,help="OUTPUTFILE(s)")
parser.add_argument('--output_raw',help="Save the raw data")
parser.add_argument('--scale',type=float,nargs=6,help="<insertion_min> <insertion_max> <mismatch_min> <mismatch_max> <deletion_min> <deletion_max>")
parser.add_argument('--max_alignments',type=int,default=10000000000,help="The maximum number of alignments to scan")
parser.add_argument('--stopping_point',type=int,default=1000,help="Stop after you see this many of each context")
#parser.add_argument('--threads',type=int,default=cpu_count(),help="INT number of threads to run. Default is system cpu count")
# Temporary working directory step 1 of 3 - Definition
group1 = parser.add_mutually_exclusive_group(required=True)
group1.add_argument('--target',action='store_true',help="Context on the target sequence")
group1.add_argument('--query',action='store_true',help="Context on the query sequence")
group = parser.add_mutually_exclusive_group()
group.add_argument('--tempdir',default=gettempdir(),help="The temporary directory is made and destroyed here.")
group.add_argument('--specific_tempdir',help="This temporary directory will be used, but will remain after executing.")
parser.add_argument('--random',action='store_true',help="Randomly select alignments, requires an indexed bam")
parser.add_argument('--rscript_path',default='Rscript',help="Path to Rscript")
args = parser.parse_args()
# Temporary working directory step 2 of 3 - Creation
setup_tempdir(args)
return args
def setup_tempdir(args):
if args.specific_tempdir:
if not os.path.exists(args.specific_tempdir):
os.makedirs(args.specific_tempdir.rstrip('/'))
args.tempdir = args.specific_tempdir.rstrip('/')
if not os.path.exists(args.specific_tempdir.rstrip('/')):
sys.stderr.write("ERROR: Problem creating temporary directory\n")
sys.exit()
else:
args.tempdir = mkdtemp(prefix="weirathe.",dir=args.tempdir.rstrip('/'))
if not os.path.exists(args.tempdir.rstrip('/')):
sys.stderr.write("ERROR: Problem creating temporary directory\n")
sys.exit()
if not os.path.exists(args.tempdir):
sys.stderr.write("ERROR: Problem creating temporary directory\n")
sys.exit()
return
def external_cmd(cmd):
cache_argv = sys.argv
sys.argv = cmd
args = do_inputs()
main(args)
sys.argv = cache_argv
if __name__=="__main__":
#do our inputs
args = do_inputs()
main(args)
|
AlignQC
|
/AlignQC-2.0.5.tar.gz/AlignQC-2.0.5/alignqc/bam_to_context_error_plot.py
|
bam_to_context_error_plot.py
|
import argparse, sys, os, time, re, gzip, locale, inspect
from shutil import rmtree, copy, copytree
from tempfile import mkdtemp, gettempdir
from seqtools.range.multi import BedStream
from seqtools.format.gpd import GPDStream
import make_solo_html
# global
g_version = None
locale.setlocale(locale.LC_ALL,'') #default locale
def main(args):
# Create the output HTML
make_html(args)
udir = os.path.dirname(os.path.realpath(__file__))
# Make the portable output
cmd = [udir+'/make_solo_html.py',args.tempdir+'/report.xhtml',
'-o',args.tempdir+'/portable_report.xhtml']
sys.stderr.write(" ".join(cmd)+"\n")
make_solo_html.external_cmd(cmd)
# Make the full output
cmd = [udir+'/make_solo_html.py',args.tempdir+'/report.xhtml',
'-o',args.tempdir+'/output_report.xhtml','--all']
sys.stderr.write(" ".join(cmd)+"\n")
make_solo_html.external_cmd(cmd)
if args.output_folder:
copytree(args.tempdir,args.output_folder)
if args.portable_output:
copy(args.tempdir+'/portable_report.xhtml',args.portable_output)
if args.output:
copy(args.tempdir+'/output_report.xhtml',args.output)
## Temporary working directory step 3 of 3 - Cleanup
#if not args.specific_tempdir:
# rmtree(args.tempdir)
def make_html(args):
global g_version
#read in our alignment data
mydate = time.strftime("%Y-%m-%d")
a = {}
with open(args.tempdir+'/data/alignment_stats.txt') as inf:
for line in inf:
(name,numstr)=line.rstrip().split("\t")
a[name]=int(numstr)
#read in our special read analysis
special = {}
with open(args.tempdir+'/data/special_report') as inf:
for line in inf:
f = line.rstrip().split("\t")
if f[0] not in special: special[f[0]] = []
special[f[0]].append(f[1:])
#Only have error stats if we are using it
e = {}
if args.genome:
#read in our error data
with open(args.tempdir+'/data/error_stats.txt') as inf:
for line in inf:
(name,numstr)=line.rstrip().split("\t")
e[name]=int(numstr)
# read in our coverage data
coverage_data = {}
# this one will be set in annotation on section
tx_to_gene = {}
coverage_data['genome_total'] = 0
with open(args.tempdir+'/data/chrlens.txt') as inf:
for line in inf:
f = line.rstrip().split("\t")
coverage_data['genome_total']+=int(f[1])
inf = gzip.open(args.tempdir+'/data/depth.sorted.bed.gz')
coverage_data['genome_covered'] = 0
bs = BedStream(inf)
for rng in bs:
coverage_data['genome_covered'] += rng.length
inf.close()
# The annotation section
if args.gpd:
inf = open(args.tempdir+'/data/beds/exon.bed')
coverage_data['exons_total'] = 0
bs = BedStream(inf)
for rng in bs:
coverage_data['exons_total'] += rng.length
inf.close()
inf = open(args.tempdir+'/data/beds/intron.bed')
coverage_data['introns_total'] = 0
bs = BedStream(inf)
for rng in bs:
coverage_data['introns_total'] += rng.length
inf.close()
inf = open(args.tempdir+'/data/beds/intergenic.bed')
coverage_data['intergenic_total'] = 0
bs = BedStream(inf)
for rng in bs:
coverage_data['intergenic_total'] += rng.length
inf.close()
inf = gzip.open(args.tempdir+'/data/exondepth.bed.gz')
coverage_data['exons_covered'] = 0
bs = BedStream(inf)
for rng in bs:
coverage_data['exons_covered'] += rng.length
inf.close()
inf = gzip.open(args.tempdir+'/data/introndepth.bed.gz')
coverage_data['introns_covered'] = 0
bs = BedStream(inf)
for rng in bs:
coverage_data['introns_covered'] += rng.length
inf.close()
inf = gzip.open(args.tempdir+'/data/intergenicdepth.bed.gz')
coverage_data['intergenic_covered'] = 0
bs = BedStream(inf)
for rng in bs:
coverage_data['intergenic_covered'] += rng.length
inf.close()
# deal with annotations
ref_genes = {}
ref_transcripts = {}
if args.gpd[-3:] == '.gz':
gs = GPDStream(gzip.open(args.gpd))
else:
gs = GPDStream(open(args.gpd))
#gs = GPDStream(inf)
for gpd in gs:
tx_to_gene[gpd.transcript_name] = gpd.gene_name
ref_genes[gpd.gene_name] = [0,0]
ref_transcripts[gpd.transcript_name] = [0,0]
inf = gzip.open(args.tempdir+'/data/annotbest.txt.gz')
for line in inf:
f = line.rstrip().split("\t")
gene = f[2]
tx = f[3]
if f[4]=='partial': ref_genes[gene][0] += 1
elif f[4]=='full': ref_genes[gene][1] += 1
if f[4]=='partial': ref_transcripts[tx][0] += 1
elif f[4]=='full': ref_transcripts[tx][1] += 1
inf.close()
#get our locus count
if args.do_loci:
inf = gzip.open(args.tempdir+'/data/loci.bed.gz')
locuscount = 0
for line in inf:
locuscount += 1
inf.close()
#get our annotation counts
if args.gpd:
genefull = 0
geneany = 0
txfull = 0
txany = 0
inf = gzip.open(args.tempdir+'/data/annotbest.txt.gz')
genes_f = {}
genes_a = {}
txs_f = {}
txs_a = {}
for line in inf:
f = line.rstrip().split("\t")
g = f[2]
t = f[3]
if g not in genes_a: genes_a[g] = 0
genes_a[g]+=1
if t not in txs_a: txs_a[t] = 0
txs_a[t]+=1
if f[4] == 'full':
if g not in genes_f: genes_f[g] = 0
genes_f[g]+=1
if t not in txs_f: txs_f[t] = 0
txs_f[t]+=1
inf.close()
genefull = len(genes_f.keys())
geneany = len(genes_a.keys())
txfull = len(txs_f.keys())
txany = len(txs_a.keys())
# still in args.gpd required
#Get evidence counts for bias
bias_tx_count = None
bias_read_count = None
if os.name != 'nt' and sys.platform != 'darwin':
with open(args.tempdir+'/data/bias_counts.txt') as inf:
for line in inf:
f = line.rstrip().split("\t")
bias_tx_count = int(f[0])
bias_read_count = int(f[1])
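# bias_counts.txt is expected to hold a single tab-delimited line: <transcript count><TAB><read count>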
#make our css directory
if not os.path.exists(args.tempdir+'/css'):
os.makedirs(args.tempdir+'/css')
udir = os.path.dirname(os.path.realpath(__file__))
#copy css into that directory
copy(udir+'/data/mystyle.css',args.tempdir+'/css/mystyle.css')
of = open(args.tempdir+'/report.xhtml','w')
ostr = '''
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<link rel="stylesheet" type="text/css" href="css/mystyle.css" />
<title>Long Read Alignment and Error Report</title>
</head>
<body>
'''
of.write(ostr)
#########################################
# 1. TOP BLOCK
ostr = '''
<div class="result_block">
<div class="top_block">
<div>
Generated on:
</div>
<div class="input_value">
'''
of.write(ostr)
of.write(mydate)
ostr = '''
</div>
</div>
<div class="top_block">
<div>
Version:
</div>
<div class="input_value">'''
of.write(ostr)
of.write(str(g_version))
ostr = '''
</div>
</div>
<div class="top_block">
<div>Execution parameters:</div>
<div class="input_value">
<a id="params.txt" href="data/params.txt">params.txt</a>
</div>
</div>
<div class="top_block">
<div>Long read alignment and error report for:</div>
<div class="input_value" id="filename">'''
of.write(ostr+"\n")
of.write(args.input)
ostr = '''
</div>
</div>
<div class="clear"></div>
<div class="top_block">
<div>
Reference Genome:
</div>
<div class="input_value">'''
of.write(ostr)
of.write(str(args.genome))
#else:
# of.write(' '*20)
ostr = '''
</div>
</div>
<div class="top_block">
<div>
Reference Transcriptome:
</div>
<div class="input_value">'''
of.write(ostr)
if args.gtf:
of.write(str(args.gtf))
else:
of.write(str(args.gpd))
#else:
# of.write(' '*20)
ostr = '''
</div>
</div>
</div>
<div class="clear"></div>
<hr />
'''
of.write(ostr)
##################################
# 2. ALIGNMENT ANALYSIS
## This block should be in every output. Generated from the BAM
ostr = '''
<div class="result_block">
<div class="subject_title">
<table><tr><td class="c1">Alignment analysis</td><td class="c2"><span class="highlight">'''
of.write(ostr)
reads_aligned = perc(a['ALIGNED_READS'],a['TOTAL_READS'],1)
of.write(reads_aligned)
ostr = '''
</span></td><td class="c2"><span class="highlight2">reads aligned</span></td><td class="c2"><span class="highlight">'''
of.write(ostr)
bases_aligned = perc(a['ALIGNED_BASES'],a['TOTAL_BASES'],1)
of.write(bases_aligned)
ostr = '''
</span></td><td class="c2"><span class="highlight2">bases aligned <i>(of aligned reads)</i></span></td></tr></table>
</div>
<div class="one_third left">
<table class="data_table">
<tr class="rhead"><td colspan="3">Read Stats</td></tr>'''
of.write(ostr+"\n")
total_read_string = '<tr><td>Total reads</td><td>'+str(addcommas(a['TOTAL_READS']))+'</td><td></td></tr>'
of.write(total_read_string+"\n")
unaligned_read_string = '<tr><td>- Unaligned reads</td><td>'+str(addcommas(a['UNALIGNED_READS']))+'</td><td>'+perc(a['UNALIGNED_READS'],a['TOTAL_READS'],1)+'</td></tr>'
of.write(unaligned_read_string+"\n")
aligned_read_string = '<tr><td>- Aligned reads</td><td>'+str(addcommas(a['ALIGNED_READS']))+'</td><td>'+perc(a['ALIGNED_READS'],a['TOTAL_READS'],1)+'</td></tr>'
of.write(aligned_read_string+"\n")
single_align_read_string = '<tr><td>--- Single-align reads</td><td>'+str(addcommas(a['SINGLE_ALIGN_READS']))+'</td><td>'+perc(a['SINGLE_ALIGN_READS'],a['TOTAL_READS'],1)+'</td></tr>'
of.write(single_align_read_string+"\n")
gapped_align_read_string = '<tr><td>--- Gapped-align reads</td><td>'+str(addcommas(a['GAPPED_ALIGN_READS']))+'</td><td>'+perc(a['GAPPED_ALIGN_READS'],a['TOTAL_READS'],2)+'</td></tr>'
of.write(gapped_align_read_string+"\n")
gapped_align_read_string = '<tr><td>--- Chimeric reads</td><td>'+str(addcommas(a['CHIMERA_ALIGN_READS']))+'</td><td>'+perc(a['CHIMERA_ALIGN_READS'],a['TOTAL_READS'],2)+'</td></tr>'
of.write(gapped_align_read_string+"\n")
gapped_align_read_string = '<tr><td>----- Trans-chimeric reads</td><td>'+str(addcommas(a['TRANSCHIMERA_ALIGN_READS']))+'</td><td>'+perc(a['TRANSCHIMERA_ALIGN_READS'],a['TOTAL_READS'],2)+'</td></tr>'
of.write(gapped_align_read_string+"\n")
gapped_align_read_string = '<tr><td>----- Self-chimeric reads</td><td>'+str(addcommas(a['SELFCHIMERA_ALIGN_READS']))+'</td><td>'+perc(a['SELFCHIMERA_ALIGN_READS'],a['TOTAL_READS'],2)+'</td></tr>'
of.write(gapped_align_read_string+"\n")
ostr='''
<tr class="rhead"><td colspan="3">Base Stats <i>(of aligned reads)</i></td></tr>'''
of.write(ostr+"\n")
total_bases_string = '<tr><td>Total bases</td><td>'+str(addcommas(a['TOTAL_BASES']))+'</td><td></td></tr>'
of.write(total_bases_string+"\n")
unaligned_bases_string = '<tr><td>- Unaligned bases</td><td>'+str(addcommas(a['UNALIGNED_BASES']))+'</td><td>'+perc(a['UNALIGNED_BASES'],a['TOTAL_BASES'],1)+'</td></tr>'
of.write(unaligned_bases_string+"\n")
aligned_bases_string = '<tr><td>- Aligned bases</td><td>'+str(addcommas(a['ALIGNED_BASES']))+'</td><td>'+perc(a['ALIGNED_BASES'],a['TOTAL_BASES'],1)+'</td></tr>'
of.write(aligned_bases_string+"\n")
single_align_bases_string = '<tr><td>--- Single-aligned bases</td><td>'+str(addcommas(a['SINGLE_ALIGN_BASES']))+'</td><td>'+perc(a['SINGLE_ALIGN_BASES'],a['TOTAL_BASES'],1)+'</td></tr>'
of.write(single_align_bases_string+"\n")
gapped_align_bases_string = '<tr><td>--- Other-aligned bases</td><td>'+str(addcommas(a['GAPPED_ALIGN_BASES']))+'</td><td>'+perc(a['GAPPED_ALIGN_BASES'],a['TOTAL_BASES'],2)+'</td></tr>'
of.write(gapped_align_bases_string+"\n")
ostr = '''
</table>
<table class="right">
<tr><td>Unaligned</td><td><div id="unaligned_leg" class="legend_square"></div></td></tr>
<tr><td>Trans-chimeric alignment</td><td><div id="chimeric_leg" class="legend_square"></div></td></tr>
<tr><td>Self-chimeric alignment</td><td><div id="selfchimeric_leg" class="legend_square"></div></td></tr>
<tr><td>Gapped alignment</td><td><div id="gapped_leg" class="legend_square"></div></td></tr>
<tr><td>Single alignment</td><td><div id="single_leg" class="legend_square"></div></td></tr>
</table>
</div>
<div class="two_thirds right">
<div class="rhead">Summary [<a download="alignments.pdf" href="plots/alignments.pdf">pdf</a>]</div>
<img src="plots/alignments.png" alt="alignments_png" />
</div>
<div class="clear"></div>
<div class="two_thirds right">
<div class="rhead">Exon counts of best alignments [<a download="exon_size_distro.pdf" href="plots/exon_size_distro.pdf">pdf</a>]</div>
<img src="plots/exon_size_distro.png" alt="exon_size_distro_png" />
</div>
'''
of.write(ostr)
if 'GN' in special and len(special['GN']) > 1:
ostr = '''
<div class="one_half left">
<table class="one_half data_table">
<tr class="rhead"><td colspan="5">Long read name information</td></tr>
<tr><td>Type</td><td>Sub-type</td><td>Reads</td><td>Aligned</td><td>Fraction</td></tr>
'''
of.write(ostr)
for f in [x for x in special['GN'] if x[0] != 'Unclassified' or int(x[2])>0]:
of.write(' <tr><td>'+f[0]+'</td><td>'+f[1]+'</td><td>'+addcommas(int(f[2]))+'</td><td>'+addcommas(int(f[3]))+'</td><td>'+perc(int(f[3]),int(f[2]),2)+'</td></tr>'+"\n")
ostr = '''
</table>
'''
of.write(ostr)
if 'PB' in special:
# We have pacbio specific report
pb = {}
for f in special['PB']:
pb[f[0]]=f[1]
if re.search(r'\.',f[1]): pb[f[0]]=float(f[1])
ostr = '''
<div class="rhead">PacBio SMRT Cells [<a download="pacbio.pdf" href="/plots/pacbio.pdf">pdf</a>]</div>
<img src="plots/pacbio.png" alt="pacbio_png" />
<table class="horizontal_legend right">
<tr><td>Aligned</td><td><div class="legend_square pacbio_aligned_leg"></div></td><td>Unaligned</td><td><div class="legend_square pacbio_unaligned_leg"></div></td></tr>
</table>
<table class="data_table one_half">
<tr class="rhead"><td colspan="4">PacBio Stats</td></tr>
'''
of.write(ostr)
of.write(' <tr><td>Total Cell Count</td><td colspan="3">'+addcommas(int(pb['Cell Count']))+'</td></tr>')
of.write(' <tr><td>Total Molecule Count</td><td colspan="3">'+addcommas(int(pb['Molecule Count']))+'</td></tr>')
of.write(' <tr><td>Total Molecules Aligned</td><td colspan="3">'+addcommas(int(pb['Aligned Molecule Count']))+' ('+perc(pb['Aligned Molecule Count'],pb['Molecule Count'],2)+')</td></tr>')
of.write(' <tr class="rhead"><td>Per-cell Feature</td><td>Min</td><td>Avg</td><td>Max</td></tr>')
of.write(' <tr><td>Reads</td><td>'+addcommas(int(pb['Min Reads Per Cell']))+'</td><td>'+addcommas(int(pb['Avg Reads Per Cell']))+'</td><td>'+addcommas(int(pb['Max Reads Per Cell']))+'</td></tr>')
of.write(' <tr><td>Molecules</td><td>'+addcommas(int(pb['Min Molecules Per Cell']))+'</td><td>'+addcommas(int(pb['Avg Molecules Per Cell']))+'</td><td>'+addcommas(int(pb['Max Molecules Per Cell']))+'</td></tr>')
of.write(' <tr><td>Aligned Molecules</td><td>'+addcommas(int(pb['Min Aligned Molecules Per Cell']))+'</td><td>'+addcommas(int(pb['Avg Aligned Molecules Per Cell']))+'</td><td>'+addcommas(int(pb['Max Aligned Molecules Per Cell']))+'</td></tr>')
ostr = '''
</table>
'''
of.write(ostr)
ostr = '''
</div>
'''
of.write(ostr)
ostr = '''
</div>
<div class="clear"></div>
<hr />
'''
of.write(ostr)
###################################
# 3. ANNOTATION ANALYSIS
### This block should only be done when we have annotations
if args.gpd:
ostr = '''
<div class="result_block">
<div class="subject_title">Annotation Analysis</div>
<div class="one_half left">
<div class="rhead">Distribution of reads among genomic features [<a download="read_genomic_features.pdf" href="plots/read_genomic_features.pdf">pdf</a>]</div>
<img src="plots/read_genomic_features.png" alt="read_genomic_features_png" />
<table class="one_half right horizontal_legend">
<tr>
<td>Exons</td><td><div class="exon_leg legend_square"></div></td><td></td>
<td>Introns</td><td><div class="intron_leg legend_square"></div></td><td></td>
<td>Intergenic</td><td><div class="intergenic_leg legend_square"></div></td><td></td>
</tr>
</table>
</div>
<div class="one_half right">
<div class="rhead">Distribution of annotated reads [<a download="annot_lengths.pdf" href="plots/annot_lengths.pdf">pdf</a>]</div>
<img src="plots/annot_lengths.png" alt="annot_lengths_png" />
<table class="one_half right horizontal_legend">
<tr>
<td>Partial annotation</td><td><div class="partial_leg legend_square"></div></td><td></td>
<td>Full-length</td><td><div class="full_leg legend_square"></div></td><td></td>
<td>Unannotated</td><td><div class="unannotated_leg legend_square"></div></td><td></td>
</tr>
</table>
</div>
<div class="clear"></div>
<div class="one_half right">
<div class="rhead">Distribution of identified reference transcripts [<a download="transcript_distro.pdf" href="plots/transcript_distro.pdf">pdf</a>]</div>
<img src="plots/transcript_distro.png" alt="transcript_distro_png" />
<table class="one_half right horizontal_legend">
<tr>
<td>Partial annotation</td><td><div class="partial_leg legend_square"></div></td><td></td>
<td>Full-length</td><td><div class="full_leg legend_square"></div></td><td></td>
</tr>
</table>
</div>
<div class="one_half left">
<table class="data_table one_half">
<tr class="rhead"><td colspan="5">Annotation Counts</td></tr>
<tr><td>Feature</td><td>Evidence</td><td>Reference</td><td>Detected</td><td>Percent</td></tr>
'''
of.write(ostr)
cnt = len([x for x in ref_genes.keys() if sum(ref_genes[x])>0])
of.write(' <tr><td>Genes</td><td>Any match</td><td>'+addcommas(len(ref_genes.keys()))+'</td><td>'+addcommas(cnt)+'</td><td>'+perc(cnt,len(ref_genes.keys()),2)+'</td></tr>'+"\n")
cnt = len([x for x in ref_genes.keys() if ref_genes[x][1]>0])
of.write(' <tr><td>Genes</td><td>Full-length</td><td>'+addcommas(len(ref_genes.keys()))+'</td><td>'+addcommas(cnt)+'</td><td>'+perc(cnt,len(ref_genes.keys()),2)+'</td></tr>'+"\n")
cnt = len([x for x in ref_transcripts.keys() if sum(ref_transcripts[x])>0])
of.write(' <tr><td>Transcripts</td><td>Any match</td><td>'+addcommas(len(ref_transcripts.keys()))+'</td><td>'+addcommas(cnt)+'</td><td>'+perc(cnt,len(ref_transcripts.keys()),2)+'</td></tr>'+"\n")
cnt = len([x for x in ref_transcripts.keys() if ref_transcripts[x][1]>0])
of.write(' <tr><td>Transcripts</td><td>Full-length</td><td>'+addcommas(len(ref_transcripts.keys()))+'</td><td>'+addcommas(cnt)+'</td><td>'+perc(cnt,len(ref_transcripts.keys()),2)+'</td></tr>'+"\n")
ostr = '''
</table>
<table class="data_table one_half">
<tr class="rhead"><td colspan="4">Top Genes</td></tr>
<tr><td>Gene</td><td>Partial</td><td>Full-length</td><td>Total Reads</td></tr>
'''
of.write(ostr)
# get our top genes
vs = reversed(sorted(ref_genes.keys(),key=lambda x: sum(ref_genes[x]))[-5:])
for v in vs:
of.write(' <tr><td>'+v+'</td><td>'+addcommas(ref_genes[v][0])+'</td><td>'+addcommas(ref_genes[v][1])+'</td><td>'+addcommas(sum(ref_genes[v]))+'</td></tr>'+"\n")
ostr='''
</table>
<table class="data_table one_half">
<tr class="rhead"><td colspan="5">Top Transcripts</td></tr>
<tr><td>Transcript</td><td>Gene</td><td class="smaller_text">Partial</td><td class="smaller_text">Full-length</td><td class="smaller_text">Total Reads</td></tr>
'''
of.write(ostr)
vs = reversed(sorted(ref_transcripts.keys(),key=lambda x: sum(ref_transcripts[x]))[-5:])
for v in vs:
of.write(' <tr><td class="smaller_text">'+v+'</td><td class="smaller_text">'+tx_to_gene[v]+'</td><td class="smaller_text">'+addcommas(ref_transcripts[v][0])+'</td><td class="smaller_text">'+addcommas(ref_transcripts[v][1])+'</td><td class="smaller_text">'+addcommas(sum(ref_transcripts[v]))+'</td></tr>'+"\n")
ostr = '''
</table>
</div>
<div class="clear"></div>
</div>
<hr />
'''
of.write(ostr) # still in conditional for if we have annotation
##################################
# 4. COVERAGE ANALYSIS
### For Coverage we can do part of it without annotations
ostr = '''
<div class="result_block">
<div class="subject_title">Coverage analysis     
<span class="highlight">'''
of.write(ostr+"\n")
of.write(perc(coverage_data['genome_covered'],coverage_data['genome_total'],2)+"\n")
ostr = '''
</span><span class="highlight2">reference sequences covered</span>
</div>
<div class="one_half left">
<div class="rhead">Coverage of reference sequences [<a download="covgraph.pdf" href="plots/covgraph.pdf">pdf</a>]</div>
<img src="plots/covgraph.png" alt="covgraph_png" />
</div>
<div class="one_half left">
<div class="rhead">Coverage distribution [<a download="perchrdepth.pdf" href="plots/perchrdepth.pdf">pdf</a>]</div>
<img src="plots/perchrdepth.png" alt="perchrdepth_png" />
</div>
<div class="clear"></div>
'''
of.write(ostr)
### The next part of coverage requires annotations
if args.gpd:
ostr = '''
<div class="one_half left">
<table class="data_table one_half">
<tr class="rhead"><td colspan="4">Coverage statistics</td></tr>
<tr><td>Feature</td><td>Feature (bp)</td><td>Coverage (bp)</td><td>Fraction</td></tr>
'''
# still in annotation conditional
of.write(ostr)
of.write(' <tr><td>Genome</td><td>'+addcommas(coverage_data['genome_total'])+'</td><td>'+addcommas(coverage_data['genome_covered'])+'</td><td>'+perc(coverage_data['genome_covered'],coverage_data['genome_total'],2)+'</td></tr>')
of.write(' <tr><td>Exons</td><td>'+addcommas(coverage_data['exons_total'])+'</td><td>'+addcommas(coverage_data['exons_covered'])+'</td><td>'+perc(coverage_data['exons_covered'],coverage_data['exons_total'],2)+'</td></tr>')
of.write(' <tr><td>Introns</td><td>'+addcommas(coverage_data['introns_total'])+'</td><td>'+addcommas(coverage_data['introns_covered'])+'</td><td>'+perc(coverage_data['introns_covered'],coverage_data['introns_total'],2)+'</td></tr>')
of.write(' <tr><td>Intergenic</td><td>'+addcommas(coverage_data['intergenic_total'])+'</td><td>'+addcommas(coverage_data['intergenic_covered'])+'</td><td>'+perc(coverage_data['intergenic_covered'],coverage_data['intergenic_total'],2)+'</td></tr>')
ostr = '''
</table>
</div>
<div class="one_half right">
<div class="rhead">Annotated features coverage [<a download="feature_depth.pdf" href="plots/feature_depth.pdf">pdf</a>]</div>
<img src="plots/feature_depth.png" alt="feature_depth_png" />
<table class="one_third right">
<tr><td>Genome</td><td><div class="legend_square genome_cov_leg"></div></td>
<td>Exons</td><td><div class="legend_square exon_cov_leg"></div></td>
<td>Introns</td><td><div class="legend_square intron_cov_leg"></div></td>
<td>Intergenic</td><td><div class="legend_square intergenic_cov_leg"></div></td></tr>
</table>
</div>
'''
of.write(ostr)
if os.name != 'nt' and sys.platform != 'darwin':
ostr = '''
<div class="one_half left">
<div class="rhead">Bias in alignment to reference transcripts [<a download="bias.pdf" href="plots/bias.pdf">pdf</a>]</div>
<table>
'''
of.write(ostr)
# still in conditional for annotation requirement
of.write('<tr><td colspan="2">Evidence from:</td></tr>')
of.write('<tr><td>Total Transcripts</td><td>'+str(addcommas(bias_tx_count))+'</td></tr>'+"\n")
of.write('<tr><td>Total reads</td><td>'+str(addcommas(bias_read_count))+'</td></tr>'+"\n")
ostr='''
</table>
'''
of.write(ostr)
ostr='''
<img src="plots/bias.png" alt="bias_png" />
'''
if bias_read_count > 0: of.write(ostr)
ostr='''
</div>
'''
of.write(ostr)
ostr = '''
<div class="clear"></div>
'''
# still in annotations check
of.write(ostr)
# done with annotations check
ostr = '''
</div>
<hr />
'''
of.write(ostr)
#############################################
# 5. RAREFRACTION ANALYSIS
### Rarefraction analysis block requires do_loci or annotations
if args.do_loci or args.gpd:
ostr = '''
<div class="subject_title"><table><tr><td class="c1">Rarefraction analysis</td>
'''
of.write(ostr)
if args.gpd:
ostr = '''
<td class="c2"><span class="highlight">
'''
# still in do_loci or annotations conditional
of.write(ostr)
of.write(str(addcommas(geneany))+"\n")
ostr = '''
</span></td><td class="c3"><span class="highlight2">Genes detected</span></td><td class="c4"><span class="highlight">
'''
# still in do_loci or annotations conditional
of.write(ostr)
of.write(str(addcommas(genefull))+"\n")
ostr = '''
</span></td><td class="c5"><span class="highlight2">Full-length genes</span></td>
'''
# still in do_loci or annotations conditional
of.write(ostr)
ostr = '''
</tr></table>
</div>
<div class="result_block">
<div class="one_half left">
'''
of.write(ostr)
if args.gpd:
ostr = '''
<div class="rhead">Gene detection rarefraction [<a download="gene_rarefraction.pdf" href="plots/gene_rarefraction.pdf">pdf</a>]</div>
<img src="plots/gene_rarefraction.png" alt="gene_rarefraction_png" />
</div>
<div class="one_half left">
<div class="rhead">Transcript detection rarefraction [<a download="transcript_rarefraction" href="plots/transcript_rarefraction.pdf">pdf</a>]</div>
<img src="plots/transcript_rarefraction.png" alt="transcript_rarefraction_png" />
</div>
<div class="clear"></div>
'''
# still in args.gpd
of.write(ostr)
#done with the annotation (args.gpd) check
ostr = '''
<div class="one_half left">
<table class="data_table one_third">
<tr><td class="rhead" colspan="3">Rarefraction stats</td></tr>
<tr class="bold"><td>Feature</td><td>Criteria</td><td>Count</td></tr>
'''
# still in do_loci or annotations conditional
of.write(ostr+"\n")
if args.gpd:
of.write('<tr><td>Gene</td><td>full-length</td><td>'+str(addcommas(genefull))+'</td></tr>')
of.write('<tr><td>Gene</td><td>any match</td><td>'+str(addcommas(geneany))+'</td></tr>')
of.write('<tr><td>Transcript</td><td>full-length</td><td>'+str(addcommas(txfull))+'</td></tr>')
of.write('<tr><td>Transcript</td><td>any match</td><td>'+str(addcommas(txany))+'</td></tr>')
if args.do_loci: of.write('<tr><td>Locus</td><td></td><td>'+str(addcommas(locuscount))+'</td></tr>')
ostr='''
</table>
<table id="rarefraction_legend">
<tr><td>Any match</td><td><div class="rareany_leg legend_square"></div></td></tr>
<tr><td>full-length</td><td><div class="rarefull_leg legend_square"></div></td></tr>
<tr><td class="about" colspan="2">vertical line height indicates 5%-95% CI of sampling</td></tr>
</table>
</div>
'''
# still in do_loci or annotations conditional
of.write(ostr)
if args.do_loci:
ostr = '''
<div class="one_half left">
<div class="rhead">Locus detection rarefraction [<a download="locus_rarefraction.pdf" href="plots/locus_rarefraction.pdf">pdf</a>]</div>
<img src="plots/locus_rarefraction.png" alt="locus_rarefraction_png" />
</div>
'''
# in do_loci condition
of.write(ostr)
# still in do_loci or annotations conditional
ostr = '''
</div>
<div class="clear"></div>
<hr />
'''
# still in do_loci or annotations conditional
of.write(ostr)
# Finished do_loci or annotations conditional
###################################
# 6. ERROR PATTERN
# We need a reference in order to do error pattern analysis
if args.genome or args.gpd:
ostr = '''
<div class="subject_title">Error pattern analysis     <span class="highlight">
'''
of.write(ostr)
if not args.genome and args.gpd:
# We don't have any information to fill in the header about error rates
ostr = '''
</span></div>
'''
of.write(ostr)
if args.genome:
# We do have error rate information
error_rate = perc(e['ANY_ERROR'],e['ALIGNMENT_BASES'],3)
of.write(error_rate)
ostr='''
</span> <span class="highlight2">error rate</span></div>
<div class="subject_subtitle">      based on aligned segments</div>
<div class="result_block">
<div class="full_length right">
<div class="rhead">Error rates, given a target sequence [<a download="context_plot.pdf" href="plots/context_plot.pdf">pdf</a>]</div>
<img src="plots/context_plot.png" alt="context_plot_png" />
</div>
<div class="clear"></div>
<table class="data_table one_third left">
<tr class="rhead"><td colspan="3">Alignment stats</td></tr>
'''
of.write(ostr+"\n")
best_alignments_sampled_string = '<tr><td>Best alignments sampled</td><td>'+str(e['ALIGNMENT_COUNT'])+'</td><td></td></tr>'
of.write(best_alignments_sampled_string+"\n")
ostr = '''
<tr class="rhead"><td colspan="3">Base stats</td></tr>
'''
of.write(ostr+"\n")
bases_analyzed_string = '<tr><td>Bases analyzed</td><td>'+str(addcommas(e['ALIGNMENT_BASES']))+'</td><td></td></tr>'
of.write(bases_analyzed_string+"\n")
correctly_aligned_string = '<tr><td>- Correctly aligned bases</td><td>'+str(addcommas(e['ALIGNMENT_BASES']-e['ANY_ERROR']))+'</td><td>'+perc((e['ALIGNMENT_BASES']-e['ANY_ERROR']),e['ALIGNMENT_BASES'],1)+'</td></tr>'
of.write(correctly_aligned_string+"\n")
total_error_string = '<tr><td>- Total error bases</td><td>'+str(addcommas(e['ANY_ERROR']))+'</td><td>'+perc(e['ANY_ERROR'],e['ALIGNMENT_BASES'],3)+'</td></tr>'
of.write(total_error_string+"\n")
mismatched_string = '<tr><td>--- Mismatched bases</td><td>'+str(addcommas(e['MISMATCHES']))+'</td><td>'+perc(e['MISMATCHES'],e['ALIGNMENT_BASES'],3)+'</td></tr>'
of.write(mismatched_string+"\n")
deletion_string = '<tr><td>--- Deletion bases</td><td>'+str(addcommas(e['ANY_DELETION']))+'</td><td>'+perc(e['ANY_DELETION'],e['ALIGNMENT_BASES'],3)+'</td></tr>'
of.write(deletion_string+"\n")
complete_deletion_string = '<tr><td>----- Complete deletion bases</td><td>'+str(addcommas(e['COMPLETE_DELETION']))+'</td><td>'+perc(e['COMPLETE_DELETION'],e['ALIGNMENT_BASES'],3)+'</td></tr>'
of.write(complete_deletion_string+"\n")
homopolymer_deletion_string = '<tr><td>----- Homopolymer deletion bases</td><td>'+str(addcommas(e['HOMOPOLYMER_DELETION']))+'</td><td>'+perc(e['HOMOPOLYMER_DELETION'],e['ALIGNMENT_BASES'],3)+'</td></tr>'
of.write(homopolymer_deletion_string+"\n")
insertion_string = '<tr><td>--- Insertion bases</td><td>'+str(addcommas(e['ANY_INSERTION']))+'</td><td>'+perc(e['ANY_INSERTION'],e['ALIGNMENT_BASES'],3)+'</td></tr>'
of.write(insertion_string+"\n")
complete_insertion_string = '<tr><td>----- Complete insertion bases</td><td>'+str(addcommas(e['COMPLETE_INSERTION']))+'</td><td>'+perc(e['COMPLETE_INSERTION'],e['ALIGNMENT_BASES'],3)+'</td></tr>'
of.write(complete_insertion_string+"\n")
homopolymer_insertion_string = '<tr><td>----- Homopolymer insertion bases</td><td>'+str(addcommas(e['HOMOPOLYMER_INSERTION']))+'</td><td>'+perc(e['HOMOPOLYMER_INSERTION'],e['ALIGNMENT_BASES'],3)+'</td></tr>'
of.write(homopolymer_insertion_string+"\n")
ostr = '''
</table>
<div class="full_length left">
<div class="rhead">Alignment-based error rates [<a download="alignment_error_plot.pdf" href="plots/alignment_error_plot.pdf">pdf</a>]</div>
<img class="square_image" src="plots/alignment_error_plot.png" alt="alignment_error_plot_png" />
</div>
</div>
<div class="clear"></div>
'''
of.write(ostr)
if args.gpd and os.name != 'nt' and sys.platform != 'darwin':
# We can output the junction variance plot
ostr = '''
<div class="left full_length">
<div class="rhead">Distance of observed junctions from reference junctions [<a download="junvar.pdf" href="plots/junvar.pdf">pdf</a>]</div>
<img src="plots/junvar.png" alt="junvar_png" />
</div>
<div class="clear"></div>
'''
of.write(ostr)
#close error box if we have a reason to be here
if args.genome or args.gpd:
ostr = '''
<hr />
'''
of.write(ostr)
# finished with args.genome condition
##############################
# 8. Raw data block
ostr = '''
<div id="bed_data">
<table class="header_table">
<tr><td class="rhead" colspan="2">Browser-ready Bed data</td></tr>
<tr>
<td>Best Alignments:</td>
<td class="raw_files"><a download="best.sorted.bed.gz" href="data/best.sorted.bed.gz">best.sorted.bed.gz</a></td>
</tr>
<tr>
<td>Gapped Alignments:</td>
<td class="raw_files"><a download="gapped.bed.gz" href="data/gapped.bed.gz">gapped.bed.gz</a></td>
</tr>
<tr>
<td>Trans-chimeric Alignments:</td>
<td class="raw_files"><a download="chimera.bed.gz" href="data/chimera.bed.gz">chimera.bed.gz</a></td>
</tr>
<tr>
<td>Self-chimeric Alignments:</td>
<td class="raw_files"><a download="technical_chimeras.bed.gz" href="data/technical_chimeras.bed.gz">technical_chimeras.bed.gz</a></td>
</tr>
<tr>
<td>Other-chimeric Alignments:</td>
<td class="raw_files"><a download="techinical_atypical_chimeras.bed.gz" href="data/technical_atypical_chimeras.bed.gz">techinical_atypical_chimeras.bed.gz</a></td>
</tr>
</table>
</div>
<div id="raw_data">
<table class="header_table">
<tr><td class="rhead" colspan="2">Raw data</td></tr>
<tr>
<td>Alignments stats raw report:</td>
<td class="raw_files"><a id="alignment_stats.txt" href="data/alignment_stats.txt">alignment_stats.txt</a></td>
</tr>
<tr>
<td>Read lengths:</td>
<td class="raw_files"><a download="lengths.txt.gz" href="data/lengths.txt.gz">lengths.txt.gz</a></td>
</tr>
<tr>
<td>Reference sequence lengths:</td>
<td class="raw_files"><a id="chrlens.txt" href="data/chrlens.txt">chrlens.txt</a></td>
</tr>
<tr>
<td>Coverage bed:</td>
<td class="raw_files"><a download="depth.sorted.bed.gz" href="data/depth.sorted.bed.gz">depth.sorted.bed.gz</a></td>
</tr>
'''
of.write(ostr)
if args.do_loci:
of.write('<tr> <td>Loci basics bed:</td><td class="raw_files"><a download="loci.bed.gz" href="data/loci.bed.gz">loci.bed.gz</a></td></tr>'+"\n")
of.write('<tr><td>Locus read data bed:</td><td class="raw_files"><a download="loci-all.bed.gz" href="data/loci-all.bed.gz">loci-all.bed.gz</a></td></tr>'+"\n")
of.write('<tr><td>Locus rarefraction:</td><td class="raw_files"><a download="locus_rarefraction.txt" href="data/locus_rarefraction.txt">locus_rarefraction.txt</a></td></tr>'+"\n")
if args.gpd:
ostr = '''
<tr>
<td>Read annotations:</td>
<td class="raw_files"><a download="annotbest.txt.gz" href="data/annotbest.txt.gz">annotbest.txt.gz</a></td>
</tr>
<tr>
<td>Read genomic features:</td>
<td class="raw_files"><a download="read_genomic_features.txt.gz" href="data/read_genomic_features.txt.gz">read_genomic_features.txt.gz</a></td>
</tr>
<tr>
<td>Annotation status and read lengths:</td>
<td class="raw_files"><a download="annot_lengths.txt.gz" href="data/annot_lengths.txt.gz">annot_lengths.txt.gz</a></td>
</tr>
<tr>
<td>Gene any match rarefraction:</td>
<td class="raw_files"><a download="gene_rarefraction.txt" href="data/gene_rarefraction.txt">gene_rarefraction.txt</a></td>
</tr>
<tr>
<td>Gene full-length rarefraction:</td>
<td class="raw_files"><a download="gene_full_rarefraction.txt" href="data/gene_full_rarefraction.txt">gene_full_rarefraction.txt</a></td>
</tr>
<tr>
<td>Transcript any match rarefraction:</td>
<td class="raw_files"><a download="transcript_rarefraction.txt" href="data/transcript_rarefraction.txt">transcript_rarefraction.txt</a></td>
</tr>
<tr>
<td>Transcript full-length rarefraction:</td>
<td class="raw_files"><a download="transcript_full_rarefraction.txt" href="data/transcript_full_rarefraction.txt">transcript_full_rarefraction.txt</a></td>
</tr>
'''
of.write(ostr)
ostr = '''
<tr>
<td>Bias table:</td>
<td class="raw_files"><a download="bias_table.txt.gz" href="data/bias_table.txt.gz">bias_table.txt.gz</a></td>
</tr>
'''
if os.name != 'nt' and sys.platform != 'darwin': of.write(ostr)
ostr = '''
<tr>
<td>Junction variance table:</td>
<td class="raw_files"><a download="junvar.txt" href="data/junvar.txt">junvar.txt</a></td>
</tr>
'''
# if args.gpd
if os.name != 'nt' and sys.platform != 'darwin': of.write(ostr)
# done with args.gpd
#output data that depends on reference
if args.genome:
ostr = '''
<tr>
<td>Alignment errors data:</td>
<td class="raw_files"><a download="error_data.txt" href="data/error_data.txt">error_data.txt</a></td>
</tr>
<tr>
<td>Alignment error report:</td>
<td class="raw_files"><a download="error_stats.txt" href="data/error_stats.txt">error_stats.txt</a></td>
</tr>
<tr>
<td>Contextual errors data:</td>
<td class="raw_files"><a download="context_error_data.txt" href="data/context_error_data.txt">context_error_data.txt</a></td>
</tr>
'''
# if args.genome
of.write(ostr)
# back to any condition
ostr = '''
</table>
</div>
</body>
</html>
'''
of.write(ostr)
#Pre: numerator and denominator
#Post: percentage string
def perc(num,den,decimals=0):
s = "{0:."+str(decimals)+"f}%"
if float(den) == 0: return 'NA'
return s.format(100*float(num)/float(den))
def addcommas(val):
return locale.format("%d",val,grouping=True)
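# Illustrative usage of the two helpers above (addcommas grouping assumes a locale
# such as en_US is active; under the default C locale no separators are inserted):
#   perc(3, 4, 1)      -> '75.0%'
#   perc(1, 0)         -> 'NA'
#   addcommas(1234567) -> '1,234,567'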
#def do_inputs():
# # Setup command line inputs
# parser=argparse.ArgumentParser(description="Create an output report",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# parser.add_argument('input',help="INPUT FILE or '-' for STDIN")
# parser.add_argument('-o','--output',help="OUTPUT Folder or STDOUT if not set")
# parser.add_argument('--portable_output',help="OUTPUT file in a portable html format")
# group1 = parser.add_mutually_exclusive_group(required=True)
# group1.add_argument('-r','--reference',help="Reference Fasta")
# group1.add_argument('--no_reference',action='store_true',help="No Reference Fasta")
# parser.add_argument('--annotation',help="Reference annotation genePred")
# parser.add_argument('--threads',type=int,default=1,help="INT number of threads to run. Default is system cpu count")
# # Temporary working directory step 1 of 3 - Definition
# parser.add_argument('--tempdir',required=True,help="This temporary directory will be used, but will remain after executing.")
#
# ### Parameters for alignment plots
# parser.add_argument('--min_aligned_bases',type=int,default=50,help="for analysizing alignment, minimum bases to consider")
# parser.add_argument('--max_query_overlap',type=int,default=10,help="for testing gapped alignment advantage")
# parser.add_argument('--max_target_overlap',type=int,default=10,help="for testing gapped alignment advantage")
# parser.add_argument('--max_query_gap',type=int,help="for testing gapped alignment advantge")
# parser.add_argument('--max_target_gap',type=int,default=500000,help="for testing gapped alignment advantage")
# parser.add_argument('--required_fractional_improvement',type=float,default=0.2,help="require gapped alignment to be this much better (in alignment length) than single alignment to consider it.")
#
# ### Parameters for locus analysis
# parser.add_argument('--min_depth',type=float,default=1.5,help="require this or more read depth to consider locus")
# parser.add_argument('--min_coverage_at_depth',type=float,default=0.8,help="require at leas this much of the read be covered at min_depth")
# parser.add_argument('--min_exon_count',type=int,default=2,help="Require at least this many exons in a read to consider assignment to a locus")
#
# ### Params for alignment error plot
# parser.add_argument('--alignment_error_scale',nargs=6,type=float,help="<ins_min> <ins_max> <mismatch_min> <mismatch_max> <del_min> <del_max>")
# parser.add_argument('--alignment_error_max_length',type=int,default=100000,help="The maximum number of alignment bases to calculate error from")
#
# ### Params for context error plot
# parser.add_argument('--context_error_scale',nargs=6,type=float,help="<ins_min> <ins_max> <mismatch_min> <mismatch_max> <del_min> <del_max>")
# parser.add_argument('--context_error_stopping_point',type=int,default=1000,help="Sample at least this number of each context")
# args = parser.parse_args()
#
# # Temporary working directory step 2 of 3 - Creation
# setup_tempdir(args)
# return args
#def setup_tempdir(args):
# if not os.path.exists(args.tempdir):
# os.makedirs(args.tempdir.rstrip('/'))
# if not os.path.exists(args.tempdir):
# sys.stderr.write("ERROR: Problem creating temporary directory\n")
# sys.exit()
# return
def external(args,version=None):
#set our global by the input version
global g_version
g_version = version
main(args)
if __name__=="__main__":
sys.stderr.write("calling create html as main\n")
#do our inputs
#args = do_inputs()
#main(args)
|
AlignQC
|
/AlignQC-2.0.5.tar.gz/AlignQC-2.0.5/alignqc/create_html.py
|
create_html.py
|
import argparse, os, inspect, sys, gzip
from subprocess import Popen, PIPE
from tempfile import mkdtemp, gettempdir
from shutil import rmtree
from distutils.spawn import find_executable
from seqtools.format.gtf import GTFFile
import prepare_all_data
import create_html
g_version = None
def main(args):
if not args.output and not args.portable_output and not args.output_folder:
sys.stderr.write("ERROR: must specify some kind of output\n")
sys.exit()
## Check and see if directory for outputs exists
if args.output_folder:
if os.path.isdir(args.output_folder):
sys.stderr.write("ERROR: output directory already exists. Remove it to to use this location\n")
sys.exit()
global g_version
#Make sure rscript is installed
try:
cmd = args.rscript_path+' --version'
prscript = Popen(cmd.split(),stdout=PIPE,stderr=PIPE)
rline = prscript.communicate()
sys.stderr.write("Using Rscript version:\n")
sys.stderr.write(rline[1].rstrip()+"\n")
except:
sys.stderr.write("ERROR: Rscript not installed\n")
sys.exit()
if args.no_genome:
sys.stderr.write("WARNING: No reference specified. Will be unable to report error profile\n")
if args.no_transcriptome:
sys.stderr.write("WARNING: No annotation specified. Will be unable to report feature specific outputs\n")
#Do the conversion of gtf as early as possible if we have one
if args.gtf:
if args.gtf[-3:] == '.gz': ginf = gzip.open(args.gtf)
else: ginf = open(args.gtf)
gobj = GTFFile(ginf)
of = open(args.tempdir+'/txome.gpd','w')
gobj.write_genepred(of)
of.close()
args.gpd = args.tempdir+'/txome.gpd'
prepare_all_data.external(args)
create_html.external(args,version=g_version)
# Temporary working directory step 3 of 3 - Cleanup
if not args.specific_tempdir:
if os.name != 'nt':
rmtree(args.tempdir)
def setup_tempdir(args):
if args.specific_tempdir:
if not os.path.exists(args.specific_tempdir):
os.makedirs(args.specific_tempdir.rstrip('/'))
args.tempdir = args.specific_tempdir.rstrip('/')
if not os.path.exists(args.specific_tempdir.rstrip('/')):
sys.stderr.write("ERROR: Problem creating temporary directory\n")
sys.exit()
else:
args.tempdir = mkdtemp(prefix="weirathe.",dir=args.tempdir.rstrip('/'))
if not os.path.exists(args.tempdir.rstrip('/')):
sys.stderr.write("ERROR: Problem creating temporary directory\n")
sys.exit()
if not os.path.exists(args.tempdir):
sys.stderr.write("ERROR: Problem creating temporary directory\n")
sys.exit()
return
def external_cmd(cmd,version=None):
#set version by input
global g_version
g_version = version
cache_argv = sys.argv
sys.argv = cmd
args = do_inputs()
main(args)
sys.argv = cache_argv
def do_inputs():
parser=argparse.ArgumentParser(description="Create an output report",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
label1 = parser.add_argument_group(title="Input parameters",description="Required BAM file. If reference or annotation is not set, use --no_reference or --no_annotation respectively to continue.")
label1.add_argument('input',help="INPUT BAM file")
group1 = label1.add_mutually_exclusive_group(required=True)
group1.add_argument('-g','--genome',help="Reference Fasta")
group1.add_argument('--no_genome',action='store_true',help="No Reference Fasta")
group2 = label1.add_mutually_exclusive_group(required=True)
group2.add_argument('-t','--gtf',help="Reference transcriptome in GTF format, assumes gene_id and transcript_id are defining gene and transcript names")
group2.add_argument('--gpd',help="Reference transcriptome in genePred format")
group2.add_argument('--no_transcriptome',action='store_true',help="No annotation is available")
# output options
label2 = parser.add_argument_group(title="Output parameters",description="At least one output parameter must be set")
label2.add_argument('-o','--output',help="OUTPUT xhtml with data")
label2.add_argument('--portable_output',help="OUTPUT file in a small xhtml format")
label2.add_argument('--output_folder',help="OUTPUT folder of all data")
label3 = parser.add_argument_group(title="Performance parameters")
label3.add_argument('--threads',type=int,default=1,help="INT number of threads to run. Default is system cpu count")
# Temporary working directory step 1 of 3 - Definition
label4 = parser.add_argument_group(title="Temporary folder parameters")
group = label4.add_mutually_exclusive_group()
group.add_argument('--tempdir',default=gettempdir(),help="The temporary directory is made and destroyed here.")
group.add_argument('--specific_tempdir',help="This temporary directory will be used, but will remain after executing.")
### Parameters for alignment plots
label5 = parser.add_argument_group(title="Alignment plot parameters")
label5.add_argument('--min_intron_size',type=int,default=68,help="minimum intron size when smoothing indels")
label5.add_argument('--min_aligned_bases',type=int,default=50,help="for analyzing alignment, minimum bases to consider")
label5.add_argument('--max_query_overlap',type=int,default=10,help="for testing gapped alignment advantage")
label5.add_argument('--max_target_overlap',type=int,default=10,help="for testing gapped alignment advantage")
label5.add_argument('--max_query_gap',type=int,help="for testing gapped alignment advantage")
label5.add_argument('--max_target_gap',type=int,default=500000,help="for testing gapped alignment advantage")
label5.add_argument('--required_fractional_improvement',type=float,default=0.2,help="require gapped alignment to be this much better (in alignment length) than single alignment to consider it.")
### Parameters for locus analysis
label6 = parser.add_argument_group(title="Locus parameters",description="Optionally produce plots and data regarding clusters of sequences")
label6.add_argument('--do_loci',action='store_true',help="this analysis is time consuming at the moment\n")
label6.add_argument('--min_depth',type=float,default=1.5,help="require this or more read depth to consider locus")
label6.add_argument('--min_coverage_at_depth',type=float,default=0.8,help="require at least this much of the read to be covered at min_depth")
label6.add_argument('--min_exon_count',type=int,default=2,help="Require at least this many exons in a read to consider assignment to a locus")
label6.add_argument('--locus_downsample',type=int,default=100,help="Limit how deep to search loci\n")
### Params for alignment error plot
label7 = parser.add_argument_group(title="Alignment error parameters")
label7.add_argument('--alignment_error_scale',nargs=6,type=float,help="<ins_min> <ins_max> <mismatch_min> <mismatch_max> <del_min> <del_max>")
label7.add_argument('--alignment_error_max_length',type=int,default=1000000,help="The maximum number of alignment bases to calculate error from")
### Params for context error plot
label8 = parser.add_argument_group(title="Context error parameters")
label8.add_argument('--context_error_scale',nargs=6,type=float,help="<ins_min> <ins_max> <mismatch_min> <mismatch_max> <del_min> <del_max>")
label8.add_argument('--context_error_stopping_point',type=int,default=5000,help="Sample at least this number of each context")
## Params for rarefraction plots
label9 = parser.add_argument_group(title="Rarefraction plot parameters")
label9.add_argument('--samples_per_xval',type=int,default=10)
### Parameters for bias plots
label10 = parser.add_argument_group(title="Bias parameters")
label10.add_argument('--max_bias_data',type=int,default=500000,help="Bias does not need too large of a dataset. By default data will be downsampled for large datasets.")
label11 = parser.add_argument_group(title="Path parameters")
label11.add_argument('--rscript_path',default='Rscript',help="The location of the Rscript executable. Default is installed in path")
args = parser.parse_args()
if args.output_folder:
if os.path.exists(args.output_folder):
parser.error("output folder already exists. will not overwrite output folder")
setup_tempdir(args)
ex = find_executable(args.rscript_path)
if not ex:
parser.error("Rscript is required and could not located in '"+args.rscript_path+"' its best if you just have it installed in your path by installing the base R program. Or you can specify its location with the --rscript_path option")
ex = find_executable('sort')
if not ex:
parser.error("the 'sort' command-line utility is required but could not be located. Perhaps your environment does not provide standard shell utilities")
ex = find_executable('zcat')
if not ex:
parser.error("the 'zcat' command-line utility is required but could not be located. Perhaps your environment does not provide standard shell utilities")
ex = find_executable('gzip')
if not ex:
parser.error("the 'gzip' command-line utility is required but could not be located. Perhaps your environment does not provide standard shell utilities")
if os.name == 'nt' and args.threads > 1:
args.threads = 1
sys.stderr.write("WARNING: Windows OS detected. Operating in single thread mode. close_fds dependencies need resolved before multi-thread windows mode is enabled.\n")
if sys.platform == 'darwin' and args.threads > 1:
args.threads = 1
sys.stderr.write("WARNING: Mac OS detected. Operating in single thread mode. close_fds dependencies need resolved before multi-thread windows mode is enabled.\n")
return args
if __name__=='__main__':
args = do_inputs()
main(args)
|
AlignQC
|
/AlignQC-2.0.5.tar.gz/AlignQC-2.0.5/alignqc/analyze.py
|
analyze.py
|
import sys, argparse, gzip, re, random, collections, inspect, os
from multiprocessing import Pool, cpu_count
from seqtools.statistics import average, median
def main(args):
inf = sys.stdin
if args.input != '-':
if re.search(r'\.gz$',args.input):
inf = gzip.open(args.input)
else:
inf = open(args.input)
of = sys.stdout
if args.output:
of = open(args.output,'w')
vals = []
for line in inf:
f = line.rstrip().split("\t")
if args.full and f[4] != 'full':
vals.append(None)
continue
if args.gene:
vals.append(f[2])
elif args.transcript:
vals.append(f[3])
if args.original_read_count:
if len(vals) > args.original_read_count:
sys.stderr.write("ERROR: cant have a read count greater than the original read count\n")
sys.exit()
vals += [None]*(args.original_read_count-len(vals))
# vals now holds an array to select from
total = len(vals)
xvals = make_sequence(total)
# make shuffled arrays to use for each point
qsvals = []
if args.threads > 1:
p = Pool(processes=args.threads)
for i in range(0,args.samples_per_xval):
if args.threads > 1:
qsvals.append(p.apply_async(get_shuffled_array,args=(vals,)))
else:
qsvals.append(Queue(get_shuffled_array(vals)))
if args.threads > 1:
p.close()
p.join()
svals = [x.get() for x in qsvals]
second_threads = 1
if second_threads > 1:
p = Pool(processes=second_threads)
results = []
for xval in xvals:
if second_threads > 1:
r = p.apply_async(analyze_x,args=(xval,svals,args))
results.append(r)
else:
r = Queue(analyze_x(xval,svals,args))
results.append(r)
if second_threads > 1:
p.close()
p.join()
for r in [x.get() for x in results]:
of.write("\t".join([str(x) for x in r])+"\n")
inf.close()
of.close()
class Queue:
def __init__(self,val):
self.val = val
def get(self):
return self.val
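# Queue is a minimal stand-in for multiprocessing's AsyncResult: both the threaded
# and single-threaded paths can call .get() on whatever was appended to qsvals/results.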
def get_shuffled_array(val):
random.shuffle(val)
return val[:]
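# Note: random.shuffle mutates the list it is handed; the slice copy returned above
# gives each sample its own independent snapshot of the ordering.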
# vals contains the annotation for each read
def analyze_x(xval,svals,args):
s = args.samples_per_xval
#cnts = sorted(
# [len(
# [k for k in collections.Counter([z for z in [random.choice(vals) for y in range(0,xval)] if z]).values() if k >= args.min_depth]
# )
# for j in range(0,s)]
# )
cnts = []
for j in range(0,s):
vals = svals[j][0:xval]
cnts.append(len([x for x in collections.Counter([k for k in vals if k]).values() if x >= args.min_depth]))
cnts = sorted(cnts)
lower = float(cnts[int(len(cnts)*0.05)])
mid = median(cnts)
upper = float(cnts[int(len(cnts)*0.95)])
#print len(vals)
#print len([x for x in vals if x >0])
#print cnts[0:5]
#print cnts[-5:]
#print [xval, lower, mid, upper]
return [xval, lower, mid, upper]
def make_sequence(total):
start = [1,2,3,4,5,10]
while True:
start += [x*10 for x in start[-5:]]
if start[-1] > total: break
return [x for x in start if x < total]+[total]
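# Illustrative: make_sequence(500) -> [1, 2, 3, 4, 5, 10, 20, 30, 40, 50, 100, 200, 300, 400, 500]
# i.e. roughly log-spaced sampling depths that always end at the full count.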
def do_inputs():
parser = argparse.ArgumentParser(description="Take a locus bed file (bed) followed by locus id followed by read count. Generate a rarefraction.",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('input',help="Use - for STDIN")
parser.add_argument('-o','--output',help="Write output here")
parser.add_argument('--threads',type=int,default=cpu_count(),help="INT threads to use")
parser.add_argument('--original_read_count',type=int,help="INT allows accounting for unmapped reads not included here.")
parser.add_argument('--samples_per_xval',type=int,default=1000,help="Sample this many times")
parser.add_argument('--min_depth',type=int,default=1,help="Require at least this depth to count as a hit.")
parser.add_argument('--full',action='store_true',help="Return full-length matches only")
group1 = parser.add_mutually_exclusive_group(required=True)
group1.add_argument('--gene',action='store_true',help="Gene based output")
group1.add_argument('--transcript',action='store_true',help="Transcript based output")
args = parser.parse_args()
return args
def external_cmd(cmd):
cache_argv = sys.argv
sys.argv = cmd
args = do_inputs()
main(args)
sys.argv = cache_argv
if __name__=="__main__":
args = do_inputs()
main(args)
|
AlignQC
|
/AlignQC-2.0.5.tar.gz/AlignQC-2.0.5/alignqc/gpd_annotation_to_rarefraction.py
|
gpd_annotation_to_rarefraction.py
|
"""Calculate non-context based errors"""
import argparse, sys, os, time, gc, gzip
from shutil import rmtree
from tempfile import mkdtemp, gettempdir
from subprocess import call
from seqtools.format.sam.bam.files import BAMFile
from seqtools.errors import ErrorProfileFactory
from seqtools.format.fasta import FASTAData
from seqtools.format.sam.bam.bamindex import BAMIndexRandomAccessPrimary as BIRAP
def main(args):
sys.stderr.write("Reading our reference Fasta\n")
if args.reference[-3:] == '.gz':
ref = FASTAData(gzip.open(args.reference).read())
else:
ref = FASTAData(open(args.reference).read())
sys.stderr.write("Finished reading our reference Fasta\n")
bf = BAMFile(args.input,BAMFile.Options(reference=ref))
bind = None
epf = ErrorProfileFactory()
if args.random:
sys.stderr.write("Reading index\n")
if args.input_index:
bind = BIRAP(index_file=args.input_index,alignment_file=args.input)
else:
bind = BIRAP(index_file=args.input+'.bgi',alignment_file=args.input)
z = 0
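# Sample random alignments until roughly args.max_length aligned bases have been
# accumulated in the error profile (progress is checked every 100 alignments).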
while True:
coord = bind.get_random_coord()
if not coord: continue
e = bf.fetch_by_coord(coord)
if not e.is_aligned(): continue
epf.add_alignment(e)
z+=1
#print z
if z %100==1:
con = epf.get_alignment_errors().alignment_length
if args.max_length <= con: break
sys.stderr.write(str(con)+"/"+str(args.max_length)+" bases from "+str(z)+" alignments\r")
sys.stderr.write("\n")
else:
z = 0
for e in bf:
if e.is_aligned():
epf.add_alignment(e)
z+=1
#print z
if z %100==1:
con = epf.get_alignment_errors().alignment_length
if args.max_length <= con: break
sys.stderr.write(str(con)+"/"+str(args.max_length)+" bases from "+str(z)+" alignments\r")
sys.stderr.write("\n")
of = open(args.tempdir+'/report.txt','w')
of.write(epf.get_alignment_errors().get_report())
of.close()
for ofile in args.output:
cmd = [args.rscript_path,
os.path.dirname(os.path.realpath(__file__))+'/plot_alignment_errors.r',
args.tempdir+'/report.txt',ofile]
if args.scale:
cmd += [str(x) for x in args.scale]
sys.stderr.write(" ".join(cmd)+"\n")
call(cmd)
if args.output_raw:
of = open(args.output_raw,'w')
with open(args.tempdir+"/report.txt") as inf:
for line in inf:
of.write(line)
of.close()
if args.output_stats:
of = open(args.output_stats,'w')
of.write(epf.get_alignment_errors().get_stats())
of.close()
sys.stderr.write("finished\n")
if bind:
bind.destroy()
bf = None
epf.close()
time.sleep(5)
gc.collect()
time.sleep(5)
# Temporary working directory step 3 of 3 - Cleanup
if not args.specific_tempdir:
rmtree(args.tempdir)
def do_inputs():
# Setup command line inputs
parser=argparse.ArgumentParser(description="",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('input',help="BAMFILE input")
parser.add_argument('--input_index',help="BAMFILE index")
parser.add_argument('-r','--reference',help="Fasta reference file",required=True)
parser.add_argument('--scale',type=float,nargs=6,help="<ins_min> <ins_max> <mismatch_min> <mismatch_max> <del_min> <del_max>")
parser.add_argument('-o','--output',nargs='+',help="OUTPUTFILE for pdf plot",required=True)
parser.add_argument('--output_stats',help="OUTPUTFILE for a stats report")
parser.add_argument('--output_raw',help="OUTPUTFILE for the raw data")
parser.add_argument('--random',action='store_true',help="randomly select alignments, requires indexed file")
parser.add_argument('--max_length',type=int,default=100000,help="maximum number of alignment bases to use")
# Temporary working directory step 1 of 3 - Definition
group = parser.add_mutually_exclusive_group()
group.add_argument('--tempdir',default=gettempdir(),help="The temporary directory is made and destroyed here.")
group.add_argument('--specific_tempdir',help="This temporary directory will be used, but will remain after executing.")
parser.add_argument('--rscript_path',default='Rscript',help="Rscript path")
args = parser.parse_args()
# Temporary working directory step 2 of 3 - Creation
setup_tempdir(args)
return args
def setup_tempdir(args):
if args.specific_tempdir:
if not os.path.exists(args.specific_tempdir):
os.makedirs(args.specific_tempdir.rstrip('/'))
args.tempdir = args.specific_tempdir.rstrip('/')
if not os.path.exists(args.specific_tempdir.rstrip('/')):
sys.stderr.write("ERROR: Problem creating temporary directory\n")
sys.exit()
else:
args.tempdir = mkdtemp(prefix="weirathe.",dir=args.tempdir.rstrip('/'))
if not os.path.exists(args.tempdir.rstrip('/')):
sys.stderr.write("ERROR: Problem creating temporary directory\n")
sys.exit()
if not os.path.exists(args.tempdir):
sys.stderr.write("ERROR: Problem creating temporary directory\n")
sys.exit()
return
def external_cmd(cmd):
cache_argv = sys.argv
sys.argv = cmd
args = do_inputs()
main(args)
sys.argv = cache_argv
if __name__=="__main__":
#do our inputs
args = do_inputs()
main(args)
|
AlignQC
|
/AlignQC-2.0.5.tar.gz/AlignQC-2.0.5/alignqc/bam_to_alignment_error_plot.py
|
bam_to_alignment_error_plot.py
|
import sys, argparse, gzip, re, random, collections, inspect, os
from multiprocessing import Pool, cpu_count
from seqtools.statistics import average, median
def main(args):
inf = sys.stdin
if args.input != '-':
if re.search(r'\.gz$',args.input):
inf = gzip.open(args.input)
else:
inf = open(args.input)
of = sys.stdout
if args.output:
of = open(args.output,'w')
vals = []
for line in inf:
f = line.rstrip().split("\t")
lid = int(f[3])
txcnt = int(f[4])
vals += [lid]*txcnt
if args.original_read_count:
if len(vals) > args.original_read_count:
sys.stderr.write("ERROR: cant have a read count greater than the original read count\n")
sys.exit()
vals += [None]*(args.original_read_count-len(vals))
# vals now holds an array to select from
qsvals = []
if args.threads > 1:
p = Pool(processes=args.threads)
for i in range(0,args.samples_per_xval):
if args.threads > 1:
qsvals.append(p.apply_async(get_rand,args=(vals,)))
else:
qsvals.append(Queue(get_rand(vals)))
if args.threads > 1:
p.close()
p.join()
svals = [x.get() for x in qsvals]
total = len(vals)
xvals = make_sequence(total)
second_threads = 1
if second_threads > 1:
p = Pool(processes=args.threads)
results = []
for xval in xvals:
if second_threads > 1:
r = p.apply_async(analyze_x,args=(xval,svals,args))
results.append(r)
else:
r = Queue(analyze_x(xval,svals,args))
results.append(r)
if second_threads > 1:
p.close()
p.join()
for r in [x.get() for x in results]:
of.write("\t".join([str(x) for x in r])+"\n")
inf.close()
of.close()
def get_rand(vals):
random.shuffle(vals)
return vals[:]
class Queue:
def __init__(self,val):
self.val = val
def get(self):
return self.val
def analyze_x(xval,svals,args):
s = args.samples_per_xval
#cnts = sorted([len([k for k in collections.Counter([z for z in [random.choice(vals) for y in range(0,xval)] if z]).values() if k >= args.min_depth]) for j in range(0,s)])
cnts = []
for i in range(0,s):
vals = svals[i][0:xval]
res = len([y for y in collections.Counter([x for x in vals if x]).values() if y >= args.min_depth])
cnts.append(res)
cnts = sorted(cnts)
lower = float(cnts[int(len(cnts)*0.05)])
mid = median(cnts)
upper = float(cnts[int(len(cnts)*0.95)])
return [xval, lower, mid, upper]
def make_sequence(total):
start = [1,2,3,4,5,10]
while True:
start += [x*10 for x in start[-5:]]
if start[-1] > total: break
return [x for x in start if x < total]+[total]
def do_inputs():
parser = argparse.ArgumentParser(description="Take a locus bed file (bed) followed by locus id followed by read count. Generate a rarefraction.",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('input',help="Use - for STDIN")
parser.add_argument('-o','--output',help="Write output here")
parser.add_argument('--threads',type=int,default=cpu_count(),help="INT threads to use")
parser.add_argument('--original_read_count',type=int,help="INT allows accounting for unmapped reads not included here.")
parser.add_argument('--samples_per_xval',type=int,default=1000,help="Sample this many times")
parser.add_argument('--min_depth',type=int,default=1,help="Require at least this depth to count as a hit.")
args = parser.parse_args()
return args
def external_cmd(cmd):
cache_argv = sys.argv
sys.argv = cmd
args = do_inputs()
main(args)
sys.argv = cache_argv
if __name__=="__main__":
args = do_inputs()
main(args)
|
AlignQC
|
/AlignQC-2.0.5.tar.gz/AlignQC-2.0.5/alignqc/locus_bed_to_rarefraction.py
|
locus_bed_to_rarefraction.py
|
import argparse, sys, os, gzip, re
from shutil import rmtree, copy
from multiprocessing import cpu_count
from tempfile import mkdtemp, gettempdir
from subprocess import call
def main(args):
udir = os.path.dirname(os.path.realpath(__file__))
#sys.stderr.write("Making text report\n")
sys.stderr.write("making plot\n")
for ofile in args.output:
cmd = [args.rscript_path,udir+'/plot_gapped_alignment_statistics.r',
args.input,ofile]
sys.stderr.write(" ".join(cmd)+"\n")
call(cmd)
if args.output_stats:
do_stats(args)
sys.stderr.write("Finished.\n")
# Temporary working directory step 3 of 3 - Cleanup
if not args.specific_tempdir:
rmtree(args.tempdir)
def do_stats(args):
total_reads = 0
unaligned_reads = 0
aligned_reads = 0
single_align_reads = 0
gapped_align_reads = 0
chimera_align_reads = 0
selfchimera_align_reads = 0
transchimera_align_reads = 0
total_bases = 0
unaligned_bases = 0
aligned_bases = 0
single_align_bases = 0
gapped_align_bases = 0
chimera_align_bases = 0
selfchimera_align_bases = 0
transchimera_align_bases = 0
inf = None
if re.search(r'\.gz',args.input):
inf = gzip.open(args.input)
else:
inf = open(args.input)
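# lengths.txt rows: read name, alignment type, bases covered by the best single
# alignment, bases covered when gapped/chimeric pieces are combined, and read length
# (interpretation inferred from how 'single', 'both' and 'rlen' are used below).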
for line in inf:
(name, type, single, both, rlen) = line.rstrip().split("\t")
single = int(single)
both = int(both)
rlen = int(rlen)
total_reads += 1
if type=="unaligned": unaligned_reads += 1
else: aligned_reads += 1
if type=="original": single_align_reads +=1
if type=="gapped":
gapped_align_reads += 1
gapped_align_bases += both-single
if type=="chimera":
transchimera_align_reads += 1
transchimera_align_bases += both-single
if type=="self-chimera" or type=="self-chimera-atypical":
selfchimera_align_reads += 1
selfchimera_align_bases += both-single
if re.search('chimera',type):
chimera_align_reads +=1
chimera_align_bases += both-single
if type!="unaligned":
total_bases += rlen
unaligned_bases += (rlen-both)
aligned_bases += both
single_align_bases += single
of = open(args.output_stats,'w')
of.write("TOTAL_READS\t"+str(total_reads)+"\n")
of.write("UNALIGNED_READS\t"+str(unaligned_reads)+"\n")
of.write("ALIGNED_READS\t"+str(aligned_reads)+"\n")
of.write("SINGLE_ALIGN_READS\t"+str(single_align_reads)+"\n")
of.write("GAPPED_ALIGN_READS\t"+str(gapped_align_reads)+"\n")
of.write("CHIMERA_ALIGN_READS\t"+str(chimera_align_reads)+"\n")
of.write("TRANSCHIMERA_ALIGN_READS\t"+str(transchimera_align_reads)+"\n")
of.write("SELFCHIMERA_ALIGN_READS\t"+str(selfchimera_align_reads)+"\n")
of.write("TOTAL_BASES\t"+str(total_bases)+"\n")
of.write("UNALIGNED_BASES\t"+str(unaligned_bases)+"\n")
of.write("ALIGNED_BASES\t"+str(aligned_bases)+"\n")
of.write("SINGLE_ALIGN_BASES\t"+str(single_align_bases)+"\n")
of.write("GAPPED_ALIGN_BASES\t"+str(gapped_align_bases)+"\n")
of.write("CHIMERA_ALIGN_BASES\t"+str(chimera_align_bases)+"\n")
of.write("TRANSCHIMERA_ALIGN_BASES\t"+str(transchimera_align_bases)+"\n")
of.write("SELFCHIMERA_ALIGN_BASES\t"+str(selfchimera_align_bases)+"\n")
of.close()
inf.close()
def do_inputs():
# Setup command line inputs
parser=argparse.ArgumentParser(description="",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('input',help="INPUT lengths.txt file")
parser.add_argument('-o','--output',nargs='+',help="OUTPUT FILE can put multiple")
parser.add_argument('--output_stats',help="Save some summary statistics")
parser.add_argument('--rscript_path',default='Rscript',help="Path of Rscript")
# Temporary working directory step 1 of 3 - Definition
group = parser.add_mutually_exclusive_group()
group.add_argument('--tempdir',default=gettempdir(),help="The temporary directory is made and destroyed here.")
group.add_argument('--specific_tempdir',help="This temporary directory will be used, but will remain after executing.")
args = parser.parse_args()
# Temporary working directory step 2 of 3 - Creation
setup_tempdir(args)
return args
def setup_tempdir(args):
if args.specific_tempdir:
if not os.path.exists(args.specific_tempdir):
os.makedirs(args.specific_tempdir.rstrip('/'))
args.tempdir = args.specific_tempdir.rstrip('/')
if not os.path.exists(args.specific_tempdir.rstrip('/')):
sys.stderr.write("ERROR: Problem creating temporary directory\n")
sys.exit()
else:
args.tempdir = mkdtemp(prefix="weirathe.",dir=args.tempdir.rstrip('/'))
if not os.path.exists(args.tempdir.rstrip('/')):
sys.stderr.write("ERROR: Problem creating temporary directory\n")
sys.exit()
if not os.path.exists(args.tempdir):
sys.stderr.write("ERROR: Problem creating temporary directory\n")
sys.exit()
return
def external_cmd(cmd):
cache_argv = sys.argv
sys.argv = cmd
#do our inputs
args = do_inputs()
main(args)
sys.argv = cache_argv
if __name__=="__main__":
#do our inputs
args = do_inputs()
main(args)
|
AlignQC
|
/AlignQC-2.0.5.tar.gz/AlignQC-2.0.5/alignqc/make_alignment_plot.py
|
make_alignment_plot.py
|
"""This script will call most of the individual modules analyzing the data"""
import argparse, sys, os, time, re, gzip, locale, inspect
from subprocess import Popen, PIPE
# BAM imports
import bam_preprocess
import traverse_preprocessed
import bam_to_chr_lengths
import get_platform_report
import gpd_loci_analysis
import gpd_to_exon_distro
import make_alignment_plot
import depth_to_coverage_report
import locus_bed_to_rarefraction
# BAM + reference imports
import bam_to_context_error_plot
import bam_to_alignment_error_plot
# BAM + annotation
import annotate_from_genomic_features
import get_depth_subset
import annotated_length_analysis
import gpd_annotation_to_rarefraction
import annotated_read_bias_analysis
import gpd_to_junction_variance
# BAM
from seqtools.cli.utilities.gpd_to_bed_depth import external_cmd as gpd_to_bed_depth
from seqtools.cli.utilities.bed_depth_to_stratified_coverage import external_cmd as bed_depth_to_stratified_coverage
from seqtools.cli.utilities.gpd_to_UCSC_bed12 import external_cmd as gpd_to_UCSC_bed12
# BAM + annotation
from seqtools.cli.utilities.gpd_annotate import external_cmd as gpd_annotate
# read count
rcnt = -1
tlog = None
def main(args):
if not os.path.exists(args.tempdir+'/plots'):
os.makedirs(args.tempdir+'/plots')
if not os.path.exists(args.tempdir+'/data'):
os.makedirs(args.tempdir+'/data')
if not os.path.exists(args.tempdir+'/logs'):
os.makedirs(args.tempdir+'/logs')
if not os.path.exists(args.tempdir+'/temp'):
os.makedirs(args.tempdir+'/temp')
global tlog
tlog = TimeLog(args.tempdir+'/logs/time.log')
## Extract data that can be realized from the bam
make_data_bam(args)
## Extract data that can be realized from the bam and reference
if args.genome:
make_data_bam_reference(args)
## Extract data that can be realized from bam and reference annotation
if args.gpd:
make_data_bam_annotation(args)
# Write params file
of = open(args.tempdir+'/data/params.txt','w')
for arg in vars(args):
of.write(arg+"\t"+str(getattr(args,arg))+"\n")
of.close()
class TimeLog:
def __init__(self,fname):
self.fh = open(fname,'w')
self.recording = False
self.st = time.time()
def start(self,msg):
self.st = time.time()
self.fh.write(msg+"\n")
self.fh.flush()
def write(self,msg):
self.fh.write('$ '+msg+"\n")
def stop(self):
self.fh.write("--- "+str(time.time()-self.st)+ " seconds ---\n")
self.fh.flush()
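# Sketch of the TimeLog usage pattern followed by the step functions below (illustrative):
#   tlog = TimeLog(args.tempdir+'/logs/time.log')
#   tlog.start("some step")     # reset the timer and record the step name
#   tlog.write(" ".join(cmd))   # record the command that was run, prefixed with '$ '
#   tlog.stop()                 # append "--- <elapsed> seconds ---" for the step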
def make_data_bam(args):
global tlog
# Get the data necessary for making tables and reports
tlog.start("traverse bam and preprocess")
# 1. Traverse bam and store alignment mappings ordered by query name
udir = os.path.dirname(os.path.realpath(__file__))
cmd = [udir+'/bam_preprocess.py',args.input,'--minimum_intron_size',
str(args.min_intron_size),'-o',args.tempdir+'/temp/alndata.txt.gz',
'--threads',str(args.threads),'--specific_tempdir',
args.tempdir+'/temp/']
sys.stderr.write("Creating initial alignment mapping data\n")
sys.stderr.write(" ".join(cmd)+"\n")
bam_preprocess.external_cmd(cmd)
tlog.write(" ".join(cmd))
tlog.stop()
tlog.start("traverse preprocessed")
# 2. Describe the alignments by traversing the previously made file
cmd = [udir+'/traverse_preprocessed.py',args.tempdir+'/temp/alndata.txt.gz',
'-o',args.tempdir+'/data/','--specific_tempdir',args.tempdir+'/temp/',
'--threads',str(args.threads)]
if args.min_aligned_bases:
cmd += ['--min_aligned_bases',str(args.min_aligned_bases)]
if args.max_query_overlap:
cmd += ['--max_query_overlap',str(args.max_query_overlap)]
if args.max_target_overlap:
cmd += ['--max_target_overlap',str(args.max_target_overlap)]
if args.max_query_gap:
cmd += ['--max_query_gap',str(args.max_query_gap)]
if args.max_target_gap:
cmd += ['--max_target_gap',str(args.max_target_gap)]
if args.required_fractional_improvement:
cmd += ['--required_fractional_improvement',
str(args.required_fractional_improvement)]
sys.stderr.write("Traverse bam for alignment analysis\n")
sys.stderr.write(" ".join(cmd)+"\n")
traverse_preprocessed.external_cmd(cmd)
tlog.write(" ".join(cmd))
tlog.stop()
tlog.start("get chromosome lengths from bam")
# 3. get chromosome lengths from bam
cmd = [udir+'/bam_to_chr_lengths.py',args.input,
'-o',args.tempdir+'/data/chrlens.txt']
sys.stderr.write("Writing chromosome lengths from header\n")
sys.stderr.write(" ".join(cmd)+"\n")
bam_to_chr_lengths.external_cmd(cmd)
tlog.write(" ".join(cmd))
tlog.stop()
tlog.start("look for platform specific read names")
# Now we can find any known reads
# 4. Go through read names to find if there are platform-specific read names present
sys.stderr.write("Can we find any known read types\n")
cmd = [udir+'/get_platform_report.py',args.tempdir+'/data/lengths.txt.gz',
args.tempdir+'/data/special_report']
sys.stderr.write(" ".join(cmd)+"\n")
get_platform_report.external_cmd(cmd)
tlog.write(" ".join(cmd))
tlog.stop()
tlog.start("check for pacbio in case we make a special graph")
# Check for pacbio to see if we need to make a graph for it
do_pb = False
with open(args.tempdir+'/data/special_report') as inf:
for line in inf:
f = line.rstrip().split("\t")
if f[0]=='PB':
do_pb = True
break
if do_pb:
cmd = [args.rscript_path,udir+'/plot_pacbio.r',
args.tempdir+'/data/special_report.pacbio',
args.tempdir+'/plots/pacbio.png']
sys.stderr.write(" ".join(cmd)+"\n")
mycall(cmd,args.tempdir+'/logs/special_report_pacbio_png')
cmd = [args.rscript_path,udir+'/plot_pacbio.r',
args.tempdir+'/data/special_report.pacbio',
args.tempdir+'/plots/pacbio.pdf']
sys.stderr.write(" ".join(cmd)+"\n")
mycall(cmd,args.tempdir+'/logs/special_report_pacbio_pdf')
tlog.write(" ".join(cmd))
tlog.stop()
tlog.start("get a depth for our best alignments")
# 5. Go through the genepred file and get a depth bed for our best alignments
sys.stderr.write("Go through genepred best alignments and make a bed depth file\n")
cmd = ["gpd_to_bed_depth.py",args.tempdir+'/data/best.sorted.gpd.gz',
'-o',args.tempdir+'/data/depth.sorted.bed.gz',"--threads",
str(args.threads)]
sys.stderr.write("Generate the depth bed for the mapped reads\n")
sys.stderr.write(" ".join(cmd)+"\n")
gpd_to_bed_depth(cmd)
sys.stderr.write("Stratify the depth to make it plot quicker and cleaner\n")
cmd = ["bed_depth_to_stratified_coverage.py",
args.tempdir+'/data/depth.sorted.bed.gz','-l',
args.tempdir+"/data/chrlens.txt",
'-o',args.tempdir+'/temp/depth.coverage-strata.sorted.bed.gz',
'--output_key',args.tempdir+'/temp/coverage-strata.key',
'--minimum_coverage','100000']
bed_depth_to_stratified_coverage(cmd)
global rcnt #read count
rcnt = 0
tinf = gzip.open(args.tempdir+'/data/lengths.txt.gz')
for line in tinf: rcnt += 1
tinf.close()
tlog.write(" ".join(cmd))
tlog.stop()
# For now reporting loci will be optional until it can be tested and optimized.
if args.do_loci:
tlog.start("do locus search")
# 6. Go through the best alignments and look for loci
sys.stderr.write("Approximate loci and mapped read distributions among them.\n")
cmd = [udir+"/gpd_loci_analysis.py",
args.tempdir+'/data/best.sorted.gpd.gz','-o',
args.tempdir+'/data/loci-all.bed.gz','--output_loci',
args.tempdir+'/data/loci.bed.gz','--downsample',
str(args.locus_downsample),'--threads',str(args.threads)]
if args.min_depth:
cmd += ['--min_depth',str(args.min_depth)]
    if args.min_coverage_at_depth:
cmd += ['--min_coverage_at_depth',str(args.min_coverage_at_depth)]
if args.min_exon_count:
cmd += ['--min_exon_count',str(args.min_exon_count)]
sys.stderr.write(" ".join(cmd)+"\n")
gpd_loci_analysis.external_cmd(cmd)
cmd = [udir+"/locus_bed_to_rarefraction.py",
args.tempdir+'/data/loci.bed.gz','-o',
args.tempdir+'/data/locus_rarefraction.txt','--threads',
str(args.threads),'--original_read_count',str(rcnt)]
sys.stderr.write("Make rarefraction curve\n")
sys.stderr.write(" ".join(cmd)+"\n")
locus_bed_to_rarefraction.external_cmd(cmd)
sys.stderr.write("Make locus rarefraction plot\n")
for ext in ['png','pdf']:
cmd = [args.rscript_path,udir+'/plot_annotation_rarefractions.r',
args.tempdir+'/plots/locus_rarefraction.'+ext,'locus',
args.tempdir+'/data/locus_rarefraction.txt','#FF000088']
sys.stderr.write(" ".join(cmd)+"\n")
mycall(cmd,args.tempdir+'/logs/plot_locus_rarefraction_'+ext)
tlog.write(" ".join(cmd))
tlog.stop()
tlog.start("get ready for alignment plot")
# 7. Alignment plot preparation
sys.stderr.write("Get ready for alignment plot\n")
cmd = [udir+'/make_alignment_plot.py',args.tempdir+'/data/lengths.txt.gz',
'--rscript_path',args.rscript_path,'--output_stats',
args.tempdir+'/data/alignment_stats.txt','--output',
args.tempdir+'/plots/alignments.png',
args.tempdir+'/plots/alignments.pdf']
sys.stderr.write("Make alignment plots\n")
sys.stderr.write(" ".join(cmd)+"\n")
make_alignment_plot.external_cmd(cmd)
tlog.write(" ".join(cmd))
tlog.stop()
tlog.start("make depth reports")
# 8. Make depth reports
sys.stderr.write("Making depth reports\n")
cmd = [udir+'/depth_to_coverage_report.py',
args.tempdir+'/data/depth.sorted.bed.gz',
args.tempdir+'/data/chrlens.txt','-o',args.tempdir+'/data']
sys.stderr.write(" ".join(cmd)+"\n")
depth_to_coverage_report.external_cmd(cmd)
tlog.write(" ".join(cmd))
tlog.stop()
tlog.start("make coverage plots")
# do the depth graphs
sys.stderr.write("Making coverage plots\n")
cmd = [args.rscript_path,udir+'/plot_chr_depth.r',
args.tempdir+'/data/line_plot_table.txt.gz',
args.tempdir+'/data/total_distro_table.txt.gz',
args.tempdir+'/data/chr_distro_table.txt.gz',
args.tempdir+'/plots/covgraph.png']
sys.stderr.write(" ".join(cmd)+"\n")
mycall(cmd,args.tempdir+'/logs/covgraph_png')
cmd = [args.rscript_path,udir+'/plot_chr_depth.r',
args.tempdir+'/data/line_plot_table.txt.gz',
args.tempdir+'/data/total_distro_table.txt.gz',
args.tempdir+'/data/chr_distro_table.txt.gz',
args.tempdir+'/plots/covgraph.pdf']
sys.stderr.write(" ".join(cmd)+"\n")
mycall(cmd,args.tempdir+'/logs/covgraph_pdf')
tlog.write(" ".join(cmd))
tlog.stop()
tlog.start("make chr depth plots")
# do depth plots
sys.stderr.write("Making chr depth plots\n")
cmd = [args.rscript_path,udir+'/plot_depthmap.r',
args.tempdir+'/temp/depth.coverage-strata.sorted.bed.gz',
args.tempdir+'/data/chrlens.txt',
args.tempdir+'/temp/coverage-strata.key',
args.tempdir+'/plots/perchrdepth.png']
sys.stderr.write(" ".join(cmd)+"\n")
mycall(cmd,args.tempdir+'/logs/perchr_depth_png')
cmd = [args.rscript_path,udir+'/plot_depthmap.r',
args.tempdir+'/temp/depth.coverage-strata.sorted.bed.gz',
args.tempdir+'/data/chrlens.txt',
args.tempdir+'/temp/coverage-strata.key',
args.tempdir+'/plots/perchrdepth.pdf']
sys.stderr.write(" ".join(cmd)+"\n")
mycall(cmd,args.tempdir+'/logs/perchr_depth_pdf')
tlog.write(" ".join(cmd))
tlog.stop()
tlog.start("get the exon size distribution")
#Get the exon distribution
sys.stderr.write("Get the exon distributions\n")
cmd = [udir+'/gpd_to_exon_distro.py',args.tempdir+'/data/best.sorted.gpd.gz',
'-o',args.tempdir+'/data/exon_size_distro.txt.gz','--threads',
str(args.threads)]
sys.stderr.write(" ".join(cmd)+"\n")
gpd_to_exon_distro.external_cmd(cmd)
tlog.write(" ".join(cmd))
tlog.stop()
tlog.start("plot exon distro")
cmd = [args.rscript_path,udir+'/plot_exon_distro.r',
args.tempdir+'/data/exon_size_distro.txt.gz',
args.tempdir+'/plots/exon_size_distro.png']
sys.stderr.write(" ".join(cmd)+"\n")
mycall(cmd,args.tempdir+'/logs/exon_size_distro_png')
cmd = [args.rscript_path,udir+'/plot_exon_distro.r',
args.tempdir+'/data/exon_size_distro.txt.gz',
args.tempdir+'/plots/exon_size_distro.pdf']
sys.stderr.write(" ".join(cmd)+"\n")
mycall(cmd,args.tempdir+'/logs/exon_size_distro_pdf')
tlog.write(" ".join(cmd))
tlog.stop()
tlog.start("make bed file")
# Make a UCSC compatible bed file
sys.stderr.write("Make a UCSC genome browser compatible bed file\n")
cmd = ['gpd_to_UCSC_bed12.py','--headername',args.input+':best',
args.tempdir+'/data/best.sorted.gpd.gz','-o',
args.tempdir+'/data/best.sorted.bed.gz','--color','red']
sys.stderr.write(" ".join(cmd)+"\n")
gpd_to_UCSC_bed12(cmd)
tlog.write(" ".join(cmd))
tlog.stop()
tlog.start("make bed file")
cmd = ['gpd_to_UCSC_bed12.py','--headername',args.input+':trans-chimera',
args.tempdir+'/data/chimera.gpd.gz','-o',
args.tempdir+'/data/chimera.bed.gz','--color','blue']
sys.stderr.write(" ".join(cmd)+"\n")
gpd_to_UCSC_bed12(cmd)
tlog.write(" ".join(cmd))
tlog.stop()
tlog.start("make bed file")
cmd = ['gpd_to_UCSC_bed12.py','--headername',args.input+':gapped',
args.tempdir+'/data/gapped.gpd.gz','-o',
args.tempdir+'/data/gapped.bed.gz','--color','orange']
sys.stderr.write(" ".join(cmd)+"\n")
gpd_to_UCSC_bed12(cmd)
tlog.write(" ".join(cmd))
  tlog.stop()
  tlog.start("make bed file")
  cmd = ['gpd_to_UCSC_bed12.py','--headername',args.input+':self-chimera',
args.tempdir+'/data/technical_chimeras.gpd.gz','-o',
args.tempdir+'/data/technical_chimeras.bed.gz','--color','green']
sys.stderr.write(" ".join(cmd)+"\n")
gpd_to_UCSC_bed12(cmd)
tlog.write(" ".join(cmd))
tlog.stop()
tlog.start("make bed file")
cmd = ['gpd_to_UCSC_bed12.py','--headername',args.input+':self-atypical',
args.tempdir+'/data/technical_atypical_chimeras.gpd.gz','-o',
args.tempdir+'/data/technical_atypical_chimeras.bed.gz','--color',
'purple']
sys.stderr.write(" ".join(cmd)+"\n")
gpd_to_UCSC_bed12(cmd)
tlog.write(" ".join(cmd))
tlog.stop()
def make_data_bam_reference(args):
global tlog
# make the context error plots
udir = os.path.dirname(os.path.realpath(__file__))
# Find the index file that was generated earlier.
indfile = None
if os.path.exists(args.tempdir+'/temp/myindex.bgi'):
indfile = args.tempdir+'/temp/myindex.bgi'
tlog.start("Get context error")
# 1. Context error
cmd = [udir+'/bam_to_context_error_plot.py',args.input,'-r',args.genome,
'--target','--output_raw',args.tempdir+'/data/context_error_data.txt',
'-o',args.tempdir+'/plots/context_plot.png',
args.tempdir+'/plots/context_plot.pdf','--rscript_path',
args.rscript_path,'--random','--specific_tempdir',
args.tempdir+'/temp']
if args.context_error_scale:
cmd += ['--scale']+[str(x) for x in args.context_error_scale]
if args.context_error_stopping_point:
cmd += ['--stopping_point',str(args.context_error_stopping_point)]
if indfile:
cmd += ['--input_index',indfile]
sys.stderr.write("Making context plot\n")
sys.stderr.write(" ".join(cmd)+"\n")
bam_to_context_error_plot.external_cmd(cmd)
tlog.write(" ".join(cmd))
tlog.stop()
time.sleep(3)
#gc.collect()
tlog.start("alignment based error")
# 2. Alignment overall error
cmd = [udir+'/bam_to_alignment_error_plot.py',args.input,'-r',
args.genome,'--output_stats',args.tempdir+'/data/error_stats.txt',
'--output_raw',args.tempdir+'/data/error_data.txt','-o',
args.tempdir+'/plots/alignment_error_plot.png',
args.tempdir+'/plots/alignment_error_plot.pdf','--rscript_path',
args.rscript_path]
if args.alignment_error_scale:
cmd += ['--scale']+[str(x) for x in args.alignment_error_scale]
if args.alignment_error_max_length:
cmd += ['--max_length',str(args.alignment_error_max_length)]
if indfile:
cmd += ['--input_index',indfile]
cmd += ['--random']
cmd += ['--specific_tempdir',args.tempdir+'/temp']
sys.stderr.write("Making alignment error plot\n")
sys.stderr.write(" ".join(cmd)+"\n")
bam_to_alignment_error_plot.external_cmd(cmd)
tlog.write(" ".join(cmd))
tlog.stop()
time.sleep(3)
#gc.collect()
return
def make_data_bam_annotation(args):
global tlog
udir = os.path.dirname(os.path.realpath(__file__))
tlog.start("identify genomic features exon intron integenic")
# 1. Use annotations to identify genomic features (Exon, Intron, Intergenic)
# And assign membership to reads
# Stores the feature bed files in a beds folder
cmd = [udir+'/annotate_from_genomic_features.py','--output_beds',
args.tempdir+'/data/beds',args.tempdir+'/data/best.sorted.gpd.gz',
args.gpd,args.tempdir+'/data/chrlens.txt','-o',
args.tempdir+'/data/read_genomic_features.txt.gz','--threads',
str(args.threads)]
sys.stderr.write("Finding genomic features and assigning reads membership\n")
sys.stderr.write(" ".join(cmd)+"\n")
tlog.write(" ".join(cmd))
annotate_from_genomic_features.external_cmd(cmd)
tlog.stop()
time.sleep(3)
tlog.start("get per-exon depth")
# 2. Get depth distributions for each feature subset
# now get depth subsets
sys.stderr.write("get depths of features\n")
cmd = [udir+'/get_depth_subset.py',args.tempdir+'/data/depth.sorted.bed.gz',
args.tempdir+'/data/beds/exon.bed','-o',
args.tempdir+'/data/exondepth.bed.gz']
sys.stderr.write(" ".join(cmd)+"\n")
get_depth_subset.external_cmd(cmd)
tlog.write(" ".join(cmd))
tlog.stop()
tlog.start("get per-intron subset")
cmd = [udir+'/get_depth_subset.py',args.tempdir+'/data/depth.sorted.bed.gz',
args.tempdir+'/data/beds/intron.bed','-o',
args.tempdir+'/data/introndepth.bed.gz']
sys.stderr.write(" ".join(cmd)+"\n")
get_depth_subset.external_cmd(cmd)
tlog.write(" ".join(cmd))
tlog.stop()
tlog.start("get per intergenic depth")
cmd = [udir+'/get_depth_subset.py',args.tempdir+'/data/depth.sorted.bed.gz',
args.tempdir+'/data/beds/intergenic.bed','-o',
args.tempdir+'/data/intergenicdepth.bed.gz']
sys.stderr.write(" ".join(cmd)+"\n")
get_depth_subset.external_cmd(cmd)
tlog.write(" ".join(cmd))
tlog.stop()
tlog.start("plot feature depth png")
# 3. Plot the feature depth
cmd = [args.rscript_path,udir+'/plot_feature_depth.r',
args.tempdir+'/data/depth.sorted.bed.gz',
args.tempdir+'/data/exondepth.bed.gz',
args.tempdir+'/data/introndepth.bed.gz',
args.tempdir+'/data/intergenicdepth.bed.gz',
args.tempdir+'/plots/feature_depth.png']
sys.stderr.write(" ".join(cmd)+"\n")
mycall(cmd,args.tempdir+'/logs/featuredepth_png')
tlog.write(" ".join(cmd))
tlog.stop()
tlog.start("plot feature depth pdf")
cmd = [args.rscript_path,udir+'/plot_feature_depth.r',
args.tempdir+'/data/depth.sorted.bed.gz',
args.tempdir+'/data/exondepth.bed.gz',
args.tempdir+'/data/introndepth.bed.gz',
args.tempdir+'/data/intergenicdepth.bed.gz',
args.tempdir+'/plots/feature_depth.pdf']
sys.stderr.write(" ".join(cmd)+"\n")
mycall(cmd,args.tempdir+'/logs/featuredepth_pdf')
tlog.write(" ".join(cmd))
tlog.stop()
tlog.start("generate plots of which reads correspont to which features png")
# 4. Generate plots from reads assigend to features
sys.stderr.write("Plot read assignment to genomic features\n")
cmd = [args.rscript_path,udir+'/plot_annotated_features.r',
args.tempdir+'/data/read_genomic_features.txt.gz',
args.tempdir+'/plots/read_genomic_features.png']
sys.stderr.write(" ".join(cmd)+"\n")
mycall(cmd,args.tempdir+'/logs/read_genomic_features_png')
tlog.write(" ".join(cmd))
tlog.stop()
tlog.start("generate plots of which reads correspont to which features pdf")
cmd = [args.rscript_path,udir+'/plot_annotated_features.r',
args.tempdir+'/data/read_genomic_features.txt.gz',
args.tempdir+'/plots/read_genomic_features.pdf']
sys.stderr.write(" ".join(cmd)+"\n")
mycall(cmd,args.tempdir+'/logs/read_genomic_features_pdf')
tlog.write(" ".join(cmd))
tlog.stop()
tlog.start("annotate the reads")
  # 5. Annotate the best mapped read mappings
cmd = ['gpd_annotate.py',args.tempdir+'/data/best.sorted.gpd.gz','-r',
args.gpd,'-o',args.tempdir+'/data/annotbest.txt.gz']
if args.threads:
cmd += ['--threads',str(args.threads)]
sys.stderr.write("Annotating reads\n")
sys.stderr.write(" ".join(cmd)+"\n")
gpd_annotate(cmd)
tlog.write(" ".join(cmd))
tlog.stop()
time.sleep(3)
tlog.start("plot by transcript length png")
# 6. Make plots of the transcript lengths
sys.stderr.write("Make plots from transcript lengths\n")
cmd = [args.rscript_path,udir+'/plot_transcript_lengths.r',
args.tempdir+'/data/annotbest.txt.gz',
args.tempdir+'/plots/transcript_distro.png']
sys.stderr.write(" ".join(cmd)+"\n")
mycall(cmd,args.tempdir+'/logs/transcript_distro_png')
tlog.write(" ".join(cmd))
tlog.stop()
tlog.start("plot by transcript length png")
sys.stderr.write("Make plots from transcript lengths\n")
cmd = [args.rscript_path,udir+'/plot_transcript_lengths.r',
args.tempdir+'/data/annotbest.txt.gz',
args.tempdir+'/plots/transcript_distro.pdf']
sys.stderr.write(" ".join(cmd)+"\n")
mycall(cmd,args.tempdir+'/logs/transcript_distro_pdf')
tlog.write(" ".join(cmd))
tlog.stop()
tlog.start("make length distribution from annotations")
# 7. Make length distributions for plotting
sys.stderr.write("making length distributions from annotations\n")
cmd = [udir+'/annotated_length_analysis.py',
args.tempdir+'/data/best.sorted.gpd.gz',
args.tempdir+'/data/annotbest.txt.gz',
'-o',args.tempdir+'/data/annot_lengths.txt.gz']
sys.stderr.write(" ".join(cmd)+"\n")
annotated_length_analysis.external_cmd(cmd)
tlog.write(" ".join(cmd))
tlog.stop()
tlog.start("plot annot length distro png")
# 8. Plot length distributions
cmd = [args.rscript_path,udir+'/plot_annotation_analysis.r',
args.tempdir+'/data/annot_lengths.txt.gz',
args.tempdir+'/plots/annot_lengths.png']
sys.stderr.write(" ".join(cmd)+"\n")
mycall(cmd,args.tempdir+'/logs/annot_lengths_png')
tlog.write(" ".join(cmd))
tlog.stop()
tlog.start("plot annot length distro png")
cmd = [args.rscript_path,udir+'/plot_annotation_analysis.r',
args.tempdir+'/data/annot_lengths.txt.gz',
args.tempdir+'/plots/annot_lengths.pdf']
sys.stderr.write(" ".join(cmd)+"\n")
mycall(cmd,args.tempdir+'/logs/annot_lengths_pdf')
tlog.write(" ".join(cmd))
tlog.stop()
# 9. Get rarefraction curve data
global rcnt
if rcnt < 0:
sys.stderr.write("Getting read count\n")
rcnt = 0
tinf = gzip.open(args.tempdir+'/data/lengths.txt.gz')
for line in tinf: rcnt += 1
tinf.close()
tlog.start("get rarefraction gene")
sys.stderr.write("Writing rarefraction curves\n")
cmd = [udir+'/gpd_annotation_to_rarefraction.py',
args.tempdir+'/data/annotbest.txt.gz',
'--samples_per_xval',str(args.samples_per_xval),
'--original_read_count',str(rcnt),'--threads',str(args.threads),
'--gene','-o',args.tempdir+'/data/gene_rarefraction.txt']
sys.stderr.write(" ".join(cmd)+"\n")
gpd_annotation_to_rarefraction.external_cmd(cmd)
tlog.write(" ".join(cmd))
tlog.stop()
tlog.start("rarefraction transcript")
cmd = [udir+'/gpd_annotation_to_rarefraction.py',
args.tempdir+'/data/annotbest.txt.gz','--samples_per_xval',
str(args.samples_per_xval),'--original_read_count',str(rcnt),
'--threads',str(args.threads),'--transcript','-o',
args.tempdir+'/data/transcript_rarefraction.txt']
sys.stderr.write(" ".join(cmd)+"\n")
gpd_annotation_to_rarefraction.external_cmd(cmd)
tlog.write(" ".join(cmd))
tlog.stop()
tlog.start("rarefraction gene full")
cmd = [udir+'/gpd_annotation_to_rarefraction.py',
args.tempdir+'/data/annotbest.txt.gz','--samples_per_xval',
str(args.samples_per_xval),'--original_read_count',str(rcnt),
'--threads',str(args.threads),'--full','--gene','-o',
args.tempdir+'/data/gene_full_rarefraction.txt']
sys.stderr.write(" ".join(cmd)+"\n")
gpd_annotation_to_rarefraction.external_cmd(cmd)
tlog.write(" ".join(cmd))
tlog.stop()
tlog.start("rarefraction gene full")
cmd = [udir+'/gpd_annotation_to_rarefraction.py',
args.tempdir+'/data/annotbest.txt.gz','--samples_per_xval',
str(args.samples_per_xval),'--original_read_count',str(rcnt),
'--threads',str(args.threads),'--full','--transcript','-o',
args.tempdir+'/data/transcript_full_rarefraction.txt']
sys.stderr.write(" ".join(cmd)+"\n")
gpd_annotation_to_rarefraction.external_cmd(cmd)
tlog.write(" ".join(cmd))
tlog.stop()
tlog.start("plot multiple rarefractions")
# 10. Plot the rarefraction curves
for type in ['gene','transcript']:
for ext in ['png','pdf']:
cmd = [args.rscript_path,udir+'/plot_annotation_rarefractions.r',
args.tempdir+'/plots/'+type+'_rarefraction.'+ext,type,
args.tempdir+'/data/'+type+'_rarefraction.txt','#FF000088',
args.tempdir+'/data/'+type+'_full_rarefraction.txt','#0000FF88']
sys.stderr.write(" ".join(cmd)+"\n")
mycall(cmd,args.tempdir+'/logs/plot_'+type+'_rarefraction_'+ext)
tlog.write(" ".join(cmd))
tlog.stop()
if os.name == 'nt' or sys.platform == 'darwin': return
## For the bias data we need to downsample
## Using some system utilities to accomplish this
sys.stderr.write("downsampling mappings for bias calculation\n")
cmd0 = 'zcat'
if args.threads > 1:
cmd1 = ['sort','-R','-S1G','-T',
args.tempdir+'/temp','--parallel='+str(args.threads)]
else:
cmd1 = ['sort','-R','-S1G','-T',args.tempdir+'/temp']
cmd2 = 'head -n '+str(args.max_bias_data)
if args.threads > 1:
cmd3 = ['sort','-k3,3','-k5,5n','-k','6,6n','-S1G','-T',
            args.tempdir+'/temp','--parallel='+str(args.threads)]
else:
cmd3 = ['sort','-k3,3','-k5,5n','-k','6,6n','-S1G','-T',
args.tempdir+'/temp']
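  # Conceptually this Popen chain behaves like the shell pipeline (sketch, not literal):
  #   zcat best.sorted.gpd.gz | sort -R | head -n <max_bias_data> | sort -k3,3 -k5,5n -k6,6n
  # i.e. randomly subsample up to max_bias_data alignments, then re-sort them by position
  # before the gzip-compressed copy is written below.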
inf = open(args.tempdir+'/data/best.sorted.gpd.gz')
of = gzip.open(args.tempdir+'/temp/best.random.sorted.gpd.gz','w')
if os.name != 'nt':
p0 = Popen(cmd0.split(),stdin=inf,stdout=PIPE)
p1 = Popen(cmd1,stdin=p0.stdout,stdout=PIPE)
p2 = Popen(cmd2.split(),stdin=p1.stdout,stdout=PIPE)
p3 = Popen(cmd3,stdin=p2.stdout,stdout=PIPE)
else:
sys.stderr.write("WARNING: Windows OS detected. using shell.")
p0 = Popen(cmd0,stdin=inf,stdout=PIPE,shell=True)
p1 = Popen(" ".join(cmd1),stdin=p0.stdout,stdout=PIPE,shell=True)
p2 = Popen(cmd2,stdin=p1.stdout,stdout=PIPE,shell=True)
p3 = Popen(" ".join(cmd3),stdin=p2.stdout,stdout=PIPE,shell=True)
for line in p3.stdout:
of.write(line)
p3.communicate()
p2.communicate()
p1.communicate()
p0.communicate()
of.close()
inf.close()
# now downsample annotations
sys.stderr.write("Downsampling annotations for bias\n")
inf = gzip.open(args.tempdir+'/temp/best.random.sorted.gpd.gz')
rnames = set()
for line in inf:
f = line.rstrip().split("\t")
rnames.add(f[0])
inf.close()
of = gzip.open(args.tempdir+'/temp/annotbest.random.txt.gz','w')
inf = gzip.open(args.tempdir+'/data/annotbest.txt.gz')
for line in inf:
f = line.rstrip().split("\t")
if f[1] in rnames: of.write(line)
inf.close()
of.close()
tlog.start("use annotations to check for 5' to 3' biase")
# 11. Use annotation outputs to check for bias
sys.stderr.write("Prepare bias data\n")
cmd = [udir+'/annotated_read_bias_analysis.py',
args.tempdir+'/temp/best.random.sorted.gpd.gz',args.gpd,
args.tempdir+'/temp/annotbest.random.txt.gz','-o',
args.tempdir+'/data/bias_table.txt.gz','--output_counts',
args.tempdir+'/data/bias_counts.txt','--allow_overflowed_matches',
'--threads',str(args.threads),'--specific_tempdir',args.tempdir+'/temp']
sys.stderr.write(" ".join(cmd)+"\n")
annotated_read_bias_analysis.external_cmd(cmd)
tlog.write(" ".join(cmd))
tlog.stop()
tlog.start("plot bias png")
# 12. Plot bias
cmd = [args.rscript_path,udir+'/plot_bias.r',
args.tempdir+'/data/bias_table.txt.gz',
args.tempdir+'/plots/bias.png']
sys.stderr.write(" ".join(cmd)+"\n")
mycall(cmd,args.tempdir+'/logs/bias_png.log')
tlog.write(" ".join(cmd))
tlog.stop()
tlog.start("plot bias pdf")
cmd = [args.rscript_path,udir+'/plot_bias.r',
args.tempdir+'/data/bias_table.txt.gz',
args.tempdir+'/plots/bias.pdf']
sys.stderr.write(" ".join(cmd)+"\n")
mycall(cmd,args.tempdir+'/logs/bias_pdf.log')
tlog.write(" ".join(cmd))
tlog.stop()
tlog.start("Prepare junction variance data")
# 13. Get distances of observed junctions from reference junctions
cmd = [udir+'/gpd_to_junction_variance.py','-r',args.gpd,
args.tempdir+'/temp/best.random.sorted.gpd.gz',
'--specific_tempdir',args.tempdir+'/temp','-o',
args.tempdir+'/data/junvar.txt','--threads',str(args.threads)]
sys.stderr.write(" ".join(cmd)+"\n")
gpd_to_junction_variance.external_cmd(cmd)
tlog.write(" ".join(cmd))
tlog.stop()
tlog.start("plot junvar png")
# 14. Junction distances
cmd = [args.rscript_path,udir+'/plot_junvar.r',
args.tempdir+'/data/junvar.txt',
args.tempdir+'/plots/junvar.png']
sys.stderr.write(" ".join(cmd)+"\n")
mycall(cmd,args.tempdir+'/logs/junvar_png.log')
tlog.write(" ".join(cmd))
tlog.stop()
tlog.start("plot junvar pdf")
cmd = [args.rscript_path,udir+'/plot_junvar.r',
args.tempdir+'/data/junvar.txt',
args.tempdir+'/plots/junvar.pdf']
sys.stderr.write(" ".join(cmd)+"\n")
mycall(cmd,args.tempdir+'/logs/junvar_pdf.log')
tlog.write(" ".join(cmd))
tlog.stop()
return
def mycall(cmd,lfile):
ofe = open(lfile+'.err','w')
ofo = open(lfile+'.out','w')
p = Popen(cmd,stderr=ofe,stdout=ofo)
p.communicate()
ofe.close()
ofo.close()
return
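# mycall is a thin Popen wrapper: each plotting call above gets its own pair of log files,
# <lfile>.err and <lfile>.out, capturing the child's stderr and stdout under the logs folder.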
#def do_inputs():
# # Setup command line inputs
# parser=argparse.ArgumentParser(description="Create an output report",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# parser.add_argument('input',help="INPUT FILE or '-' for STDIN")
# parser.add_argument('-o','--output',help="OUTPUT Folder or STDOUT if not set")
# parser.add_argument('--portable_output',help="OUTPUT file in a portable html format")
# group1 = parser.add_mutually_exclusive_group(required=True)
# group1.add_argument('-r','--reference',help="Reference Fasta")
# group1.add_argument('--no_reference',action='store_true',help="No Reference Fasta")
# parser.add_argument('--annotation',help="Reference annotation genePred")
# parser.add_argument('--threads',type=int,default=1,help="INT number of threads to run. Default is system cpu count")
# # Temporary working directory step 1 of 3 - Definition
# parser.add_argument('--tempdir',required=True,help="This temporary directory will be used, but will remain after executing.")
#
# ### Parameters for alignment plots
# parser.add_argument('--min_aligned_bases',type=int,default=50,help="for analyzing alignment, minimum bases to consider")
# parser.add_argument('--max_query_overlap',type=int,default=10,help="for testing gapped alignment advantage")
# parser.add_argument('--max_target_overlap',type=int,default=10,help="for testing gapped alignment advantage")
# parser.add_argument('--max_query_gap',type=int,help="for testing gapped alignment advantage")
# parser.add_argument('--max_target_gap',type=int,default=500000,help="for testing gapped alignment advantage")
# parser.add_argument('--required_fractional_improvement',type=float,default=0.2,help="require gapped alignment to be this much better (in alignment length) than single alignment to consider it.")
#
# ### Parameters for locus analysis
# parser.add_argument('--do_loci',action='store_true',help="This analysis is time consuming at the moment so don't do it unless necessary")
# parser.add_argument('--min_depth',type=float,default=1.5,help="require this or more read depth to consider locus")
# parser.add_argument('--min_coverage_at_depth',type=float,default=0.8,help="require at least this much of the read to be covered at min_depth")
# parser.add_argument('--min_exon_count',type=int,default=2,help="Require at least this many exons in a read to consider assignment to a locus")
# parser.add_argument('--locus_downsample',type=int,default=100,help="Only include up to this many long reads in a locus\n")
#
# ### Params for alignment error plot
# parser.add_argument('--alignment_error_scale',nargs=6,type=float,help="<ins_min> <ins_max> <mismatch_min> <mismatch_max> <del_min> <del_max>")
# parser.add_argument('--alignment_error_max_length',type=int,default=100000,help="The maximum number of alignment bases to calculate error from")
#
# ### Params for context error plot
# parser.add_argument('--context_error_scale',nargs=6,type=float,help="<ins_min> <ins_max> <mismatch_min> <mismatch_max> <del_min> <del_max>")
# parser.add_argument('--context_error_stopping_point',type=int,default=1000,help="Sample at least this number of each context")
#
# ## Params for rarefraction plots
# parser.add_argument('--samples_per_xval',type=int,default=500)
#
# args = parser.parse_args()
# # Temporary working directory step 2 of 3 - Creation
# setup_tempdir(args)
# return args
def setup_tempdir(args):
if not os.path.exists(args.tempdir):
os.makedirs(args.tempdir.rstrip('/'))
if not os.path.exists(args.tempdir):
sys.stderr.write("ERROR: Problem creating temporary directory\n")
sys.exit()
return
def external(args):
main(args)
if __name__=="__main__":
sys.stderr.write("excute as prepare all data as main\n")
#do our inputs
# Can disable calling as main
#args = do_inputs()
#main(args)
|
AlignQC
|
/AlignQC-2.0.5.tar.gz/AlignQC-2.0.5/alignqc/prepare_all_data.py
|
prepare_all_data.py
|
import argparse, sys, os, re, base64, zlib, gzip, StringIO
from shutil import rmtree
from multiprocessing import cpu_count
from tempfile import mkdtemp, gettempdir
from xml.etree import ElementTree
from seqtools.format.bed import Bed12
g_version = None
def fixtag(ns, tag, nsmap):
return '{'+nsmap[ns]+'}'+tag
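# Example (illustrative values): with nsmap = {'xhtml': 'http://www.w3.org/1999/xhtml'},
#   fixtag('xhtml', 'a', nsmap) -> '{http://www.w3.org/1999/xhtml}a'
# which is the fully-qualified tag form ElementTree reports, as compared against in main() below.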
def main(args):
of = sys.stdout
if args.output != '-':
of = open(args.output,'w')
names = {}
tree = ElementTree.parse(args.xhtml_input)
if args.verbose:
sys.stderr.write("Traversing xhtml\n")
for v in [x for x in tree.iter() if x.tag=='{http://www.w3.org/1999/xhtml}a']:
data = v.attrib
name = None
if 'download' in data:
name = data['download']
elif 'id' in data:
name = data['id']
if not name:
if args.verbose:
sys.stderr.write("warning no name for linked data\n")
continue
info = data['href']
m = re.match('data:[^,]+base64,',info)
if not m: continue
if args.list:
names[name] = ''
m = re.match('data:[^,]+base64,(.*)$',info)
if not m:
sys.stderr.write("warning unable to get base64 string")
continue
v = base64.b64decode(m.group(1))
names[name] = v
if args.verbose:
sys.stderr.write("Finished traversing xhtml\n")
if args.list:
for name in sorted(names.keys()):
newname = name
if name[-3:]=='.gz':
newname = name[:-3]
if is_ucsc_bed(newname):
of.write(name+" ["+newname+" "+newname[:-4]+".gpd]\n")
else:
if name[-3:]=='.gz':
of.write(name +" ["+newname+"]\n")
else:
of.write(newname+"\n")
return
shortnames = {}
for name in names:
if name[-3:] == '.gz': shortnames[name[:-3]] = name
else: shortnames[name] = name
### if we are still here we must be doing an extract
exname = args.extract
out_gzipped = False
if args.extract[-3:]=='.gz':
out_gzipped = True
exname = args.extract[:-3]
#handle case of a gpd conversion
if is_ucsc_gpd(exname):
oname = exname[:-4]+'.bed.gz'
if oname not in names:
sys.stderr.write("ERROR '"+args.extract+"' is not found. Use --list option to see what is available\n")
sys.exit()
sio = StringIO.StringIO(zlib.decompress(names[oname],15+32))
header = sio.readline()
for v in sio:
b = Bed12(v)
print b.get_gpd_line()
return
  # figure out what the stored name is
  if exname not in shortnames:
    sys.stderr.write("ERROR '"+args.extract+"' is not found. Use --list option to see what is available\n")
    sys.exit()
  oname = shortnames[exname]
  in_gzipped = False
  if oname[-3:]=='.gz': in_gzipped = True
if in_gzipped and not out_gzipped:
of.write(zlib.decompress(names[oname],15+32)) #special for gzip format
else:
of.write(names[oname])
#of.write(names[args.extract])
of.close()
# Temporary working directory step 3 of 3 - Cleanup
if not args.specific_tempdir:
rmtree(args.tempdir)
def is_ucsc_bed(newname):
if newname == 'best.sorted.bed' or \
newname== 'chimera.bed' or \
newname== 'gapped.bed' or \
     newname== 'technical_chimeras.bed' or \
     newname == 'technical_atypical_chimeras.bed':
return True
return False
def is_ucsc_gpd(newname):
if newname == 'best.sorted.gpd' or \
newname== 'chimera.gpd' or \
newname== 'gapped.gpd' or \
newname== 'technical_chimeras.gpd' or \
newname == 'technical_atypical_chimeras.gpd':
return True
return False
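# These two helpers only whitelist which embedded file names --list and --extract treat as
# UCSC bed/genePred pairs; any other stored name is simply dumped as-is.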
def external_cmd(cmd,version=None):
#set version by input
global g_version
g_version = version
cache_argv = sys.argv
sys.argv = cmd
args = do_inputs()
main(args)
sys.argv = cache_argv
def do_inputs():
# Setup command line inputs
parser=argparse.ArgumentParser(description="Extract data from xhtml output of alignqc through the command line",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('xhtml_input',help="INPUT XHTML FILE")
parser.add_argument('-o','--output',default='-',help="OUTPUTFILE or STDOUT if not set")
parser.add_argument('-v','--verbose',action='store_true',help="Show all stderr messages\n")
group1 = parser.add_mutually_exclusive_group(required=True)
group1.add_argument('-l','--list',action='store_true',help='show available data')
group1.add_argument('-e','--extract',help='dump this data')
#parser.add_argument('--threads',type=int,default=cpu_count(),help="INT number of threads to run. Default is system cpu count")
# Temporary working directory step 1 of 3 - Definition
group = parser.add_mutually_exclusive_group()
group.add_argument('--tempdir',default=gettempdir(),help="The temporary directory is made and destroyed here.")
group.add_argument('--specific_tempdir',help="This temporary directory will be used, but will remain after executing.")
args = parser.parse_args()
# Temporary working directory step 2 of 3 - Creation
setup_tempdir(args)
return args
def setup_tempdir(args):
if args.specific_tempdir:
if not os.path.exists(args.specific_tempdir):
os.makedirs(args.specific_tempdir.rstrip('/'))
args.tempdir = args.specific_tempdir.rstrip('/')
if not os.path.exists(args.specific_tempdir.rstrip('/')):
sys.stderr.write("ERROR: Problem creating temporary directory\n")
sys.exit()
else:
args.tempdir = mkdtemp(prefix="weirathe.",dir=args.tempdir.rstrip('/'))
if not os.path.exists(args.tempdir.rstrip('/')):
sys.stderr.write("ERROR: Problem creating temporary directory\n")
sys.exit()
if not os.path.exists(args.tempdir):
sys.stderr.write("ERROR: Problem creating temporary directory\n")
sys.exit()
return
if __name__=="__main__":
  args = do_inputs()
  main(args)
|
AlignQC
|
/AlignQC-2.0.5.tar.gz/AlignQC-2.0.5/alignqc/dump.py
|
dump.py
|
import argparse, sys, os, gzip, inspect
from shutil import rmtree, copy
from multiprocessing import cpu_count
from tempfile import mkdtemp, gettempdir
from subprocess import PIPE, Popen
from Bio.Format.GPD import GPDStream
from Bio.Stream import LocusStream
import classify_reads
#bring in the folder to the path for our utilities
#pythonfolder_loc = "../pyutil"
pythonfolder_loc = "../../Au-public/iron/utilities"
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe() ))[0],pythonfolder_loc)))
if cmd_subfolder not in sys.path:
sys.path.insert(0,cmd_subfolder)
import gpd_to_nr
import gpd_annotate
def main():
#do our inputs
args = do_inputs()
# first we need to run the classify
classify_reads.external_cmd('classify_reads.py '+args.input_annot+' '+args.input_gpd+' -o '+args.tempdir+'/classify.txt.gz')
get_novel_sets(args.tempdir+'/classify.txt.gz',args.input_gpd,args.tempdir+'/novel_isoform_reads.gpd.gz',args.tempdir+'/novel_locus_reads.gpd.gz',args)
# Now we can make a new non-redundant set of genpreds from the novel isoforms
sys.stderr.write("making NR novel isoforms\n")
cmd = 'gpd_to_nr.py '+args.tempdir+'/novel_isoform_reads.gpd.gz '+\
' -j '+str(args.junction_tolerance)+' --threads '+str(args.threads)+\
' --minimum_junction_end_support '+str(args.minimum_junction_end_support)+\
' --minimum_support '+str(args.minimum_support)+\
' --gene_names '+\
' -o '+args.tempdir+'/novel_isoforms_nr.gpd.gz'
gpd_to_nr.external_cmd(cmd)
sys.stderr.write("reannotating novel based on our new gpd\n")
# Now we reannotate the novel based on the these newly annotated isoforms
  cmd = 'gpd_annotate.py '+args.tempdir+'/novel_locus_reads.gpd.gz '+\
' --threads '+str(1)+' '+\
' -r '+args.tempdir+'/novel_isoforms_nr.gpd.gz '+\
' -o '+args.tempdir+'/novel_locus_reads.annot.txt.gz'
gpd_annotate.external_cmd(cmd)
# now this new annotation should be classified
# the new isoform will be in novel_isoform_reads.gpd.gz
cmd = 'classify_reads.py '+args.tempdir+'/novel_locus_reads.annot.txt.gz '+args.tempdir+'/novel_locus_reads.gpd.gz -o '+args.tempdir+'/classify_novel.txt.gz'
sys.stderr.write(cmd+"\n")
classify_reads.external_cmd(cmd)
get_novel_sets(args.tempdir+'/classify_novel.txt.gz',args.tempdir+'/novel_locus_reads.gpd.gz',args.tempdir+'/novel_isoform_reads2.gpd.gz',args.tempdir+'/novel_locus_reads2.gpd.gz',args)
# now lets combine our novel isoform reads making sure to sort them
of = open(args.tempdir+'/new_novel_isoform_reads.gpd.gz','w')
cmd2 = 'gzip'
p2 = Popen(cmd2.split(),stdout=of,stdin=PIPE)
cmd1 = 'sort -k3,3 -k5,5n -k6,6n'
p1 = Popen(cmd1.split(),stdout=p2.stdin,stdin=PIPE)
inf = gzip.open(args.tempdir+'/novel_isoform_reads.gpd.gz')
for line in inf: p1.stdin.write(line)
inf.close()
inf = gzip.open(args.tempdir+'/novel_isoform_reads2.gpd.gz')
for line in inf: p1.stdin.write(line)
inf.close()
p1.communicate()
p2.communicate()
of.close()
# Now we can make a new non-redundant set of genpreds from the novel isoforms
sys.stderr.write("making NR novel isoforms\n")
cmd = 'gpd_to_nr.py '+args.tempdir+'/new_novel_isoform_reads.gpd.gz '+\
' -j '+str(args.junction_tolerance)+' --threads '+str(args.threads)+\
' --minimum_junction_end_support '+str(args.minimum_junction_end_support)+\
' --minimum_support '+str(args.minimum_support)+\
' --gene_names '+\
' -o '+args.tempdir+'/novel_isoforms_nr2.gpd.gz'
gpd_to_nr.external_cmd(cmd)
  #Only need to reannotate if we are interested in what's left over
#sys.stderr.write("reannotating novel based on our new gpd\n")
## Now we reannotate the novel based on the these newly annotated isoforms
#cmd = 'gpd_anntotate.py '+args.tempdir+'/novel_locus_reads.gpd.gz '+\
# ' --threads '+str(args.threads)+' '+\
# ' -r '+args.tempdir+'/novel_isoforms_nr2.gpd.gz '+\
# ' -o '+args.tempdir+'/novel_locus_reads.annot.txt.gz'
#gpd_annotate.external_cmd(cmd)
sys.stderr.write("now work on the novel loci\n")
# Now lets work on the novel locus
of = open(args.tempdir+'/sorted_novel_locus_reads.gpd.gz','w')
cmd2 = 'gzip'
p2 = Popen(cmd2.split(),stdout=of,stdin=PIPE)
cmd1 = 'sort -k3,3 -k5,5n -k6,6n'
p1 = Popen(cmd1.split(),stdout=p2.stdin,stdin=PIPE)
inf = gzip.open(args.tempdir+'/novel_locus_reads2.gpd.gz')
for line in inf: p1.stdin.write(line)
inf.close()
p1.communicate()
p2.communicate()
of.close()
sys.stderr.write("making NR novel loci\n")
cmd = 'gpd_to_nr.py '+args.tempdir+'/sorted_novel_locus_reads.gpd.gz '+\
' -j '+str(args.junction_tolerance)+' --threads '+str(args.threads)+\
' --minimum_junction_end_support '+str(args.minimum_junction_end_support)+\
' --minimum_support '+str(args.minimum_support)+\
' -o '+args.tempdir+'/novel_locus_nr.gpd.gz'
gpd_to_nr.external_cmd(cmd)
sys.stderr.write("sort the novel isoforms\n")
of = open(args.tempdir+'/novel_isoforms_nr.sorted.gpd.gz','w')
cmd2 = 'gzip'
p2 = Popen(cmd2.split(),stdout=of,stdin=PIPE)
cmd1 = 'sort -k3,3 -k5,5n -k6,6n'
p1 = Popen(cmd1.split(),stdout=p2.stdin,stdin=PIPE)
inf = gzip.open(args.tempdir+'/novel_isoforms_nr2.gpd.gz')
for line in inf: p1.stdin.write(line)
inf.close()
p1.communicate()
p2.communicate()
of.close()
sys.stderr.write("sort the novel loci\n")
of = open(args.tempdir+'/novel_loci_nr.sorted.gpd.gz','w')
cmd2 = 'gzip'
p2 = Popen(cmd2.split(),stdout=of,stdin=PIPE)
cmd1 = 'sort -k3,3 -k5,5n -k6,6n'
p1 = Popen(cmd1.split(),stdout=p2.stdin,stdin=PIPE)
inf = gzip.open(args.tempdir+'/novel_locus_nr.gpd.gz')
for line in inf: p1.stdin.write(line)
inf.close()
p1.communicate()
p2.communicate()
of.close()
# Now we can rename totally novel genes based on locus overlap
of = open(args.tempdir+'/novel_loci_nr_named.sorted.gpd.gz','w')
cmd2 = 'gzip'
p2 = Popen(cmd2.split(),stdout=of,stdin=PIPE)
cmd1 = 'sort -k3,3 -k5,5n -k6,6n'
p1 = Popen(cmd1.split(),stdout=p2.stdin,stdin=PIPE)
inf = gzip.open(args.tempdir+'/novel_loci_nr.sorted.gpd.gz')
gs = GPDStream(inf)
ls = LocusStream(gs)
z = 0
for rng in ls:
z+=1
rng_string = rng.get_range_string()
gpds = rng.get_payload()
for gpd in gpds:
gene_name = 'LOC'+str(z)+'|'+str(len(gpds))+'|'+rng_string
f = gpd.get_gpd_line().rstrip().split("\t")
f[0] = gene_name
gpd_line = "\t".join(f)
p1.stdin.write(gpd_line+"\n")
p1.communicate()
p2.communicate()
of.close()
# we are almost done but we need to make sure these genepreds aren't subsets of known genes
sys.stderr.write("reannotating novel-isoform by reference\n")
  cmd = 'gpd_annotate.py '+args.tempdir+'/novel_isoforms_nr.sorted.gpd.gz '+\
' --threads '+str(1)+' '+\
' -r '+args.reference_annotation_gpd+\
' -o '+args.tempdir+'/novel_isoforms_nr.annot.txt.gz'
gpd_annotate.external_cmd(cmd)
cmd = 'classify_reads.py '+args.tempdir+'/novel_isoforms_nr.annot.txt.gz '+args.tempdir+'/novel_isoforms_nr.sorted.gpd.gz -o '+args.tempdir+'/classify_novel_isoform_ref.txt.gz'
sys.stderr.write(cmd+"\n")
classify_reads.external_cmd(cmd)
# now we can screen to make sure things in the novel isoform file really are novel isoforms
blacklist = set()
finf = gzip.open(args.tempdir+'/classify_novel_isoform_ref.txt.gz')
for line in finf:
f = line.rstrip().split("\t")
if f[2]=='subset' or f[2]=='full': blacklist.add(f[0])
finf.close()
fof = gzip.open(args.tempdir+'/novel_isoforms_nr.filtered.sorted.gpd.gz','w')
finf = gzip.open(args.tempdir+'/novel_isoforms_nr.sorted.gpd.gz')
for line in finf:
f = line.rstrip().split("\t")
if f[1] in blacklist: continue
fof.write(line)
finf.close()
fof.close()
sys.stderr.write("reannotating novel-locus by reference\n")
  cmd = 'gpd_annotate.py '+args.tempdir+'/novel_loci_nr_named.sorted.gpd.gz '+\
' --threads '+str(1)+' '+\
' -r '+args.reference_annotation_gpd+\
' -o '+args.tempdir+'/novel_loci_nr_named.annot.txt.gz'
gpd_annotate.external_cmd(cmd)
cmd = 'classify_reads.py '+args.tempdir+'/novel_loci_nr_named.annot.txt.gz '+args.tempdir+'/novel_loci_nr_named.sorted.gpd.gz -o '+args.tempdir+'/classify_novel_loci.txt.gz'
sys.stderr.write(cmd+"\n")
classify_reads.external_cmd(cmd)
  # now we can screen to make sure things in the novel loci file really are novel loci
blacklist = set()
finf = gzip.open(args.tempdir+'/classify_novel_loci.txt.gz')
for line in finf:
f = line.rstrip().split("\t")
if f[2]=='subset' or f[2]=='full': blacklist.add(f[0])
finf.close()
fof = gzip.open(args.tempdir+'/novel_loci_nr_named.filtered.sorted.gpd.gz','w')
finf = gzip.open(args.tempdir+'/novel_loci_nr_named.sorted.gpd.gz')
for line in finf:
f = line.rstrip().split("\t")
if f[1] in blacklist: continue
fof.write(line)
finf.close()
fof.close()
if not os.path.exists(args.output):
os.makedirs(args.output)
copy(args.tempdir+'/novel_loci_nr_named.filtered.sorted.gpd.gz',args.output+'/novel_loci_nr_named.sorted.gpd.gz')
copy(args.tempdir+'/novel_isoforms_nr.filtered.sorted.gpd.gz',args.output+'/novel_isoforms_nr.sorted.gpd.gz')
# Temporary working directory step 3 of 3 - Cleanup
if not args.specific_tempdir:
rmtree(args.tempdir)
def get_novel_sets(classification,input_gpd,out_iso,out_locus,args):
# now we want to create a non redundant version of the novel isoforms
novel_isoforms = set()
novel_isoform_genes = {}
novel_loci = set()
inf = gzip.open(classification)
for line in inf:
f = line.rstrip().split("\t")
if f[2] == 'novel-isoform':
novel_isoforms.add(f[0])
novel_isoform_genes[f[0]]=f[1] # save the gene name
elif f[2] == 'novel-locus':
novel_loci.add(f[0])
inf.close()
sys.stderr.write("outputing novel isoforms to a file\n")
tof = gzip.open(out_iso,'w')
lof = gzip.open(out_locus,'w')
  inf_gpd = None
if input_gpd[-3:]=='.gz':
inf_gpd = gzip.open(input_gpd)
else:
inf_gpd = open(input_gpd)
z = 0
for line in inf_gpd:
z += 1
if z % 1000 == 0: sys.stderr.write(str(z)+" reads processed\r")
f = line.rstrip().split("\t")
if f[1] in novel_isoforms:
f[0] = novel_isoform_genes[f[0]]
newline = "\t".join(f)
tof.write(newline+"\n")
elif f[1] in novel_loci:
lof.write(line)
inf_gpd.close()
tof.close()
lof.close()
sys.stderr.write("\n")
def do_inputs():
# Setup command line inputs
parser=argparse.ArgumentParser(description="",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('input_annot',help="<input annotbest.txt>")
parser.add_argument('input_gpd',help="<input best.sorted.gpd>")
parser.add_argument('-a','--reference_annotation_gpd',required=True,help="Reference annotation GPD")
parser.add_argument('-o','--output',required=True,help="OUTPUT DIRECTORY")
parser.add_argument('--threads',type=int,default=cpu_count(),help="INT number of threads to run. Default is system cpu count")
# Run parameters
parser.add_argument('-j','--junction_tolerance',type=int,default=10,help="number of bp to tolerate junction mismatch on either side")
parser.add_argument('--minimum_junction_end_support',type=int,default=2,help="minimum coverage of end exons")
parser.add_argument('--minimum_support',type=int,default=2,help="minimum supporting reads")
# Temporary working directory step 1 of 3 - Definition
group = parser.add_mutually_exclusive_group()
group.add_argument('--tempdir',default=gettempdir(),help="The temporary directory is made and destroyed here.")
group.add_argument('--specific_tempdir',help="This temporary directory will be used, but will remain after executing.")
args = parser.parse_args()
# Temporary working directory step 2 of 3 - Creation
setup_tempdir(args)
return args
def setup_tempdir(args):
if args.specific_tempdir:
if not os.path.exists(args.specific_tempdir):
os.makedirs(args.specific_tempdir.rstrip('/'))
args.tempdir = args.specific_tempdir.rstrip('/')
if not os.path.exists(args.specific_tempdir.rstrip('/')):
sys.stderr.write("ERROR: Problem creating temporary directory\n")
sys.exit()
else:
args.tempdir = mkdtemp(prefix="weirathe.",dir=args.tempdir.rstrip('/'))
if not os.path.exists(args.tempdir.rstrip('/')):
sys.stderr.write("ERROR: Problem creating temporary directory\n")
sys.exit()
if not os.path.exists(args.tempdir):
sys.stderr.write("ERROR: Problem creating temporary directory\n")
sys.exit()
return
if __name__=="__main__":
main()
|
AlignQC
|
/AlignQC-2.0.5.tar.gz/AlignQC-2.0.5/scripts/alignqc_to_novel_transcriptome.py
|
alignqc_to_novel_transcriptome.py
|
import argparse, sys, os
from shutil import rmtree
from multiprocessing import cpu_count
from tempfile import mkdtemp, gettempdir
from subprocess import Popen, PIPE
def main():
#do our inputs
args = do_inputs()
for name in args.inputs:
cmd = 'alignqc dump '+name+' -e lengths.txt'
p = Popen(cmd.split(),stdout=PIPE)
lengths = []
for line in p.stdout:
f = line.rstrip().split("\t")
l = int(f[4])
lengths.append(l)
print name +"\t"+"\t".join([str(x) for x in sorted(lengths)])
p.communicate()
# Temporary working directory step 3 of 3 - Cleanup
if not args.specific_tempdir:
rmtree(args.tempdir)
def do_inputs():
# Setup command line inputs
parser=argparse.ArgumentParser(description="Convert a list of xhtmls from alignqc into a list of read lengths for each file",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('inputs',nargs='+',help="INPUT xhtml FILEs")
parser.add_argument('-o','--output',help="OUTPUTFILE or STDOUT if not set")
parser.add_argument('--threads',type=int,default=cpu_count(),help="INT number of threads to run. Default is system cpu count")
# Temporary working directory step 1 of 3 - Definition
group = parser.add_mutually_exclusive_group()
group.add_argument('--tempdir',default=gettempdir(),help="The temporary directory is made and destroyed here.")
group.add_argument('--specific_tempdir',help="This temporary directory will be used, but will remain after executing.")
args = parser.parse_args()
# Temporary working directory step 2 of 3 - Creation
setup_tempdir(args)
return args
def setup_tempdir(args):
if args.specific_tempdir:
if not os.path.exists(args.specific_tempdir):
os.makedirs(args.specific_tempdir.rstrip('/'))
args.tempdir = args.specific_tempdir.rstrip('/')
if not os.path.exists(args.specific_tempdir.rstrip('/')):
sys.stderr.write("ERROR: Problem creating temporary directory\n")
sys.exit()
else:
args.tempdir = mkdtemp(prefix="weirathe.",dir=args.tempdir.rstrip('/'))
if not os.path.exists(args.tempdir.rstrip('/')):
sys.stderr.write("ERROR: Problem creating temporary directory\n")
sys.exit()
if not os.path.exists(args.tempdir):
sys.stderr.write("ERROR: Problem creating temporary directory\n")
sys.exit()
return
if __name__=="__main__":
main()
|
AlignQC
|
/AlignQC-2.0.5.tar.gz/AlignQC-2.0.5/scripts/alignqcs_to_read_lengths.py
|
alignqcs_to_read_lengths.py
|
import sys, argparse, gzip
#from Bio.Format.GPD import GPD
def main(args):
rinf = None
if args.read_annotations[-3:] == '.gz':
rinf = gzip.open(args.read_annotations)
else:
rinf = open(args.read_annotations)
ginf = None
if args.best_gpd[-3:] == '.gz':
ginf = gzip.open(args.best_gpd)
else:
ginf = open(args.best_gpd)
of = sys.stdout
if args.output:
if args.output[-3:] == '.gz':
of = gzip.open(args.output,'w')
else:
of = open(args.output,'w')
seen_reads = set()
sys.stderr.write("traversing annotations\n")
for line in rinf:
f = line.rstrip().split("\t")
read_name = f[1]
gene_name = f[2]
match_type = f[4]
matching_exons = int(f[5])
consecutive_exons = int(f[6])
read_exons = int(f[7])
if read_exons < 2: continue
seen_reads.add(read_name)
if match_type == 'full':
of.write(read_name+"\t"+gene_name+"\tfull"+"\n")
elif consecutive_exons == read_exons:
of.write(read_name+"\t"+gene_name+"\tsubset"+"\n")
else:
of.write(read_name+"\t"+gene_name+"\tnovel-isoform"+"\n")
sys.stderr.write("traversing reads\n")
z = 0
for line in ginf:
z+=1
if z%1000 == 0: sys.stderr.write(str(z)+" reads processed\r")
#gpd = GPD(line)
f = line.rstrip().split("\t")
if f[1] in seen_reads: continue
if int(f[8]) < 2: continue
of.write(f[1]+"\t"+"\tnovel-locus"+"\n")
sys.stderr.write("\n")
def do_inputs():
parser = argparse.ArgumentParser(description="",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('read_annotations',help="annotbest from alignqc is sufficient")
parser.add_argument('best_gpd',help="best gpd from alignqc is sufficient")
parser.add_argument('-o','--output',help="output the refined read classifications")
args = parser.parse_args()
return args
def external_cmd(cmd):
cache_argv = sys.argv
sys.argv = cmd.split()
args = do_inputs()
main(args)
sys.argv = cache_argv
return
if __name__=="__main__":
args = do_inputs()
main(args)
|
AlignQC
|
/AlignQC-2.0.5.tar.gz/AlignQC-2.0.5/scripts/classify_reads.py
|
classify_reads.py
|
import sys, argparse, gzip
from Bio.Format.GPD import GPD
from Bio.Range import ranges_to_coverage
def main():
parser = argparse.ArgumentParser(description="",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('input',help="Use - for STDIN")
parser.add_argument('genepred',help="the genepred used for this alignqc")
parser.add_argument('--min_exons',type=int,default=1,help="At least this number of exons")
parser.add_argument('--full',action='store_true',help="only use full matches")
parser.add_argument('-o','--output',help="OUTPUT file or nothing for STDOUT")
args = parser.parse_args()
inf = sys.stdin
if args.input != '-':
if args.input[-3:]=='.gz':
inf = gzip.open(args.input)
else: inf = open(args.input)
genes = {}
sys.stderr.write("Reading annotation file\n")
for line in inf:
f = line.rstrip().split("\t")
gene = f[2]
tx = f[3]
type = f[4]
if args.full and type != 'full': continue
if gene not in genes:
genes[gene] = {}
genes[gene]['transcripts'] = {}
genes[gene]['cnt'] = 0
if tx not in genes[gene]['transcripts']:
genes[gene]['transcripts'][tx] = 0
genes[gene]['cnt'] += 1
genes[gene]['transcripts'][tx] += 1
inf.close()
txs = {}
sys.stderr.write("Reading genepred file\n")
z = 0
with open(args.genepred) as inf:
for line in inf:
z +=1
if z%1000==0: sys.stderr.write(str(z)+" \r")
gpd = GPD(line)
exs = []
for ex in gpd.exons:
exs.append(ex.range)
txs[gpd.get_transcript_name()] = exs
sys.stderr.write("\n")
vals = []
sys.stderr.write("Traversing annotation file\n")
for gene in genes:
for tx in genes[gene]['transcripts']:
v = genes[gene]['transcripts'][tx]
exons = txs[tx]
if len(exons) < args.min_exons: continue
for i in range(0,v):
vals += exons[:]
sys.stderr.write("Generating coverage file "+str(len(vals))+"\n")
of = sys.stdout
if args.output:
if args.output[-3:]=='.gz':
of = gzip.open(args.output,'w')
else:
of = open(args.output,'w')
covs = ranges_to_coverage(vals)
for v in covs:
of.write(v.chr+"\t"+str(v.start-1)+"\t"+str(v.end)+"\t"+str(v.get_payload())+"\n")
# of.write(tx+"\t"+gene+"\t"+str(genes[gene]['transcripts'][tx])+"\t"+str(genes[gene]['cnt'])+"\n")
of.close()
if __name__=="__main__":
main()
|
AlignQC
|
/AlignQC-2.0.5.tar.gz/AlignQC-2.0.5/scripts/alignqc_annotation_to_bed_depth.py
|
alignqc_annotation_to_bed_depth.py
|
import sys, argparse, re, os
from subprocess import Popen, PIPE
from Bio.Statistics import average, median, standard_deviation
def main():
parser = argparse.ArgumentParser(description="Assume standard pacbio ccs and subread read name formats, and ONT name formats where _pass_2D _pass_tem _pass_com _fail_2D _fail_tem or _fail_com have been appended to the name.",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('inputs',nargs='+',help="Specify xhtml files")
parser.add_argument('--aligned',action='store_true',help="restrict output to aligned reads only")
args = parser.parse_args()
p_pbccs = re.compile('^m[^\/]+\/\d+\/ccs$')
p_pbsub = re.compile('^m[^\/]+\/\d+\/\d+_\d+$')
p_ontpass2D = re.compile('^\S+_pass_2D$')
p_ontpasstem = re.compile('^\S+_pass_tem$')
p_ontpasscom = re.compile('^\S+_pass_com$')
p_ontfail2D = re.compile('^\S+_fail_2D$')
p_ontfailtem = re.compile('^\S+_fail_tem$')
p_ontfailcom = re.compile('^\S+_fail_com$')
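  # Example read names these patterns are meant to catch (illustrative values only):
  #   PacBio CCS read:  m54006_170101_000000/4391/ccs
  #   PacBio subread:   m54006_170101_000000/4391/0_5437
  #   ONT reads:        <read_id>_pass_2D, <read_id>_pass_tem, <read_id>_fail_com, ...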
results = []
for fname in args.inputs:
sys.stderr.write("processing "+fname+"\n")
c = {
'pacbio':0,
'ont':0,
'ccs':0,
'sub':0,
'ontpass2D':0,
'ontpasstem':0,
'ontpasscom':0,
'ontfail2D':0,
'ontfailtem':0,
'ontfailcom':0,
'ontpass':0,
'ontfail':0,
'ont2D':0,
'ont1D':0,
'other':0 }
cmd = 'alignqc dump '+fname+' -e lengths.txt'
    p = Popen(cmd.split(),stdout=PIPE,universal_newlines=True)
for line in p.stdout:
f = line.rstrip().split("\t")
bp = int(f[3])
if args.aligned and bp == 0: continue
rname = f[0]
if p_pbccs.match(rname):
c['ccs']+=1
c['pacbio']+=1
elif p_pbsub.match(rname):
c['sub'] += 1
c['pacbio']+=1
elif p_ontpass2D.match(rname):
c['ontpass2D'] += 1
c['ont2D'] += 1
c['ont']+=1
c['ontpass']+=1
elif p_ontpasstem.match(rname):
c['ontpasstem'] += 1
c['ont1D'] += 1
c['ont']+=1
c['ontpass']+=1
elif p_ontpasscom.match(rname):
c['ontpasscom'] += 1
c['ont1D'] += 1
c['ont']+=1
c['ontpass']+=1
elif p_ontfail2D.match(rname):
c['ontfail2D'] += 1
c['ont2D'] += 1
c['ont']+=1
c['ontfail']+=1
elif p_ontfailtem.match(rname):
c['ontfailtem'] += 1
c['ont1D'] += 1
c['ont']+=1
c['ontfail']+=1
elif p_ontfailcom.match(rname):
c['ontfailcom'] += 1
c['ont1D'] += 1
c['ont']+=1
c['ontfail']+=1
else:
c['other']+=1
p.communicate()
results.append(c)
k = results[0].keys()
print "feature\tsum\tmedian\taverage\tstandard deviation"
for feature in sorted(k):
arr = [x[feature] for x in results]
    print(feature+"\t"+str(sum(arr))+"\t"+str(median(arr))+"\t"+str(average(arr))+"\t"+str(standard_deviation(arr)))
if __name__=="__main__":
main()
|
AlignQC
|
/AlignQC-2.0.5.tar.gz/AlignQC-2.0.5/scripts/alignqcs_to_read_type_count_per_cell_statistics.py
|
alignqcs_to_read_type_count_per_cell_statistics.py
|
# AlignmentReporter
A tool for DMs to easily make __Alignment Change Graphs__ for their Campaigns, __with a save system__
to switch between parties. Originally it was just part of a larger toolbox; I am currently making it a standalone project.
It is a bit old and needs some changes (mainly using coroutines and C compilation).
The tool is quite simple in its way, but I will add a tutorial later. For testing, simply launch the __init__.py at
the root directory and enter a save file name. It will automatically load any existing file in the data folder if it
exists.
I have **already created two save files from personal games** (accessible with the names "**Celtaidd**" and "**Volac**").
The generated images are in the "***AlignmentReporter/out/***" directory, but a preview is loaded in the tool after each
image generation.
## System recommendations
This tool has been tested with the following setups. Please share any working or non-working setup you use with
this tool.
- *OS:*
- ***Windows 10 (19041.928)***
- ***Linux Mint 20.1 (Xfce)***
- *Python **3.8.5***
## Installation:
in the root directory
```
python setup.py install
```
## Typical usages:
### Python:
```python
import AlignmentReporter as AR; AR.launch()
```
### in terminal:
```
AlignR-Launch
```
## Typical Output

## Tutorial
*To be implemented*
## Known Issues
- GUI still freezes in Windows 10 when generating the image
- Color implementation will sometimes accept wrong inputs or refuse correct ones.
## Licence notice
Every ***.UI*** file contained in this project has been created with the **QtDesigner** software from the **Qt Company** under the **GPL-3.0 License**. *(see the [Official Qt website](https://www.qt.io/) for more information)*.
|
AlignR
|
/AlignR-2.0.5.tar.gz/AlignR-2.0.5/README.md
|
README.md
|
import math
import traceback as tr
from typing import Dict, List, Any, Tuple
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from scipy.interpolate import make_interp_spline, BSpline
def map_to_circle(df):
"""
Remap DataFrame values to a circle of R=1
:param df: DataFrame with two columns "x" and "y"
:type df: pandas.DataFrame
:return: DataFrame with values remapped to a circle of R=1
:rtype: pandas.DataFrame
"""
for i, row in enumerate(df.values):
x, y = row
ratio1 = max(1e-15, math.sqrt(x ** 2 + y ** 2))
n_x, n_y = (abs(x / ratio1), abs(y / ratio1))
if (
math.tan(math.acos(n_x))
/ max(1e-15, (1 / (max(1e-15, math.tan(math.acos(n_x))))))
<= 1.0
):
ratio2: float = math.sqrt(math.tan(math.acos(n_x)) ** 2 + 1)
else:
ratio2: float = math.sqrt(
(1 / max(1e-15, math.tan(math.acos(n_x)))) ** 2 + 1
)
df.loc[i] = (x / ratio2, y / ratio2)
return df
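# Worked example (hypothetical input): the corner point (1.0, 1.0) lies outside the unit
# circle, so map_to_circle() scales it back onto it:
#   map_to_circle(pd.DataFrame({"x": [1.0], "y": [1.0]}))  ->  x ~ 0.707, y ~ 0.707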
def rotatematrix(array, kwargs, order=2.0, angle=180):
"""
Try to apply a rotation matrix to an existing array and plot the result
:param array: input array or dataframe
:param kwargs: kwargs for the plotting methods
:param angle: (optional) angle in degree for the rotation matrix, default is 180
:param order: zorder for the plot() function
:type order: float
:type array: np.ndarray or pandas.DataFrame
:type kwargs: dict[str, Any]
:type angle: float
"""
angle: np.ndarray = np.radians(angle)
rotation_matrix: np.ndarray = np.array(
[[np.cos(angle), -np.sin(angle)], [np.sin(angle), np.cos(angle)]]
)
new_array: List[np.ndarray] = list()
for i, row in enumerate(array.reset_index().values):
row: np.ndarray = np.array(row)
rotated_matrix: np.ndarray = rotation_matrix * row
for j, r in enumerate(rotated_matrix):
row[j] = rotated_matrix[j].sum()
new_array.append(row)
array: pd.DataFrame = pd.DataFrame(
np.array(new_array), columns=["x", "y"]
).sort_values(["x", "y"])
try:
xnew: np.ndarray = np.linspace(array["x"].min(), array["x"].max(), 200)
spl: BSpline = make_interp_spline(tuple(array["x"]), array["y"], k=2)
power_smooth: np.ndarray = spl(xnew)
plt.plot(xnew, power_smooth, zorder=order, **kwargs)
except Exception:
plt.plot(array["x"], array["y"], zorder=order, **kwargs)
def plot_background(n=100, kwargs=None):
"""
    Try to plot the line elements in the background of the image: the circle and the separations between alignment areas
:param n: (optional) 'Quality' of the lines drawn, use carefully with high values, default is 100
:param kwargs: (optional) kwargs for the plotting methods, default is empty
:type n: int
:type kwargs: dict
"""
n: int = int(n)
kwargs: Dict[str, Any] = dict() if not kwargs else kwargs
try:
plt.figure(figsize=(5, 5), constrained_layout=True)
x: np.ndarray = np.linspace(-1.0, 1.0, n)
x_small: np.ndarray = np.linspace(-1.0, 1.0, round(math.sqrt(n) * 2))
ax1: np.ndarray = np.linspace(1.0 / 3.0, 1.0, round(math.sqrt(n)))
ax2: np.ndarray = np.zeros(round(math.sqrt(n))) + 1.0 / 3.0
bars: Tuple[Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray]] = (
(ax1, ax2),
(ax2, ax1),
)
for bar in bars:
df_no_mid: pd.DataFrame = map_to_circle(
pd.DataFrame(np.array(bar).transpose(), columns=["x", "y"])
).set_index("x")
for angle in np.linspace(0, 270, 4):
rotatematrix(df_no_mid, kwargs, order=1, angle=angle)
y: np.ndarray = np.array([math.sqrt(1 - v ** 2) for v in x])
bkwards: Dict[str, Any] = kwargs.copy()
bkwards["linewidth"] *= 2
plt.plot(x, y, zorder=1, **bkwards)
plt.plot(x, -y, zorder=1, **bkwards)
inner_circle: pd.DataFrame = pd.DataFrame(
np.array(
[
x_small / 3.0,
np.array([math.sqrt((1.0 / 9.0) - v ** 2) for v in x_small / 3.0]),
]
).transpose(),
columns=["x", "y"],
)
plt.plot(inner_circle["x"], inner_circle["y"], zorder=1, **kwargs)
plt.plot(inner_circle["x"], -inner_circle["y"], zorder=1, **kwargs)
except Exception:
tr.print_exc()
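# Note on kwargs (inferred from the code above): the dict must provide at least a numeric
# "linewidth" entry, because the outer circle is drawn with a copy whose linewidth is doubled.
# A minimal sketch with assumed matplotlib styling:
#   plot_background(n=200, kwargs={"color": "black", "linewidth": 0.5})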
def plot_foreground(tight=False, kwargs=dict()):
"""
    Try to plot the text elements of the graph and everything that needs to be on top of the rest
:param tight: (optional) if True will call plt.tight_layout() at the end, default is False
:param kwargs: (optional) kwargs for the plotting methods, default is empty
:type tight: bool
:type kwargs: dict
"""
try:
df: pd.DataFrame = pd.DataFrame(
np.array(
[
[-2 / 3, 2 / 3],
[0.0, 2 / 3],
[2 / 3, 2 / 3],
[-2 / 3, 0],
[0.0, 0],
[2 / 3, 0],
[-2 / 3, -2 / 3],
[0.0, -2 / 3],
[2 / 3, -2 / 3],
]
)
)
df = map_to_circle(df)
plt.annotate(
"LG",
df.loc[0],
ha="center",
va="center",
fontsize=kwargs["fontsize"],
fontweight="bold",
)
plt.annotate(
"NG",
df.loc[1],
ha="center",
va="center",
fontsize=kwargs["fontsize"],
fontweight="bold",
)
plt.annotate(
"CG",
df.loc[2],
ha="center",
va="center",
fontsize=kwargs["fontsize"],
fontweight="bold",
)
plt.annotate(
"LN",
df.loc[3],
ha="left",
va="center",
fontsize=kwargs["fontsize"],
fontweight="bold",
)
plt.annotate(
"TN",
df.loc[4],
ha="center",
va="center",
fontsize=kwargs["fontsize"],
fontweight="bold",
)
plt.annotate(
"CN",
df.loc[5],
ha="center",
va="center",
fontsize=kwargs["fontsize"],
fontweight="bold",
)
plt.annotate(
"LE",
df.loc[6],
ha="center",
va="center",
fontsize=kwargs["fontsize"],
fontweight="bold",
)
plt.annotate(
"NE",
df.loc[7],
ha="center",
va="center",
fontsize=kwargs["fontsize"],
fontweight="bold",
)
plt.annotate(
"CE",
df.loc[8],
ha="center",
va="center",
fontsize=kwargs["fontsize"],
fontweight="bold",
)
plt.axis("square")
plt.axis("off")
plt.xlim(-1.5, 1.5)
plt.ylim(-1.5, 1.5)
title: str = kwargs["title"] if kwargs["title"] else None
if title:
text: str = kwargs["title"]
alignment: str = kwargs["alignment"] if kwargs["alignment"] else "center"
plt.title(
text,
y=0.9,
loc=alignment,
fontsize=kwargs["fontsize"] * 1.1,
fontweight="bold",
)
if tight:
plt.tight_layout()
except Exception:
tr.print_exc()
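# Note on kwargs (inferred from the code above): "fontsize" and "title" keys are required,
# "title" may be False/empty to skip plt.title(), and "alignment" is only read when a title
# is set. A minimal sketch with assumed values:
#   plot_foreground(tight=True, kwargs={"title": "Party Alignment", "alignment": "center", "fontsize": 8})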
# Endfile
|
AlignR
|
/AlignR-2.0.5.tar.gz/AlignR-2.0.5/AlignmentReporter/Vizualisation.py
|
Vizualisation.py
|
import time
from typing import List, Tuple
import numpy as np
def compute_time(t):
"""
    Function that converts a given time value (in seconds) into a human-readable string
:param t: Time in second
:type t: float
:return: Time string
:rtype: str
"""
force = False
p = "Process took: "
if t >= 60 ** 2:
force = True
h = int(t / 60 ** 2)
if h > 1:
p += "{:2} hours, ".format(h)
else:
p += "{:2} hour, ".format(h)
t %= 60 ** 2
if t >= 60 or force:
force = True
m = int(t / 60)
if m > 1:
p += "{:2} minutes, ".format(m)
else:
p += "{:2} minute, ".format(m)
t %= 60
if t > 1:
p += "{} seconds".format(round(t, 2))
else:
p += "{} second".format(round(t, 2))
return p
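# Worked example (values chosen for illustration); the {:2} format pads hours/minutes to width 2:
#   compute_time(3725.5)  ->  "Process took:  1 hour,  2 minutes, 5.5 seconds"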
def timeit(func):
"""
    Decorator function computing the execution time of a function
:param func: Function to decorate
:type func: function
:return: wrapped func
:rtype: function
"""
def inner(*args, **kwargs):
inner.__doc__ = timeit.__doc__
start = time.time()
result = func(*args, **kwargs)
print(func.__name__ + " " + str(round(time.time() - start, 4)))
return result
return inner
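# Usage sketch (hypothetical function): the decorator prints the wrapped function's name
# followed by its runtime in seconds, rounded to 4 decimals.
#   @timeit
#   def slow_task():
#       time.sleep(0.5)
#   slow_task()  # prints something like "slow_task 0.5004"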
def alignment_to_position(entries, first_entry_weight=1):
"""
Transform a given list of string alignment entries into positional entries. Apply a weight modifier to the first
entry of the list
:param entries: 1D-array of string entries like ('LG', 'NG', 'CG', ...)
:param first_entry_weight: (Optional) Weight of the first entry of the "entries" parameter
:type entries: tuple or list or np.ndarray
:return: List of positional values
:rtype: Tuple[Tuple[float, float]]
"""
values: List[Tuple[float, float]] = []
value: List[str] = list()
for i, old_value in enumerate(entries):
x = []
y = []
if len(old_value) == 2:
value = [
old_value[0],
old_value[1],
] # jit forced manual list(str) conversion
if value[0] == "N":
value[0] = "T"
elif len(old_value) == 1:
value = [old_value[0]]
for v in value:
if v == "L":
x.append(-1.0)
elif v == "T":
x.append(0.0)
elif v == "C":
x.append(1.0)
elif v in ("G", "B"):
y.append(1.0)
elif v == "N":
y.append(0.0)
elif v in ("E", "M"):
y.append(-1.0)
if len(x) > len(y):
for j in range(len(x) - len(y)):
y.append(np.nan)
elif len(y) > len(x):
for j in range(len(y) - len(x)):
x.append(np.nan)
if i == 0:
for j in range(0, max(0, first_entry_weight - 1)):
values += list(zip(x, y))
values += tuple(list(zip(x, y)))
return tuple(values)
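# Worked example (hypothetical entries): the first entry is repeated according to
# first_entry_weight, and letters map to coordinates (L/T/C on x, G/N/E on y, with B/M as
# aliases for G/E and a leading "N" treated as "T"):
#   alignment_to_position(["LG", "CE"], first_entry_weight=2)
#   ->  ((-1.0, 1.0), (-1.0, 1.0), (1.0, -1.0))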
|
AlignR
|
/AlignR-2.0.5.tar.gz/AlignR-2.0.5/AlignmentReporter/UI/py/funcutils.py
|
funcutils.py
|
import math
import os
import time
import traceback as tr
from typing import Tuple, Dict, Callable, Union, List
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from PySide2.QtCore import Signal, QObject
from PySide2.QtWidgets import QLabel
import AlignmentReporter.Vizualisation as vis
from AlignmentReporter.UI.py.default_parameters import (
BACKGROUND_KWARGS,
PLOT_KWARGS,
METADATA,
)
from AlignmentReporter.UI.py.funcutils import alignment_to_position, compute_time
from AlignmentReporter.UI.py.typed_dict import DataDict, PlayerDict
class MyQLabel(QLabel):
resized: Signal = Signal()
def __init__(self):
"""
Simple subclass of QLabel that emits a Signal when resized
"""
super(MyQLabel, self).__init__()
def resizeEvent(self, event):
"""
Override of the QWidget resizeEvent to add a custom QSignal.emit()
"""
self.resized.emit()
super(MyQLabel, self).resizeEvent(event)
class Worker(QObject):
signal = Signal()
finished = Signal()
def __init__(self, data, *args, **kwargs):
"""
        Subclass of QObject that generates an output image with given parameter data
:param data: array containing the plotting data, output path, temp output path and the fontsize
:param args: any positional argument given to QObject
:param kwargs: any keyword argument given to QObject
:type data: tuple[DataDict, str, str, int]
"""
super(Worker, self).__init__(*args, **kwargs)
self.data = data
@property
def data(self):
"""
Method packing the attributes into a data tuple. (Unused)
:return: tuple containing the plotting data, output path, temp output path and the fontsize
:rtype: tuple[DataDict, str, str, int]
"""
return self.__data, self.__final_file, self.__temp_file, self.__fontsize
@data.setter
def data(self, data_list):
"""
Method unpacking the given data into the correct attributes
:param data_list: array containing the plotting data, output path, temp output path and the fontsize
:type data_list: tuple[DataDict, str, str, int]
"""
self.__data, self.__final_file, self.__temp_file, self.__fontsize = data_list
def generate_image(self):
try:
data: DataDict = self.__data
final_file: str = self.__final_file
temp_file: str = self.__temp_file
fontsize: int = self.__fontsize
al: Tuple[
Tuple[str, str, str], Tuple[str, str, str], Tuple[str, str, str]
] = (("LG", "NG", "CG"), ("LN", "TN", "CN"), ("LE", "NE", "CE"))
plt.close()
t_start = time.time()
players = data["players"]
line_qual = int(
360 * (10 ** np.linspace(-0.5, 3.8, 100)[data["hs_line_quality"]])
)
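            # Descriptive note (assuming the slider reports 0-99): the value indexes a
            # 100-entry log-spaced table, so the background point count ranges from about
            # 113 (at 10**-0.5) up to roughly 2.3 million (at 10**3.8).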
vis.plot_background(n=line_qual, kwargs=BACKGROUND_KWARGS)
for i in range(line_qual): # Fixme: Maybe not do that
self.signal.emit()
t = data["le_image_title"] if data["chb_image_title"] else False
alignment = (
"left"
if data["title_alignment"] == 0
else "right"
if data["title_alignment"] == 2
else "center"
)
pos_y = float(data["hs_legend_v_offset"] / 100.0) * 1.5
pos_x = data["hs_legend_h_offset"] * 0.015
stretch = float(data["hs_legend_stretch"] / 50.0)
players_values: List[PlayerDict] = list(players.values())
if data["gb_add_auto_party"]:
party_pos_list: Union[Tuple[float, float], List[float, float]] = list()
if data["cob_party_starting_alignment"] != "Average":
first_pos: np.array = np.array(
*alignment_to_position(
[data["cob_party_starting_alignment"]],
1,
)
)
else:
first_pos_list: List[np.ndarray] = list()
for v in players.values():
first_pos_list += alignment_to_position([v["Entries"][0]])
first_pos: np.ndarray = np.array(first_pos_list).mean(axis=0)
first_pos_list = list()
for i in range(data["sb_first_entry_weight"]):
first_pos_list.append(first_pos)
party_func: Callable = np.mean if data["rb_average"] else np.sum
party_name: str = data["le_party_name"]
party_color: str = data["le_party_color"]
party_all_entries: Union[
np.ndarray, List[Tuple[Tuple[float, float]]]
] = list()
len_entries: Union[List[int], Tuple[int]] = list()
for player in players.values():
party_all_entries.append(
alignment_to_position(player["Entries"][1:])
)
len_entries.append(len(party_all_entries[-1]))
party_array_values: np.ndarray = np.zeros(
(max(len_entries), len(players.values()), 2)
)
party_array_values[:, :, :] = 0.0
for i, array in enumerate(party_all_entries):
party_array_values[: len_entries[i], i, :] = np.array(array)
party_align_values = np.concatenate(
(first_pos_list, party_func(party_array_values, axis=1)), axis=0
)
party_player: Dict[str, Union[np.ndarray, str]] = {
"Color": party_color,
"Entries": party_align_values,
"Name": party_name,
}
players_values.append(party_player)
players_pos: np.ndarray = np.array(
list(
zip(
np.linspace(pos_x, pos_x, len(players_values)),
np.linspace(
pos_y,
(pos_y - (stretch * len(players_values))),
len(players_values),
),
)
)
)
for player, pos in zip(players_values, players_pos):
if len(player["Entries"]) > 0:
color: str = player["Color"]
if player is not players_values[-1]:
a: np.ndarray = np.array(player["Entries"])
values: Tuple[Tuple[float, float]] = alignment_to_position(
entries=a, first_entry_weight=data["sb_first_entry_weight"]
)
else:
values: Tuple[Tuple[float, float]] = player["Entries"]
df_player = pd.DataFrame(
np.array(values), columns=["x", "y"]
).fillna(np.array(values).mean())
mean_df_normal = (
df_player.fillna(value=df_player.mean())
.rolling(data["sb_rolling_window_size"], min_periods=1)
.mean()
.iloc[
max(
0,
data["sb_rolling_window_size"]
- data["sb_first_entry_weight"],
) :,
:,
]
)
mean_df = vis.map_to_circle(mean_df_normal)
mean_df["alpha"] = np.logspace(-0.5, 0, mean_df.shape[0])
ha = (
"left"
if data["legend_text_alignment"] == 0
else "center"
if data["legend_text_alignment"] == 1
else "right"
)
s = np.logspace(-1.2, 1.5, mean_df.shape[0]) * math.sqrt(
(data["hs_scale"]) / 100.0
)
plt.plot(mean_df["x"], mean_df["y"], color=color, **PLOT_KWARGS)
self.signal.emit()
prev_markers: Tuple = (
"o",
"x",
"*",
"+",
"<",
"^",
">",
"v",
"",
"$" + data["le_previous_custom"] + "$",
)
last_markers: Tuple = (
"o",
"x",
"*",
"+",
"<",
"^",
">",
"v",
"",
"$" + data["le_current_custom"] + "$",
)
kwargs: Dict[str, str] = {
"marker": prev_markers[data["previous_marker"]]
}
for i in range(mean_df.shape[0]):
if i == mean_df.shape[0] - 1:
kwargs["marker"]: str = last_markers[data["current_marker"]]
row: pd.DataFrame = pd.DataFrame(mean_df.iloc[i, :]).transpose()
for alpha, scale in zip(
np.linspace(row["alpha"].values[-1], 0.0, 10) ** 8,
np.linspace(s[i], s[i] * 1.1, 4),
):
kwargs["alpha"]: float = alpha
kwargs["s"]: float = scale
if i == mean_df.shape[0] - 1:
kwargs["marker"]: str = last_markers[
data["current_marker"]
]
kwargs["s"]: float = (
scale * data["hs_current_scale"] / 10.0
)
plt.scatter(data=row, x="x", y="y", color=color, **kwargs)
self.signal.emit()
first_row: pd.DataFrame = pd.DataFrame(
mean_df_normal.iloc[0, :]
).transpose()
last_row: pd.DataFrame = pd.DataFrame(
mean_df_normal.iloc[-1, :]
).transpose()
if first_row["y"].values[0] >= 0.334:
y_o = 0
elif first_row["y"].values[0] <= -0.334:
y_o = 2
else:
y_o = 1
if first_row["x"].values[0] <= -1.0 / 3.0:
x_o = 0
elif first_row["x"].values[0] >= 1.0 / 3.0:
x_o = 2
else:
x_o = 1
if last_row["y"].values[0] >= 1.0 / 3.0:
y = 0
elif first_row["y"].values[0] <= -1.0 / 3.0:
y = 2
else:
y = 1
if last_row["x"].values[0] <= -1.0 / 3.0:
x = 0
elif last_row["x"].values[0] >= 1.0 / 3.0:
x = 2
else:
x = 1
p_o_al: Tuple[float, float] = al[y_o][x_o]
p_al: Tuple[float, float] = al[y][x]
plt.annotate(
player["Name"] + ":\n{} -> {}".format(p_o_al, p_al),
xy=pos,
color=color,
ha=ha,
va="center",
fontsize=fontsize,
fontweight="semibold",
)
vis.plot_foreground(
tight=False,
kwargs={"title": t, "alignment": alignment, "fontsize": fontsize * 1.1},
)
self.signal.emit()
print(compute_time(time.time() - t_start))
title: str = (
data["le_image_title"].replace(" ", "_").lower()
if data["chb_image_title"]
else "party_players_alignment"
)
new_title: str = ""
for c in title:
if "-123456789_abcdefghijklmnopqrstuvwxyz".find(c) != -1:
new_title += c
else:
new_title += "_"
title: str = new_title
ext: str = ".png" if data["image_format"] < 2 else ".jpg"
f_path = os.path.join(final_file, title + ext)
print("Starting saving data")
im_size: int = min(data["sb_image_dpi"], 720)
self.savefig(
temp_file, im_size, f_format="png", quality=6, transparency=True
)
self.signal.emit()
self.savefig(f_path)
except Exception:
tr.print_exc()
def savefig(self, out, size=None, f_format=None, transparency=None, quality=None):
"""
Method that outputs the final version of the image into the output folder
:param out: fullpath of the output file
:param size: (optional) size value converted into dpi for the matplotlib.pyplot.savefig method
:param f_format: (optional) ['png' or 'jpeg'] override output file format, default is based on user choice data
:param transparency: (optional) override output transparency if file format is 'png', default is based on
user choice data
:param quality: (optional) [1 <-> 12] override jpeg file quality if file format is 'jpeg', default is based on user
choice data
:type out: str
:type size: int
:type f_format: str
:type transparency: bool
:type quality: int
"""
metadata: Dict[str, str] = METADATA
metadata["Creation Time"]: str = time.ctime()
dpi: float = (
size / (72 / 100 * 5.0)
if size
else self.__data["sb_image_dpi"] / (72 / 100 * 5.0)
)
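        # Descriptive note: the figure built in plot_background() is 5x5 inches, so dividing
        # the requested size by (72 / 100 * 5.0) = 3.6 is how it is converted into the dpi
        # value handed to plt.savefig below.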
f_format = (
f_format
if f_format
else "png"
if self.__data["image_format"] < 2
else "jpeg"
)
transparency = (
transparency
if transparency
else True
if self.__data["image_format"] == 1
else False
)
quality = (
round(np.linspace(0, 95, 12)[quality - 1])
if quality
else round(np.linspace(0, 95, 12)[self.__data["hs_jpeg_qual"] - 1])
)
plt.savefig(
fname=out,
dpi=dpi,
format=f_format,
transparent=transparency,
pil_kwargs={"quality": int(round(quality)), "metadata": metadata},
)
try:
self.finished.emit()
except RuntimeError:
pass # Worker already deleted
|
AlignR
|
/AlignR-2.0.5.tar.gz/AlignR-2.0.5/AlignmentReporter/UI/py/classutils.py
|
classutils.py
|
import json
import os
import pickle
import sys
import time
import traceback as tr
from typing import List, Dict
import matplotlib.pyplot as plt
import numpy as np
from PySide2.QtCore import QFile, Qt, Signal, SIGNAL, SLOT, QThread
from PySide2.QtGui import QPixmap, QIcon
from PySide2.QtUiTools import QUiLoader
from PySide2.QtWidgets import (
QSizePolicy,
QMainWindow,
QApplication,
QLabel,
QWidget,
QLayout,
QStyle,
QLineEdit,
)
import AlignmentReporter.UI.Qt.AlignmentReporterRessources as ar_r
import AlignmentReporter.Vizualisation as vis
from AlignmentReporter.UI.py.default_parameters import BACKGROUND_KWARGS, METADATA
from AlignmentReporter.UI.py.classutils import MyQLabel, Worker
from AlignmentReporter.UI.py.funcutils import (
timeit,
compute_time,
alignment_to_position,
)
from AlignmentReporter.UI.py.typed_dict import DataDict, PlayerDict
ar_r.qInitResources # avoid import optimization removing the previous line
_path = __file__.split("UI")[0]
_icon_path = os.path.join(_path, "UI", "Qt")
with open(os.path.join(_path, "UI", "Qt", "style.css"), "r") as f:
_style_sheet: str = f.read()
class SettingWindow(QMainWindow):
def __init__(self, parent=None):
"""
Subclass used for the child window with the graph customization parameters
:param parent: (deprecated) Set the parent window instance for this subclass instance
:type parent: QWidget or QMainWindow
"""
# Window setup
super(SettingWindow, self).__init__()
loader: QUiLoader = QUiLoader()
file: QFile = QFile(os.path.join(_path, "UI", "Qt", "imageSettings.ui"))
file.open(QFile.ReadOnly)
self.centralWidget: QMainWindow = loader.load(file, self)
file.close()
# self.setParent(parent)
self.setWindowTitle("Image Settings")
self.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Minimum)
# self.setMinimumSize(self.centralWidget.minimumSize())
self.setStyleSheet(_style_sheet)
self.assignWidgets()
self.setWindowFlags(Qt.Window)
self.setWindowIcon(
QIcon(
os.path.join(
os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
"AlignmentTool.ico",
)
)
)
# Plugin setup
# Custom variables
# Init Widgets
# Custom functions calls
self.close()
####################################################################################################################
# Default functions #
####################################################################################################################
def assignWidgets(self):
"""
Link Widgets to functions
"""
pass
####################################################################################################################
# Custom UI functions #
####################################################################################################################
####################################################################################################################
# Custom Effective functions #
####################################################################################################################
@staticmethod
def print_click():
""" "
PLACEHOLDER FUNCTION
"""
print("Button clicked")
class MainWindow(QMainWindow):
advanced: Signal = Signal()
def __init__(self, savefile):
"""
        Subclass used for the main window of the Alignment Reporter tool
:param savefile: name string for the saving file e.g: "Celtaidd"
:type savefile: str
"""
# Window setup
super(MainWindow, self).__init__()
self.setStyleSheet(_style_sheet)
loader: QUiLoader = QUiLoader()
ui_file: QFile = QFile(os.path.join(_path, "UI", "Qt", "mainWindow.ui"))
ui_file.open(QFile.ReadOnly)
self.centralWidget = loader.load(ui_file, self)
ui_file.close()
self.setWindowIcon(
QIcon(
os.path.join(
os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
"AlignmentTool.ico",
)
)
)
self.settingsUI: SettingWindow = SettingWindow(self)
self.setWindowTitle("Alignment Reporter v2.0")
self.settingsUI.setWindowTitle("Image Settings")
self.__save = savefile
self.assignWidgets()
self.setWindowFlags(Qt.Window)
# Plugin setup
# Custom variables
self.datapath: str = os.path.join(_path, "data")
self.savefile: str = os.path.join(self.datapath, self.__save + ".pkl")
self.savefile_json: str = os.path.join(self.datapath, self.__save + ".json")
self.data = dict()
self.__TMP: str = os.path.join(self.datapath, "TMP.pkl")
self.__Final: str = os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(__file__))), "out"
)
self.__player_data: dict = dict()
self.__fontsize: int = 8
self.finished: bool = True
self.loop: QThread = QThread()
self.workers: List[QThread] = list()
# Init Widgets
self.mutate_widget(self.centralWidget.l_image)
self.centralWidget.f_progress_bar.hide()
self.centralWidget.prb_preview.setValue(0)
self.centralWidget.l_image.resized.connect(self.resize_image)
self.centralWidget.cob_players_select.setFocus()
group = (
self.centralWidget.line,
self.centralWidget.line_2,
self.centralWidget.line_3,
self.centralWidget.line_4,
self.centralWidget.line_5,
self.centralWidget.line_6,
self.centralWidget.line_7,
)
for widget in group:
widget.setProperty(
"holder", True
) # Stylesheet property for holding widgets
# Custom functions calls
self.show()
####################################################################################################################
# Default functions #
####################################################################################################################
def assignWidgets(self):
"""
Method used to connect QSignals to other Methods
"""
self.centralWidget.tb_settings.released.connect(self.settingsUI.show)
self.centralWidget.pb_save_quit.released.connect(self.close)
self.centralWidget.pb_set_color.released.connect(self.set_player_color)
self.centralWidget.pb_set_party_color.released.connect(self.set_party_color)
self.centralWidget.pb_set_name.released.connect(self.set_player_name)
self.centralWidget.pb_set_party_name.released.connect(self.set_party_name)
self.centralWidget.pb_set_title.released.connect(self.set_title)
self.centralWidget.pb_add_player_entry.released.connect(self.add_entry)
self.centralWidget.pb_delete_player_entry.released.connect(self.del_entry)
self.centralWidget.pb_delete_player_all_entries.released.connect(
self.clear_entries
)
self.centralWidget.pb_save_player.released.connect(self.save_player)
self.centralWidget.pb_del_player.released.connect(self.del_player)
self.centralWidget.cob_players_select.currentIndexChanged.connect(
self.update_player
)
self.centralWidget.pb_generate.released.connect(self.run_generate_image)
self.centralWidget.pb_save.released.connect(self.save)
# self.centralWidget.gb_add_auto_party.toggled.connect(self.clicked_party_player)
self.advanced.connect(self.progress_update)
def change_signals_block_state(self, state):
"""
Method used to un/block QSignals on critical connections when an infinite loop is possible
:param state: Set the parent window instance for this subclass instance
:type state: bool
"""
self.centralWidget.cob_players_select.blockSignals(state)
####################################################################################################################
# Custom functions #
####################################################################################################################
@staticmethod
def print_click():
""" "
PLACEHOLDER FUNCTION
"""
print("Button clicked")
def mutate_widget(self, old):
"""
Method used to "change" the class for the image label. The new instance emits a custom QSignal when it is
resized.
:param old: Widget used to hold the image preview
:type old: QLabel
"""
layout: QLayout = old.parent().layout()
old: QLabel = self.centralWidget.l_image
old_name: str = old.objectName()
old_style: QStyle = old.style()
new_label: MyQLabel = MyQLabel()
new_label.setPixmap(old.pixmap())
new_label.setSizePolicy(old.sizePolicy())
new_label.setMinimumSize(old.minimumSize())
new_label.setMaximumSize(old.maximumSize())
new_label.setParent(old.parent())
layout.replaceWidget(old, new_label)
old.deleteLater()
new_label.setObjectName(old_name)
new_label.setStyle(old_style)
new_label.setStyleSheet(old.styleSheet())
self.centralWidget.l_image = new_label
def close(self):
"""
Method used to save data and close the window
"""
self.save()
self.settingsUI.close()
super(MainWindow, self).close()
def show(self):
"""
Method used to show the window and load the image
"""
try:
self.load()
self.image = None
except FileNotFoundError:
pass
super(MainWindow, self).show()
def save(self, js=True):
"""
Method used to save data to a pickle file.
:param js: (optional) If True a json will also be saved (mainly for debug and to read data without launching
the tool), default is True
:type js: bool
"""
self.update_data()
try:
with open(self.savefile, "wb") as f:
try:
pickle.dump(self.data, f)
except TypeError:
print(self.data)
if js:
with open(self.savefile_json, "w", encoding="utf-8") as save_file:
json.dump(self.data, save_file, indent=4, sort_keys=True)
except Exception:
tr.print_exc()
def save_player(self):
"""
Method used to save current player parameters data into the players' data dictionary
"""
self.update_data()
c_widget: QWidget = self.centralWidget
player: dict = self.data["player"]
self.change_signals_block_state(True)
if c_widget.cob_players_select.findText(player["Name"]) == -1:
new_player_list: List[str] = list()
new_player_list.append(player["Name"])
selected_player_name: str = c_widget.cob_players_select.currentText()
for row in range(c_widget.cob_players_select.count()):
text: str = c_widget.cob_players_select.itemText(row)
if text != "New Player":
new_player_list.append(text)
try:
del self.data["players"][
c_widget.cob_players_select.currentText()
]
except KeyError:
pass
if selected_player_name != "New Player":
new_player_list.remove(selected_player_name)
new_player_list: List[str] = ["New Player"] + sorted(new_player_list)
c_widget.cob_players_select.clear()
for p in new_player_list:
c_widget.cob_players_select.addItem(p)
c_widget.cob_players_select.setCurrentIndex(
c_widget.cob_players_select.findText(player["Name"])
)
elif (
c_widget.cob_players_select.findText(player["Name"])
is not c_widget.cob_players_select.currentIndex()
):
c_widget.cob_players_select.setCurrentIndex(
c_widget.cob_players_select.findText(player["Name"])
)
try:
self.data["players"][player["Name"]] = player
except KeyError:
self.data["players"] = dict()
self.data["players"][player["Name"]] = player
self.change_signals_block_state(False)
def del_player(self):
"""
Method used to remove all current player data
"""
try:
c_widget: QWidget = self.centralWidget
del self.data["players"][c_widget.cob_players_select.currentText()]
c_widget.cob_players_select.removeItem(
c_widget.cob_players_select.currentIndex()
)
except KeyError:
pass
def update_player(self):
"""
Method used to switch to new selected player data
"""
try:
c_widget: QWidget = self.centralWidget
player_name: str = c_widget.cob_players_select.currentText()
if player_name != "New Player":
try:
self.data["player"]: PlayerDict = self.data["players"][player_name]
except KeyError:
tr.print_exc()
else:
self.data["player"]: PlayerDict = {
"Name": "Player Name",
"Color": "Black",
"Entries": list(),
}
self.update_ui()
self.update_data()
except Exception:
tr.print_exc()
def progress_update(
self, set=False, start=None, stop=None, i=1, current=None, fullstop=False
):
"""
        Method used to control the main QProgressBar and call the 'show' and 'hide' methods of its QFrame container
:param set: (optional) Reset the progress to 0 or given value
:param start: (optional) If given and 'set=True', will set the minimum value for the progress bar, default is 0
:param stop: (optional) If given and 'set=True', will set the maximum value for the progress bar, default is 100
:param i: (optional) If given, will increase the current value by the given value, default is 1
:param current: (optional) If given, will hardwrite the current value
:param fullstop: (optional) If given, will stop and hide the progress bar
:type set: bool
:type start: int or float
:type stop: int or float
:type i: int
:type current: int or float
:type fullstop: bool
"""
bar = self.centralWidget.prb_preview
if set:
if start:
self.__start: int = start
else:
self.__start: int = 0
if stop:
self.__stop: int = stop
else:
self.__stop: int = 100
self.__i = i
if current:
self.__current: int = current
else:
self.__current: int = self.__start
else:
self.__i: int = i
if current:
self.__current: int = current
else:
self.__current += self.__i
bar.setMinimum(self.__start)
bar.setMaximum(self.__stop)
bar.setValue(self.__current)
if bar.value() >= bar.maximum() or fullstop:
self.centralWidget.f_progress_bar.hide()
self.update()
@property
def current_player_data(self):
"""
Property that tries to return the current player data dictionary extracted from UI placeholderText values
:return: Current player data dictionary
:rtype: PlayerDict
"""
try:
self.__player_data = {
"Name": self.centralWidget.le_player_name.placeholderText(),
"Color": self.centralWidget.le_player_color.placeholderText(),
"Entries": [
self.centralWidget.lw_player_entries.item(entry).text()
for entry in range(self.centralWidget.lw_player_entries.count())
],
}
return self.__player_data
except Exception:
tr.print_exc()
@current_player_data.setter
def current_player_data(self, data=None):
"""
Property.setter that sets current player data dictionary values into UI placeholderText values
:param data: (optional) override current player data and force given values
:type data: PlayerDict
"""
if data:
self.__player_data: Dict[str, List[str]] = data
c_widget: QWidget = self.centralWidget
c_widget.le_player_name.setText("")
c_widget.le_player_name.setPlaceholderText(
self.__player_data["Name"]
if "Name" in self.__player_data.keys()
else "Player Name"
)
c_widget.lw_player_entries.clear()
if "Entries" in self.__player_data.keys():
for entry in self.__player_data["Entries"]:
self.add_entry(entry)
c_widget.le_player_color.setText("")
c_widget.le_player_color.setPlaceholderText(
self.__player_data["Color"]
if "Color" in self.__player_data.keys()
else "Black"
)
def load(self, js=True):
"""
Method that loads save data into UI
:param js: (optional) Force json save file
:type js: bool
"""
with open(self.savefile, "rb") as data_file:
try:
self.data = pickle.load(data_file)
except EOFError:
tr.print_exc()
if js:
try:
with open(self.savefile_json, "r", encoding="utf-8") as data_file:
self.data = json.load(data_file)
except EOFError:
tr.print_exc()
try:
self.update_ui(True)
except KeyError:
tr.print_exc()
@property
def data(self):
"""
Property that returns players data
:return: Current players' data values
:rtype: DataDict
"""
return self.__data
@data.setter
def data(self, data):
"""
Property.setter that override players data
:param data: New players' data values
:type data: DataDict
"""
self.__data = data.copy()
@property
def image(self):
"""
Property that returns the QLabel used to show the image preview
:return: the QLabel holding the image preview
        :rtype: PySide2.QtWidgets.QLabel
"""
return self.centralWidget.l_image
@image.setter
def image(self, img=None):
"""
Property that generate the preview image
:param img: (optional) if given, will override the image preview with the one given
:type img: np.ndarray
"""
# self.update_data()
try:
f_path = self.__TMP
if not img:
line_qual = int(
360
* (10 ** np.linspace(-0.5, 3.8, 100)[self.data["hs_line_quality"]])
)
t = (
self.data["le_image_title"]
if self.data["chb_image_title"]
else False
)
alignment = (
"left"
if self.data["title_alignment"] == 0
else "right"
if self.data["title_alignment"] == 2
else "center"
)
vis.plot_background(n=line_qual, kwargs=BACKGROUND_KWARGS)
vis.plot_foreground(
tight=False,
kwargs={
"title": t,
"alignment": alignment,
"fontsize": self.__fontsize * 1.1,
},
)
self.savefig(
f_path,
min(self.data["sb_image_dpi"], 500),
"png",
quality=6,
transparency=True,
)
self.centralWidget.l_image.setPixmap(
QPixmap(f_path).scaled(
np.array(self.centralWidget.l_image.size()) * 1,
mode=Qt.SmoothTransformation,
aspectMode=Qt.KeepAspectRatio,
)
)
except KeyError:
pass
def resize_image(self):
"""
Method called by QSignal that generates a resized preview image
"""
self.centralWidget.l_image.setPixmap(
QPixmap(self.__TMP).scaled(
self.centralWidget.l_image.size() * 1,
mode=Qt.SmoothTransformation,
aspectMode=Qt.KeepAspectRatio,
)
)
def savefig(self, out, size=None, f_format=None, transparency=None, quality=None):
"""
Method that outputs the final version of the image into the output folder
:param out: absolute path of the output file
:param size: (optional) size value converted into dpi for the matplotlib.pyplot.savefig method
:param f_format: (optional) ['png' or 'jpeg'] override output file format, default is based on user choice data
:param transparency: (optional) override output transparency if file format is 'png', default is
based on user choice data
        :param quality: (optional) [1 <-> 12] override jpeg file quality if file format is 'jpeg', default is based on user
choice data
:type out: str
:type size: int
:type f_format: str
:type transparency: bool
:type quality: int
"""
self.update_data()
metadata = METADATA
metadata["Creation Time"] = time.ctime()
dpi = (
size / (72 / 100 * 5.0)
if size
else self.data["sb_image_dpi"] / (72 / 100 * 5.0)
)
f_format = (
f_format if f_format else "png" if self.data["image_format"] < 2 else "jpeg"
)
transparency = (
transparency
if transparency
else True
if self.data["image_format"] == 1
else False
)
quality = (
round(np.linspace(0, 95, 12)[quality - 1])
if quality
else round(np.linspace(0, 95, 12)[self.data["hs_jpeg_qual"] - 1])
)
plt.savefig(
fname=out,
dpi=dpi,
format=f_format,
transparent=transparency,
pil_kwargs={"quality": int(round(quality)), "metadata": metadata},
)
def update_data(self):
"""
Update data dictionary based on UI values
"""
try:
data: DataDict = dict()
c_widget: QWidget = self.centralWidget
s_c_widget: QWidget = self.settingsUI.centralWidget
data["chb_image_title"] = c_widget.chb_image_title.isChecked()
data["le_image_title"] = c_widget.le_image_title.placeholderText()
data["sb_first_entry_weight"] = c_widget.sb_first_entry_weight.value()
data["sb_rolling_window_size"] = c_widget.sb_rolling_window_size.value()
data["cob_players_select"] = c_widget.cob_players_select.currentIndex()
data["gb_add_auto_party"] = c_widget.gb_add_auto_party.isChecked()
data["le_party_color"] = c_widget.le_party_color.placeholderText()
data["le_party_name"] = c_widget.le_party_name.placeholderText()
data[
"cob_party_starting_alignment"
] = c_widget.cob_party_starting_alignment.currentText()
data["rb_average"] = c_widget.rb_party_average.isChecked()
data["out_path"] = (
os.path.realpath((s_c_widget.le_output_path.text()))
if s_c_widget.le_output_path.text()
else os.path.realpath((s_c_widget.le_output_path.placeholderText()))
)
data["image_format"] = (
0
if s_c_widget.rb_png.isChecked()
else 1
if s_c_widget.rb_png_transparency.isChecked()
else 2
)
data["hs_line_quality"] = s_c_widget.hs_line_quality.value()
data["hs_jpeg_qual"] = s_c_widget.hs_jpeg_qual.value()
data["sb_image_dpi"] = s_c_widget.sb_image_dpi.value()
data["hs_current_scale"] = s_c_widget.hs_current_scale.value()
data["hs_legend_h_offset"] = s_c_widget.hs_legend_h_offset.value()
data["hs_legend_v_offset"] = s_c_widget.hs_legend_v_offset.value()
data["hs_legend_v_offset"] = s_c_widget.hs_legend_v_offset.value()
data["hs_legend_stretch"] = s_c_widget.hs_legend_stretch.value()
data["hs_scale"] = s_c_widget.hs_scale.value()
data["le_party_color"] = c_widget.le_party_color.placeholderText()
data["le_party_name"] = c_widget.le_party_name.placeholderText()
data["legend_text_alignment"] = (
0
if s_c_widget.rb_legend_text_left.isChecked()
else 1
if s_c_widget.rb_legend_text_center.isChecked()
else 2
)
data["le_current_custom"] = s_c_widget.le_current_custom.text()
data["current_marker"] = (
0
if s_c_widget.rb_current_o.isChecked()
else 1
if s_c_widget.rb_current_x.isChecked()
else 2
if s_c_widget.rb_current_star.isChecked()
else 3
if s_c_widget.rb_current_plus.isChecked()
else 4
if s_c_widget.rb_current_left.isChecked()
else 5
if s_c_widget.rb_current_up.isChecked()
else 6
if s_c_widget.rb_current_right.isChecked()
else 7
if s_c_widget.rb_current_down.isChecked()
else 8
if s_c_widget.rb_current_none.isChecked()
else 9
)
data["le_previous_custom"] = s_c_widget.le_previous_custom.text()
data["previous_marker"] = (
0
if s_c_widget.rb_previous_o.isChecked()
else 1
if s_c_widget.rb_previous_x.isChecked()
else 2
if s_c_widget.rb_previous_star.isChecked()
else 3
if s_c_widget.rb_previous_plus.isChecked()
else 4
if s_c_widget.rb_previous_left.isChecked()
else 5
if s_c_widget.rb_previous_up.isChecked()
else 6
if s_c_widget.rb_previous_right.isChecked()
else 7
if s_c_widget.rb_previous_down.isChecked()
else 8
if s_c_widget.rb_previous_none.isChecked()
else 9
)
data["title_alignment"] = (
0
if s_c_widget.rb_title_left.isChecked()
else 1
if s_c_widget.rb_title_center.isChecked()
else 2
)
data["player"] = self.current_player_data
if "players" in self.data.keys():
data["players"] = self.data["players"]
else:
data["players"] = dict()
self.data = data
self.__Final = data["out_path"]
except Exception:
tr.print_exc()
def update_ui(self, first_call=False):
"""
Override UI values safely
        :param first_call: (optional) Also call the method that updates the player values in the UI, default is False
:type first_call: bool
"""
self.change_signals_block_state(True)
try:
c_widget: QWidget = self.centralWidget
s_c_widget: QWidget = self.settingsUI.centralWidget
current_player: str = c_widget.cob_players_select.currentText()
c_widget.chb_image_title.setChecked(self.data["chb_image_title"])
c_widget.cob_players_select.clear()
c_widget.cob_players_select.addItem("New Player")
for entry in sorted(self.data["players"]):
c_widget.cob_players_select.addItem(entry)
c_widget.cob_players_select.setCurrentIndex(
c_widget.cob_players_select.findText(current_player)
)
c_widget.le_image_title.setPlaceholderText(
self.data["le_image_title"]
if self.data["le_image_title"]
else "Party Players Alignment Chart"
)
c_widget.sb_first_entry_weight.setValue(self.data["sb_first_entry_weight"])
c_widget.sb_rolling_window_size.setValue(
self.data["sb_rolling_window_size"]
)
c_widget.gb_add_auto_party.setChecked(self.data["gb_add_auto_party"])
c_widget.cob_party_starting_alignment.setCurrentIndex(
c_widget.cob_party_starting_alignment.findText(
self.data["cob_party_starting_alignment"]
)
)
if self.data["rb_average"]:
c_widget.rb_party_average.setChecked(True)
else:
c_widget.rb_party_cumul.setChecked(True)
c_widget.le_party_color.setPlaceholderText(self.data["le_party_color"])
c_widget.le_party_name.setPlaceholderText(self.data["le_party_name"])
if current_player in self.data["players"].keys():
p = self.data["players"][current_player]
c_widget.le_player_color.setPlaceholderText(p["Color"])
c_widget.le_player_name.setPlaceholderText(p["Name"])
c_widget.lw_player_entries.clear()
for entry in p["Entries"]:
c_widget.lw_player_entries.addItem(entry)
elif current_player == "New Player":
c_widget.le_player_color.setPlaceholderText("Black")
c_widget.le_player_name.setPlaceholderText("Player Name")
c_widget.lw_player_entries.clear()
s_c_widget.rb_png.setChecked(True) if self.data[
"image_format"
] == 0 else s_c_widget.rb_png_transparency.setChecked(True) if self.data[
"image_format"
] == 1 else s_c_widget.rb_jpeg.setChecked(
True
)
s_c_widget.hs_current_scale.setValue(self.data["hs_current_scale"])
s_c_widget.hs_line_quality.setValue(self.data["hs_line_quality"])
s_c_widget.hs_jpeg_qual.setValue(self.data["hs_jpeg_qual"])
s_c_widget.sb_image_dpi.setValue(self.data["sb_image_dpi"])
s_c_widget.hs_legend_h_offset.setValue(self.data["hs_legend_h_offset"])
s_c_widget.hs_legend_v_offset.setValue(self.data["hs_legend_v_offset"])
s_c_widget.hs_legend_stretch.setValue(self.data["hs_legend_stretch"])
s_c_widget.hs_scale.setValue(self.data["hs_scale"])
s_c_widget.le_current_custom.setText(self.data["le_current_custom"])
s_c_widget.le_output_path.setText(self.data["out_path"])
s_c_widget.rb_current_o.setChecked(True) if self.data[
"current_marker"
] == 0 else s_c_widget.rb_current_x.setChecked(True) if self.data[
"current_marker"
] == 1 else s_c_widget.rb_current_star.setChecked(
True
) if self.data[
"current_marker"
] == 2 else s_c_widget.rb_current_plus.setChecked(
True
) if self.data[
"current_marker"
] == 3 else s_c_widget.rb_current_left.setChecked(
True
) if self.data[
"current_marker"
] == 4 else s_c_widget.rb_current_up.setChecked(
True
) if self.data[
"current_marker"
] == 5 else s_c_widget.rb_current_right.setChecked(
True
) if self.data[
"current_marker"
] == 6 else s_c_widget.rb_current_down.setChecked(
True
) if self.data[
"current_marker"
] == 7 else s_c_widget.rb_current_none.setChecked(
True
) if self.data[
"current_marker"
] == 8 else s_c_widget.rb_current_custom.setChecked(
True
)
s_c_widget.rb_previous_o.setChecked(True) if self.data[
"previous_marker"
] == 0 else s_c_widget.rb_previous_x.setChecked(True) if self.data[
"previous_marker"
] == 1 else s_c_widget.rb_previous_star.setChecked(
True
) if self.data[
"previous_marker"
] == 2 else s_c_widget.rb_previous_plus.setChecked(
True
) if self.data[
"previous_marker"
] == 3 else s_c_widget.rb_previous_left.setChecked(
True
) if self.data[
"previous_marker"
] == 4 else s_c_widget.rb_previous_up.setChecked(
True
) if self.data[
"previous_marker"
] == 5 else s_c_widget.rb_previous_right.setChecked(
True
) if self.data[
"previous_marker"
] == 6 else s_c_widget.rb_previous_down.setChecked(
True
) if self.data[
"previous_marker"
] == 7 else s_c_widget.rb_previous_none.setChecked(
True
) if self.data[
"previous_marker"
] == 8 else s_c_widget.rb_previous_custom.setChecked(
True
)
s_c_widget.le_previous_custom.setText(self.data["le_previous_custom"])
s_c_widget.rb_title_left.setChecked(True) if self.data[
"title_alignment"
] == 0 else s_c_widget.rb_title_center.setChecked(True) if self.data[
"title_alignment"
] == 1 else s_c_widget.rb_title_right.setChecked(
True
)
s_c_widget.rb_legend_text_left.setChecked(True) if self.data[
"legend_text_alignment"
] == 0 else s_c_widget.rb_legend_text_center.setChecked(True) if self.data[
"legend_text_alignment"
] == 1 else s_c_widget.rb_legend_text_right.setChecked(
True
)
s_c_widget.le_output_path.setText(self.__Final)
self.current_player_data = (
self.data["player"] if "player" in self.data.keys() else None
)
if first_call:
self.update_player()
except KeyError:
pass
except Exception:
tr.print_exc()
self.change_signals_block_state(False)
def set_color(self, in_le):
"""
Method that verifies if the color value entered in the given QLineEdit is valid, if so update QLabel
placeholder value and change focus
:param in_le: A QLineEdit widget to test
:type in_le: QLineEdit
"""
colors = (
"black",
"blue",
"brown",
"cyan",
"darkblue",
"darkcyan",
"darkgray",
"darkgrey",
"darkgreen",
"darkmagenta",
"darkred",
"gray",
"grey",
"green",
"lightblue",
"lightcyan",
"lightgray",
"lightgrey",
"lightgreen",
"lightmagenta",
"lightred",
"magenta",
"orange",
"red",
"white",
"yellow",
"pink",
"",
)
# FIXME: Some colors return an error, they need to be checked and possibly extended
if in_le is self.centralWidget.le_player_color:
if self.centralWidget.le_player_color.text().lower() in colors:
if self.centralWidget.le_player_color.text():
self.centralWidget.le_player_color.setPlaceholderText(
self.centralWidget.le_player_color.text().capitalize()
)
self.centralWidget.le_player_color.setText("")
self.centralWidget.le_player_entry.setFocus()
elif in_le is self.centralWidget.le_party_color:
if self.centralWidget.le_party_color.text().lower() in colors:
if self.centralWidget.le_party_color.text():
self.centralWidget.le_party_color.setPlaceholderText(
self.centralWidget.le_party_color.text().capitalize()
)
self.centralWidget.le_party_color.setText("")
self.centralWidget.le_player_name.setFocus()
self.update_data()
def set_player_color(self):
"""
Check and change the current player name in the GUI
"""
self.set_color(self.centralWidget.le_player_color)
def set_party_color(self):
"""
Check and change the party name in the GUI
"""
self.set_color(self.centralWidget.le_party_color)
def set_name(self, le_name):
"""
Method that checks a given QLineEdit for a new inputted value, then changes focus
:param le_name: A QLineEdit that may have a new text value
:type le_name: QLineEdit
"""
if le_name is self.centralWidget.le_player_name:
if self.centralWidget.le_player_name.text():
self.centralWidget.le_player_name.setPlaceholderText(
self.centralWidget.le_player_name.text()
)
self.centralWidget.le_player_name.setText("")
self.centralWidget.le_player_color.setFocus()
elif le_name is self.centralWidget.le_party_name:
if self.centralWidget.le_party_name.text():
self.centralWidget.le_party_name.setPlaceholderText(
self.centralWidget.le_party_name.text()
)
self.centralWidget.le_party_name.setText("")
self.centralWidget.le_party_color.setFocus()
self.update_data()
def set_party_name(self):
"""
Method that updates the party name if a new entry was given
"""
self.set_name(self.centralWidget.le_party_name)
def set_player_name(self):
"""
Method that updates the current player name if a new entry was given
"""
self.set_name(self.centralWidget.le_player_name)
def set_title(self):
"""
        Method that updates the image title if a new entry was given
"""
if self.centralWidget.le_image_title.text():
self.centralWidget.le_image_title.setPlaceholderText(
self.centralWidget.le_image_title.text()
)
self.centralWidget.le_image_title.setText("")
self.centralWidget.le_player_name.setFocus()
self.update_data()
def add_entry(self, entry=None):
"""
        Method that verifies whether a new alignment entry is valid and, if so, adds it to the QListWidget
:param entry: (optional) Overrides new entry value, default is None
:type entry: str
"""
alignement = (
"LG",
"LB",
"NG",
"NB",
"CG",
"CB",
"LN",
"TN",
"CN",
"LE",
"LM",
"NE",
"NM",
"CE",
"CM",
"L",
"N",
"T",
"C",
"G",
"B",
"E",
"M",
)
if not entry:
entry = self.centralWidget.le_player_entry.text().upper()
if entry in alignement:
self.centralWidget.lw_player_entries.addItem(entry)
self.centralWidget.le_player_entry.clear()
self.update_data()
def del_entry(self):
"""
Method that deletes the selected entry (or the last if none is selected) of the QListWidget
"""
if self.centralWidget.lw_player_entries.currentItem():
self.centralWidget.lw_player_entries.takeItem(
self.centralWidget.lw_player_entries.currentRow()
)
else:
self.centralWidget.lw_player_entries.takeItem(
self.centralWidget.lw_player_entries.count() - 1
)
def clear_entries(self):
"""
        Method that clears all current QListWidget entries
"""
self.centralWidget.lw_player_entries.clear()
def run_generate_image(self):
"""
Method that tries to render the output image of the graph and update the progress bar
"""
if self.finished:
self.finished = False
self.update_data()
data: DataDict = self.data
players: Dict[str, PlayerDict] = data["players"]
line_qual = int(
360 * (10 ** np.linspace(-0.5, 3.8, 100)[data["hs_line_quality"]])
)
tasks = 0
for play in players:
player: PlayerDict = players[play]
tasks += (
max(
0,
data["sb_first_entry_weight"] - data["sb_rolling_window_size"],
)
) * 2
tasks += (len(player["Entries"]) - 1) * 2
tasks += (
max(0, data["sb_first_entry_weight"] - data["sb_rolling_window_size"])
) * 2 + 2
self.progress_update(True, 0, 60 + tasks + line_qual, 1, 0)
try:
self.centralWidget.pb_generate.setEnabled(False)
worker: Worker = Worker(
(self.data, self.__Final, self.__TMP, self.__fontsize)
)
worker.moveToThread(self.loop)
worker.finished.connect(self.get_generated_image, Qt.QueuedConnection)
worker.signal.connect(self.progress_update, Qt.QueuedConnection)
worker.finished.connect(worker.deleteLater, Qt.QueuedConnection)
self.loop.started.connect(worker.generate_image, Qt.QueuedConnection)
self.loop.finished.connect(self.loop.quit, Qt.QueuedConnection)
self.loop.start()
self.loop.setPriority(QThread.LowPriority)
self.update()
self.workers.append(worker)
except Exception:
tr.print_exc()
self.centralWidget.pb_generate.setEnabled(True)
def get_generated_image(self):
"""
Method called when a QThread worker is done. Assign the image and return to the "default" GUI look
"""
self.update()
self.finished = True
self.loop.quit()
time.sleep(0.2)
self.image = self.__TMP
self.progress_update(fullstop=True)
self.centralWidget.pb_generate.setEnabled(True)
self.update()
PATH = __file__.split("__init__")[0]
def launch():
"""
    Instantiate a new QApplication and MainWindow instance and take stdin input for the savefile name
"""
global app
try:
app = QApplication(sys.argv)
app.setApplicationName("partyAlignmentChartTool")
app.setApplicationDisplayName("Party Alignment Chart Tool")
app.setApplicationVersion("1.0.2")
app.setOrganizationName("Julien Alardot")
win = MainWindow(input("Savefile Name: "))
win.resize(0, 0)
app.setWindowIcon(QIcon(os.path.join(PATH, "UI", "AlignmentTool.icon")))
app.lastWindowClosed.connect(app.quit)
app.setActiveWindow(win)
app.focusWindow()
app.exec_()
app.deleteLater()
del app
except Exception:
tr.print_exc()
|
AlignR
|
/AlignR-2.0.5.tar.gz/AlignR-2.0.5/AlignmentReporter/UI/py/__init__.py
|
__init__.py
|
from PySide2 import QtCore
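# The remainder of this module is Qt resource data (presumably generated by pyside2-rcc);
# the byte strings below embed the application's PNG images verbatim.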
qt_resource_data = b"\
\x00\x00%>\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x02\x00\x00\x00\x02\x00\x08\x06\x00\x00\x00\xf4x\xd4\xfa\
\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\x08d\x88\
\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\
\x01\x00\x9a\x9c\x18\x00\x00 \x00IDATx\x9c\xed\
\xddy\x98\x5ce\x99\xf7\xf1o'!\x1b\x09K\x02\x02\
J\xd8\x12\x04\xc2\xbe\x08\x08((\x8c\x22\xa0\x80\x0a.\
\xb8\xa3\xa33\xa3(0\x8e\x8a0\xea\xa02\x88\xe22\
\x8e\xa3\xa2\x82\x0a.\x0c\x0a\x8c\x08\x0a\x8a\xb8!;J\
\x22\xfb\x0eB\x12BB\x84@\xd2I\xba\xdf?\x9e\xea\
7E\xe8\xa4\xbb\xba\xce9\xf7Y\xbe\x9f\xeb\xfa]\x1d\
\x01\xab\xef\xaa\xd4\xa9\xfb\xae\xb3<\x07$I\x92$I\
\x92$I\x92$I\x92$I\x92$I\x92$I\x92\
$I\x92$I\x92$I\x92$I\x92$I\x92$\
I\x92$I\x92$I\x92$I\x92$I\x92$I\
\x92$I\x92$I\x92$I\x92$I\x92$I\x92\
$I\x92$I\x92$I\x92$I\x92$I\x92$\
I\x92$I\x92$I\x92$I\x92$I\x92$I\
\x92$I\x92$I\x92$I\x92$I\x92$I\x92\
$I\x92$I\x92$I\x92$I\x92$I\x92$\
I\x92$I\x92$I\x92$I\x92$I\x92$I\
\x92$I-=\xd1\x05H\x92TQ\xa3\x81\x99\xc0\xd6\
\xc0\xf3\x81uI}\xf5i`\x1ep\x1f0\x1bX\x14\
U\xa0$I\xca\xc6\x86\xc0\x07\x80\xcb\x81\xc5@\xff\x10\
\xe9#\x0d\x01g\x02\xfb\x04\xd4+I\x92\xba\xb0=p\
\x1e\xd0\xcb\xd0M\x7fM\xb9\x138\x0e\x98Pl\xf9\x92\
$\xa9\x13\xeb\x03\xdf\x04V\xd0]\xe3_5\x8f\x00\xef\
\xc4C\xf1\x92$\x95\xce\xcb\x81\xbf\x91m\xe3_5\xbf\
\x01\xa6\x15\xf5\x84$I\xd2\x9a}\x00XN\xbe\xcd\x7f\
\xf3I\xc3\x86$I\x0a\xf4I\x8ai\xfc\xed\xe9\x05\
\x8e.\xe0\xb9I\x92\xa4A\x9cJ\xf1\xcd\x7f \xcb\x81\
\xc3\xf3\x7f\x8a\x92$\xa9]d\xf3\x1f\xc83\xc0ny\
?QI\x92\x94\x94\xa1\xf9\x0f\xe4^`R\xbeOW\
\x92$\x95\xa9\xf9\x0f\xe4\xab\xb9>cI\x92\x1a\xae\x8c\
\xcd\xbf\x9ft>\xc0\x0e9>oI\x92\x1a\xab\xac\xcd\
\x7f \xe7\xe7\xf7\xd4]\x81Hj\xba\xc9\xa4\xb5\xcd7\
X\xe5\xe7\x94\xd6\xbf\x9bL:\x16\xd9\xfesm`<\
0f5\x19E\xfa\xf0\xea#\xad\x9e\xb6\x82\xf4mf\
)\xb0\xa4\xed\xe7b\xe0\xa9\xb6<\x01,l\xcb\xe3\xa4\
\xeb\xa3\xe7\xb5\xe2\x0dU\x94\xa5S\x81\x93\xa3\x8b\x18B\
\x1f\xb0\x15\xf0@\x1e\x0f>&\x8f\x07\x95T\x0a\x1b\x02\
\x9b\x91V\x19\x1b,\x1b\x01\xe3r\xfa\xdd=\xa4;\xa5\
\x8dn\xfbg\x93\xbb|\xcc\xa5\xc0\x1c\xd2\xcal\x03y\
\x18x\x10\xb8\xbf\x95\xc7\xba\xfc\x1dj\x86*4\x7fH\
\xc3\xf4\xdbH\xf5f\xce=\x00R\xb5M%\xdd\x8a\xf4\
\x85\xad\x9f\x03\x99\x01\xac\x13XW\x94\xc5\xc0=\xc0]\
m\xb9\x03\xb8\x0dX\x10X\x97\xca\xa3*\xcd\x7f\xc0M\
\xc0\xeey<\xb0\x03\x80T\x0d\x13Iw#\xdb\x11\xd8\
\xa9\xf5sG\xd2\xb7|\x0d\xcf\x5c\xd2 \xf0W\xe0\x16\
\xe0/\xc0,\xd2\xbd\xdb\xd5\x0cUk\xfe\x90\x0e\xa7m\
H:$\x96)\x07\x00\xa9|&\x93&\xfe=\x81=\
\x80]I\xc7\x01GE\x16US}\xc0\xdd\xa4oY\
7\xb4r#\xe9\x9c\x04\xd5K\x15\x9b\xff\x80W\x02\x97\
G\x17!)[cHM\xfe_\x80\xef\x02\xb7\x92\xfd\
mGMgY\x01\xcc\x06\xce\x02\xde\x05l\x8b_\x96\
\xaa\xae\xecg\xfb\x0f\x95\x0fe\xff\x92H*\xdax`\
\x7f\xe0\x14\xd2D\xff$\xf1\x1f.f\xe8\xcc\x07.\x06\
N \x0dl\xed'7\xaa\xdc\xaa\xde\xfc\xfb\x81/d\
\xfe\xaaH\xca\xddX\xe0e\xc0g\x80?\x90\xced\x8f\
\xfe01\xddg\x11\xf03\xe0\x83\xa4s3TNu\
h\xfe\xfd\xc0\xb7\xb3~a$\xe5c\x07\xe0x\xe0R\
\xd2\xb1\xe4\xe8\x0f\x0f\x93\x7f\x1e\x01\xce!\xdd\xceu=\
T\x06ui\xfe\xfd\xc0\x0f2~m$ed\x12\xf0\
:\xe0l\xd2\xb5\xe9\xd1\x1f\x16&6\xcbI{{>\
\x0a\xccD\x11\xea\xd4\xfc\xfb\x81\x1fe\xfb\xf2H\xea\xc6\
\xa6\xc0?\x03\x97\x91V\xb5\x8b\xfe\x800\xe5\xcd]\xa4\
c\xb8\xfb\xe1\x95\x1cE\xa8[\xf3\xef\xc7\x01@\x0a\xb7\
\x1d\xf0\xef\xa4K\xc6\xa2?\x10L5\xf3(\xf05\xe0\
\xe5x\x22a\x1e\xea\xd8\xfc\xfbq\x00\x90BlCj\
\xfa\xb3\x88\xff\x100\xf5\xca\x5c\xd2-_\xf7\xc1\xcb\x0c\
\xb3P\xd7\xe6\xdf\x8f\x03\x80T\x98\xe9\xa4\x05Cn!\
~\xc37\xcd\xc8\xfd\xc0gIk\x0e\xa8sun\xfe\
\xfd8\x00H\xb9Z\x0fx/\xf0G\xe27v\xd3\xec\
\x5c\x03\xfc\x13\xb0>\x1a\x8e\xba7\xff~\x1c\x00\xa4\xcc\
\x8d\x01\x0e#\xdds\xfb\x19\xe27rc\xda\xf3\x0cp\
\x1ep\x00Z\x9d&4\xff~\x1c\x00\xa4\xcc\xcc\x00N\
'\xddZ6z\xc36f8\xb9\x13\xf80\xe9\xee\x8f\
J\x9a\xd2\xfc\xfbq\x00\x90\xba\xb2\x16i\x91\x96_\x93\
n\x00\x13\xbdA\x1b3\x92<C\xbag\xc4\x9e4[\
\x93\x9a\x7f?\x0e\x00\xd2\x88L'}\xdb\x9fK\xfcF\
lL\x96\xb9\x0ex3i\xb8m\x92\xa65\xff~\x1c\
\x00\xa4\x8e\x1cDZ\xab\xdd;\xeb\x99\xba\xe7a\xe0$\
`\x0a\xf5\xd7\xc4\xe6\xdf\x8f\x03\x804\xa4\x09\xc0{\xf0\
\x9a}\xd3\xcc<\x05|\x19\xd8\x9czjj\xf3\xef\xc7\
\x01@Z\xadMH\xd7P\xcf'~C5&:\xcb\
HW\x0f\xecH}4\xb9\xf9\xf7\xe3\x00 =\xc7\x0c\
\xe0\x9b\xb8\x16\xbf1\x83\xa5\x0f\xb8\x08\xd8\x9djkz\
\xf3\xef\xc7\x01@\xfa\xffv#]\xbb\xef\xf1}c\x86\
\x97K\xa9\xe6\x95\x036\xff\x14\x07\x005\xde\xfe\xc0\xe5\
\xc4o\x8c\xc6T5\x97\x90\x06\xe8*\xb0\xf9\xaf\x8c\x03\
\x80\x1a\xeb\xa5\xc0o\x88\xdf\x08\x8d\xa9K.\x04fR\
^6\xffg\xc7\x01@\x8d\xb3\x1fi\xe1\x9e\xe8\x8d\xcf\
\x98:f9p6\xb0\x19\xe5b\xf3\x7fn\x1c\x00\xd4\
\x18/\x06\xae ~\xa33\xa6\x09Y\x02\x9cA\xba!\
V4\x9b\xff\xe0q\x00P\xed\xcd\x04.&~c3\
\xa6\x89\x99\x0f|\x80\xb8\x95\x05m\xfe\xabO.\x03\xc0\
\xa8<\x1eT\xea\xd04\xe0;\xc0-\xc0k\x82k\x91\
\x9aj*\xf0\x15`6ph\xc1\xbf\xfbT\xe0\xe4\x82\
\x7fg\xe39\x00(\xd2\xfa\xa4]\x8fw\x02\xef\x04F\
\xc7\x96#\x09x!\xe9j\x81\x9f\xb7\xfe\x9c7\x9b\x7f\
\x10\x07\x00E\x18\x03\x1c\x07\xdc\x03\xfc+0>\xb6\x1c\
I\x838\x84\xb47\xe0t`\xed\x9c~\x87\xcd?\x90\
\x03\x80\x8av0iW\xff\x97I{\x00$\x95\xd7Z\
\xc0\xbf\x01\xb7\x01\xaf\xcd\xf8\xb1m\xfe\xc1\x1c\x00T\x94\
mH\xbb\x14/\x03\xb6\x0b\xaeERg\xa6\x01?!\
m\xc3[f\xf0x6\xff\x12p\x00P\xde&\x03\x9f\
'\xdd\xa1\xef\x90\xe0Z$ug\xe0\xb0\xc0\x89\x8c\xfc\
\x9c\x1d\x9b\x7fI8\x00(OG\x91v\x1d\x9eH\xdc\
\xa5E\x92\xb25\x914\xd4_\x0b\xec\xda\xe1\xff\xd7\xe6\
_\x22\x0e\x00\xca\xc3t\xd2\xae\xfe\xf3\x81\x17\x04\xd7\x22\
)\x1f\xbb\x03\xd7\x01\x9f\x06\xc6\x0e\xe3\xbf\xb7\xf9\x97\x8c\
\x03\x80\xb24\x0e\xf8w\xd2.\xc2\x83\x83k\x91\x94\xbf\
1\xc0\xc7\x81\x1b\x81=\xd6\xf0\xdf\xd9\xfcK\xc8\x01@\
Y\xd9\x07\xf83\xf0)\xbc\xacOj\x9a\x1d\x80kH\
\x8d~\xd5\xc3}6\xff\x92r\x00P\xb7\xd6&]\xd2\
\xf7{`\xdb\xe0Z$\xc5\x19Mj\xf4\x7fb\xe5\x95\
>6\xff\x12\x1b\x13]\x80*\xed \xe0,`\x8b\xe0\
:$\x95\xc7\xee\xc0M\xc0/\x81\xc3\x83k\xd1\x1a\xb8\
\x07@#\xb1\x0e\xf0-\xd2\x1d\xfb\xb6\x88-ER\x09\
\x8d\xc7\xe6_z\xee\x01P\xa7^\x06\x9cC\xf9\xee!\
.I\xea\x80{\x004\x5c\xe3\x813\x81_c\xf3\x97\
\xa4\xcas\x0f\x80\x86cW\xe0\x5c`ft!\x92\xa4\
l\xb8\x07@k2\x0a\xf8\x18i\xc5/\x9b\xbf$\xd5\
\x88{\x00\xb4:\x9b\x00\xdf\x07\x0e\x8c.D\x92\x94=\
\xf7\x00h0\x07\x93\x16\xf5\xb1\xf9KRM9\x00\xa8\
\xddZ\xc0\x19\xc0\xa5\xc0\xf3\x82k\x91$\xe5\xc8C\x00\
\x1a\xb0\x19\xe9\xe6={E\x17\x22I\xca\x9f{\x00\x04\
\xf0\x0f\xa4\x95\xbbl\xfe\x92\xd4\x10\x0e\x00\xcd\xd6CZ\
\xa7\xfb\x17\xc0\xd4\xe0Z$I\x05\xf2\x10@s\xad\x07\
|\x0fxut!\x92\xa4\xe29\x004\xd3\xf6\xc0\xc5\
\xc0\xf4\xe8B$I1<\x04\xd0<\xaf&\xdd\xae\xd3\
\xe6/I\x0d\xe6\x00\xd0,\x1f\x01.\x02&G\x17\x22\
I\x8a\xe5!\x80f\x18G\xba}\xef[\xa2\x0b\x91$\
\x95\x83\x03@\xfd=\x8ft\xbc\x7f\xef\xe8B$I\xe5\
\xe1\x00Po\xdb\x00\x97\x01[F\x17\x22I*\x17\xcf\
\x01\xa8\xaf\x97\x00Wc\xf3\x97$\x0d\xc2\x01\xa0\x9e\xde\
\x00\x5c\x01L\x89.D\x92TN\x0e\x00\xf5\xf3a\xe0\
\x87\xa4\x13\xff$I\x1a\x94\xe7\x00\xd4G\x0f\xf0y\xe0\
\x84\xe8B$I\xe5\xe7\x00P\x0f\xa3\x81\xb3\x80wF\
\x17\x22I\xaa\x06\x07\x80\xea\x1bG\xda\xe5\x7fdt!\
\xca\xcd2`\x1e0\x17X0H\x9e\x04\x9ej\xfb\xf9\
\x14\xf0\x0c\xb0\x04X\xda\xf6s9\xb0\x02\xe8kK?\
\xe9P\xe0@\xc6\x00k\x01c[\x99\xd0\x96I\xa4E\
\xa4&\x01\xeb\x90\xee'1\x90\xa9\xc0\x06\xadl\xd8\xfa\
\xe9!F\xa9\xc4\x1c\x00\xaam\x12ie\xbf\x03\xa3\x0b\
\xd1\x88=\x05\xdc\x0b<\x08<\x0c<\xd4\xca\xc3\xc0\x1c\
R\xd3_Hj\xd4y\x19\x18\x06\x00z3z\xcc\xd1\
\xa4A`c\xe0\x05\xadl\x0aL\x036oe\x1ai\
\xd8\x90\x14\xc0\x01\xa0\xba\xd6%\xdd\xc6\xd7\x05~\xca\xef\
i\xe0\x0e\xe0\xf6V\xee\x06\xee!5\xfe\xc7\x02\xeb\xca\
\xd3\x0a\xd2\x003\x07\xf8\xf3j\xfe\x9bQ\xa4!`:\
0\x03\xd8\x9a\xb4v\xc5\xb6\xc0V\xa4!BRN\x1c\
\x00\xaai=\xe0r\xe0E\xd1\x85\xe8Y\x96\x91\x1a\xfd\
-\xad\xcc\x02\xfeJ\xfav\x9f\xe77\xf8\xaa\xea\x03\x1e\
h\xe5\xcaU\xfe\xddX\xd20\xb0C+;\x02\xbb\x90\
\x06\x06I\x19p\x00\xa8\x9e)\xa4k\xfcw\x8b.\xa4\
\xe1\x96\x93\x9a\xfb\x0d\xc0\xf5\xad\x9f\xb3\xc8n\x17z\xd3\
\xf5\x92^\xcfY\xab\xfc\xf3)\xa4A`\x8f\xb6\xb8\xd8\
\x954\x02\x0e\x00\xd52\x15\xf8\x15\xe9\x03P\xc5ZH\
ZY\xf1\x8f\xad\x5cO:\xd1N\xc5Z@\xda[\xd0\
\xbe\xc7\xe0y\xa4Ca/\x06\xf6\x01\xf6\x04\xc6\x17_\
\x9aT-=\xd1\x05h\xd8\xa6\x92>\xf4v\x8a.\xa4\
!\x16\x02\xbfee\xb3\xb9\x15w\xe3W\xc58\xd2\x9e\
\x81\x97\x02/\x03\xf6\x05&\x86V$u\xe7\xc7\xc0\x1b\
\xb3~P\x07\x80jX\x97\xd4\x84\xdc\xed\x9f\x9fe\xa4\
o\xf8\xbf \x1db\xb9\x99\x95g\xc6\xab\xda\xc6\x02{\
\x01\xafhe\x0f\xbcDQ\xd5\xe2\x00\xd0P\x13I'\
\xfc\xed\x1b]H\x0d\xcd\x05~\x06\x5c\x0a\xfc\x1a\xf8{\
l9*\xc8\x14\xe0\x95\xc0\xa1\xc0\xc1\xa4\xbdkR\x99\
9\x004\xd08\xe0\x12\xe0\xa0\xe8Bj\xe4N\xd2\xda\
\x09\x17\x01\xd7\xe2\xb7\xfc\xa6\x1bM:o\xe0\x08\xe0p\
\xd2%\x89R\xd9\xe42\x00\xa8\xbc\xc6\x00\xffG:\xee\
l\xba\xcb]\xc0\xa7I\x97\x92Ik\xb23\xe9\xbdr\
;\xf1\xef[c\x06\xf2#\xd4\x18=\xc0\xb9\xc4\xbf\xe9\
\xaa\x9c9\xc0\x17\x80\xdd;|\xed\xa5\x01;\x03\xa7\x93\
\xd6)\x88~?\x9bf\xc7\x01\xa0A>O\xfc\x1b\xae\
\x8aY\x02\xfc/p\x18^\xe2\xaa\xec\xf4\x90\xae(\xf8\
\x0e\xe9~\x0b\xd1\xefs\xd3\xbc8\x004\xc4\x09\xc4\xbf\
\xd9\xaa\x96\xdb[\xaf\x9b's)o\x13\x81\xb7\x03\x7f\
\xfe}o\x9a\x13\x07\x80\x06x\x13+\xef\xd0f\xd6\
\x9c^\xd2Fq\xc0H^h)\x033\x81/\x01O\
\x10\xbf=\x98z\xc7\x01\xa0\xe6\x0e$\xdd\xb25\xfa\x8d\
V\xf6\xcc\x07>K\xba\xbb\x9cT\x06\x13\x81\xf7\x90\xee\
\xff\x10\xbd}\x98z\xc6\x01\xa0\xc6\xb6e\xe5-_\xcd\
\xe0\xb9\x03\xf8G\xd2}\xe9\xa5\xb2:\x90\xb4\xb6\x84{\
\xf2L\x96q\x00\xa8\xa9\xa9\xa4\xdb\xc3F\xbf\xc1\xca\x9a\
\x9b\x80\xa3p\xe56U\xcbv\xa4\x93\x06\xdd\xabg\xb2\
\x88\x03@\x0d\xad\x05\x5cE\xfc\x9b\xab\x8c\xb9\x9a\xb4J\
\x9bTe/ \x9d'\xf04\xf1\xdb\x94\xa9n\x1c\x00\
j\xe8\xdb\xc4\xbf\xb1\xca\x96\x1b\x81C\xbayQ\xa5\x12\
\xda\x88ty\xaf\x83\x80\x19I\x1c\x00j\xe6x\xe2\xdf\
Te\xcam\xc0\x91]\xbd\xa2R\xf9m\x0c|\x85\xb4\
fE\xf46g\xaa\x13\x07\x80\x1a9\x80t\xf7\xb9\xe8\
7U\x192\x07x\x1f.\xdc\xa3f\xd9\x0c\xf8.\xb0\
\x82\xf8m\xd0\x94?\x0e\x005\xb1)\xe9.t\xd1o\
\xa8\xe8<\x03\x9c\x0aL\xea\xee\xe5\x94*m'\xd2\xdd\
>\xa3\xb7GS\xee8\x00\xd4\xc0X\xe0\x1a\xe2\xdfL\
\xd1\xb9\x18\xd8\xb2\xcb\xd7R\xaa\x93\xc3\xf0\x06Df\xf5\
q\x00\xa8\x81\xaf\x13\xffF\x8a\xcc]\xc0\xab\xba~\x15\
\xa5zZ\x0b\xf80\xdeo\xc0<7\x0e\x00\x15w\x0c\
\xf1o\xa2\xa8,\x03>\x03\x8c\xef\xfaU\x94\xea\xef\x05\
\xc0\xfd\xc4o\xb7\xa6<\xc9e\x00\xf0\xc4\xabb\xcc\x00\
\xfe'\xba\x88 7\x00\xc7\x92\x96I\x954\xb4\xf7\x01\
\x9bG\x17\xa1\xfasu\xb5\xfc\x8d%Mo\x93\xa3\x0b\
)X/\xf0Q`ol\xfe\xd2p\x9d\x0a\x9c\x1c]\
\x84\x9a\xc1=\x00\xf9;\x0d\xd8=\xba\x88\x82\xcd\x02\xde\
\x0a\xfc%\xba\x10\xa9Bl\xfeR\x8d\xbc\x8af\xdd\x14\
\xa4\x0f\xf8\x1ci\xaf\x87\xa4\xe1;\x95\xf8\xed\xd7\x947\
\x9e\x04X1\xa3Hw\xb0\x8b~\xe3\x14\x95\xb9\xc0+\
3y\xe5\xa4f\xb1\xf9\x9b\xa1\xe2I\x80\x15s4\xf0\
\xc2\xe8\x22\x0ar%\xf0\x16\xe0\xd1\xe8B\xa4\x8aq\xb7\
\xbfTC\xb7\x10?5\xe6\x9d>\xe0?\xf0dRi\
$\xfc\xe6o\x86\x1b\xf7\x00T\xc8.\xc0\x8e\xd1E\xe4\
\xecI\xe0m\xc0E\xd1\x85H\x15\xe47\x7f\x85s\x00\
\xc8\xc7\xeb\xa3\x0b\xc8\xd9\x1d\xc0\x11\xa4\xa5K%u\xc6\
\xe6\xafRp\xd7m>\x8e\x88. GW\x92\xae\xed\
\xb7\xf9K\x9d\xb3\xf9\xab4\x1c\x00\xb2\xb7603\xba\
\x88\x9c\x9c\x0d\x1c\x0c<\x11]\x88TA6\x7f\x95\x8a\
\x03@\xf6v\x04z\xa2\x8b\xc8\xc1\xc9\xc0\xbbH\xeb\xfa\
K\xea\x8c\xcd_\xa5\xe39\x00\xd9\xdb>\xba\x80\x8c\xad\
\x00\xde\x0b|;\xba\x10\xa9\xa2l\xfe*%\x07\x80\xec\
M\x89. CK\x817\x03?\x8d.D\xaa(\x9b\
\xbfJ\xcb\x01 {\x93\xa2\x0b\xc8\xc8b\xe0p\xe0\xd7\
\xd1\x85H\x15e\xf3W\xa99\x00do\x5ct\x01\x19\
X\x0c\x1c\x02\xfc.\xba\x10\xa9\xa2l\xfe*=O\x02\
\xcc^\xd5\xcf\x90\x7f\x8at\x13#\x9b\xbf426\x7f\
U\x82\x03@\xf6\x1e\x8b.\xa0\x0b\x03\xdf\xfc\x7f\x1f]\
\x88TQ6\x7fU\x86\x03@\xf6\xfe\x16]\xc0\x08\xf5\
\x02\xaf\xc5\xe6/\x8d\x94\xcd_\x95\xe2\x00\x90\xbd\x9b\xa2\
\x0b\x18\x81>\xe0\x18\xe0\xf2\xe8B\xa4\x8a\xb2\xf9\xabr\
\x1c\x00\xb27\x0fx \xba\x88\x0e\xbd\x0f\xb8 \xba\x08\
\xa9\xa2l\xfe\xaa$\x07\x80|\x5c\x15]@\x07>\x0d\
\x9c\x15]\x84TQ6\x7fU\x96\x03@>\xce\x8f.\
`\x98~\x00\x9c\x12]\x84TQ6\x7fU\x9a\x03@\
>\xae\x00\x1e\x8f.b\x08\xbf'\xad\xed/\xa9s6\
\x7fU\x9e\x03@>\x96Q\xee\xdd\xea\x0f\x92\xce\xf8_\
\x1a]\x88TA6\x7f\xd5\x82\x03@~\xce\x04\x9e\x8e\
.b\x10K\x80#\x81\xf9\xd1\x85H\x15d\xf3Wm\
8\x00\xe4\xe71\xe0\xbf\xa3\x8b\x18\xc4\xfb\xa8\xe6\xa5\x8a\
R4\x9b\xbf\xa4a[\x1b\xb8\x1f\xe8/I\xbe\x91\xeb\
\xb3\x95\xea\xebT\xe2\xb7_\xd3\xdc\xfc\x88\x1c\xb8\x07 \
_\x8b\x81\xf7F\x17\xd1r\x1bp|t\x11R\x05\xf9\
\xcd_\xb5\xe4\x00\x90\xbf_\x02\xa7\x07\xd7\xf0w\xd2I\
\x7fe<'A*3\x9b\xbf\xa4\xae\x8c\x02.&f\
\xd7Q/\xf0\x8a\xfc\x9f\xa2T;\xee\xf67eI.\
\x87\x00T\x9c\xb5I\xeb\x03\x14\xf9\xa6Y\x0e\x1c]\xc4\
\x93\x93j\xc6\xe6o\xca\x14\x07\x80\x1a\x18\x07\xfc\x94b\
\xde0\xcf\x90.\xf7\x93\xd4\x19\x9b\xbf)[\x1c\x00j\
b4\xf0I\xd2\xb7\xf3\xbc\xde,\x7f\x03\xf6.\xe8\xf9\
Hub\xf37e\x8c\x03@\xcd\xec\x0b\xdcM\xf6o\
\x94K\x80\xe7\x15\xf8<\xa4\xba\xb0\xf9\x9b\xb2\xc6\x01\xa0\
\x86\xc6\x02\xef\x07\xe6\xd0\xfd\x1b\xe4A\xe0M\xc5\x96/\
\xd5\x86\xcd\xdf\x949\x0e\x0056\x11x;p%\xd0\
Ggo\x8c\xd9\xa4\xb5\x06\xc6\x15^\xb5T\x0f6\x7f\
S\xf6\xe42\x00\xf4\xe4\xf1\xa0\xea\xca\xf3\x81\xfd\x80\xbd\
\x80\xdd\x81\x8d\x81)\xc0\xfa\xa4K\xfa\xe6\x90\x16\xf5\xb9\
\x1a\xb8\x14\xf8sL\x99R-x\x9d\xbf\xaa\xe0\xc7\xc0\
\x1b\xa3\x8b\x90\xa4\xba\xf0\x9b\xbf\xa9J\x5c\x0aX\x922\
\xe27\x7f5\x9e\x03\x80\xa4\xa6\xb1\xf9K8\x00Hj\
\x16\x9b\xbf\xd4\xe2\x00 \xa9)l\xfeR\x1b\x07\x00I\
M`\xf3\x97V\xe1\x00 \xa9\xee>\x89\xcd_z\x0e\
\x07\x00Iuv\x1c\xf0\x89\xe8\x22\xa42r\x00\x90T\
W/\x07\xce\x8c.B*+\x07\x00Iu\xb4>\xf0\
}\xd2\xdd7%\x0d\xc2\x01@R\x1d\x9dNZV[\
\xd2j8\x00H\xaa\x9b\xed\x81c\xa3\x8b\x90\xca\xce\x01\
@R\xdd\x9c\x84\x9fm\xd2\x90\xdcH$\xd5\xc9\x86\xc0\
Q\xd1EHU\xe0\x00 \xa9N\xde\x00\xac\x15]\x84\
T\x05\x0e\x00\x92\xea\xe45\xd1\x05HU\xe1\x00 \xa9\
.\xc6\x00\xfbF\x17!U\x85\x03\x80\xa4\xba\xd8\x0e\x98\
\x18]\x84T\x15\x0e\x00\x92\xeab\xeb\xe8\x02\xa4*q\
\x00\x90T\x17.\xfc#u\xc0\x01@R]\xac\x1b]\
\x80T%\x0e\x00\x92\xea\xa2'\xba\x00)'}y<\
\xa8\x03\x80\xa4\xbax:\xba\x00)'K\xf3xP\x07\
\x00Iu1/\xba\x00)'\x8b\xf2xP\x07\x00I\
uq_t\x01RN\x1e\xc9\xe3A\x1d\x00$\xd5\xc5\
l\xa0?\xba\x08)\x07w\xe7\xf1\xa0\x0e\x00\x92\xeab\
\x11pkt\x11R\x0en\xc9\xe3A\x1d\x00$\xd5\xc9\
\xe5\xd1\x05H\x19\x9b\x87{\x00$iH?\x89.@\
\xca\xd8\x15y=\xb0\x03\x80\xa4:\xb9\x1a\xb8+\xba\x08\
)C\x17\xe4\xf5\xc0\x0e\x00\x92\xea\xa4\x1f\xf8jt\x11\
RF\xe6\x01\x97\xe6\xf5\xe0\x0e\x00\x92\xea\xe6,\xe0\xd1\
\xe8\x22\xa4\x0c|\x15\xe8\xcd\xeb\xc1G\xe7\xf5\xc0\x92\x14\
d9\xb0\x008<\xba\x10\xa9\x0b\xf3\x817\x93\xd3*\
\x80\xe0\x1e\x00I\xf5t\x0epUp\x0dR7>\x0a\
\xfc=\xcf_\xe0\xcd3$\xd5\xd54\xe0f`jt\
!R\x87.\x03\x0e\xc9\xfb\x97\xb8\x07@R]=\x04\
\x1c\x0d,\x8b.D\xea\xc0\xbd\xc0[\x8b\xf8E\x9e\x03\
\xa9\xce\xee\x03\xee\x00^\x8b_xT~s\x81\x03\
\x81\x87\x8b\xf8en\x10\x92\xea\xee|\xe0u\xc0\x92\xe8\
B\xa45\xb8\x17x\x09\x05\xaec\xe1\x00 \xa9\x09.\
\x06\xf6\xc5;\x06\xaa\x9c.\x03\xf6\xc4E\xac$)7\
\x93H\xd7V/'-\x1adLd\x1e\x03\x8eE\x92\
T\x98\x1dH\x87\x06V\x10\xdf\x04L\xf32\x178\x05\
X\x87@^\x06(\xa9\xc96\x07\xde\x06\x1c\x01\xec\x8a\
\x9f\x89\xca\xcf<\xd2\x8d}. -\xef\x9b\xdb\x0a\x7f\
\xc3\xe5\x9b]\x92\x92\xa9\xc0\xee\xc0L\xd2\x1a\x02\xeb\x01\
\x13\xf0\x5c)u\xa6\x8f\xb4z\xdf\x22\xe0\x11\xd2\xad|\
o!\xa7[\xfaJ\x92$I\x92$I\x92$I\x92\
$I\x92$I\x92$I\x92$I\x92$I\x92$\
I\x92$I\x92$I\x92$I\x92$I\x92$I\
\x92$I\x92$I\x92$I\x92$I\x92$I\x92\
$I\x92$I\x92$I\x92$I\x92$I\x92$\
I\x92$I\x92$I\x92$I\x92$I\x92$I\
\x92$I\x92$I\x92$I\x92$I\x92$I\x92\
$I\x92$I\x92$I\x92$I\x92$I\x92$\
I\x92\xea\xa4'\xba\x80A\x8c\x02\xb6\x03v\x02f\x00\
\x1b\x03\xeb\x01c)g\xbd*\xaf>`\x09\xf0\x04\xf0\
\x10p\x1bp=07\xb2(I*\x83\xb24\xd4\x09\
\xc0\x11\xc0\xeb\x80\x83\x80uc\xcbQ\xcd\xcd\x06.\x04\
\xbe\x0b\xdc\x13\x5c\x8b$5\xd2\x06\xc0i\xc0\x02\xa0\xdf\
\x98\x82\xd3\x07\x5c\x0c\xec\x8a$\xa9\x10\xa3\x80\x0f\x01\x8b\
\x88o\x02\xc6\xac\x00\xbe\x85{\x9e$)W\x9b\x00\xbf\
%\xfeC\xdf\x98U\xf3\x10\xf0b$I\x99\xdb\x11x\
\x98\xf8\x0fzcV\x97^\xe0\x18$\xa9\xe6F\x17\xf8\
\xbbv\x04~\x03lT\xe0\xef\x94:5\x1a8\x12x\
\x14\xb81\xb8\x16I\xcaMQ\x03\xc0\xc0n\x7f\x9b\xbf\
\xaa\xa0\x078\x14\xb8\xb5\x15I\xaa\x9d\x22.\x03\x1cE\
\xfa\xe6\xff\xd2\x02~\x97\x94\xa5\xa7I\xe7\x04\xdc\x12]\
\x88$emT\x01\xbf\xe38l\xfe\xaa\xa6\x89\xc0\x8f\
I\xebTHR\xad\xe4}\x08`\x03\xd2\x82+\xe3r\
\xfe=R^6\x00\xc6\x00\xbf\x8a.D\x92\xb2\x94\xf7\
\x1e\x80\x13\x81ur\xfe\x1dR\xdeN\x00\xa6G\x17!\
IY\xcas\x0f\xc0\x04\xe0\x07\xb8\xfbT\xd57\x1a\x98\
B\xda\x9b%I\xb5\x90\xe7\x1e\x80#\x80\xf5s||\
\xa9Ho\x046\x8f.B\x92\xb2\x92\xe7\x00\xf0\xba\x1c\
\x1f[*\xda\x18\xe0\xdd\xd1EHRV\xf2\xba\x0cp\
\x14\xe9\x06?\xae\xad\xae:\xb9\x1f\xd82\xba\x08I\xca\
B^{\x00\xb6\xc3\xe6\xaf\xfa\xd9\x02\xd8%\xba\x08I\
\xcaB^\x03\xc0N9=\xae\x14\xed\xa0\xe8\x02$)\
\x0by\x0d\x003rz\x5c)\x9aw\x0b\x94T\x0by\
\x0d\x00\x1b\xe7\xf4\xb8R\xb4\x9d\xa3\x0b\x90\xa4,\xe45\
\x00\xac\x97\xd3\xe3J\xd16\xa7\xd8\xbbhJR.\xf2\
\x1a\x00\xc6\xe6\xf4\xb8R\xb41\xa4\xe5\x81%\xa9\xd2\xf2\
\x1a\x00\x8a\xb8\xcb\xa0\x14\xc5\xe5\xad%U^\x11w\x03\
\x94\xea\xc6\xedFR\xe5\xf9A&unIt\x01\x92\
\xd4-\x07\x00\xa9s\x0b\xa2\x0b\x90\xa4n9\x00H\x9d\
y\x1cx2\xba\x08I\xea\x96\x03\x80\xd4\x99\xbfF\x17\
IYp\x00\x90:smt\x01\x92\x94\x05\x07\x00\
\xa93WD\x17 IYp\x00\x90\x86\xefq\xe0\xaa\
\xe8\x22$)\x0b\x0e\x00\xd2\xf0\xfd\x00X\x16]\x84$\
e\xc1\x01@\x1a\x9e\x15\xc0\x97\xa3\x8b\x90\xa4\xac8\x00\
H\xc3s\x0epOt\x11\x92\x94\x15\x07\x00ih\x0b\
\x81\x8fG\x17!IYr\x00\x90\x86\xf6~`nt\
\x11\x92\x94%\x07\x00i\xcd\xbeF:\xf9O\x92j\xc5\
\x01@Z\xbd\xb3\x81\x0fD\x17!Iyp\x00\x90\x06\
w6\xf0n\xa0/\xba\x10I\xca\x83\x03\x80\xf4\x5c6\
\x7fI\xb5\xe7\x00 =\x9b\xcd_R#8\x00H+\
\xd9\xfc%5\x86\x03\x80\x94\xd8\xfc%5\xca\x98\xe8\x02\
\xa4\x12\xb0\xf9K\xc5\x99\x04\x1c\x00\xec\x07\xec\x04l\x05\
l\xdc\xfa\xe7=\xc0\x12`>\xf0\x000\x1b\xb8\x06\xf8\
\x15\xf0H@\xad\x1a\x81\x0b\x80~c*\x90\xef\xe0\x9e\
0)o=\xc0\xa1\xc0\x85\xa4\x06\xdf\xe9v\xda\x07\x5c\
M\x1a\xd4'\x14\x5c\xbb:\xe4\x00`\xaa\x10\x9b\xbf\x94\
\xbf#\x81[\xc9n\xbb\x9d\x0b\x9c\x00\x8c-\xf2Ih\
\xf8\x1c\x00L\xd9c\xf3\x97\xf2\xb5\x05p\x05\xf9m\xc3\
\xb7\x03\xfb\x16\xf5d4|\x0e\x00\xa6\xcc\xb1\xf9K\xf9\
z=\xb0\x88\xfc\xb7\xe5\x15\xc0\xc9\xa4C\x0c*\x09\x07\
\x00S\xd6\xd8\xfc\xa5|\x9dL\xf1\xdb\xf5\x0f\xf1\x90@\
i8\x00\x982\xc6\xe6/\xe5\xeb\x0c\xe2\xb6\xef\xff\x03\
\xd6\xca\xff)j(\x0e\x00\xa6l\xb1\xf9K\xf9\xfa\x04\
\xf1\xdb\xf9\xf7r\x7f\x96\x1a\x92\x03\x80)Sl\xfeR\
\xbe\x8e!~;\x1f\xc8\xf19?W\x0d\xc1\x01\xc0\x94\
%6\x7f)_\xdb\x03O\x13\xbf\xad\x0fd)i\x81\
!\x0d\xc1\x0fF\xd5\xd9\xd9\xb8\xc2\x9f\x94\xa7\xb1\xc0\xb9\
\x94kq\x9e\xb1\xc07\xf1\xca\x80!9\x00\xa8\xael\
\xfeR\xfe>\x07\xec\x12]\xc4 \xf6\x02\xde\x10]D\
Sy\x08\xc0D\xc6\xdd\xfeR\xfe\x0e'~[_S\
f\xe7\xf7\xd4\xeb\xc1\x0fI\xd5\xcd\xd9\xf8\xcd_\xca\xdb\
4\xd2\xa0]f\xdb\x03/\x8b.\xa2\xcc\x1c\x00T'\
6\x7f)\x7f\xe3H{y\xa7D\x172\x0c\xc7D\x17\
Pf\x0e\x00\xaa\x0b\x9b\xbfT\x8c\xaf\x03{F\x171\
L\x87\xe1\xc9\x80\xab\xe5\x00\xa0:\xb0\xf9K\xc58\x0e\
xGt\x11\x1d\xd8\x08\xd8&\xba\x88\xb2r\x00P\xd5\
\xd9\xfc\xa5b\xbc\x02\xf8Bt\x11#P\xc6\xab\x14J\
\xc1\x01@Uf\xf3\x97\x8a\xb13\xe9\xb8\xff\x98\xe8B\
F`zt\x01e\xe5\x00\xa0\xaa\xb2\xf9K\xc5\x98\x06\
\x5c\x0aL\x8e.d\x846\x88.\xa0\xac\x1c\x00TE\
6\x7f\xa9\x18S\x80\xcb\x80\xe7G\x17\xd2\x85q\xd1\x05\
\x94\x95\x03\x80\xaa\xc6\xe6/\x15c\x1d\xe0\x97\xa4\xeb\xe9\
\xablIt\x01e\xe5\x00\xa0*\xb1\xf9K\xc5\x98\x08\
\x5c\x02\xec\x11]H\x06\xe6D\x17PV\x0e\x00\xaa\x0a\
\x9b\xbfT\x8c\x09\xc0E\xc0K\xa2\x0b\xc9\xc8\x9d\xd1\x05\
\x94U\x15\xcf\xe8T\xf3\xd8\xfc\xa5b\xacM\xfa\xe6\x7f\
@p\x1dY\xba!\xba\x80\xb2r\x00P\xd9\xd9\xfc\xa5\
b\xacC:\xdb\x7f\xdf\xe8B2t;\xf0pt\x11\
e\xe5!\x00\x95\x99\xcd_*\xc6\x86\xc0\xaf\xa9W\xf3\
\x07\xf8\xdf\xe8\x02\xca\xcc=\x00*+\x9b\xbfT\x8c\xad\
Hg\xfb\xcf\x88.$c+\x80oE\x17Qf\xee\
\x01P\x19\xd9\xfc\xa5b\xec\x06\x5cM\xfd\x9a?\xc0y\
\xc0\x83\xd1E\x94\x99\x03\x80\xca\xc6\xe6/\x15\xe3p\xe0\
\xb7\xa4\x1b\xe6\xd4\xcd\xd3\xc0)\xd1E\x94\x9d\x03\x80\xca\
\xc4\xe6/\x15\xe3$\xe0B`Rt!9\xf98~\
\xfb\x0fs\x01\xd0oL\x07\xf9\x0e\x0e\xa4R\xde&\x90\
v\x8dGo\xefy\xe6\x12\xa0'\xab\x17\xac\xce<\x09\
Pe1\x95t\x0d\xf2\x93\xd1\x85H55\x83\xf4\xe5\
l\xe7\xe8Br\xf4\x08\xf0v\xd2 \xa0!\xf8\x8dK\
e\xf1\x1a\xe0Z`\xeb\xe8B\xa4\x1a:\x92\xb4 N\
\x9d\x9b\x7f?\xf0\x0e\xe0\xf1\xe0:*\xc3\x01@e\xb2\
\x1d\xe9C\xea\xf5\xd1\x85H51\x0e\xf8\x22\xf0S`\
\xdd\xe0Z\xf2\xf6e\xe0\x8a\xe8\x22\xe49\x00\xa6\xfb\xfc\
\x170\x16I#\xb5\x03\xf0\x17\xe2\xb7\xe5\x222\x0bo\
\xfb\xdb1\xf7\x00\xa8\xac\xde\x0f\x5c\x07\xcc\x8c.D\xaa\
\x98\x1e\xe08\xe0z`\xa7\xe0Z\x8a\xb0\x148\xa6\xf5\
S\x1dp\x00P\x99\xed\x0c\xdcH\xfa0\xf3\xac^i\
h3\x80\xabH\xbb\xc3\xc7\xc7\x96R\x98\x13\x81[\xa2\
\x8b\xd0J\x1e\x020Y\xe7*`:\x92\x063\x0a8\
\x81\xb4\x00N\xf4\xb6Zd~\x9c\xc5\x8b\xa7l9\x00\
\x98<\xb2\x188\x1e\x18\x8d\xa4\x01\xbb\x91\xae\xa0\x89\xde\
>\x8b\xce\x9d\xc0\xe4\x0c^\xbf\xc6\xf2\x10\x80\xaad\x22\
p&\xe9\xd8\xe6\x8b\x82k\x91\xa2\xadK:Y\xf6:\
`\xcf\xe0Z\x8a\xb6\x048\x0a\xd7\x0d\xe9\x8a\x03\x80\xaa\
hW\xe0\x1a\xe0\x7fH\x0b\x08IM\xd2C\xba\xde\xfd\
v\xd2\xc9\xb2M\xdc#\xf6A\xd2\x15\x0e*!\x0f\x01\
\x98\xa2\xb2\x80t\x92\xa0\xabZ\xaa\x09^J:16\
z\xbb\x8b\x8c\xb7\xf8-9\x07\x00Stn'\xadv\
&\xd5\xd1\xb6\xc0O\x88\xdf\xce\xa2\xf3'\xbc\xde\xbf\xf4\
\x1c\x00LT\xfe@\xfa\x96$\xd5\xc1\x16\xc09\xc0r\
\xe2\xb7\xad\xe8<\x02<\xbf\x9b\x17S\xcf\xe69\x00\xaa\
\x9b}I\xf78\xbf\x02\xd8;\xb8\x16i\xa4\xa6\x01\xff\
\x0d\xdcA\xba\xb9M\x13\x8f\xf3\xb7\xeb\x05^G\x1a\x02\
Tr\xee\x010e\xc9\xe5\xc0\xfeH\xd50\x9dt\x8c\
\xbb\x97\xf8m\xa7L9\xb6\x9b\x17U\xc5r\x000e\
\xcb\x1fHw\x1ctEA\x95\xd1\xae\xc0y\xb8\xab\x7f\
\xb0\x9c\xd1\xc5\xeb\xaa\x00\x0e\x00\xa6\xac\xb9\x1dx/0\
\x01)V\x0f\xf0j\xe07\xc4o\x17e\xcd\x058\xb4\
W\x8e\x03\x80){\x1e'}\xb3\xd8\x12\xa9X\xeb\x01\
\x1f\x22\x1d\xdf\x8f\xde\x0e\xca\x9ckqP\xaf$\x07\x00\
S\x95\xac\x00~\x0e\x1c\x81k\x09(_{\x00\xdf&\
-i\x1d\xfd\xbe/{\xee\x076\x1a\xd1\xab\xacp\x0e\
\x00\xa6\x8ay\x048\x8dt\xcd\xb5\x94\x85\x0dI\xf7\xaf\
\xf8\x0b\xf1\xef\xef\xaad\x01\xb0\xfdH^l\x95\x83\x03\
\x80\xa9z\xae'-\xb3\xba!Rg&\x00\xaf\x07.\
\xc6\xb3\xf9;\xcdb`\x9f\xce_r\x95\x89\x03\x80\xa9\
K\x96\x93.%|\x17\xe9\xd8\xad4\x98\xb1\xa4\x13\xfa\
\xce#\xdd\xa0&\xfa}[\xc5,\x03\x0e\xe9\xf4\x85W\
\xf98\x00\x98:\xa6\x974\x0c\xbc\x0f\xd8\x185\xdd\xda\
\xa4o\xfa\xe7\x01\x0b\x89\x7f\x7fV9}\xc0[:{\
\xf9\xd5\xad\xbc.\xaf\xb8\x80\xb4j\x93TW\xfd\xa4\xc3\
\x04\x97\xb4rsl9*\xc8f\xa4o\xa9\x87\x02\x07\
\x01\xe3c\xcb\xa9\x8d\xe3\x81/E\x17\xd14\x0e\x00R\
6\x1e%-?|y\xeb\xe7\xbc\xd8r\x94\x91\x09\xa4\
\xe5\xa5\x0f\x06^\x05\xcc\x8c-\xa7\x96>\x05|2\xba\
\x88&r\x00\x90\xb2\xd7\x0f\xcc&-\xf0\xf2\x1b\xe0w\
\xa43\x9bU~\xe3\x80\x17\x01\x07\x02/\x07\xf6\xc2\xbb\
\xcf\xe5\xe9?\x81\x8fE\x17\xd1T\x0e\x00R\xfe\xfa\x81\
\xdb\x80?\x92\x96$\xfe\x13pWhE\x1a0\x95t\
\xd6\xf9~\xa4o\xfa{`\xc3/\xca\x17\x81\x13\xa2\x8b\
h2\x07\x00)\xc6\x02\xd29\x04\xd7\x92\xce\x1f\xb8\x09\
x0\xb4\xa2\xfa\x9b\x0c\xecF\xfa\x86\xff\x22R\xb3\xdf\
*\xb4\xa2\xe6\xfa\x1a\xf0/\xd1E4\x9d+\x9fI1\
\xa6\x00\xafle\xc0\xe3\xc0\x9f\x81Ym\xb9\x0dx\xaa\
\xf0\xea\xaam\x0c\xe9\xaez\xdb\x03;\x03;\xb5~n\
\x81\xeb\xca\x97\xc1\xb7Hkl(\x98\x03\x80T\x1eS\
I\xc7\x9e\x0f\x5c\xe5\x9f?L\xba\x89\xd1m\xc0\xddm\
\xb9\x8ft\xedt\x13\x8d\x026%}\x83\x9f\xde\xcav\
\xa4U\x1c\xa7\x03k\xc5\x95\xa65\xf8\x1a\xa9\xf9\xf7G\
\x17\x22\x07\x00\xa9\x0a6m\xe5\xa0U\xfey\x1fi\xf9\
\xe2\x07\xda\xf2\xb7\xb6<B\xba\x1a\xa1jC\xc2h\xd2\
\x0a\x8c\x1b\xb7\xb2)0\xad-\x9b\x91\xbe\xcd\x8f\x0d\xaa\
O#\xe31\xff\x92q\x00\x90\xaak\xe0[\xf0\xa6\xa4\
\x13\xd8V\xe7\x09`.\xf0\x18\xe9\xdc\x83\xc7[YH\
Z\xb5nQ+\x8b\x81\xa7[Y\x0c,%-~\xd4\
\xdb\xfa\xf3\x0a\xd2\xd0\xd1\xd7\xfasO\xab\x86\xd1m\x19\
C\xba6~<\xe9d\xba\x81?OneR\xdb\x9f\
\xa7\x0c\x92\x8d\x80\x0dZ\x8f\xab\xfa8\x0d8)\xba\x08\
=\x9b\x03\x80T\x7f\xeb\xb5\xb2Mt!j\xa4O\x00\
\xff\x11]\x84\x9e\xcb\x01@\x92\x94\x87~\xe0D\xd2\xae\
\x7f\x95\x90\x03\x80$)k\xcbH7\xd0:7\xba\x10\
\xad\x9e\x03\x80$)KO\x93\xd6\x81\xf9Et!Z\
3\x07\x00IRV\x16\x90n\x94tMt!\x1a\x9a\
\x03\x80$)\x0b\x0f\x90\xee\x94xkt!\x1a\x1e/\
\xb5\x91$u\xebz`ol\xfe\x95\xe2\x00 I\xea\
\xc6O\x81\x03\x809\xc1u\xa8C\x0e\x00\x92\xa4\x91\xfa\
<\xf0z\xd2\x89\x7f\xaa\x18\xcf\x01\x90$uj9i\
M\xffoD\x17\xa2\x91s\x00\x90$u\xe21\xe0(\
\xe0\xb7\xd1\x85\xa8;\x0e\x00\x92\xa4\xe1\xba\x198\x02x\
0\xba\x10u\xcfs\x00$I\xc3\xf1#`?l\xfe\
\xb5\xe1\x00 IZ\x93>\xe0#\xc0\x9b\xf0d\xbfZ\
\xf1\x10\x80$iu\xe6\x02o\x06\xae\x8c.D\xd9s\
\x00\x90$\x0d\xe6\xb7\xa4o\xfd\x8fF\x17\xa2|x\x08\
@\x92\xd4\xae\x1f\xf8O\xe0@l\xfe\xb5\xe6\x1e\x00I\
\xd2\x80\x05\xc0\xdb\x80\x9fG\x17\xa2\xfc9\x00H\x92\x00\
\xae\x02\xde\x0a<\x1c\x5c\x87\x0a\xe2!\x80\xea\xf9,N\
\xe7\x92\xb2\xb3\x0c8\x89\xb4\xcb\xdf\xe6\xdf \x0e\x00\xd5\
r\x0a\xf0q\xe0\xd5\xc0\xbf\x91\x96\xe3\x94\xa4\x91\xba\x1b\
\xd8\x178\x8dt\xb9\x9f\x1a\xc4\x01\xa0:N\x01>\xdd\
\xfas?p\x06\xf0R\x5c\x94C\xd2\xc8\x9c\x0d\xecJ\
\xba\x95\xaf\x1a\xc8\x01\xa0\x1a\xda\x9b\x7f\xbb?\x01;\x03\
?.\xb6\x1cI\x156\x07x\x0d\xf0.\xe0\xa9\xe0Z\
\x14\xc8\x01\xa0\xfcV\xd7\xfc\x07<\x01\xbc\x11x\x07n\
\xcc\x92\xd6\xec\x07\xc0\xf6\xc0\xcf\xa2\x0bQ<\x07\x80r\
\x1b\xaa\xf9\xb7\xfb.\xb0\x0b\xf0\x87\xfc\xca\x91TQ\xf3\
\x80\xd7\x02\xc7\x90.\xf5\x93\x1c\x00J\xac\x93\xe6?\xe0\
\x1e`\x7f\xe0\xc3\xc0\xd2\xcc+\x92TE?$}\xeb\
\xbf0\xba\x10\x95\x8b\x03@9\x8d\xa4\xf9\x0f\xe8\x03>\
\x0f\xec\x06\xdc\x90YE\x92\xaa\xe6~\xe0\x10\xd2Z\xfe\
\xf3cKQ\x199\x00\x94O7\xcd\xbf\xdd\xad\xc0\xde\
\xa4\xbbx-\xc9\xe0\xf1$U\xc3\x0a\xe0\x0b\xa4o\xfd\
\x97\x05\xd7\xa2\x12s\x00(\x97\xac\x9a\xff\x80\x15\xc0\xe7\
HW\x0a\xfc>\xc3\xc7\x95TN7\x02/\x02\xfe\x15\
o\xdd\xab!8\x00\x94G\xd6\xcd\xbf\xdd\x9d\xa4s\x03\
\xde\x0b,\xcc\xe9wH\x8a\xb3\x10\xf8\x00\xb0\x17ps\
p-\xaa\x08\x07\x80r\xc8\xb3\xf9\x0f\xe8\x07\xbe\x09l\
\x0b\x9c\x9b\xf3\xef\x92T\x8c>\xe0[\xc0\x0b\x81\xaf\x92\
\xf6\xfaI\xc3\xe2\x00\x10\xaf\x88\xe6\xdfn\x1e\xe9\x86\x1f\
\x07\x02\xb7\x15\xf8{%e\xebZ\xd27\xfe\xf7\xe0I\
~\x1a\x01\x07\x80XE7\xffvW\x92\xce\x0d8\x11\
\xf8{P\x0d\x92:7\x07x'\xf0b\xbc\xd2G]\
p\x00\x88\x13\xd9\xfc\x07,\x03\xce\x04\xb6\x01\xce!\x1d\
&\x90TN\x8b\x81O\x013p{U\x89]@z\
s\x9a\xc1s\xf2\xc8_\xda\x5c\xedF\xba'x\xf4\xeb\
c\x8cY\x99\xe5\xc0Y\xc0&H\x19r\x0f@\xf1\xca\
\xf0\xcd\x7fun\x02\x0e\x00\x8e\x04\xee\x8a-E\x12\xe9\
:\xfe]H\xc7\xf9\x1f\x0d\xaeE5\xe3\x00P\xac2\
7\xffv\x17\x013\x81\x7f\x22\x1do\x94T\xac\xdf\x93\
\x86\xf1C\x80\xd9\xb1\xa5H\x9d\xf1\x10\xc0sS\xd6\xdd\
\xfeC\x99\x08\x9cD\xba\xeb`\xf4khLE\x85\xb4\
\xb4\x00\x00\x04\xd4IDAT\xdds\x0d\xf0\x0fH\x15\
\xe6\x00\xf0\xecT\xb5\xf9\xb7[\x1f\xf8\x0c\xf0$\xf1\xaf\
\xa71u\xcbM\xc0aH5\xe0\x00\xb02uh\xfe\
\xed\xa6\x02\xa7\x03O\x11\xff\xda\x1aS\xf5\x5c\x07\x1c\x01\
\xf4 \xd5\x84\x03@J\xdd\x9a\x7f\xbb\x0dH{\x04\x16\
\x11\xff:\x1bS\xb5\x5c\x09\x1c\x84TC\x0e\x00\xf5n\
\xfe\xed\xd6#=\xd7\xf9\xc4\xbf\xe6\xc6\x94=?#-\
\xe0#\xd5V\xd3\x07\x80\xa64\xffv\x13\x81\xf7\x03\xf7\
\x12\xff\xfa\x1bS\xa6,\x01\xbe\x03\xec\x88\xd4\x00M\x1e\
\x00\x9a\xd8\xfc\xdb\x8d\x06\x8e\x06\xae'\xfe\xef\xc2\x98\xc8\
\xcc\x05>\x09l\x84\xd4 M\x1d\x00\x9a\xde\xfcW\xb5\
\x0fp>i\xc9\xe1\xe8\xbf\x1bc\x8a\xcal\xe0X`\
\x1cR\x035q\x00\xb0\xf9\xaf\xde4\xe04\xd2\x9d\x08\
\xa3\xff\x9e\x8c\xc9#\xbd\xc0\x8fH\x8b\xf7H\x8d\xd6\xb4\
\x01\xc0\xe6?<c\x817\x03\xbf#\xfe\xef\xcc\x98,\
r\x1f\xf01\xe0yH\x02\x9a5\x00\xd8\xfcGf{\
\xe0\x8bx\xf5\x80\xa9^zI\xcbe\xbf\x0a\x97S\x97\
\x9e\xa3)\x03\x80\xcd\xbf{c\x81\xa3\x80_\x00+\x88\
\xff;5fu\xb9\x09\xf8 \xb0!\x92V\xab\x09\x03\
\x80\xcd?{/\x00>\x0c\xdcB\xfc\xdf\xaf1\xfd\xa4\
\x9ba}\x01/\xe1\x93\x86\xad\xee\x03\x80\xcd?\x7f;\
\x03g\x00\x0f\x12\xff\xf7m\x9a\x95\x85\xc0\xd9\xc0\xc1\xc0\
\x18$u\xa4\xce\x03\x80\xcd\xbfX=\xc0\xbe\xc0WH\
\xf7C\x8f\xfe\xfb7\xf5\xcc\x22\xe0\xfb\xa4\x1b\xf2\x8cE\
\xd2\x88\xd5u\x00\xb0\xf9\xc7\x1a\x05\xec\x07\x9cI:\xfb\
:\xfa\xfd`\xaa\x9d\xc7\x81\xef\x91n\xc63\x1eI\x99\
\xa8\xe3\x00`\xf3/\x9f]I+\xad\xdd\x00\xf4\x11\xff\
\x1e1\xe5\xcf]\xa4c\xfa\xfb\x93V\xad\x94\x94\xb1\xba\
\x0d\x006\xff\xf2\xdb\x04x7p!\xde\xa1\xd0\xacL\
/p\x15\xf0\x11`;$\xe5\xee|\xe27\xfc\xacb\
\xf3\xaf\x9e1\xc0K\x80SI\xf7[\xf7\xf2\xc2f\xe5\
v\xd29#\x87\x01\x93\x90T\xa8\xef\x11\xff!\x90E\
l\xfe\xf5\xb0>p8\xf0%\xd2%\x86\x1e.\xa8W\
\x1e\x00\xce%\xed\x01\xda\x0cI\xa1\xbeB\xfc\x87B\xb7\
\xb1\xf9\xd7\xd7\x06\xa4\x13\xbf\xce\x00\xfe\x04,%\xfe\xfd\
f\x86\x9f[\x81o\x00o\x016G\xd2\x88\xe4u\x8d\
\xebC9=nQN\x01>\x1d]\x84r3\x9f\xb4\
\x94\xebE\xad\xff=\x1e\xd8\x03\xd8\x0b\xd8\xb3\x95-B\
*\xd3\xaa\x16\x90n-}]\xeb\xe75\xc0c\xa1\x15\
I5\xd1\x93\xd3\xe3\x1e\x06\xfc,\xa7\xc7\xce\x9b\xcd_\
\x90n\xee\xb2\x1b\xe9J\x83\x81\x9f[\x91\xdf6\xa3t\
Y\xde,\xd2\x92\xbb\x03\x0d\xff\xde\xd0\x8a\xa4\x1a\xcb\xeb\
\xc3l#\xd2\x12\x9aUc\xf3\xd7\x9a\xac\x03\xec\xb0J\
f\x92\xde\xef\x1a\xbe%\xa4\xdd\xf8\xb3V\xc9\xa3\x91E\
IM\x93\xe7\xb7\x99Y\xa4\x0f\xc8\xaa\xb0\xf9k\xa4\xd6\
\x05^\x08l\xd3\xfa\xb95\xb0e+M\xbdMl/\
\xe9\xdb\xfb]\xc0\xdd\xad\x9f\x03y\x88t\x22\xa6\xa4@\
y\xaes\xfdS\xaa3\x00\xd8\xfc\xd5\x8dE\xa4\xdd\xd5\
\xd7\x0f\xf2\xef\xd6&\x0d\x02\x9b\x03\x9b\xae\x92MH{\
\x0f\xa6R\xadC\x0b\xbd\xa4\xe3\xf0\x0f\xb7\xf2P\xdb\xcf\
\x81??B\xba\xfcRRI\xe5\xf9\xa13\x9d4\xed\
\x97\xfd\x83\xcd\xe6\xafhcH\xb7\x98\x1d\x18\x06\xa6\xb4\
\xb2~+\xeb\x00\x93W\xc9\xf8V\xc6\xb5\xfd\x1c\xdd\x96\
Q\xad\xac\x00\x96\xaf\x92e\xad\x9f\xbd\xc0\x93\xc0S\xad\
\xac\xfa\xe7\x85\xa4\x13&\x1f[\xe5\xe7\xdf\xf3y\x19$\
\xd5\xc9E\xc4_2\xb4\xa6x\xa9\x9f$I9\xd8\x85\
\xf2\xae\xc2f\xf3\x97$)G_'\xbe\xd9\xdb\xfc%\
I*\xd8d\xcau\xebV\x9b\xbf$I\x05\xd9\x1dx\
\x06\x9b\xbf$I\x8ds\x04\xe9\xccc\x9b\xbf$I\x0d\
s4\xe9\xd2\xa3\xa2\x9b\xffG\x8bxr\x92$i\xf5\
\x0e ]O\x5cD\xe3_\x0a\x1c[\xc8\xb3\x92$I\
C\xda\x14\xf8\x15\xf96\xff\xfb\x80\x17\x17\xf5\x84$I\
\xd2\xf0\xbd\x95\xb4lh\x96\x8d\xbf\x17\xf8\x220\xa9\xc0\
\xe7!I\x92:4\x1e\xf8g\xe0v\xbak\xfc\xcf\x00\
\xdf\x04f\x14[\xbe$I\xea\xd6^\xc0\xe9\xc0\xcd\x0c\
\xef\x8a\x81\x85\xa4\xe5\x86\x8f%\xdd\x91M\x92$\x0dC\
\x99o\xd43\x91t\xaf\xf5-I7J\x99@j\xfa\
\x8bH\xf7\x0d\x1f\xb8\xcdh\x7fT\x81\x92$I\x92$\
I\x92$I\x92$I\x92$I\x92$I\x92$I\
\x92$I\x92$I\x92$I\x92$I\x92$I\x92\
$I\x92$I\x92$I\x92$I\x92$I\x92$\
I\x92$I\x92$I\x92$I\x92$I\x92$I\
\x92$I\x92$I\x92$I\x92$I\x92$I\x92\
$I\x92$I\x92$I\x92$I\x92$I\x92$\
I\x92$I\x92$I\x92$I\x92$I\x92$I\
\x92$I\x92$I\x92$I\x92$I\x92$I\x92\
$I\x92$I\x92$I\x92$I\x92$I\x92$\
I\x92$I\x92*\xef\xff\x01\x05\xa3\x8b\xdeK\xbc\x80\
b\x00\x00\x00\x00IEND\xaeB`\x82\
\x00\x00\x0b+\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x01\x00\x00\x00\x01\x00\x08\x06\x00\x00\x00\x5cr\xa8f\
\x00\x00\x00\x09pHYs\x00\x00\x0e\xc4\x00\x00\x0e\xc4\
\x01\x95+\x0e\x1b\x00\x00\x00 cHRM\x00\x00z\
r\x00\x00\x80\xeb\x00\x00\xf5\x8d\x00\x00\x84\xec\x00\x00q\
\x90\x00\x00\xf6\x89\x00\x00;\x1d\x00\x00\x16G^\xee\x0e\
Z\x00\x00\x0a\xb1IDATx\xda\xec\xdd\xddo\x15\
\xf4\x1d\xc7\xf1rl\x8f-\xad\x80<\xa5\xb4\x14L)\
\x04\x1f\x22\x88\x05\xa7&\x14\x11\x10'\xf1\x02\xd0\xa0Y\
\x80\x0eC\x88\x9a\xb1 \x12\x05\xd4\x18Bp\x12\x8c\xba\
,8\x17\x82h\x14\x17\xc2\x05\xc1\xf9\x84B1\x19\x04\
ATt1\x80\xed\x82\x08\x94\x82\xb4\x04k{Z\xda\
\xee\x8a\xc4l\xba(\xb4\xd0\xd3\xef\xeb\xe2\xf5\x0f\xfc\xda\
\xcf\xfb\xa2=\xbf\xf3\xcbhkk\xcb\x00br\x08 \
\x00\x80\x00\x00\x02\x00\x08\x00 \x00\x80\x00\x00\x02\x00\x08\
\x00 \x00\x80\x00\x00\x02\x00\x08\x00 \x00\x80\x00\x00\x02\
\x00\x08\x00 \x00\x80\x00\x00\x02\x00\x08\x00 \x00\x80\x00\
\x00\x02\x00\x08\x00\xfc\x8f\xd6\xd6\xd6\xc4G\x1f}4\xb6\
\xac\xacl{~~\xfe\xb1\xcc\xcc\xcc\xe6\x91#G~\
:s\xe6\xccu\xb5\xb5\xb5\xbd\x9c\x91\x00\xd0E\xa5R\
\xa9dqqqeFFF\xdbO\xc9\xce\xcen\x98\
={\xf6Zg%\x00t1\xdb\xb7o\x1f;t\xe8\
\xd0\x83?7\xfe\x1f[\xbcx\xf1\xf2T*\x95tn\
\x02@\x170\x7f\xfe\xfc\x17~\xc9\xf0\xff[UUU\
\xb1\xf3\x13\x00\xd2Tuuu\xfe\xe8\xd1\xa3w\x9f\xcf\
\xf8\xcf\x998q\xe2\x16g)\x00\xa4\xa1\xab\xae\xba\xea\
\xdf\x172\xfes\x9c\xa5\x00\x90F^{\xed\xb5\xdf%\
\x93\xc9T{\x8c?##\xa3m\xf9\xf2\xe5\x8b\x9d\xab\
\x00\x90.\xbf\xa0\xed4\xfcs\x8a\x8a\x8a\x0e\xb7\xb6\xb6\
&\x9c\xad\x00\xd0\x89M\x980aK{\x8f\xff\x9c'\
\x9f|\xf2ig,\x00tR\xdf|\xf3\xcd\xa0\x8e\x1a\
\x7fFFF\xdb\x1dw\xdc\xf1\x9es\x16\x00:\xa1\xbc\
\xbc\xbc3\x1d9\xfe\x8c\x8c\x8c\xb6\xdc\xdc\xdczg-\
\x00t\xb2\x8f\xf5v\xf4\xf0\xfd7@\x00\xe8\x84\x1a\x1b\
\x1b\xb3\xef\xbd\xf7\xde\xbf\x0b\x80\x00\x10\xcc\xd7_\x7f]\
r1\x87/\x00\x02@'\xb1c\xc7\x8e[\xfa\xf6\xed\
{B\x00\x04\x80`V\xae\x5c\xb9\xf0R\x0c_\x00\x04\
\x80K\xec\xc1\x07\x1f\xfc\xcb\xa5\x1c\xbf\x00\x08\x00\x97\xc8\
\xa8Q\xa3>\xb9\xd4\xe3\x17\x00\x01\xe0\x12\xf8\xf2\xcb/\
\xaf\xeb\x0c\xe3\x17\x00\x01\xe0\x22\xcb\xca\xcaj\xee,\xe3\
\x17\x00\x01\xe0\x22\x999s\xe6\xba\xce4|\x01\x10\x00\
.\x92\xba\xba\xba^\x9dq\xfc\x02 \x00t\xb0\xfc\xfc\
\xfcc\x9du\xfc\x02 \x00t\xa0\xcd\x9b7O\xe9\xcc\
\xe3\x17\x00\x01\xa0\x0b\x5c\xe8\x11\x00\x01\xa0\x13\x996m\
\xdaF\x01\x10\x00\x829p\xe0\xc0\xb0!C\x86T\xa6\
\xcb\xf8\x05@\x00h'o\xbe\xf9\xe6\x8ct\x1a\xbe\x00\
\x08\x00\xed\xe4\x99g\x9ey\xac[\xb7nm\x02 \x00\
\x04r\xe6\xcc\x99\xbc;\xef\xbc\xf3\x9dt\x1c\xbe\x00\x08\
\x00\x17h\xe4\xc8\x91\x9f\xa6\xf3\xf8\x05@\x008O=\
z\xf48\x9d\xee\xe3\x17\x00\x01\xe0<\xacY\xb3\xe6\xf7\
]a\xfc\x02 \x00\xfc\x0a\xa9T*\xd9U\x86/\x00\
\x02\xc0\xafp\xea\xd4\xa9\xdeeee\xdb\x05@\x00\x08\
\xe6\xe3\x8f?\x1e\xd3\xd5\x86/\x00\x02\xc0/\x90H$\
Z\xba\xea\xf8\x05@\x00\xf8\x19---\x89\xf9\xf3\xe7\
\xbf\xd0\x95\xc7/\x00\x02\xc0O8~\xfcx\xff\xae>\
|\x01\x10\x00~Fqqq\xa5\x00\x08\x00\xc1\xbc\xfe\
\xfa\xeb\xf7'\x93\xc9T\x94\xf1\x0b\x80\x00\xf0#\xe9z\
\xa1G\x00\x04\x80\x0b0i\xd2\xa4\xf7\xa2\x0d_\x00\x04\
\xbc\xcf>\xfbldaa\xe1\x91\xa8\xe3\x17\x00\x01\
\x08\xed\x8a+\xae8\x13y\xfc\x02 \x00!\xa5\xd3\x97\
v\x0a\x80\x00\xd0\x8e\x1a\x1b\x1b\xb3\xef\xbb\xef\xbe\xf5\xc6\
/\x00\x02\x10Leee\xb1\xd1\x0b\x80\x00\x04\xb4s\
\xe7\xce\xdf\xf4\xeb\xd7\xef\x84\xd1\x0b\x80\x00\x04\xb3j\xd5\
\xaa\x05\xc6.\x00\x02\x10\xd0\xc3\x0f?\xfcgC\x17\x00\
\x01\x08\xa6\xba\xba:\xbf\xb4\xb4t\xb7\x91\x0b\x80\x00\x04\
\xb3u\xeb\xd6\xf1\xc6-\x00\x02\x10T\xb4\x0b=\x02 \
\x00\xb4\xb5e\x94\x97\x97\xaf5j\x01\x10\x80\x80\xea\xea\
\xeaz\x19\xb4\x00\x08@@\x03\x06\x0c8f\xcc\x02 \
\x00\x01\xbd\xf5\xd6[S\x0cY\x00\x04\xc0\x85\x1e\x04@\
\x00\x228y\xf2d_\xe3\x15\x00\x01\x08\xe8\xe0\xc1\x83\
%%%%\x07\x8dW\x00\x04 \x98\x0d\x1b6L7\
Z\x01\x10\x80\x80\x9e}\xf6\xd9E\x11\xbf\xb4S\x00\x04\
\xb4\xef\xbf\xff>\xef\xae\xbb\xee\xfa\x87\xb1\x0a\x80\x00\
\x044j\xd4\xa8O\x0cU\x00\x04 \x98\x0f>\xf8`\
B\xcf\x9e=O\x1b\xa9\x00\x08@@YYY\xcd\x06\
*\x00\x02\x10LSSS\xd20\x05@\x00\x02\xba\xe6\
\x9ak\xfee\x94\x02 \x00\x01\xed\xd9\xb3\xa7\xd4 \x05\
@\x00\x02\xba\xec\xb2\xcbZ\x8cQ\x00\x04 \xe0\x85\x9e\
\x05\x0b\x16\xac2D\x01\x10\x80`jjj\xfa\x1b\xa0\
\x00\x08@PC\x86\x0c\xa94@\x01\x10\x80`\xd6\xaf\
_?\xe3\xf2\xcb/\xf7\xa5\x9d\x02 \x00\x91477\
g\xce\x9d;\xf7\xafF'\x00\x02\x10\xd0\xe4\xc9\x93\xdf\
18\x01\x10\x80`\xf6\xed\xdbw\xfd\xc0\x81\x03\x0f\x1b\
\x9b\x00\x08@@=z\xf4p\xa1G\x00\x04 \x9a%\
K\x96,70\x01\x10\x80\x80R\xa9\x94\x0b=\x02 \
\x00\x11UUU\x15\x1b\x96\x00\x08@@\xbbv\xed\x1a\
\xd3\xbf\x7f\xff\xe3\x86%\x00\x02\x10\xcc\xf3\xcf?\xffG\
\x83\x12\x00\x01\x08\xa6\xbe\xbe\xbe\xbb!\x09\x80\x00\x04t\
\xfc\xf8\xf1\xfec\xc6\x8c\xd9eH\x02 \x00\xc1TT\
T\x8c3 \x01\x10\x80\xa0\x92\xc9\xa4\x0b=\x02 \x00\
\xd1\xcc\x993g\x8d\xe1\x08\x80\x00\x04t\xfa\xf4\xe9\x1e\
F#\x00\x02\x10PAA\xc1\x11\x83\x11\x00\x01\x08\xf8\
\xe1\x9e\xbc\xbc\xbc3\xc6\x22\x00\x02\x10P\xbf~\xfdN\
\x18\x8a\x00\x08@@\x8f>\xfa\xe8J#\x11\x00\x01\x08\
h\xef\xde\xbd\xa3|_\xbf\x00\x08@\xd4\x031\x0e\x01\
\x10\x00\x01@\x00\x04 \x98'\x9exb\x99q\x08\x80\
\x00\x04u\xed\xb5\xd7z\xa5W\x00\x04 \xa2\x0d\x1b6\
L7\x0c\x01\x10\x80\xa0\xee\xb9\xe7\x9e\x0d\x86!\x00\x02\
\x10\xd4\xcd7\xdf\xbc\xd30\x04@\x00\x82***\xf2\
\x88\x87\x00\x08@T\x83\x07\x0f>d\x18\x02 \x00A\
\x95\x95\x95m7\x0c\x01\x10\x80\xa0f\xcd\x9a\xb5\xce0\
\x04@\x00\x82z\xff\xfd\xf7'\x19\x86\x00\x08@`\xb7\
\xdf~\xfb\x87\xc6!\x00\x02\x10\xd4\xea\xd5\xab\xe7\x19\x87\
\x00\x08\x80\xcb@\x08\x80\x00D\x94\x99\x99\xd9l \x02\
\x00A\x9d8q\xa2o\x9f>}\xbe3\x12\x01\x10\
\x80\xa0\xdex\xe3\x8d\xfb\x8dD\x00\x04 \xb0\x9bn\xba\
\xc9\xdb\x7f\x02 \x00Q\xd5\xd4\xd4\xf4\x1f>|\xf8W\
\xc6\x22\x00\x02\x10\xd8\xd8\xb1c}DX\x00\x04 \xb2\
\xd6\xd6\xd6\x84\xd1\x08\x80\x00\x04\xf6\xf2\xcb/\xcf\xf5/\
B\x01\x10\x80\xe0\x0a\x0b\x0b\xbd\x15(\x00\x02\x10\xd5\xe1\
\xc3\x87\x07\x1a\x90\x00\x08@`\xcd\xcd\xcd\x99s\xe6\xcc\
YcH\x02 \x00\xee\x0d \x00\x02\x10\xd5\xb6m\xdb\
\xc6\x19\x94\x00\x08@`\xd5\xd5\xd5\xf9\xa5\xa5\xa5\xbb\x0d\
K\x00\x04 \xa8\xfa\xfa\xfa\xee\x86%\x00\x02\x10\x5c\x22\
\x91\xf0\xb4\xb8\x00\x08@T\x1b7n\x9c\x9a\x93\x93\xd3\
`d\x02 \x00\x81\x0d\x1b6l\xbf\xa1\x09\x80\x00\x04\
u\xf2\xe4\xc9\xbe\xb7\xdez\xeb?\x8dM\x00\x04 \xb0\
E\x8b\x16\xfd\xc9\xe0\x04@\x00\x02{\xe9\xa5\x97|\xdb\
\xb0\x00\x08@d{\xf7\xee\x1d5`\xc0\x80c\xc6'\
\x00\x02\xe0\xf1\x11#\x14\x00\x01\x88\xa8\xa9\xa9)i\x80\
\x02 \x00\xc1\xad[\xb7n\xa6!\x0a\x80\x00\x04\xd6\xb3\
g\xcf\xd3\xc6(\x00\x02\x10\xd8\xd1\xa3G\x0b\x0cR\x00\
\x04 \xb0)S\xa6l6J\x01\x10\x80\xe0\xbau\xeb\
f\x9c\x02 \x00\x91y\x97P\x00\x04 \xb0\x03\x07\x0e\
\x0c+))9h\xa4\x02 \x00\x81M\x9f>}\x83\
\xa1\x0a\x80\x00\x04~\x8d\xe8\x91G\x1eYe\xac\x02 \
\x00\x81m\xde\xbcyJnnn\xbd\xd1\x0a\x80\x00\x04\
\xb5g\xcf\x9eR\xa3\x15\x00\x01\x08\xac\xb6\xb6\xb6\xd7m\
\xb7\xdd\xb6\xcdx\x05@\x00\x5c$B\x00\x04 \xaa\xac\
\xac,\xaf\x14\x0b\x80\x00D\xb6o\xdf\xbe\xeb\x0dY\x00\
\x04 \xb0\x1bn\xb8\xe1Sc\x16\x00\x01\x08\xec\xec\xd9\
\xb3\x99\x06-\x00\x02\x10\xd8\x8a\x15+\x1es\x91H\x00\
\x04 \xb8\xde\xbd{\xbbH$\x00\x02\x10\xd5\xfe\xfd\xfb\
\x87\x19\xb7\x00\x08@`?\xfc\xf0C\xf7\xa9S\xa7n\
4r\x01\x10\x80\xc0^|\xf1\xc5?x\xa9X\x00\x04\
\xb0M\x9b6\xddm\xec\x02 \x00\x81UUU\x15\
\x0f\x1f>\xfc+\xa3\x17\x00\x01\x08\xea\xd4\xa9S\xbd\x8d\
^\x00\x04 \xf8\x17\x8c,^\xbcx\xb9\xf1\x0b\x80\x00\
\x04\x96\x99\x99\xe9\x22\x91\x00\x08@d\x85\x85\x85G\x04\
\xc0\xef\x81\x00\x04\xf5\xed\xb7\xdf\x0e\x1c1b\xc4\xe7\x02\
\x80C\x08\xec\x81\x07\x1e\xf8\x9b\x00\x08\x00\xc1%\x93\xc9\
\x94\x00\x08\x00AUTT\x8c\xbb\xf2\xca+k\x05@\
\x00\x08l\xf4\xe8\xd1\xbb\x05@\x00\x08\xaa\xbe\xbe\xbe\xbb\
\x00\x08\x00\xc1=\xf7\xdcs\x0b\x04@\x00\x08,''\
\xa7A\x00\x04\x80\xc0*++\x8b\x05@\x00\x08\xac\xb1\
\xb11{\xc6\x8c\x19\xeb\x05@\x00\x08|\x91H\x00\x04\
\x80\xe0\xde~\xfb\xed\xdf\x0a\x80\x00\x10XAA\xc1\x11\
\x01\x10\x00\x02\x9b0a\xc2\x16\x01\x10\x00\x82jjj\
J\x96\x97\x97\xaf\x15\x00\x01 \xb0W_}uf\xba\
^$\xf2\xf3\x13\x00\xda\xc1\xd6\xad[\xc7\x0b\x80\x00\x10\
\xd8\xb1c\xc7\xf2o\xbc\xf1\xc6O\x04@\x00\x08\xec\xa1\
\x87\x1e\xfa\x8b\x00\x08\x00\x81\xa5\xcb+\xc5~V\x02@\
\x07\xd9\xb1c\xc7-\x02 \x00\x046t\xe8\xd0\x83\x02\
\x00\x04\xd6\xd0\xd0\x90-\x00\x02@`\x0b\x17.\x5c\
)\x00\x02@p\xb9\xb9\xb9\xf5\x02 \x00\x04v\xe8\xd0\
\xa1A\x02 \x00\x046~\xfc\xf8\x0f\x05@\x00\x08\xec\
\xa9\xa7\x9ezZ\x00\x04\x80\xc0^y\xe5\x95\xd9\x02 \
\x00\x04\xf6\xc5\x17_\x5c7h\xd0\xa0C\x02 \x00\x04\
u\xf4\xe8\xd1\x02\x01\x10\x00\x02;{\xf6l\xe6\xbcy\
\xf3V\x0b\x80\x00\xe0\x22\x91\x00\x08\x00Q\xf5\xe9\xd3\xe7\
\xbb\x8e\x1e\x7fNNN\x83\xb3\x16\x00:\xa9\x9a\x9a\x9a\
\xfe\x1d\x19\x80I\x93&\xbd\xe7\x9c\x05\x80Nl\xda\xb4\
i\x1b;*\x00K\x97.]\xe6\x8c\x05\x804\x90H\
$Z\xdas\xfcEEE\x87[ZZ\x12\xceV\x00\
H\x03\x9b6m\xba\xbb{\xf7\xee\xedv\x91h\xd9\xb2\
eK\x9d\xab\x00\x90f\xae\xbe\xfa\xea\xaf\xfc\xf5_\x00\
\x08\xaa\xb6\xb6\xb6\xd7\xb8q\xe3\xb6]\xc8\xf8'N\x9c\
\xb8\xc5Y\x0a\x00i\xec\xf1\xc7\x1f_q>\xe3\xaf\xaa\
\xaa*v~\x02@\x17\xf0\xee\xbb\xefN\x1e<x\xf0\
/\xbaC\xb0d\xc9\x92\xe5\xa9T*\xe9\xdc\x04\x80.\
\xa4\xa1\xa1!\xfb\xffE ;;\xbb\xa1\xbc\xbc|\xad\
\xb3\x12\x00\xba\xa8\x96\x96\x96DEE\xc5\xb8\xb2\xb2\xb2\
\xed\x05\x05\x05G\x12\x89D\xcb\x88\x11#>\x9f5k\
\xd6\xba\xba\xba\xba^\xceH\x00\x00\x01\x00\x04\x00\x10\x00\
@\x00@\x00\x1c\x02\x08\x00 \x00\x80\x00\x00\x02\x00\x08\
\x00 \x00\x80\x00\x00\x02\x00\x08\x00 \x00\x80\x00\x00\x02\
\x00\x08\x00 \x00\x80\x00\x00\x02\x00\x08\x00 \x00\x80\x00\
\x00\x97\xc4\x7f\x06\x00R \x97\x14:@H\xe3\x00\x00\
\x00\x00IEND\xaeB`\x82\
\x00\x00\x0bl\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x01\x00\x00\x00\x01\x00\x08\x06\x00\x00\x00\x5cr\xa8f\
\x00\x00\x00\x09pHYs\x00\x00\x0e\xc4\x00\x00\x0e\xc4\
\x01\x95+\x0e\x1b\x00\x00\x00 cHRM\x00\x00z\
r\x00\x00\x80\xeb\x00\x00\xf5\x8d\x00\x00\x84\xec\x00\x00q\
\x90\x00\x00\xf6\x89\x00\x00;\x1d\x00\x00\x16G^\xee\x0e\
Z\x00\x00\x0a\xf2IDATx\xda\xec\xdd\xedo\xd5\
\xe5\x1d\xc7\xf1\xab\xc7\xf6\xd8R\x04\xe4.\xa5\xa5`J\
!x\x13A,85\xa1\x88\x80:\x89\x0f\x04\x0d\x9a\
\x05\xec0\xc6\xa8\x19\x0b\xa2Q\xf0&\x86\x10\x9c\x04\xa3\
.F\xe7B\x10\x8d\xe2Bx@p\xde\xa1PL\x06\
A\x10\x15]\x8c`\xbb \x02\xa5 -\xc1J\x0f\xa5\
={\xd4d331\x0a\x19\xf0}=x\xff\x03\xd7\
\xf5\xf9\xbd\x1eP~\xe7\x97\xf2\xf9|\x92\x143\x87 \
\x01@\x12\x00$\x01@\x12\x00$\x01@\x12\x00$\x01\
@\x12\x00$\x01@\x12\x00$\x01@\x12\x00$\x01@\
\x12\x00$\x01@\x12\x00$\x01@\x12\x00$\x01@\x12\
\x00$\x01@\x12\x00$\x01@\x12\x00\xfe\xaf\xb5\xb6\xb6\
\xf6\x995k\xd6\x8aQ\xa3F}\x96\xc9d:\xcb\xcb\
\xcb\xf7\xd6\xd6\xd6n\xac\xaf\xaf\x9f\xd0\xd9\xd9\x99qF\
\x02\xc0YZ]]\xdd\xf2\xe2\xe2\xe2c)\xa5\xfc\xff\
j\xe8\xd0\xa1\xbb\x8f\x1d;V\xec\xac\x04\x80\xb3\xa8\x5c\
.\x97]\xb0`\xc1\xa2\x9fz\xf0\x7f\x8c\xc0;\xef\xbc\
s\xbds\x13\x00\xce\x82\x1a\x1b\x1b\xab~\xce\x83\xff\xe3\
\x1e~\xf8\xe1\xc5\xceO\x008\x83\x9b<y\xf2\xba_\
\xf2\xf0w7a\xc2\x84\x0d---}\x9c\xa5\x00p\
&\x1e\xc8\xafx\xf8\xbb\xbb\xf0\xc2\x0b\xbft\x96\x02\xc0\
\x19\xd6\xc2\x85\x0b\x1f9\x19\x00\xa4\x94\xf2=z\xf4h\
[\xb3f\xcdM\xceU\x008\x03\xea\xec\xec\xccTV\
V\xee9Y\x00\xa4\x94\xf2\x99L\xa6\xd3\xd9\x0a\x00g\
@\x8f<\xf2\xc8\xc2\x93\xf9\xf0\xffg\xd3\xa6M[\xed\
\x8c\x05\x80\xd3\xb8)S\xa6\xbc{\xaa\x00H)\xe5\x9b\
\x9b\x9b\x07:g\x01\xe04\xad\xa4\xa4\xe4\xd8\xa9\x04 \
\xa5\x94\xef\xd7\xaf\xdfw\xceZ\x008K\xff\xf5\xff\xe7\
TPP\x90w\xde\x02@P\x00RJ\xf9\xbb\xef\xbe\
\xfb\x85\x13'N\x14:w\x01 \x00\xdd\xed\xdb\xb7\
\xaf\xdc\xd9\x0b\x00A\x01\x182d\xc8\xee\xcf?\xff\xfc\
\x12\xe7/\x00\x04\x04\xa0\xbb\x97_~\xf9\x0ew \x00\
\x04\x05 \xa5\x94\x7f\xfc\xf1\xc7\x9fp\x0f\x02@P\x00\
RJ\xf9\x89\x13'~\xe0.\x04\x80\xa0\x00\xa4\x94\xf2\
\xbbw\xef\x1e\xe2>\x04\x80\xa0\x00\xa4\x94\xf2\xa5\xa5\xa5\
m\xeeD\x00\x08\x0a@w\xf3\xe6\xcd[\xe2n\x04\x80\
\xa0\x00\xa4\x94\xf2~oP\x00\x08\x0c@J)?|\
\xf8\xf0]\xeeH\x00\x08\x0a@J)\xbfi\xd3\xa6\xab\
\xdc\x93\x00\x10\x14\x00/\x12\x09\x00\xc1\x01H)\xe5\xef\
\xbd\xf7\xde\xe7\xdd\x97\x00\x10\x14\x80\x94R\xfe\xf2\xcb/\
\xffx\xff\xfe\xfde\xeeM\x00\x08\x08@w\xeb\xd7\xaf\
\x9f\xe8\xee\x04\x80\xa0\x00d\xb3\xd9\xdc+\xaf\xbc2\xd3\
\xfd\x09\x00\x01\x01\xe8\xae\xae\xaen\xf9\xf1\xe3\xc7\xb3\xee\
Q\x00\x08\x08@J)?i\xd2\xa4u\xeeQ\x00\x08\
\x0a@J)_^^\xbe\xd7]\x0a\x00A\x01H)\
\xe5\xdfz\xeb\xad\xdf\xbaO\x01 (\x00\xdduuu\
e\xdc\xab\x00\x10\x14\x80\x193f\xacloo\xf7\x22\
\x91\x00\x10\x11\x80\xee\x1a\x1a\x1a\xaa\xdc\xaf\x00\x10\x14\x80\
\x94R\xbe\xa4\xa4\xe4\x98;\x16\x00\x82\x02\x90R\xca?\
\xfd\xf4\xd3s\xdd\xb3\x00\x10\x14\x80\xee\xda\xda\xdaz\xb8\
o\x01 (\x00c\xc7\x8e\xdd\xea\xbe\x05\x80\xa0\x00\xa4\
\x94\xf2\xe7\x9f\x7f~K}}\xfd\x04\xf7.\x87\x10\x10\
\x80\xee\x17\x89\xdc\xbb\x1cBP\x00\xba\xbb\xf3\xce;\xff\
\xea\xfe\x01\xe0 \x82\x02\x90R\xca\x8f\x1a5\xea\xb3o\
\xbf\xfdv\xb0\x1d\x00\x00\x00A\xab\xa8\xa8\xf0\x22\x11\x00\
\x00\x10\xb9\xc2\xc2\xc2\x0e[\x00\x00\x00\x027\x7f\xfe\xfc\
E^$\x02\x00\x00\x82w\xf8\xf0\xe1\xbev\x01\x00\x00\
\x04m\xe4\xc8\x91_666z\x91\x08\x00\x00\x88\xdc\
\x9a5kn\xb2\x0f\x00\x00 h\x99L\xa6\xf3\xb9\xe7\
\x9e\xfb\x83\x8d\x00\x00\x00\x81\xbb\xf9\xe6\x9bW\xff\xf0\xc3\
\x0f^$\x02\x00\x00\x22\xf7\xd5W_\x8d\xb0\x17\x00\x00\
h}\xfb\xf6\xfd\xce^\x00\x00\x80\xc0\x15\x14\x14\xe4\
\x17/^\xfc\x90\xdd\x00\x00\x00\x81;q\xe2D\xa1\xed\
\x00\x00\x00\x81\xbb\xec\xb2\xcb>\xb1\x1f\x00\x00 p;\
v\xec\xb8\xd4\x86\x00\x00\x80\xc0\x15\x15\x15y\x91\x08\x00\
\x00\x88\x9e\xaf\x14\x03\x00\x00\x81\xbb\xe6\x9ak6\xb4\xb4\
\xb4\xf4\xb1)\x00\x00 p\xdb\xb6m\xab\xb1+\x00\x00\
h\xa5\xa5\xa5mk\xd7\xae\x9dj[\x00\x00@\xe0\
\xee\xbf\xff\xfe\xa5~`\x04\x00\x00\x08\xdc\xf4\xe9\xd3W\
\xd9\x18\x00\x00\x10\xb8\xea\xea\xea];w\xee\xf4\x22\x11\
\x00\x00\x10\xb5~\xfd\xfay\x91\x08\x00\x00\x88\xfe\x22\x91\
\xbd\x01\x00\x00\xc1\x9b:u\xeaZ\xbb\x03\x00\x00\x02\xb7\
o\xdf\xber\xdb\x03\x00\x00\x02\xd7\xbbw\xef#\xf6\x07\
\x00\x00\x04n\xc5\x8a\x153m\x10\x00\x00\xf0\x22\x91\x17\
\x89\x00\x00\x80\xa8]{\xed\xb5\x1f\xd8\x22\x00\x00\x10\xb8\
A\x83\x06\xed\xdf\xbe}\xfb\x18\x9b\x04\x00\x00\x02\xf7\xe2\
\x8b/\xdem\x97\x00\x00@\xe0\x1e|\xf0\xc1?\xd9&\
\x00\x00\x10\xb8\xab\xaf\xbe\xfa\x1f\x87\x0e\x1d\xeao\xa3\x00\
\x00@\xd0F\x8c\x18\xf1\x95\x8d\x02\x00\x00\x81+))\
9\xb6z\xf5\xea\x9bm\x15\x00\x00\x08\xfc\xa5b[\x05\
\x00\x00\x82\xd7\xd6\xd6\xe6+\xc5\x00\x00@\xd4jjj\
\xb6655\x95\xd9-\x00\x00\x10\xb8\x0d\x1b6L\xb0\
]\x00\x00 x\xf6\x0b\x00\x00\x04n\xf6\xec\xd9\xcb:\
::|\xa9\x18\x00\x00\x88\xdc\x9e={\x06\xdb2\x00\
\x00\x10\xb4\x8a\x8a\x8a\xbd\xb6\x0c\x00\x00\x04\xae\xb0\xb0\xb0\
\xe3\xa5\x97^\xba\xcb\xa6\x01\x00\x80\xc0\xf9\x1a\x11\x00\x00\
\x10\xbc\xf1\xe3\xc7o\xb4m\x00\x00 p#G\x8e\xfc\
\xb2\xb9\xb9y\xa0\x8d\x03\x00\x00A\xbb\xe2\x8a+\xb6\xd8\
8\x00\x00\x10\xb8\xd7_\x7f\xfdv;\x07\x00\x00\x02\x7f\
\x97\xf0\xe0\xc1\x83~X\x04\x00\x00\x88\xfc'B[\x07\
\x00\x00\xbc7 \x00\x00 b/\xbc\xf0\x82_\x1b\x06\
\x00\x00||D\x00\x00@\xc8\xde{\xef\xbd)6\x0f\
\x00\x00\x04m\xd6\xacY+l\x1e\x00\x00\x08Zmm\
\xad\xff\x22\x0c\x00\x00Dm\xe8\xd0\xa1\xbbm\x1e\x00\x00\
\x08Zee\xe5\x1e\x9b\x07\x00\x00\x82v\xe5\x95Wn\
\xb6y\x00\x00 h\xb7\xdcr\xcb*\x9b\x07\x00\x00\x82\
\xb6j\xd5\xaa\xe96\x0f\x00\x00\x04\xec\xe2\x8b/\xfe\xa7\
\xbd\x03\x00\x00A{\xf4\xd1G\x17\xda;\x00\x00\xe0e\
\x01\x00\x00\x00\x00\x80\x00p\xd6w\xce9\xe7tn\
\xdf\xbe}\x8c\xad\x03\x00\x00\x01{\xe0\x81\x07\x96\xd89\
\x00\x00\x10\xb0\x01\x03\x06\x1c\xb4q\x00\x00 `={\
\xf6<\xbae\xcb\x96q6\x0e\x00\x00\x04\xab\xbc\xbc\xdc\
\xb7\x02\x01\x00\x80\x88\x1d9r\xa4\x97]\x03\x00\x00\x01\
\x9b={\xf62\x9b\x06\x00\x00\x82\x95\xcdfs\xb6\x0c\
\x00\x00\x04\xad\xbe\xbe~\x82-\x03\x00\x00\xc1\x1a7n\
\xdc\x96\x03\x07\x0e\xf8\x00(\x00\x00\x10\xb1\xb6\xb6\xb6\x1e\
6\x0c\x00\x00\x04\xeb\x99g\x9e\xf9\xa3\xed\x02\x00\x00\xc1\
\x1a8p\xe0\x01\xff\xb9\x07\x00\x00\x08Zccc\x95\
\xcd\x02\x00\x00\x01\xcb\xe5rY{\x05\x00\x00\x02\xb6`\
\xc1\x82E\xb6\x0a\x00\x00\x04\xabW\xaf^Gl\x14\x00\
\x00\x08\xd8\xe0\xc1\x83\xf7\xec\xd8\xb1\xe3R\x1b\x05\x00\x00\
\x82u\xfd\xf5\xd7\xbfm\x9b\x00\x00@\xc0\xee\xba\xeb\xae\
\xbfttt\x14\xda&\x00\x00\x10\xa8s\xcf=7\xb7\
r\xe5\xca\x196\x09\x00\x00\x04k\xd8\xb0a\x0d\xb6\x08\
\x00\x00\x04\xad\xb9\xb9\xd9\x0b=\x00\x00@\xb4\xe6\xce\x9d\
\xbb\xb4\xab\xab+c\x87\x00\x00@\xc0\xdf\xeb\xb7?\x00\
\x00 `\xdb\xb6m\xab\xb1=\x00\x00 `\x17]t\
\x91\xaf\xf4\x02\x00\x00\x11;~\xfc\xb8\x17z\x00\x00\x80\
h\x15\x15\x15u\xd8\x1a\x00\x00\x10\xb0\xde\xbd{\x1fy\
\xff\xfd\xf7'\xd9\x1a\x00\x00\x10\xac1c\xc6|lc\
\x00\x00@\xc0n\xbc\xf1\xc6\xbf\x7f\xff\xfd\xf7=m\x0c\
\x00\x00\x08TAAA\xfe\xa9\xa7\x9ez\xd0\xb6\x00\x00\
\x80\x80\xadZ\xb5j\xba]\x01\x00\x00\xc1\xaa\xae\xae\xde\
\xb5k\xd7\xaej\x9b\x02\x00\x00\x02v\xe8\xd0\xa1\xfe\xf6\
\x04\x00\x00\x04\xcc\x0b=\x00\x00@\xc0\xde|\xf3\xcd\xa9\
6\x04\x00\x00\x04l\xd0\xa0A\xfb\xed\x07\x00\x00\x08X\
kkk\x1f\xdb\x01\x00\x00\x02VWW\xb7\xdcn\x00\
\x00\x80`e\xb3\xd9\x9c\xbd\x00\x00\x00A[\xbf~\xfd\
D{\x01\x00\x00\x82USS\xb3\xb5\xa9\xa9\xa9\xccV\
\x00\x00\x80`\xddw\xdf}\x7f\xb6\x11\x00\x00 `K\
\x97.\x9dk\x1f\x00\x00@\xb0\x06\x0c\x18pp\xf3\xe6\
\xcd\xbf\xb1\x0d\x00\x00 `\x0d\x0d\x0dUv\x01\x00\x00\
\x04\xeb\xb6\xdbn[\xd9\xde\xde^l\x13\x00\x00\x80\x17\
z\x04\x00\x00D\xe8\xbc\xf3\xce;j\x07\x00\x00@\xc0\
***\xf6~\xfa\xe9\xa7\xa3\xed\x00\x00\x00\x08\xd6\x94\
)S\xdeu\xff\x00p\x10\x01\x7f\xb4\xd3\xbd\xcb!\x04\
\x04 \x9b\xcd\xe6^{\xed\xb5\xdb\xdd\xbb\x1cB0\x00\
\xaa\xaa\xaa\x1a\xdc\xb7\x00\x10\x14\x80\x03\x07\x0e\x0ct\xdf\
\x02@0\x00\xe6\xcc\x99\xf3lgg\xa7\xbf\xf1\x0b\x00\
\xd1\x00\xc8d2\x9d\xeeX\x00\x08\x08\xc0G\x1f}4\
\xce\xfd\x0a\x00\xc1\x00\xa8\xad\xad\xddx\xf8\xf0\xe1\xbe\xee\
V\x00\x08\x08@.\x97\xcb\xbaW\x01 \x18\x00\xcb\x96\
-\xfb\xbd\xfb\x14\x00\x02\x02\xd0\xabW\xaf#\xeeR\x00\
\x08\x08\xc0\xe8\xd1\xa3?q\x8f\x02@@\x00n\xb8\xe1\
\x86\xb7\x8f\x1e=\xda\xd3=\x0a\x00\x81\x00(((\xc8\
?\xf9\xe4\x93\x0f\xb9?\x01 \x00o\xbc\xf1\xc6\x0c\
w'\x00\x04\x03`\xd8\xb0a\x0d;w\xee\x1c\xe1\xde\
\x04\x80`\x00L\x9b6m\xb5\xfb\x12\x00\x82\x02\xe0G\
;\x05\x80\x80\x00\xac]\xbbv\xaa{\x12\x00\x02\x02P\
VV\xb6\xdf\x1d\x09\x00\x01\x01hmm\xed\xe3~\x04\
\x80\x80\x00\xcc\x9c9s\x85\xbb\x11\x00\x82\x01PTT\
\xd4\xe1N\x04\x80\x80\x00|\xf1\xc5\x17\x97\xb8\x0f\x01 \
\x00c\xc6\x8c\xf9\xd8]\x08\x00\x01\x01\xb8\xe7\x9e{\
\x9ew\x0f\x02@@\x00\x96,Y2\xcf\x1d\x08\x00\xc1\
\x00\xe8\xdf\xbf\xff\xc1M\x9b6]\xe5\xfc\x05\x80\x80\x00\
|\xfd\xf5\xd7\xd5\xce^\x00\x08\x06\xc0\xad\xb7\xde\xfa\xb7\
\xf6\xf6\xf6b\xe7.\x00\x04\x04\xc0\x0b=\x02\xc0iV\
iii\xdb\xa9~\xf0{\xf6\xecy\xd4Y\x0b\x00\xa7\
a\xd7]w\xdd\xbb\xa7\xf2\xe1\xff\xe6\x9bo\x868g\
\x01\xe04\xed\xb1\xc7\x1e{\xe2T=\xfc\x93&MZ\
\xe7\x8c\x05\x80\xd3\xb8\xae\xae\xaeLee\xe5\x9e\x93\xfd\
\xf0;[\x01\xe0\x0ci\xd1\xa2E\xf3O\xd6\x83\x9f\xcd\
fs\xaf\xbe\xfa\xea\xef\x9c\xab\x00\x10\xec\xaf\x01\x17\x5c\
p\xc1\xbf\x9c\xa5\x00p\x066y\xf2\xe4u\xbf\xe6\xe1\
\x1f;v\xec\xd6\xa6\xa6\xa62g)\x00\x9c\xa156\
6V\xfd\x92\x87\x7f\xce\x9c9\xcf:?\x01\xe0,(\
\x97\xcbe\xe7\xcf\x9f\xbf\xe8\xe7<\xf8\xc3\x87\x0f\xdf\xb5\
q\xe3\xc6\xf1\xceM\x008\xcb\xba\xe3\x8e;\x96\x17\x17\
\x17\x1f\xfb\xa9\x87\xbf\xaa\xaa\xaa!\x97\xcbe\x9d\x95\x00\
p\x96\xd6\xd2\xd2\xd2g\xe6\xcc\x99+F\x8f\x1e\xfdI\
aaaGYY\xd9\xfe\xda\xda\xda\x8d\x1f~\xf8\xe1\
x\xff\xadW\x00\x90\x04\x00I\x00\x90\x04\x00I\x00\x90\
\x04\x00I\x00\x90\x04\x00I\x00\x90\x04\x00I\x00\x90\x04\
\x00I\x00\x90\x04\x00I\x00\x90\x04\x00I\x00\x90\x04\x00\
I\x00\x90\x04\x00I\x00\x90\x04\x00I\x00\x90\x04\x00I\
\x00\x90\xf4_\xfd{\x00\x9ep\x97\x14J\x1d-\xa2\x00\
\x00\x00\x00IEND\xaeB`\x82\
\x00\x00\x0a\xcb\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x01\x00\x00\x00\x01\x00\x08\x06\x00\x00\x00\x5cr\xa8f\
\x00\x00\x00\x09pHYs\x00\x00\x0e\xc4\x00\x00\x0e\xc4\
\x01\x95+\x0e\x1b\x00\x00\x00 cHRM\x00\x00z\
r\x00\x00\x80\xeb\x00\x00\xf5\x8d\x00\x00\x84\xec\x00\x00q\
\x90\x00\x00\xf6\x89\x00\x00;\x1d\x00\x00\x16G^\xee\x0e\
Z\x00\x00\x0aQIDATx\xda\xec\xddkl\x95\
\xf5\x1d\xc0\xf1\xd3c{l)\x02R \xd0R0\xe5\
\x12\xbcD\xb0\x16\x9c\x9aPD@\x9c\xc4\x17\x80\x06\xcd\
\x02t\x18B\xd4\x8c\x05\x91(\xa0\xc6\x10\x82\x93`\xd4\
\xc5\xe0\x5c\x08\xa2Q\x5c\x1a^\x10\x9cw\xa15\x19\x04\
AT\xd4\x18@\xba \x02\xa5\x5cZ\x82\xb57\xdag\
/\xdc\xa2S)\x85^8\xe7y>/>\xef\xc9\xf3\
\xff\xfd\xbeI\x0f\xcf%\x16\x04A\x0c\x88&\x17\x01\x04\
\x00\x10\x00@\x00\x00\x01\x00\x04\x00\x10\x00@\x00\x00\x01\
\x00\x04\x00\x10\x00@\x00\x00\x01\x00\x04\x00\x10\x00@\x00\
\x00\x01\x00\x04\x00\x10\x00@\x00\x00\x01\x00\x04\x00\x10\x00\
@\x00\x00\x01\x00\x04\x00\x10\x00@\x00\x00\x01\x00\x04\x00\
\x10\x00@\x00\x00\x01\x00\x04\x00\x10\x00@\x00\x00\x01\x00\
\x01p\x11@\x00\x00\x01\x00\x04\x00\x10\x00@\x00\x00\x01\
\x00\x04\x00\x10\x00@\x00\x00\x01\x00\x04\x00\x10\x00@\x00\
\x00\x01\x00\x04\x00\x10\x00@\x00\x00\x01\x00\x04\x00\x10\x00\
@\x00\x00\x01\x00\x04\x00\x10\x00@\x00\x00\x01\x00\x04\x00\
H\xed\x00\xb4\xb4\xb4\xc4\x1f\x7f\xfc\xf1'o\xbb\xed\xb6\
w\xb3\xb3\xb3kc\xb1X\x00a\x95\x95\x95U7i\
\xd2\xa4w\x97.]\xba\xac\xb9\xb99\x1e\xe9\x00,_\
\xbe|q~~\xfeA\x83A\x14\xe5\xe7\xe7\x1f\x5c\xb6\
l\xd9\xd2\xc8\x05`\xe2\xc4\x89\xef\x1b\x00\xf8\xc9\xc4\x89\
\x13\xdf\x8fD\x00***\x0a\x1c8\xfcZEEE\
A\xa8\x03\xd0\xd0\xd0\x90p\xd0pv\x0d\x0d\x0d\x89\xd0\
\x06`\xf1\xe2\xc5\xcb\x1d2\x9c\xdd\x92%K\x96\x872\
\x00\xb3g\xcf^\xeb\x80\xe1\xdcJJJ\xd6\x86*\x00\
\xd5\xd5\xd5\xbd233\xeb\x1c.\x9c[fff]\
MMM\xaf\xd0\x04`\xe6\xcc\x99\xeb\x1c,\xb4\xdd\xac\
Y\xb3\xd6\x85&\x00\xa3F\x8d\xfa\xd4\xa1B\xdb\x8d\x1c\
9\xf2\xf3\xd0\x04 ==\xbd\xc9\xa1B\xdb\xc5\xe3\xf1\
\xe6\xd0\x04\xa0\x7f\xff\xfeG\x1c*\xb4]nn\xee\xa1\
\xd0\x04\xa0\xb8\xb8\xb8\xdc\xa1B\xdb\x15\x17\x17\x97\x87&\
\x00\x1f}\xf4\xd1X\x87\x0amWVV6.4\x01\
hii\x89\x17\x14\x14\xecw\xb0pn\x83\x07\x0f>\
\xd0UO\x0av\xe9m\xc0\xc3\x86\x0d\xdb\xe7\x80\xa1\xf5\
\xe5\xaf\xab\xab\xcb\x0c\xe5\xad\xc0\xe5\xe5\xe5\xfe\x14\x80V\
\xbc\xf3\xce;\x93C\xfd4\xe0\xfc\xf9\xf3\x9fs\xd0\xf0\
k\x8f>\xfa\xe8\x8aH\xbc\x0f`\xf4\xe8\xd1;\x1c8\
\xfcd\xdc\xb8q[\x22\xf3B\x90\xca\xca\xca\xfeW\x5c\
q\xc5\xbf\x1d<\xc4\x82+\xaf\xbc\xf2\xeb\xea\xea\xea^\
\x91{%X\x22\x91h0\x00DY\xb7n\xddj#\
\xfbN\xc0W_}\xf5\x0f\x86\x80(\xdf\xee\xbbq\xe3\
\xc6;#\xffZp\xc3@\x14y-\xf8\x7fM\x980\
\xc1KB\x89\x94i\xd3\xa6m\x10\x80\x9f\xf9\xf6\xdbo\
\x07\x19\x0c\xa2\xa0\xaa\xaa\xaa\x9f\x0f\x83\xfc\x86\xee\xdd\xbb\
\x9f6 \x84YNN\xce\x09_\x06\xf2{\x00\x11\x94\
\x96\x96\x16$\xdd\xbe%\xdb?\xa8\xa5\xa5%~\xf7\xdd\
w\xff\xc3\xc0\x10&\xf3\xe6\xcd[\xed\xdb\x80mT_\
_\x9fih\x08\x933g\xce\xa4\x0b\xc0y\xf8\xe6\x9b\
o\x86\xf6\xe9\xd3\xe7\x98\xe1!\x95\x0d\x1a4\xe8\xc0\xe1\
\xc3\x87s}\x1d\xf8\x02l\xdd\xba\xf5&CD*\xfb\
\xe2\x8b/\xae\xf1y\xf0vX\xb9r\xe5B\x83D*\
z\xf9\xe5\x97g'\xfb~%}\x00\x82 \x88\xdd\x7f\
\xff\xfd/\x18(R\xc9\x13O<\xf1d*\xecVJ\
\x04 \x08\x82Xaa\xe1'\x06\x8bT0~\xfc\xf8\
\x0fSe\xafR&\x00A\x10\xc4\xbe\xfc\xf2\xcbk\x0c\
\x18\xc9\xec\xc0\x81\x03\x83Ri\xa7R*\x00A\x10\xc4\
222|d\x84\xa4\x94\x9d\x9d]\x9bj\xfb\x94r\
\x01p\xb7 \x9e\xee\x8bx\x00|l\x94d\xb3p\xe1\
\xc2\x95\x02\xd0\x85jjjz\x19<\x92AW\xbe\xc6\
[\x00|s\x90$2l\xd8\xb0}\xa9\xbcC)\x1d\
\x80 \x08b\x9b6m\x9ab\x10\xb9\x18\xb6n\xddz\
S\xaa\xefO\xca\x07\xc0\x8f\x82x\xb47\xe2\x01hi\
i\x89O\x9b6m\x83\xc1\xa4+<\xf0\xc0\x03/\x84\
aoB\x13\x80\xff\x192d\x88\x0f\x90\xd2\xa9\xae\xbf\
\xfe\xfaO\xc2\xb43\xa1\x0a\xc0\xde\xbd{\x87\x1bR:\
\xd3\x91#G\xfa\x0b@\x12{\xe3\x8d7f\xa4\xa5\xa5\
\x19V:T\x22\x91h\xd8\xbcy\xf3\xf8\xb0\xedK\xe8\
\x02\x10\x04A\xec\xa9\xa7\x9ez\xc4\xd0\xd2\x91^y\xe5\
\x95\x99a\xdc\x95P\x06 \x08\x82\xd8\xed\xb7\xdf\xfe\xb6\
\xc1\xa5#\x94\x94\x94\xac\x0d\xeb\x9e\x846\x00\xa7O\x9f\
\xee>j\xd4\xa8O\x0d0\xed1a\xc2\x84\xf7\x1b\x1b\
\x1b\x13\x02\x90\xa2z\xf4\xe8q\xca s!rss\
\x0f\x85}?B\x1f\x80 \x08bk\xd6\xac\xf9\xa3\x81\
\xe6|\xbc\xf5\xd6[\xbf\x8f\xc2nD\x22\x00\xee\x16$\
\x0a\x8f\xf6\x0a@+\x1a\x1a\x1a\x12\xc5\xc5\xc5\xe5\x86\x9b\
\xd6\xcc\x981c}KKK\x5c\x00B\xe8\xe4\xc9\x93\
\xbd\x0d9\xad\xa9\xaf\xaf\xcf\x8c\xd2ND*\x00A\x10\
\xc4>\xfe\xf8\xe31\x06\x9d\xdf\xb2\x7f\xff\xfe\x82\xa8\xed\
C\xe4\x02\x10\x04A,\x1e\x8f7\x1bx~.++\
\xab.\x8a\xbb\x10\xc9\x00\x04A\x10\x9b?\x7f\xfes\x06\
\x9fX,\x16<\xf3\xcc3\x0b\xa2\xba\x07\x91\x0d@s\
ss\xdc\xf0\x13\xa5_\xfc\x05\xe0\x17\x8e\x1e=\xda\xaf\
\xa0\xa0\xc0#\xc4\x115z\xf4\xe8\x1d\xb5\xb5\xb5\xdd\x04\
\xe2\x12\x89D\x83\x85\x88\x96\xcb/\xbf\xbc\xda\xec\x0b\
@,\x08\x82\xd8k\xaf\xbdv\xafG\x88\xa3\xf5ho\
YY\xd98\xb3/\x00\xee\x16\xf47\xbf\x00\xf0\xa3I\
\x93&\xbdkA\xc2\xed\xbe\xfb\xee\xfb\xbbY\x17\x80\xb3\
\xca\xcb\xcb;dQ\xc2i\xe4\xc8\x91\x9f\x9bq\x01h\
\xd5g\x9f}6\xea\xb2\xcb.;ma\xc2%//\
\xef\xd0w\xdf}7\xd0\x8c\x0b\x80\xdf\x03\x22&==\
\xbd\xc9L\x0b\xc0y\x7fk\xe0\x9e{\xeeYo\x81R\
\xdb\xe2\xc5\x8b\x97\x9bg\x01\xb8 \xf5\xf5\xf5\x99\x96(\
\xb5E\xe9\xd1^\x01\xe8\x04\xfb\xf7\xef/\xe8\xdb\xb7\xef\
1\xcb\x94ZF\x8c\x18\xf1\xf5\xc9\x93'{\x9ba\x01\
h\xb7m\xdb\xb6\xfd\xceR\xa5\x96\x8a\x8a\x8a\x02\xb3+\
\x00\x1df\xd5\xaaU\x0b,V\xf2\x8b\xc7\xe3\xcd\x1b7\
n\xbc\xd3\xcc\x0a@\x87{\xf0\xc1\x07\xffj\xc9\x92\xdb\
\xf3\xcf?\xff'\xb3*\x00\x9d\xa6\xa8\xa8h\x87EK\
NS\xa7N\xdd`F\x05\xa0SUVV\xf6\xb7l\
\xc9\xe9\x87\x1f~\xe8fF\x05\xa0\xd3m\xde\xbcy\xbc\
G\x88\x93G\xef\xde\xbdO\xec\xd9\xb3g\xb8\xd9\x14\x00\
w\x0bFLZZ\x9a\xa7\xfb\x04\xe0\xe2())Y\
k\x09/\xae\x15+V<b\x16\x05\xe0\xa2\xa9\xa9\xa9\
\xe9e\x11/\x8e3g\xce\xa4\x9bA\x01\xb8\xe8\x06\x0c\
\x18p\xc4Bv\xad\xeb\xae\xbb\xeeS\xb3'\x00I\xe3\
\xcd7\xdf\x9cb1\xbb\xc6\xee\xdd\xbb\xaf5s\x02\xe0\
G\xc1\x08\xca\xc8\xc8\xf0h\xaf\x00$\xef#\xc4\x96\xd4\
\xfb\xfc\x04 \xc2\x8e\x1f?\xdeg\xe8\xd0\xa1\xfb,k\
\xc7\xba\xe5\x96[\xb6466&\xcc\x98\x00$\xbd}\
\xfb\xf6\x0d\xb5\xb4\x1d\xab\xba\xba\xba\x97\xd9\x12\x80\x94Q\
ZZ:\xdd\xb7\x06\xda/;;\xbbv\xe7\xce\x9dE\
fJ\x00R\xce\xd3O?\xbd\xc8\x12\xb7\xcf\xa6M\x9b\
\xa6\x98%\x01HYw\xdcq\xc7?-\xf2\x85y\xe8\
\xa1\x87V\x99!\x01Hi\xdf\x7f\xff}\xf7\xc2\xc2\xc2\
O,\xf4\xf9\x99>}z\xa9\xf7\xf9\x09@h\xf4\xec\
\xd9\xf3\x94\xc5n\x9b\xa1C\x87\xee33\x02\x10*\x1f\
|\xf0\xc1\x84\x8c\x8c\x8c&\x0b\xde\xba\x9c\x9c\x9c\x13{\
\xf7\xee\xf5h\xaf\x00\xb8[\xd0\xa3\xbd\x08@\xc846\
6&,\xbb\xbb\xfc\x04 \xc2\xae\xba\xea\xaa\xaf,\xfc\
\xff\x9b2e\xca&\xb3!\x00\x91\xb1s\xe7\xce\x22\x8b\
\xff\xa3\xc3\x87\x0f\xe7\x9a\x09\x01\x88\x9cK.\xb9\xa49\
\xea\xcb\xdf\xb3g\xcfSfA\x00\x22k\xc1\x82\x05\xab\
\xa2\xba\xfc\xeb\xd6\xad\x9bi\x06\x04\xc0#\xc4~\xf4C\
\x00\xa2\xab\xaa\xaa\xaa\xdf\x90!C\xf6Ge\xf1o\xbd\
\xf5\xd6\x0f=\xda+\x00\xfc\xc2\xa5\x97^\x1a\xfao\x0d\
\x0c\x180\xe0\x88\xb3\x16\x00~\xc3\xfa\xf5\xebg\x84=\
\x00\xbbv\xed*t\xd6\x02\xc0Y\xcc\x9d;\xf7oa\
]\xfe\x17_|q\x9e3\x16\x00Z\xd1\xd4\xd4\x94>\
y\xf2\xe4\xb7\xc3\xb6\xfc\x8b\x16-\xfa\x8b\xf3\x15\x00\xda\
h\xe0\xc0\x81\x07\xc3\xb2\xfc7\xdf|\xf3\xbf\x9c\xa9\x00\
p\x1ev\xef\xde}m\x8f\x1e=R\xfe\x11\xe2\xe1\xc3\
\x87\xef9~\xfcx\x1fg*\x00D\xec\xe9\xc1\xac\xac\
\xac:g(\x00\xb4\xc3\x92%K\x96\xa7\xe2\xf2\xc7\xe3\
\xf1\xe6\x0d\x1b6Lu\x86\x02@;544$\xdc\
\xe5\x87\x00DXEEEA\xbf~\xfd\x8e&\xfb\xe2\
\x17\x15\x15\xed\xa8\xad\xad\xed\xe6\xcc\x04\x80\x0e\xb6}\xfb\
\xf61\xc9\x1e\x80\xca\xca\xca\xfe\xceJ\x00\xe8$\xcf>\
\xfb\xec\x9f\x93u\xf9\xb7l\xd92\xce\x19\x09\x00\x11\xfd\
\x9f\x01g#\x00t\x81\xda\xda\xdanc\xc6\x8c\xd9\x9e\
,\x8b?g\xce\x9c5\xceE\x00\xe8BG\x8f\x1e\xed\
\x97,\x01hjjJw&\x02@\x17+++\x1b\
\x97H$.\xda#\xc4yyy\x87\x0e\x1e<8\xd0\
Y\x08\x00\x11\xfbM ==\xbd\xc9\xb5\x17\x00\x92\xc0\
\x9c9s\xd6tu\x00^z\xe9\xa5\xb9\xae\xbd\x00\x90\
$N\x9d:\xd5\xa3\xab\x96\xdfG;\x05\x80$\x94\x9b\
\x9b{\xa8\xb3\x97\x7f\xec\xd8\xb1\xe5\xae\xb5\x00\x90\xa4\xba\
w\xef~\xba\xb3\x96\x7f\xc4\x88\x11_\xbb\xc6\x02@\x92\
\xdf.\xdc\xb7o\xdfc\x1d\xbd\xfc7\xdcp\xc3\xf6\xaa\
\xaa\xaa~\xae\xb1\x00\x90\x02\x1e~\xf8\xe1\x95\x1d\xf1\xe5\
\xa1\x9c\x9c\x9c\x13\xaf\xbf\xfe\xfa\xbd\xae\xa9\x00\x90bv\
\xed\xdaU\xd8\xde\xff\xea;v\xec\x98\xb7\xf9\x08\x00\xa9\
\xee\xb1\xc7\x1e[v\xf5\xd5W\x7f\xd5\x96\x8fv\xac^\
\xbd\xda\xdb{\x05\x800*--\x9d~\xd7]w\x95\
\xdex\xe3\x8d\xdb\xf2\xf3\xf3\x0f\x0e\x1e<\xf8@qq\
q\xf9\xacY\xb3\xd6\xbd\xf7\xde{\x93\x5c#\x01\x00\x04\
\x00\x10\x00@\x00\x00\x01\x00\x04\x00\x10\x00@\x00\x00\x01\
\x00\x04\x00\x10\x00@\x00\x00\x01\x00\x04\x00\x10\x00@\x00\
\x00\x01\x00\x04\x00\x10\x00@\x00\x00\x01\x00\x04\x00\x10\x00\
@\x00\x00\x01\x00\x04\x00\x10\x00@\x00\x00\x01\x00\x04\x00\
\x10\x00@\x00\x00\x01\x00\x04\x00p\x11@\x00\x00\x01\x00\
\x04\x00\x10\x00@\x00\x00\x01\x00\x04\x00\x10\x00@\x00\x00\
\x01\x00\x04\x00\x10\x00@\x00\x00\x01\x00\x04\x00\x10\x00@\
\x00\x00\x01\x00\x04\x00\x10\x00@\x00\x00\x01\x00\x04\x00\x10\
\x00@\x00\x00\x01\x00\x04\x00\x10\x00@\x00\x00\x01\x00\x04\
\x008\xb7\xff\x0c\x00:\xc5\x97\x14,\x17\x85\x18\x00\x00\
\x00\x00IEND\xaeB`\x82\
\x00\x00\x0a\xa5\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x01\x00\x00\x00\x01\x00\x08\x06\x00\x00\x00\x5cr\xa8f\
\x00\x00\x00\x09pHYs\x00\x00\x0e\xc4\x00\x00\x0e\xc4\
\x01\x95+\x0e\x1b\x00\x00\x00 cHRM\x00\x00z\
r\x00\x00\x80\xeb\x00\x00\xf5\x8d\x00\x00\x84\xec\x00\x00q\
\x90\x00\x00\xf6\x89\x00\x00;\x1d\x00\x00\x16G^\xee\x0e\
Z\x00\x00\x0a+IDATx\xda\xec\xdd[l\x94\
e\x1a\xc0\xf1\xe9\xd8\x8e-\xad\x80\x9c\x02-\x05S\x0e\
\xc1C\x04k\xc1U\x13\x8a\x08\x88+\xf1\x02\xd0\xa0\xd9\
\x00]\x0c!j\x96\x0d\x22Q@\x8d!\x04W\x82Q\
7\x1b\x5c7\x04\xd1(n\x1a.\x08\xae'\x14Z\x93\
\x85 \x88\x8a\x1a\x03\xd8n\x10\x81R\x90\x96`m\xa7\
\xa5\xed^l\x1aY\x15,\xb4\x9d\xce|\xdf\xef\xe2w\
O\xbe\xf7y\xfe\x09\xd3\xef\x9d\x89\xb4\xb6\xb6F\x80p\
\xf2\x10@\x00\x00\x01\x00\x04\x00\x10\x00@\x00\x00\x01\x00\
\x04\x00\x10\x00@\x00\x00\x01\x00\x04\x00\x10\x00@\x00\x00\
\x01\x00\x04\x00\x10\x00@\x00\x00\x01\x00\x04\x00\x10\x00@\
\x00\x00\x01\x00\x04\x00\x10\x00@\x00\x00\x01\x00\x04\x00\x10\
\x00@\x00\x00\x01\x00\x04\x00\x10\x00@\x00\x00\x01\x00\x04\
\x00\x10\x00\x10\x00\x0f\x01\x04\x00\x10\x00@\x00\x00\x01\x00\
\x04\x00\x10\x00@\x00\x00\x01\x00\x04\x00\x10\x00@\x00\x00\
\x01\x00\x04\x00\x10\x00@\x00\x00\x01\x00\x04\x00\x10\x00@\
\x00\x00\x01\x00\x04\x00\x10\x00@\x00\x00\x01\x00\x04\x00\x10\
\x00@\x00\x00\x01\xa0\xfd\xde\x7f\xff\xfd)s\xe6\xcc\xd9\
P\x5c\x5c\x5c>t\xe8\xd0C\xf9\xf9\xf9\x87o\xbe\xf9\
\xe6\x9d\xf7\xdcsOiii\xe9L\xcfH\x00\x08\xa0\
\xb5k\xd7.\xb8\xfd\xf6\xdb?\x8cD\x22\xad\x17r\xed\
\xb5\xd7~\xf5\xc4\x13O\xac\xf0\xcc\x04\x80\x14w\xe2\xc4\
\x89~\xe9\xe9\xe9M\xbf\xb5\xf4\x17\xb2w\xef\xdeB\xcf\
R\x00H1o\xbc\xf1\xc6\xfd}\xfb\xf6\xfd\xbe#\xcb\
\x1f\x89DZ/\xbb\xec\xb2\xe6G\x1f}t\xb5g*\
\x00\xa4\x80\xea\xea\xea\x017\xddt\xd3\xae\x8e.\xfe\xcf\
\xf5\xef\xdf\xff\xc4\xae]\xbb\xc6y\xc6\x02@\x12\x1b5\
j\xd4\xd7\x9d\xbd\xfcmrrr\xcex\xc6\x02@\x92\
\x1a?~|yW-\x7f\x9b\xdc\xdc\xdc#\x9e\xb5\x00\
\x90dZZZ\xa2]\xbd\xfcmN\x9f>\xdd\xd33\
\x17\x00\x92\xc4\xcb/\xbf<?Q\xcb\xdff\xde\xbcy\
\xeb<{\x01 \x09t\xf4O}\x97\xca\xb3\x17\x00\xba\
\xd1\xe1\xc3\x87\x07\xe7\xe5\xe5\x1d\xe9\x8e\xe5\x8fD\x22\xad\
\xb1X,^VV6\xc1Y\x08\x00\x09\xd6\xd4\xd4\x94\
\xde]\x8b\xffs\xc7\x8f\x1f\x1f\xe0L\x04\x80\x04\x9a7\
o\xde\xbad\x09\xc0\xb8q\xe3v\xd5\xd5\xd5\xf5p.\
\x02@\x22\x0e-I\x16\xdfg\x02\x02@\x82m\xdf\xbe\
}B\xb2\x06\xe0\xf9\xe7\x9f\xff\xb33\x12\x00\xbaHU\
U\xd5\xc0d]\xfe6^\x17\x16\x00\xba@]]]\
\x8f\xa2\xa2\xa2\xdd\xc9\x1e\x80\x01\x03\x06\x1c\xaf\xac\xac,\
pf\x02@\x08\xfe\xdf\x7f>\xf1x<\xe6\xdc\x04\x80\
\x0e\xda\xb4i\xd3\xf4h4\xda\x9cj\x01\x88D\x22\xad\
\xcb\x96-[\xe9\x0c\x05\x80\x0e\xc8\xca\xca\xaaO\xc5\xe5\
\xf7\x97\x01\x01\xa0\x03N\x9e<\xd9o\xe4\xc8\x91\xfbS\
y\xf9#\x91Hk\xcf\x9e=O\xef\xdb\xb7\xefzg\
*\x00\x5c\x84[o\xbd\xf5\xdf\xa9\xbe\xfcm\x06\x0f\x1e\
|\xd8\x99\x0a\x00\xed\xb4d\xc9\x92\xbf\x04e\xf9\xdbL\
\x9d:\xf5\x9d\xa6\xa6\xa6t\xe7+\x00\x5c\xc0K/\xbd\
\xb4 h\xcb\xdff\xfe\xfc\xf9\x7fw\xc6\x02\xc0y\xec\
\xdd\xbb\xb70\xa8\xcb\xdff\xe3\xc6\x8d\xb3\x9c\xb5\x00\xf0\
+\x06\x0d\x1at,\xe8\x01\xb8\xfc\xf2\xcb\xe3\xceZ\x00\
8Gccc\xac=?\xda\x11\x14\xc3\x86\x0d\xab\xa8\
\xae\xaev\x85X\x00H\xc5\xb7\xfc:KKKK\xd4\
\xf9\x0b@\xa8m\xd8\xb0avX\x03\xb0h\xd1\xa25\
f@\x00B\xabW\xaf^\xa7\xc3\xba\xfc\xe7\xfe\xf2\x90\
Y\x10\x80\xd09z\xf4hn\xd8\x97\xbf\xcd\x9e={\
\x8a\xcc\x84\x00\x84\xc6\xb4i\xd3\xb6X\xfc\xffw\xcd5\
\xd7|e6\x04\xc0\x87~!\xd6\xd8\xd8\xe8\x0a\xb1\x00\
\x04[ZZ\x9aew{P\x00\xc2\xe6\xc0\x81\x03#\
;\xe3\xe7\xba\x83.##\xa3\xe9\x83\x0f>\x98df\
\x04 P\x86\x0f\x1f~\xd0\x82\xb7O\xaf^\xbdN\x9b\
\x19\x01\x08\x84\x96\x96\x96\xe8\xcc\x993K-\xf6\xc5)\
,,\xfc\xe4\x87\x1f~\xc81C\x02\x90\xd2\x1ey\xe4\
\x915\x16\xfa\xd2\xdcu\xd7]\xff2C\x02\x90\xb2\xb6\
l\xd92\xcd\x22w\xcc\xb3\xcf>\xbb\xc4,\x09@\xca\
\xd9\xb3gOQvvv\x9d%\xee\x98\xb4\xb4\xb4\xd6\
\xd2\xd2\xd2\x99fJ\x00RFMMMo\xcb\xdb\xb9\
\x0e\x1e<8\xdcl\x09@J\x5c\xed\xbd\xed\xb6\xdb\xb6\
[\xda\xce5|\xf8\xf0\x83'O\x9e\xecg\xc6\x04\xc0\
[~!\xe6\x0a\xb1\x00$\xad\x8c\x8c\x8c&K\xeam\
A\x01\x08\xa1}\xfb\xf6]o9\x13\xe3\xad\xb7\xde\x9a\
f\xe6\x04 i\xdcp\xc3\x0d\x9fZ\xcc\xc4\x1a4h\
\xd01\xb3'\x00\xdd\xee\xec\xd9\xb3\xe9\x16\xb2{\xd4\xd6\
\xd6\xf66\x83\x02\xd0mV\xadZ\xf5\x98E\xec^%\
%%\xeb\xcd\xa2\x00\xb8\xda\xebCA\x04 1\xf6\xef\
\xdf?\xb2O\x9f>\xae\xf6&\x89X,\x16\xdf\xb6m\
\xdbD\xb3)\x00]\xee\xc7\x1f\x7f\xeca\xe9\x92SU\
U\xd5@3*\x00]j\xfa\xf4\xe9\x9b,[r*\
**\xdamF\x05\xa0\xcb\xbc\xf8\xe2\x8b\x7f\xb2h\xc9\
\xed\xe1\x87\x1f\xfe\xabY\x15\x80N\xb7y\xf3\xe6\xbb\xa3\
\xd1h\xb3%K~k\xd6\xacYdf\x05\xa0\xd3T\
VV\x16X\xac\xd4\xb2s\xe7\xce\xdf\x99]\x01\xe8\xb0\
S\xa7N\xf5\x195j\xd4\xd7\x96*\xb5\xf4\xef\xdf\xff\
DEEE\x81\x19\x16\x80\x0e}\x9f\x9feJm\x0d\
\x0d\x0d\x99fY\x00.\xc9\xd2\xa5KWZ\xa2\xd4v\
\xdf}\xf7mt\x85X\x00.Zzz\xba\xab\xbd\xde\
\x16\x14\x80\xb0\xf9\xee\xbb\xef\x06\xe7\xe5\xe5\x1d\xb14\xc1\
r\xc5\x15W\x9c\xf9\xec\xb3\xcf\xc6\x98q\x01\xb8\xa0\xd1\
\xa3G\x7fna\x82)//\xef\x88\x19\x17\x80\xf3z\
\xe0\x81\x07\xfeaQ\x82m\xca\x94)\xef\x99u\x01\xf0\
}~>\x0f@\x00\xfe\xa7\xac\xaclB,\x16\x8b[\
\x8e\xf0\xfc\xd6\xc0\xeb\xaf\xbf~\xbf\xd9\x17\x80Hkk\
k\xe4\xca+\xaf\xac\xb1\x18\xe1\xbbBl\xf6C\x1e\x80\
\xba\xba\xba\x1ec\xc7\x8e\xddm!\xc2\xa9\xa0\xa0\xa0\xe2\
\xf8\xf1\xe3\x03\x04\xc0\xff\xf9\x09\xb1\xe6\xe6\xe6\xa8\x00\x84\
\xccs\xcf=\xb7\xc8\xf0\x13\x89DZ\x17.\x5c\xf8\x82\
\x00\x84HVVV\xbd\xc1\xe7\x5c\xd1h\xb4Y\x00B\
\xa0\xa2\xa2\xc2\xd5^~\xd5\xc7\x1f\x7f<N\x00\x02\xac\
\xa1\xa1!\xd3\xa0s!\xa7N\x9d\xea#\x00\x01\xbd\xda\
;k\xd6\xac\x8d\x86\x9c\x0b)...\x8f\xc7\xe31\
\x01\xf0\x89?\xde\x16\x14\x80 x\xfb\xed\xb7\x7fo\xa8\
\xb9\x18\xeb\xd6\xad\xfb\xa3\x00\x04@nn\xae\xab\xbd\x5c\
\x92\x9e={\x9e\x16\x80\x14\xd5\xd8\xd8\x18\x9b4i\xd2\
V\x83LG\x8c\x193\xe6\xd33g\xce\xe4\x08@\x8a\
)))Yo\x80\xe9\x0cw\xdey\xe7;\x02\x90B\
^}\xf5\xd5\xd9\x06\x97\xce\xf4\xcc3\xcf<&\x00)\
`\xdb\xb6m\x13]\xed\xa5+\xae\x10\xbf\xf9\xe6\x9b\xb3\
\x04 \x89\x1d;vl\xa0a\xa5+\x1d8p`\xa4\
\x00$\xa9\x1bo\xbc\xf1\x13CJW\x1a6lX\x85\
\x00$\xa1\x87\x1ez\xe8o\x06\x94D\x981c\xc6\xa6\
\xa0\xfc\xd6@ \x96?--\xcd`\xe2m\xc10\x06\
`\xc7\x8e\x1d\xb7\x18F\xba\xc3\x96-[\xa6\x09@7\
\x1a1b\xc4A\x83Hw\x1a8p\xe01\x01\xe8\x06\
\xf5\xf5\xf5\xae\xf6\x92\x14jkk{\x0b@\x02-^\
\xbcx\xb5\xc1#\x99\xcc\x9e={\x83\x00\xb8\xda\x8b\x0f\
\x05\x05\xa0+egg\xd7\x196\x92QFFF\x93\
\x00t\xa1C\x87\x0e\x0d1h$\xb3/\xbf\xfc\xf2:\
\x01\xe8\x02\x13'N\xfc\xd0\x80\x91\x0a\x0a\x0b\x0b?\x11\
\x80N\xf4\xd4SO=m\xb0H%\x0f>\xf8\xe0\xdf\
\x04\xa0\x13\xbc\xf2\xca+s\x0d\x14\xa9h\xf5\xea\xd5\x8b\
\x05\xa0\x03\xbe\xf8\xe2\x8b\xeb\x0c\x12\xa9l\xc7\x8e\x1d\xb7\
\x08\xc0%8z\xf4h\xee\x90!C\x0e\x19\x22RY\
\xbf~\xfdN|\xf3\xcd7\xc3\x05\xe0\x22\x9c={6\
\xdd\xf0\x10$\x0d\x0d\x0d\x99\x02\xd0N\x0b\x16,Xk\
h\x08\x92{\xef\xbd\xf7\x9f\xc9x\x85\xd8\xd5^\x08\xf1\
\xdb\x82I\xf5\x8f\xe9\xdb\xb7\xef\xf7\x86\x84 \xcb\xc9\xc9\
9#\x00\xbf\xa2\xba\xbaz\x80\x01!\x0c\xbe\xfd\xf6\xdb\
!\x02p\x8e\x193fl2\x18\x84\xc9\xa4I\x93\xb6\
\x0a\x80\xdb}\xf8< \xbc\x01\xd8\xbcy\xf3\xdd\xd1h\
\xb4\xd90\x10V\xaf\xbd\xf6\xda\x1fB\x1b\x80\x1e=z\
\xb8\xdaK\xa8\xc5b\xb1x\xe8\x02PSS\xd3\xfb\xea\
\xab\xaf\xfe\xda\x00@\xa4\xf5\xaa\xab\xae\xfaOUU\xd5\
\xc0\xd0\x04`\xc2\x84\x09\xdb\x1d<\xfcd\xec\xd8\xb1\xbb\
C\x11\x80\xc7\x1f\x7f|\x95\x03\x87_Z\xb8p\xe1\x0b\
\x81\x0e\xc0\xbb\xef\xbe;\xd5A\xc3\xf9\x95\x97\x97\x8f\x0f\
d\x00\xea\xeb\xeb3\x87\x0e\x1d\xeav\x1f\x5c\xc0\x88\x11\
#\x0e\xc6\xe3\xf1X\xa0\x02\xd0\xdc\xdc\x1c\xb5\xfc\xd0>\
\x05\x05\x05\x15\x89\xba8\x94\x90\x00\x94\x95\x95Mp\xb0\
\xd0~\x1f}\xf4\xd1\xf8\xc0\x04\xa0\xb8\xb8\xb8\xdc\xa1B\
\xfb\x15\x17\x17\x97\x07&\x00\xb9\xb9\xb9G\x1c*\xb4_\
\xa2~s0!\x01\xf0\xba/\x5c\x9c\xf4\xf4\xf4\xa6\xc0\
\x04`\xf4\xe8\xd1\x9f;Th\xbf1c\xc6|\x1a\x98\
\x00\xcc\x993g\x83C\x85\xf6K\xd4\x8f\x8d&$\x00\
\xb5\xb5\xb5\xbd333\xeb\x1d,\xfc\xb6\xcc\xcc\xcc\xfa\
\x9a\x9a\x9a\xde\x81z\x11\xa8\xa4\xa4d\xbd\xc3\x85\xdf6\
w\xee\xdc\xf5\x81|\x15x\xd9\xb2e+\x1d0\x9c\xdf\
\xd2\xa5KW\x06\xf6.@<\x1e\x8f9d8\xbfD\
\xbe\x06\xdc-\xb7\x01+++\x0b\x1c4\xfcRee\
eA(\xbe\x0f`\xf2\xe4\xc9[\x1d8\xfcd\xf2\xe4\
\xc9[C\xf7\x95`+V\xacX\x9e\x9f\x9f\x7f\xd8\x00\
\x10F\xf9\xf9\xf9\x87W\xae\x5c\xb94\xd4\xdf\x0a\xdc\xdc\
\xdc\x1c]\xbe|\xf9\x8a)S\xa6\xbc\x97\x95\x95\xe5O\
\x85\x04Zvvv\xdd\x1dw\xdc\xf1\xde\x93O>\xf9\
t2\xfcTXR\xff<8 \x00\x80\x00\x00\x02\x00\
\x08\x00 \x00\x80\x00\x00\x02\x00\x08\x00 \x00\x80\x00\x00\
\x02\x00\x08\x00 \x00\x80\x00\x80\x00\x00\x02\x00\x08\x00 \
\x00\x80\x00\x00\x02\x00\x08\x00 \x00\x80\x00\x00\x02\x00\x08\
\x00 \x00\x80\x00\x00\x02\x00\x08\x00 \x00\x80\x00\x00\x02\
\x00\x08\x00 \x00\x80\x00\x00\x02\x00\x08\x00 \x00\x80\x00\
\x00\x02\x00\x08\x00 \x00\x80\x00\x00\x02\x00\x08\x00 \x00\
\x80\x00\x00\x02\x00\x08\x00 \x00\x80\x00\x80\x00\x00\x02\x00\
\x08\x00 \x00\x80\x00\x00\x02\x00\x08\x00 \x00\x80\x00\x00\
\x02\x00\x08\x00 \x00\x80\x00\x00\x02\x00$\xb1\xff\x0e\x00\
\xb5\xcb\x97\x14\x8f|Z\x82\x00\x00\x00\x00IEND\
\xaeB`\x82\
"
qt_resource_name = b"\
\x00\x05\
\x00o\xa6S\
\x00i\
\x00c\x00o\x00n\x00s\
\x00\x04\
\x00\x075t\
\x00l\
\x00o\x00a\x00d\
\x00\x07\
\x08\x99m\x9c\
\x00a\
\x00r\x00r\x00o\x00w\x00_\x00l\
\x00\x07\
\x08\x99m\xa2\
\x00a\
\x00r\x00r\x00o\x00w\x00_\x00r\
\x00\x07\
\x08\x99m\x94\
\x00a\
\x00r\x00r\x00o\x00w\x00_\x00d\
\x00\x07\
\x08\x99m\xa5\
\x00a\
\x00r\x00r\x00o\x00w\x00_\x00u\
"
qt_resource_struct = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x05\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x10\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01x\xe4\xec\xb8\xde\
\x00\x00\x00F\x00\x00\x00\x00\x00\x01\x00\x00;\xe1\
\x00\x00\x01x\xe4\xec\xb8\xde\
\x00\x00\x00\x1e\x00\x00\x00\x00\x00\x01\x00\x00%B\
\x00\x00\x01x\xe4\xec\xb8\xde\
\x00\x00\x002\x00\x00\x00\x00\x00\x01\x00\x000q\
\x00\x00\x01x\xe4\xec\xb8\xde\
\x00\x00\x00Z\x00\x00\x00\x00\x00\x01\x00\x00F\xb0\
\x00\x00\x01x\xe4\xec\xb8\xde\
"
def qInitResources():
QtCore.qRegisterResourceData(0x03, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x03, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
|
AlignR
|
/AlignR-2.0.5.tar.gz/AlignR-2.0.5/AlignmentReporter/UI/Qt/AlignmentReporterRessources.py
|
AlignmentReporterRessources.py
|
# AlignmentUtilis
`AlignmentUtilis` is a collection of utilities for sequence alignment, including:
- Needleman-Wunsch and Smith-Waterman algorithms for global and local sequence alignment with affine gap penalties (see the note on affine gaps below)
- Naive exact matching for read alignment problems
- ...
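(Note on affine gaps: an affine gap penalty typically scores a gap of length `L` as `gap_open + (L - 1) * gap_extend`, so opening a gap costs more than extending an existing one. The numeric arguments to `Runalignment` in the usage example below are the scoring parameters; consult the package docstrings for their exact order.)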
## How to get it?
```shell
pip install AlignmentUtilis
```
## How to use it?
```python
# 1. PairwiseSequenceAlignment
from AlignmentUtilis.Pairwise import *
# Test
seq1 = "TCGTAGACGA"
seq2 = "ATAGAATGCGG"
# Run Global Alignment
PairwiseSequenceAlignment.Runalignment(seq1, seq2, 1, -1, -2, -1, local=False)
# Run Local Alignment
PairwiseSequenceAlignment.Runalignment(seq1, seq2, 1, -1, -2, -1, local=True)
# 2. Naive exact matching
from AlignmentUtilis.Naive import *
# Naive Exact Matching Basic Utility Test
test_occurrences = Naive.naive_exact_matching('AG', 'AGCTTAGATAGC')
print('The pattern is AG')
print('The target sequence is AGCTTAGATAGC')
print(f'The start position of exact matching is {test_occurrences}')
# 3. Boyer-Moore algorithm to reduce unnecessary alignments
from AlignmentUtilis.BM import *
# BoyerMoore Test
p = 'TCAA'
p_bm = BoyerMoore(p)
print(p_bm.amap)
print(p_bm.bad_character_rule(2, 'T'))
# boyer_moore Test
t = 'ACGTCGTGCGGTGAGTCGGTAGCGTAGCTAGATACAATCAAGAGAGAGTGCGGAGTGCGAGTCAA'
occurrences = boyer_moore(p, p_bm, t)
print(occurrences)
# 4. Kmer indexing of target sequence, and simple application of query sequence alignment
from AlignmentUtilis.KmerIndex import *
t = 'GCTACGATCTAGAATCTA'
p = 'TCTA'
index = Index(t, 2)
print(queryIndex(p, t, index))
```
## License
MIT License
Copyright (c) 2022 Youpu Chen
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
AlignmentUtilis
|
/AlignmentUtilis-0.1.0.tar.gz/AlignmentUtilis-0.1.0/README.md
|
README.md
|
from typing import Optional, Union, List, Dict
class APIResponses:
RESPONSE_MESSAGES = {
"Lütfen cep telefonunuza gönderilen kodu giriniz": 1,  # "Please enter the code sent to your mobile phone"
"Girmiş olduğunuz cep telefonu numarası sistemde kayıtlı değildir, lütfen kontrol edip tekrar deneyiniz": 0,  # "The mobile phone number you entered is not registered in the system, please check and try again"
"Algida dünyasına yönlendiriliyorsun.": 1,  # "You are being redirected to the Algida world."
"Girdiğiniz kod doğrulanamamıştır, lütfen tekrar deneyiniz.": 0,  # "The code you entered could not be verified, please try again."
"Kısa bir süre içerisinde çok fazla istekte bulundunuz, lütfen daha sonra tekrar deneyiniz": 0,  # "You have made too many requests in a short time, please try again later"
}
@staticmethod
def process_response(response: Optional[Union[List[Dict], Dict]]) -> Dict:
if not isinstance(response, list):
if not isinstance(response, dict):
return {"status": False}
else:
if not isinstance(response[0], dict):
return {"status": False}
else: response = response[0]
response = response.get('Message')
if response in APIResponses.RESPONSE_MESSAGES:
return {"status": APIResponses.RESPONSE_MESSAGES[response], "message": response}
else:
return {"status": False, "message": response}
@staticmethod
def process_verification(response) -> Dict:
return APIResponses.process_response(response.json())
@staticmethod
def process_code_verification(response):
keys_to_check = ['UCID', 'RefreshToken', 'Token', 'CustomerID']
data = response.json()
if not isinstance(data, list):
if not isinstance(data, dict):
return {"status": False}
else:
if not isinstance(data[0], dict):
return {"status": False}
else: data = data[0]
api_response = APIResponses.process_response(data)
if all(key in data for key in keys_to_check):
api_response.update(data)
return api_response
@staticmethod
def process_login(response):
# Process the login API response
pass
@staticmethod
def process_user_data(response):
# Process the user data API response
pass
@staticmethod
def process_update_settings(response):
# Process the update settings API response
pass
@staticmethod
def process_app_action(response):
# Process the app action API response
pass
|
Alika
|
/Alika-0.0.1.tar.gz/Alika-0.0.1/alika/api/api_responses.py
|
api_responses.py
|
import requests
from typing import Optional, Dict
from .api_responses import APIResponses
class APIService:
def __init__(self, base_url: str):
self.base_url = base_url
self.headers = {"Accept": "application/json, text/plain, */*",
"Content-Type": "application/json",
"Accept-Encoding": "gzip, deflate",
"User-Agent": "okhttp/4.9.1"}
self.app_key = "830DA10A-FA97-4244-8B40-E97EC8F085D9"
def _set_authorization(self, token: Optional[str] = None) -> dict:
headers = self.headers.copy()
if token: headers["Authorization"] = f"Bearer {token}"
return headers
def _make_request(self, method: str, endpoint: str, data: Optional[Dict] = None, headers: Optional[Dict] = None) -> requests.Response:
url = self.base_url + endpoint
response = requests.request(method, url, json=data, headers=headers)
return response
def send_verification_code(self, phone_number: str) -> Dict:
headers = self._set_authorization()
headers["Captchatoken"] = "token"
endpoint = "/promo/customerLogin"
data = {"appKey": self.app_key, "msisdn": phone_number}
response = self._make_request("POST", endpoint, data=data, headers=headers)
return APIResponses.process_verification(response)
def verify_code(self, phone_number: str, verification_code: str) -> Dict:
headers = self._set_authorization()
headers["Captchatoken"] = "token"
endpoint = "/promo/checkPincode"
data = {"appKey": self.app_key, "msisdn": phone_number, "pincode": verification_code}
response = self._make_request("POST", endpoint, data=data, headers=headers)
return APIResponses.process_code_verification(response)
def login(self, phone_number: str, token: Optional[str] = None) -> Dict:
pass
def get_user_data(self, token: str) -> Dict:
pass
def update_user_settings(self, token: str, new_settings: Dict) -> Dict:
pass
def perform_app_action(self, token: str, action_data: Dict) -> Dict:
pass
|
Alika
|
/Alika-0.0.1.tar.gz/Alika-0.0.1/alika/api/api_service.py
|
api_service.py
|
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
""" Gaussian distribution class for calculating and
visualizing a Gaussian distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
def __init__(self, mu=0, sigma=1):
Distribution.__init__(self, mu, sigma)
def calculate_mean(self):
"""Function to calculate the mean of the data set.
Args:
None
Returns:
float: mean of the data set
"""
avg = 1.0 * sum(self.data) / len(self.data)
self.mean = avg
return self.mean
def calculate_stdev(self, sample=True):
"""Function to calculate the standard deviation of the data set.
Args:
sample (bool): whether the data represents a sample or population
Returns:
float: standard deviation of the data set
"""
if sample:
n = len(self.data) - 1
else:
n = len(self.data)
mean = self.calculate_mean()
sigma = 0
for d in self.data:
sigma += (d - mean) ** 2
sigma = math.sqrt(sigma / n)
self.stdev = sigma
return self.stdev
def plot_histogram(self):
"""Function to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
plt.hist(self.data)
plt.title('Histogram of Data')
plt.xlabel('data')
plt.ylabel('count')
def pdf(self, x):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)
def plot_histogram_pdf(self, n_spaces = 50):
"""Function to plot the normalized histogram of the data and a plot of the
probability density function along the same range
Args:
n_spaces (int): number of data points
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
mu = self.mean
sigma = self.stdev
min_range = min(self.data)
max_range = max(self.data)
# calculates the interval between x values
interval = 1.0 * (max_range - min_range) / n_spaces
x = []
y = []
# calculate the x values to visualize
for i in range(n_spaces):
tmp = min_range + interval*i
x.append(tmp)
y.append(self.pdf(tmp))
# make the plots
fig, axes = plt.subplots(2,sharex=True)
fig.subplots_adjust(hspace=.5)
axes[0].hist(self.data, density=True)
axes[0].set_title('Normed Histogram of Data')
axes[0].set_ylabel('Density')
axes[1].plot(x, y)
axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
axes[1].set_ylabel('Density')
plt.show()
return x, y
def __add__(self, other):
"""Function to add together two Gaussian distributions
Args:
other (Gaussian): Gaussian instance
Returns:
Gaussian: Gaussian distribution
"""
result = Gaussian()
result.mean = self.mean + other.mean
result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
return result
def __repr__(self):
"""Function to output the characteristics of the Gaussian instance
Args:
None
Returns:
string: characteristics of the Gaussian
"""
return "mean {}, standard deviation {}".format(self.mean, self.stdev)
|
Alisha-ProbPack
|
/Alisha_ProbPack-0.1.tar.gz/Alisha_ProbPack-0.1/Alisha_ProbPack/Gaussiandistribution.py
|
Gaussiandistribution.py
|
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Binomial(Distribution):
""" Binomial distribution class for calculating and
visualizing a Binomial distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats to be extracted from the data file
p (float) representing the probability of an event occurring
n (int) number of trials
TODO: Fill out all functions below
"""
def __init__(self, prob=.5, size=20):
self.n = size
self.p = prob
Distribution.__init__(self, self.calculate_mean(), self.calculate_stdev())
def calculate_mean(self):
"""Function to calculate the mean from p and n
Args:
None
Returns:
float: mean of the data set
"""
self.mean = self.p * self.n
return self.mean
def calculate_stdev(self):
"""Function to calculate the standard deviation from p and n.
Args:
None
Returns:
float: standard deviation of the data set
"""
self.stdev = math.sqrt(self.n * self.p * (1 - self.p))
return self.stdev
def replace_stats_with_data(self):
"""Function to calculate p and n from the data set
Args:
None
Returns:
float: the p value
float: the n value
"""
self.n = len(self.data)
self.p = 1.0 * sum(self.data) / len(self.data)
self.mean = self.calculate_mean()
self.stdev = self.calculate_stdev()
def plot_bar(self):
"""Function to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
plt.bar(x = ['0', '1'], height = [(1 - self.p) * self.n, self.p * self.n])
plt.title('Bar Chart of Data')
plt.xlabel('outcome')
plt.ylabel('count')
def pdf(self, k):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
a = math.factorial(self.n) / (math.factorial(k) * (math.factorial(self.n - k)))
b = (self.p ** k) * (1 - self.p) ** (self.n - k)
return a * b
def plot_bar_pdf(self):
"""Function to plot the pdf of the binomial distribution
Args:
None
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
x = []
y = []
# calculate the x values to visualize
for i in range(self.n + 1):
x.append(i)
y.append(self.pdf(i))
# make the plots
plt.bar(x, y)
plt.title('Distribution of Outcomes')
plt.ylabel('Probability')
plt.xlabel('Outcome')
plt.show()
return x, y
def __add__(self, other):
"""Function to add together two Binomial distributions with equal p
Args:
other (Binomial): Binomial instance
Returns:
Binomial: Binomial distribution
"""
try:
assert self.p == other.p, 'p values are not equal'
except AssertionError as error:
raise
result = Binomial()
result.n = self.n + other.n
result.p = self.p
result.calculate_mean()
result.calculate_stdev()
return result
def __repr__(self):
"""Function to output the characteristics of the Binomial instance
Args:
None
Returns:
string: characteristics of the Binomial
"""
return "mean {}, standard deviation {}, p {}, n {}".\
format(self.mean, self.stdev, self.p, self.n)
|
Alisha-ProbPack
|
/Alisha_ProbPack-0.1.tar.gz/Alisha_ProbPack-0.1/Alisha_ProbPack/Binomialdistribution.py
|
Binomialdistribution.py
|
import sys
DEFAULT_VERSION = "0.6c11"
DEFAULT_URL = "http://pypi.python.org/packages/%s/s/setuptools/" % sys.version[:3]
md5_data = {
'setuptools-0.6b1-py2.3.egg': '8822caf901250d848b996b7f25c6e6ca',
'setuptools-0.6b1-py2.4.egg': 'b79a8a403e4502fbb85ee3f1941735cb',
'setuptools-0.6b2-py2.3.egg': '5657759d8a6d8fc44070a9d07272d99b',
'setuptools-0.6b2-py2.4.egg': '4996a8d169d2be661fa32a6e52e4f82a',
'setuptools-0.6b3-py2.3.egg': 'bb31c0fc7399a63579975cad9f5a0618',
'setuptools-0.6b3-py2.4.egg': '38a8c6b3d6ecd22247f179f7da669fac',
'setuptools-0.6b4-py2.3.egg': '62045a24ed4e1ebc77fe039aa4e6f7e5',
'setuptools-0.6b4-py2.4.egg': '4cb2a185d228dacffb2d17f103b3b1c4',
'setuptools-0.6c1-py2.3.egg': 'b3f2b5539d65cb7f74ad79127f1a908c',
'setuptools-0.6c1-py2.4.egg': 'b45adeda0667d2d2ffe14009364f2a4b',
'setuptools-0.6c10-py2.3.egg': 'ce1e2ab5d3a0256456d9fc13800a7090',
'setuptools-0.6c10-py2.4.egg': '57d6d9d6e9b80772c59a53a8433a5dd4',
'setuptools-0.6c10-py2.5.egg': 'de46ac8b1c97c895572e5e8596aeb8c7',
'setuptools-0.6c10-py2.6.egg': '58ea40aef06da02ce641495523a0b7f5',
'setuptools-0.6c11-py2.3.egg': '2baeac6e13d414a9d28e7ba5b5a596de',
'setuptools-0.6c11-py2.4.egg': 'bd639f9b0eac4c42497034dec2ec0c2b',
'setuptools-0.6c11-py2.5.egg': '64c94f3bf7a72a13ec83e0b24f2749b2',
'setuptools-0.6c11-py2.6.egg': 'bfa92100bd772d5a213eedd356d64086',
'setuptools-0.6c2-py2.3.egg': 'f0064bf6aa2b7d0f3ba0b43f20817c27',
'setuptools-0.6c2-py2.4.egg': '616192eec35f47e8ea16cd6a122b7277',
'setuptools-0.6c3-py2.3.egg': 'f181fa125dfe85a259c9cd6f1d7b78fa',
'setuptools-0.6c3-py2.4.egg': 'e0ed74682c998bfb73bf803a50e7b71e',
'setuptools-0.6c3-py2.5.egg': 'abef16fdd61955514841c7c6bd98965e',
'setuptools-0.6c4-py2.3.egg': 'b0b9131acab32022bfac7f44c5d7971f',
'setuptools-0.6c4-py2.4.egg': '2a1f9656d4fbf3c97bf946c0a124e6e2',
'setuptools-0.6c4-py2.5.egg': '8f5a052e32cdb9c72bcf4b5526f28afc',
'setuptools-0.6c5-py2.3.egg': 'ee9fd80965da04f2f3e6b3576e9d8167',
'setuptools-0.6c5-py2.4.egg': 'afe2adf1c01701ee841761f5bcd8aa64',
'setuptools-0.6c5-py2.5.egg': 'a8d3f61494ccaa8714dfed37bccd3d5d',
'setuptools-0.6c6-py2.3.egg': '35686b78116a668847237b69d549ec20',
'setuptools-0.6c6-py2.4.egg': '3c56af57be3225019260a644430065ab',
'setuptools-0.6c6-py2.5.egg': 'b2f8a7520709a5b34f80946de5f02f53',
'setuptools-0.6c7-py2.3.egg': '209fdf9adc3a615e5115b725658e13e2',
'setuptools-0.6c7-py2.4.egg': '5a8f954807d46a0fb67cf1f26c55a82e',
'setuptools-0.6c7-py2.5.egg': '45d2ad28f9750e7434111fde831e8372',
'setuptools-0.6c8-py2.3.egg': '50759d29b349db8cfd807ba8303f1902',
'setuptools-0.6c8-py2.4.egg': 'cba38d74f7d483c06e9daa6070cce6de',
'setuptools-0.6c8-py2.5.egg': '1721747ee329dc150590a58b3e1ac95b',
'setuptools-0.6c9-py2.3.egg': 'a83c4020414807b496e4cfbe08507c03',
'setuptools-0.6c9-py2.4.egg': '260a2be2e5388d66bdaee06abec6342a',
'setuptools-0.6c9-py2.5.egg': 'fe67c3e5a17b12c0e7c541b7ea43a8e6',
'setuptools-0.6c9-py2.6.egg': 'ca37b1ff16fa2ede6e19383e7b59245a',
}
import sys, os
try: from hashlib import md5
except ImportError: from md5 import md5
def _validate_md5(egg_name, data):
if egg_name in md5_data:
digest = md5(data).hexdigest()
if digest != md5_data[egg_name]:
print >>sys.stderr, (
"md5 validation of %s failed! (Possible download problem?)"
% egg_name
)
sys.exit(2)
return data
def use_setuptools(
version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
download_delay=15
):
"""Automatically find/download setuptools and make it available on sys.path
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end with
a '/'). `to_dir` is the directory where setuptools will be downloaded, if
it is not already available. If `download_delay` is specified, it should
be the number of seconds that will be paused before initiating a download,
should one be required. If an older version of setuptools is installed,
this routine will print a message to ``sys.stderr`` and raise SystemExit in
an attempt to abort the calling script.
"""
was_imported = 'pkg_resources' in sys.modules or 'setuptools' in sys.modules
def do_download():
egg = download_setuptools(version, download_base, to_dir, download_delay)
sys.path.insert(0, egg)
import setuptools; setuptools.bootstrap_install_from = egg
try:
import pkg_resources
except ImportError:
return do_download()
try:
pkg_resources.require("setuptools>="+version); return
except pkg_resources.VersionConflict, e:
if was_imported:
print >>sys.stderr, (
"The required version of setuptools (>=%s) is not available, and\n"
"can't be installed while this script is running. Please install\n"
" a more recent version first, using 'easy_install -U setuptools'."
"\n\n(Currently using %r)"
) % (version, e.args[0])
sys.exit(2)
except pkg_resources.DistributionNotFound:
pass
del pkg_resources, sys.modules['pkg_resources'] # reload ok
return do_download()
def download_setuptools(
version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
delay = 15
):
"""Download setuptools from a specified location and return its filename
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end
with a '/'). `to_dir` is the directory where the egg will be downloaded.
`delay` is the number of seconds to pause before an actual download attempt.
"""
import urllib2, shutil
egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3])
url = download_base + egg_name
saveto = os.path.join(to_dir, egg_name)
src = dst = None
if not os.path.exists(saveto): # Avoid repeated downloads
try:
from distutils import log
if delay:
log.warn("""
---------------------------------------------------------------------------
This script requires setuptools version %s to run (even to display
help). I will attempt to download it for you (from
%s), but
you may need to enable firewall access for this script first.
I will start the download in %d seconds.
(Note: if this machine does not have network access, please obtain the file
%s
and place it in this directory before rerunning this script.)
---------------------------------------------------------------------------""",
version, download_base, delay, url
); from time import sleep; sleep(delay)
log.warn("Downloading %s", url)
src = urllib2.urlopen(url)
# Read/write all in one block, so we don't create a corrupt file
# if the download is interrupted.
data = _validate_md5(egg_name, src.read())
dst = open(saveto,"wb"); dst.write(data)
finally:
if src: src.close()
if dst: dst.close()
return os.path.realpath(saveto)
def main(argv, version=DEFAULT_VERSION):
"""Install or upgrade setuptools and EasyInstall"""
try:
import setuptools
except ImportError:
egg = None
try:
egg = download_setuptools(version, delay=0)
sys.path.insert(0,egg)
from setuptools.command.easy_install import main
return main(list(argv)+[egg]) # we're done here
finally:
if egg and os.path.exists(egg):
os.unlink(egg)
else:
if setuptools.__version__ == '0.0.1':
print >>sys.stderr, (
"You have an obsolete version of setuptools installed. Please\n"
"remove it from your system entirely before rerunning this script."
)
sys.exit(2)
req = "setuptools>="+version
import pkg_resources
try:
pkg_resources.require(req)
except pkg_resources.VersionConflict:
try:
from setuptools.command.easy_install import main
except ImportError:
from easy_install import main
main(list(argv)+[download_setuptools(delay=0)])
sys.exit(0) # try to force an exit
else:
if argv:
from setuptools.command.easy_install import main
main(argv)
else:
print "Setuptools version",version,"or greater has been installed."
print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)'
def update_md5(filenames):
"""Update our built-in md5 registry"""
import re
for name in filenames:
base = os.path.basename(name)
f = open(name,'rb')
md5_data[base] = md5(f.read()).hexdigest()
f.close()
data = [" %r: %r,\n" % it for it in md5_data.items()]
data.sort()
repl = "".join(data)
import inspect
srcfile = inspect.getsourcefile(sys.modules[__name__])
f = open(srcfile, 'rb'); src = f.read(); f.close()
match = re.search("\nmd5_data = {\n([^}]+)}", src)
if not match:
print >>sys.stderr, "Internal error!"
sys.exit(2)
src = src[:match.start(1)] + repl + src[match.end(1):]
f = open(srcfile,'w')
f.write(src)
f.close()
if __name__=='__main__':
if len(sys.argv)>2 and sys.argv[1]=='--md5update':
update_md5(sys.argv[2:])
else:
main(sys.argv[1:])
|
Aliyun
|
/Aliyun-0.1.1.tar.gz/Aliyun-0.1.1/ez_setup.py
|
ez_setup.py
|
import urllib2
from urllib import urlencode
REST_METHODS = ['GET', 'POST', 'DELETE', 'HEAD', 'OPTIONS', 'PUT']
class REST_BASE(object):
def __init__(self):
self.headers = {}
self.filename = None
self.stdin = False
self.params = []
self.host = 'http://api.aliyun-dev.com'
'''
if options.accept:
self.headers['accept'] = options.accept
for head in options.headers:
pass
if options.params:
if self._allow_params:
self.params = [tuple(p.split('=', 1)) for p in options.param]
'''
def request(self, uri, data = None, auth=None):
if data:
request = urllib2.Request(self.host + uri, urlencode(data))
else:
request = urllib2.Request(self.host + uri)
request.get_method = lambda: self._method
result = urllib2.urlopen(request)
return result.read()
class REST_GET(REST_BASE):
_method = 'GET'
_allow_request_body = False
_allow_params = True
class REST_HEAD(REST_BASE):
_method = 'HEAD'
_allow_request_body = False
_allow_params = True
class REST_POST(REST_BASE):
_method = 'POST'
_allow_request_body = True
_allow_params = True
class REST_PUT(REST_BASE):
_method = 'PUT'
_allow_request_body = True
_allow_params = True
class REST_DELETE(REST_BASE):
_method = 'DELETE'
_allow_request_body = False
_allow_params = True
class REST_OPTIONS(REST_BASE):
_method = 'OPTIONS'
_allow_request_body = False
_allow_params = False
def simple_rest_factory(method):
method = method.lower()
if method == 'get':
return REST_GET()
elif method == 'post':
return REST_POST()
if __name__ == '__main__':
from urllib import quote, urlencode
print "Test REST_POST"
statement = """insert overwrite table src_dest select * from src"""
post = simple_rest_factory('post')
uri = '/query/'
data = {'statement': statement,}
print post.request(uri, data)
print "Test REST_GET"
job_name = 'taobao/job_20110725033501158_21405'
get = simple_rest_factory('get')
uri = '/status/%s/' % quote(job_name.encode('utf-8')).replace('/', '%252F')
print uri
print get.request(uri)
|
Aliyun
|
/Aliyun-0.1.1.tar.gz/Aliyun-0.1.1/src/aliyun/rest.py
|
rest.py
|
import readline
from authentication import authenticate
from exceptions import CommandNotFound
from sys import exit
from imp import find_module
from os import listdir
from getpass import getpass
from modules import simple_module_factory
from metadata import COMMANDS
LOGIN_RETRY_MAX = 3
class Shell(object):
def __init__(self):
self.module = None
self.module_commands = None
self.module_list = None
self.build_module_list()
self.parse_system_commands(COMMANDS)
self.login()
def message_of_service(self):
""" Message of service """
return """
-----------------------------------------------------------------------
Thanks for choosing Aliyun product. This shell environment is for you
to interact with our public services. More details please use "help".
-----------------------------------------------------------------------
"""
def parse_system_commands(self, commands):
self.system_commands = dict()
field_list = ['name', 'function', 'description']
for item in commands:
command = dict(zip(field_list, item))
self.system_commands[command['name']] = command
def parse_module_commands(self, commands):
self.module_commands = dict()
field_list = ['name', 'function', 'description']
for item in commands:
command = dict(zip(field_list, item))
self.module_commands[command['name']] = command
def help(self, args):
""" Help description """
target = args.strip()[5:]
if target:
if target in self.system_commands:
output = "\n%s\t\t%s\n" % (self.system_commands[target]['name'], self.system_commands[target]['description'])
elif self.module_commands and target in self.module_commands:
output = "\n%s\t\t%s\n" % (self.module_commands[target]['name'], self.module_commands[target]['description'])
else:
output = """
Nothing found
Please try to run 'help' for a list of all accessible topics
"""
else:
output = """
System Commands
------------------------------------
"""
for key in self.system_commands:
output += "%s\t\t%s\n" % (self.system_commands[key]['name'], self.system_commands[key]['description'])
if self.module_commands:
output += """
Module Commands
------------------------------------
"""
for key in self.module_commands:
output += "%s\t\t%s\n" % (self.module_commands[key]['name'], self.module_commands[key]['description'])
return output
def prompt(self):
prompt = 'aliyun'
if self.module:
prompt += '::%s' % (self.module.metadata.NAME)
prompt += '> '
return prompt
def build_module_list(self):
ignore, pathname, ignore = find_module('aliyun')
self.module_list = [package
for package in listdir('%s/modules' % (pathname))
if not package.endswith(('.py', '.pyc', '.pyo'))
]
def list(self, args):
output = "Available modules:\n"
output += '\n'.join(self.module_list)
return output
def load(self, args):
module = args.split()[1]
if module in self.module_list:
self.module = simple_module_factory(module)
self.parse_module_commands(self.module.metadata.COMMANDS)
output = "\n%s" % (self.module.metadata.MESSAGE)
else:
output = "%s: does not exist" % (module)
return output
def unload(self, args):
self.module = None
self.module_commands = None
def quit(self, args):
raise EOFError()
def exit(self, args):
self.quit(args)
def process_input(self, input):
output = ''
items = input.split()
command = items[0]
# System commands always get priority
if command in self.system_commands:
output = getattr(self, self.system_commands[command]['function'])(input)
elif self.module and command in self.module_commands:
output += getattr(self.module.functions, self.module_commands[command]['function'])(input)
else:
raise CommandNotFound(command)
return output
def login(self):
retry = 0
while True:
try:
if retry >= LOGIN_RETRY_MAX:
print "Permission denied (login attemps have been reported)."
exit(1)
username = raw_input('Username: ')
if not username:
continue
retry += 1
password = getpass('Password: ')
self.user = authenticate(username = username, password = password)
if self.user is not None:
print self.user.get_last_login_message()
self.start_shell()
else:
print "Permission denied, please try again."
except KeyboardInterrupt:
print 'Ctrl-C -- exit!\nAborted'
exit(1)
except EOFError:
print "Permission denied, please try again."
def start_shell(self):
print self.message_of_service()
while True:
try:
input = raw_input(self.prompt())
if input:
output = self.process_input(input)
if output:
print output
except CommandNotFound as e:
print "%s: command not found" % (e.value)
pass
except KeyboardInterrupt:
print 'Ctrl-C -- exit!\nAborted'
exit(1)
except EOFError:
print 'Have a nice day ^____^'
exit(0)
|
Aliyun
|
/Aliyun-0.1.1.tar.gz/Aliyun-0.1.1/src/aliyun/shell.py
|
shell.py
|
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
#from sklearn.linear_model import LinearRegression,Ridge,Lasso,RidgeCV,LassoCV,ElasticNet,ElasticNetCV
from sklearn.preprocessing import StandardScaler
import numpy as np
class all_in_1:
def __init__(self,dataset,out,visualize=False,standard=False,stats=False,future_sel=False,outlayer_removel=False,multicol=False,remove_null=False):
self.dataset=dataset
self.out=out
self.v=visualize
self.outlier=outlayer_removel  # outlier-removal flag, kept separate from the output column stored in self.out
self.m=multicol
self.std=standard
self.s=stats
self.f=future_sel
if self.std==True:
self.standardtation(dataset,out)
if self.v==True:
self.visualize_data(dataset,out)
if self.s==True:
self.stats_data(dataset,out)
if self.m==True:
self.multi_col1(dataset,out)
if self.outlier==True:
self.find_outlayer(dataset,out)
if remove_null==True:
self.remove_nan_value(dataset,out)
if self.f==True:
self.featue_selection(dataset,out)
def stats_data(self,dataset,out):
print('Statistical analysis of the dataset:')
try:
print('Basic information about the dataset')
print()
print(dataset.describe())
print('---'*20)
print()
print('Dataset correlation')
print(dataset.corr())
print('---'*20)
print()
print('---'*20)
print()
replace_i=[]
replace_i1=[]
try:
import statsmodels.formula.api as sm
col=dataset.columns
s1=''
replace_i=[]
replace_i1=[]
for i in col:
if i.startswith(('Serial', 'serial', 'SERIAL')):
continue
new=i.replace(' ','_')
replace_i.append(new)
for i in col:
new=i.replace(' ','_')
replace_i1.append(new)
for i,j in enumerate(replace_i):
if i<len(replace_i)-1:
s1=s1+j+'+'
else:
s1=s1+j
print(s1)
out=out.replace(' ','_')
new_d=np.asarray(dataset)
new_d1=pd.DataFrame(new_d,columns=replace_i1)
print(new_d1.columns)
lm=sm.ols(formula=f'{out} ~ {s1}',data=new_d1).fit()
print('stats table')
print()
print(lm.summary())
except Exception as e:
print('error ',e)
print('Calculating multicollinearity')
self.multi_col1(dataset,out)
except Exception as e:
print('error in stats ',e)
def remove_nan_value(self,dataset,out):
li=[dataset._get_numeric_data().columns]
try:
for i in range(len(dataset.columns)):
if dataset.columns[i] in li[0]:
if dataset[dataset.columns[i]].nunique()<6:
dataset[dataset.columns[i]].fillna(dataset[dataset.columns[i]].mode()[0],inplace=True)
else:
dataset[dataset.columns[i]].fillna(dataset[dataset.columns[i]].mean(),inplace=True)
else:
dataset[dataset.columns[i]].fillna(dataset[dataset.columns[i]].mode()[0],inplace=True)
return dataset
except Exception as e:
print('error ',e)
def find_outlayer(self,dataset,out):
"""
This function finds and removes the outliers in the dataset
and returns the new dataset.
"""
out_layer_list=[]
col=dataset.columns
ind=[ i for i,j in enumerate(dataset.dtypes) if str(j) in ('float64', 'int64', 'float32', 'int32')]  # keep only numeric columns
col_name=dataset.columns[ind]
for i in col_name:
try:
if dataset[i].isna().sum()<=0:
q1=np.percentile(dataset[i],25)
q3=np.percentile(dataset[i],75)
iq=q3-q1
upper=q3+(1.5*iq)
lower=q1-(1.5*iq)
print(f'upper limit of {i}: {upper}, lower limit of {i}: {lower}')
for i1,j in enumerate(dataset[i]):
if j<upper and j>lower:
pass
else:
print(f'column {i}: index {i1} has outlier value {j}')
out_layer_list.append(i1)
else:
print('some values are null, so remove the null values first')
inp1=input('do you want to remove the null values? yes---1 or no---0')
if inp1=='1':
self.remove_nan_value(dataset,out)
except Exception as e:
print('error ',e)
inp=input('do you want to remove the outliers? yes--1 or no--0')
if inp=='1':
a=(dataset.iloc[out_layer_list]).index
dataset.drop(a,inplace=True)
print('our new dataset shape :',dataset.shape)
def visualize_data(self,d,o):
"""
This function visualizes the dataset for a better understanding of the data.
"""
inp=input('do you want to save all the plots? yes---1 or no---0')
inp1=''  # default path is empty so the later checks still work when plots are not saved
try:
if inp=='1':
inp1=input('enter path ')
except Exception as e:
print('enter 0 or 1 ')
# int_col,object_col=[],[]
cat_col,num_col=[],[]
df=d
print(type(df))
col=d.columns
col=d.columns
for i in range(len(col)):
if d[col[i]].nunique()<6:
cat_col.append(col[i])
else:
num_col.append(col[i])
for i in range(len(num_col)):
print(f'{num_col[i]} vs {o}')
plt.xlabel(num_col[i])
plt.ylabel(o)
plt.scatter(x=num_col[i],y=o,data=d)
plt.show()
print('--'*20)
if inp1!='':
try:
plt.savefig(inp1+num_col[i]+'_vs_'+o+'.png')
except Exception as e:
print('please enter valid path ',e)
for i in range(len(cat_col)):
print()
sns.barplot(x=cat_col[i],y=o,data=d)
plt.show()
print('--'*20)
if inp1!='':
try:
plt.savefig(inp1+cat_col[i]+'_vs_'+o+'.png')
except Exception as e:
print('please enter valid path ',e)
inp=input('do you want histograms? yes--1 or no--0')
if inp=='1':
for i in d.columns:
plt.xlabel(i)
plt.ylabel('count')
plt.hist(x=d[i])
plt.show()
print('--'*20)
def featue_selection(self,dataset,out):
"""
This function finds the best features for the dataset
using a correlation comparison: features whose correlation
with the output column is below 0.35 are suggested for removal.
Returns the new dataset.
"""
cor=dataset.corr()
ind=np.where(cor[out]<0.35)
a=cor.iloc[ind].index
print('These columns contribute little to the outcome')
print()
print(a)
inp=input('if you want to remove columns y--1 or n--0')
if inp=='1':
dataset.drop(columns=a,inplace=True)
print('successfully removed the columns')
return dataset
def multi_col1(self,dataset,out):
"""
This function finds multicollinear features in the dataset
using the variance_inflation_factor method.
Returns the new dataset.
"""
from statsmodels.stats.outliers_influence import variance_inflation_factor
li=dataset._get_numeric_data().columns
try:
# print(out)
#data=dataset.drop(out,axis=1)
final_data=dataset[li]
x=np.asarray(final_data)
data_vif=[variance_inflation_factor(x,i)for i in range(x.shape[1])]
print('our dataset vif values ')
print()
print(data_vif)
inp=input('do you want to remove the multicollinear features? yes---1 or no---0 ')
if inp=='1':
col_ind=[ i for i,j in enumerate(data_vif) if j>10]
print(col_ind)
col_name=dataset.columns[col_ind]
print(col_name)
dataset.drop(col_name,axis=1,inplace=True)
print('our final columns in our dataset ',dataset.columns)
return dataset
except Exception as e:
print('error mul ',e)
def standardtation(self,dataset,out):
"""
This function standardizes the numeric columns of the dataset.
Returns the standardized dataset.
"""
data=dataset.drop(out,axis=1)
li=data._get_numeric_data().columns
scaler=StandardScaler()
arr=scaler.fit_transform(dataset[li])
final=pd.DataFrame(data=arr,columns=li)
dataset[li]=final
return dataset
# data=pd.read_csv('C:\\Users\\sathi\Downloads\\Admission_Predict.csv')
# out='Chance of Admit'
# a=all_in_1(data,out,visualize=True
|
All-In-1-sathishbilli
|
/All_In_1_sathishbilli-0.0.3-py3-none-any.whl/All_In_1/app.py
|
app.py
|
import webbrowser
import tkinter as tk
class calc:
"""
A maths class that contains a calculator and some other good stuff
"""
def prtclc(a, operator, b):
"""
prtclc stands for print calculator, which is a calculator that prints the answer on the screen
"""
if operator == "+":
answr = a + b
elif operator == "-":
answr = a - b
elif operator == "*":
answr = a * b
elif operator == "/":
answr = a / b
else:
raise ValueError(operator, " is not an operator. The operators are + , - , * and / . If you wrote one of those, make sure it is a string, not a variable.")
print(answr)
def even(number_to_check):
"""
A function that checks if number_to_check is even and return True or False
"""
if number_to_check % 2 == 0:
return True
else:
return False
def odd(number_to_check):
"""
A function that checks if number_to_check is odd and returns either True or False as an output
"""
if not number_to_check % 2 == 0:
return True
else:
return False
class www:
"""
A class that contains internet functions
NEEDS webbrowser module and internet
"""
def visit(url):
"""Opens the url in your standard webbrowser"""
webbrowser.open(url)
class buggie:
"""A class with functions that goes on forever"""
def zero():
"Zero Zero Zero... What else do you need to know"""
while True:
print("000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
def window():
"""A alpha function that does not work as supposed"""
while True:
root = tk.Tk()
window = tk.Toplevel(root)
tk.mainloop()
class og:
"""Well, it's the og class"""
def og():
"""Some sort of hello world command"""
print("hello world")
class classes:
"""A class made for documentation. The classes in the documentation is Calc , www , buggie , og and classes (a special documentation class)"""
|
All-you-need-module
|
/All%20you%20need%20module-0.0.1.tar.gz/All you need module-0.0.1/AYNM/aynm.py
|
aynm.py
|
from trac.attachment import Attachment
from trac.resource import ResourceNotFound
from trac.util.html import html
from trac.util.text import pretty_size
from trac.wiki.macros import WikiMacroBase
class AllAttachmentsMacro(WikiMacroBase):
"""Shows all attachments on the Trac site.
The first argument is the filter for which attachments to show.
The filter can have the value 'ticket' or 'wiki'. Omitting the filter
argument shows all attachments.
Examples:
{{{
[[AllAttachments()]] # Show all attachments
[[AllAttachments(ticket)]] # Show attachments linked to tickets
[[AllAttachments(wiki)]] # Show attachments linked to wiki pages
}}}
"""
def expand_macro(self, formatter, name, content):
attachment_type = ""
if content:
argv = [arg.strip() for arg in content.split(',')]
if len(argv) > 0:
attachment_type = argv[0]
with self.env.db_transaction as db:
if attachment_type is None or attachment_type == "":
attachments = db("""
SELECT type,id,filename,size,time,
description,author,ipnr FROM attachment
""")
else:
attachments = db("""
SELECT type,id,filename,size,time,
description,author,ipnr FROM attachment
WHERE type=%s
""", (attachment_type, ))
formatters = {
'wiki': formatter.href.wiki,
'ticket': formatter.href.ticket,
'milestone': formatter.href.milestone,
}
types = {
'wiki': '',
'ticket': 'ticket ',
'milestone': 'milestone ',
}
return html.ul(
[html.li(
html.a(filename, href=formatter.href.attachment(type + '/' +
id + '/' +
filename)),
" (", html.span(pretty_size(size), title=size), ") - added by ",
html.em(author), " to ",
html.a(types[type] + ' ' + id, href=formatters[type](id)), ' ')
for type, id, filename, size, time, description, author, ipnr
in attachments
if self._has_perm(type, id, filename, formatter.context)])
def _has_perm(self, parent_realm, parent_id, filename, context):
try:
attachment = Attachment(self.env, parent_realm, parent_id, filename)
except ResourceNotFound:
return False
return 'ATTACHMENT_VIEW' in context.req.perm(attachment.resource)
|
AllAttachmentsMacro
|
/AllAttachmentsMacro-0.2.tar.gz/AllAttachmentsMacro-0.2/allattachments/api.py
|
api.py
|
====================================
AllPairs test combinations generator
====================================
AllPairs is an open source test combinations generator written in
Python, developed and maintained by MetaCommunications Engineering.
The generator allows one to create a set of tests using the "pairwise
combinations" method, reducing the number of combinations of variables
to a smaller set that covers most situations.
For more info on pairwise testing see http://www.pairwise.org.
The easiest way to get started is to check out usage examples in
the "examples" directory and online at
https://apps.sourceforge.net/trac/allpairs/browser/examples/.
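For orientation, here is a minimal sketch of the iteration interface.
It is not taken from the bundled examples; it assumes the all_pairs2
class from metacomm/combinatorics/all_pairs2.py shipped with this
release, importable as a package on the Python (2.x) path:
    from metacomm.combinatorics.all_pairs2 import all_pairs2
    # each inner list holds the possible values of one parameter
    parameters = [
        ["Brand X", "Brand Y"],
        ["98", "NT", "2000", "XP"],
        ["Internal", "Modem"],
    ]
    # optional filter: reject any (partial) combination deemed invalid
    is_valid = lambda row: not ("Brand X" in row and "98" in row)
    pairwise = all_pairs2(parameters, filter_func=is_valid)
    for i, pairs in enumerate(pairwise):
        print "%i: %s" % (i, pairs)
Each iteration yields one test case (a list with one value per
parameter), so the loop above prints the reduced pairwise test set.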
Features
--------
* Produces a good enough dataset.
* Pythonic, iterator-style enumeration interface.
* Allows filtering out "invalid" combinations during the search for the
next combination.
* Allows excluding "previously tested" pairs/combinations.
* Goes beyond pairs! If/when required, it can generate n-wise
combinations.
Installation
------------
To install AllPairs to your Python's site-packages directory, run
this command from the command prompt:
python setup.py install
Alternatively, you can just copy the entire "metacomm" directory
somewhere into your Python path.
Known issues
------------
* Not optimal - there are tools that can create a smaller set covering
all the pairs. However, they are missing some other important
features and/or do not integrate well with Python.
* A poorly written filtering function may lead to a full permutation of
the parameters.
* Version 2.0 has become slower (a side-effect of introducing the ability
to produce n-wise combinations).
Feedback
--------
Please submit patches, bug reports, and feature requests here:
http://apps.sourceforge.net/trac/allpairs/newticket
Other inquiries can be directed to
metacomm(at)users.sourceforge.net
|
AllPairs
|
/AllPairs-2.0.1.zip/AllPairs-2.0.1/README.txt
|
README.txt
|
import pairs_storage
from combinatorics import xuniqueCombinations
class item:
def __init__(self, id, value):
self.id = id
self.value = value
self.weights = []
def __str__(self):
return str(self.__dict__)
def get_max_comb_number( arr, n ):
items = [len(x) for x in arr]
#print items
f = lambda x,y:x*y
total = sum([ reduce(f, z) for z in xuniqueCombinations( items, n) ])
return total
class all_pairs2:
def __iter__( self ):
return self
def __init__( self, options, filter_func = lambda x: True, previously_tested = [[]], n = 2 ):
"""
TODO: check that input arrays are:
- (optional) has no duplicated values inside single array / or compress such values
"""
if len( options ) < 2:
raise Exception("must provide more than one option")
for arr in options:
if not len(arr):
raise Exception("option arrays must have at least one item")
self.__filter_func = filter_func
self.__n = n
self.__pairs = pairs_storage.pairs_storage(n)
self.__max_unique_pairs_expected = get_max_comb_number( options, n )
self.__working_arr = []
for i in range( len( options )):
self.__working_arr.append( [ item("a%iv%i" % (i,j), value) \
for j, value in enumerate(options[i] ) ] )
for arr in previously_tested:
if len(arr) == 0:
continue
elif len(arr) != len(self.__working_arr):
raise Exception("previously tested combination is not complete")
if not self.__filter_func(arr):
raise Exception("invalid tested combination is provided")
tested = []
for i, val in enumerate(arr):
idxs = [item(node.id, 0) for node in self.__working_arr[i] if node.value == val]
if len(idxs) != 1:
raise Exception("value from previously tested combination is not found in the options or found more than once")
tested.append(idxs[0])
self.__pairs.add_sequence(tested)
def next( self ):
assert( len(self.__pairs) <= self.__max_unique_pairs_expected )
p = self.__pairs
if len(self.__pairs) == self.__max_unique_pairs_expected:
# no reasons to search further - all pairs are found
raise StopIteration
previous_unique_pairs_count= len(self.__pairs)
chosen_values_arr = [None] * len(self.__working_arr)
indexes = [None] * len(self.__working_arr)
direction = 1
i = 0
while -1 < i < len(self.__working_arr):
if direction == 1: # move forward
self.resort_working_array( chosen_values_arr[:i], i )
indexes[i] = 0
elif direction == 0 or direction == -1: # scan current array or go back
indexes[i] += 1
if indexes[i] >= len( self.__working_arr[i] ):
direction = -1
if i == 0:
raise StopIteration
i += direction
continue
direction = 0
else:
raise Exception("next(): unknown 'direction' code.")
chosen_values_arr[i] = self.__working_arr[i][ indexes[i] ]
if self.__filter_func( self.get_values_array( chosen_values_arr[:i+1] ) ):
assert(direction > -1)
direction = 1
else:
direction = 0
i += direction
if len( self.__working_arr ) != len(chosen_values_arr):
raise StopIteration
self.__pairs.add_sequence( chosen_values_arr )
if len(self.__pairs) == previous_unique_pairs_count:
# could not find new unique pairs - stop
raise StopIteration
# replace returned array elements with real values and return it
return self.get_values_array( chosen_values_arr )
def get_values_array( self, arr ):
return [ item.value for item in arr ]
def resort_working_array( self, chosen_values_arr, num ):
for item in self.__working_arr[num]:
data_node = self.__pairs.get_node_info( item )
new_combs = []
for i in range(0, self.__n):
# numbers of new combinations to be created if this item is appended to array
new_combs.append( set([pairs_storage.key(z) for z in xuniqueCombinations( chosen_values_arr+[item], i+1)]) - self.__pairs.get_combs()[i] )
# weighting the node
item.weights = [ -len(new_combs[-1]) ] # node that creates most of new pairs is the best
item.weights += [ len(data_node.out) ] # less used outbound connections most likely to produce more new pairs while search continues
item.weights += [ len(x) for x in reversed(new_combs[:-1])]
item.weights += [ -data_node.counter ] # less used node is better
item.weights += [ -len(data_node.in_) ] # otherwise we will prefer node with most of free inbound connections; somehow it works out better ;)
self.__working_arr[num].sort( lambda a,b: cmp(a.weights, b.weights) )
# statistics, internal stuff
def get_pairs_found( self ):
return self.__pairs
__export__ = [ all_pairs2, get_max_comb_number ]
|
AllPairs
|
/AllPairs-2.0.1.zip/AllPairs-2.0.1/metacomm/combinatorics/all_pairs2.py
|
all_pairs2.py
|
AllTray
=========
Make a System Tray Icon for all application.
Install
-------
Simple Install with pip::
pip install alltray --user
From source::
python setup.py install
Usage
-----
::
$ alltray --help
usage: alltray.py [-h] [--config CONFIG] [--tooltip TOOLTIP] [--icon ICON]
[command]
positional arguments:
command To run command
optional arguments:
-h, --help show this help message and exit
--config CONFIG command group
--tooltip TOOLTIP tray tool tip
--icon ICON command icon
example::
alltray --config chinadns
Pack execute or app file
-------------------------
For Windows, use cx_Freeze_
For Mac OS X, there are still some issues...
cx_Freeze
~~~~~~~~~
::
python cx_Freeze_setup.py bdist
pyinstaller
~~~~~~~~~~~
1. install pywin32 from http://sourceforge.net/projects/pywin32/files/
2. pip install pyinstaller
3. [option] install upx from http://upx.sourceforge.net/
4. Run ``pyinstaller --clean -i Apps-wheelchair.icns -w alltray.py``
Execute file will be found in "dist" directory.
.. important::
+ The executable can only be run from a directory whose path contains only ASCII characters; paths with other encodings are not supported. This is a pyinstaller bug.
+ A single-file executable cannot run on 64-bit Windows (known bug).
py2exe
~~~~~~~
Too old; please ignore it.
::
python py2exe_setup.py py2exe
Icon
-----
http://www.iconarchive.com/show/nuoveXT-2-icons-by-saki/Apps-wheelchair-icon.html
Screenshot
----------
+ Windows
.. image:: screenshot_window.png
+ Ubuntu
.. image:: screenshot_ubuntu.png
|
AllTray
|
/AllTray-0.1.1.tar.gz/AllTray-0.1.1/README.rst
|
README.rst
|
import sys
import subprocess
import threading
import locale
import argparse
import shlex
import os.path
from functools import partial
from PyQt4 import QtGui, QtCore
from alltray import __version__
class TrayDialog(QtGui.QDialog):
logThread = None
def __init__(self, settings, parent=None):
super(TrayDialog, self).__init__(parent)
self.settings = settings
icon_path = settings.value('icon_path').toString()
if icon_path.isEmpty():
if sys.platform == 'win32':
icon = QtGui.QIcon(QtGui.QFileIconProvider().icon(QtCore.QFileInfo(sys.argv[0])))
# elif sys.platform == 'darwin':
# icon = QtGui.QIcon('Apps-wheelchair.icns')
elif sys.platform == 'linux2':
icon = QtGui.QIcon.fromTheme('preferences-desktop-accessibility')
else:
icon = QtGui.QIcon('Apps-wheelchair.ico')
else:
icon = QtGui.QIcon(QtGui.QFileIconProvider().icon(QtCore.QFileInfo(icon_path)))
self.setWindowIcon(icon)
self.setWindowTitle(self.tr('All Tray'))
self.setLayout(self.getLayout())
self.tray = QtGui.QSystemTrayIcon(self)
self.tray.setContextMenu(self.getMenu())
self.tray.setIcon(icon)
tooltip = settings.value('tooltip').toString()
if tooltip.isEmpty():
tooltip = self.tr('[All Tray] I\'m here!')
self.tray.setToolTip(tooltip)
self.tray.activated.connect(self.trayActivated)
app_cmd = settings.value('app_cmd').toString()
self.general_ctl.icon_path.setText(icon_path)
self.general_ctl.app_path.appendPlainText(app_cmd)
self.general_ctl.tooltip.setText(tooltip)
app_run = settings.value('app_run').toBool()
self.general_ctl.app_run.setChecked(app_run)
if app_run:
self.tray.show()
def getLayout(self):
main_layout = QtGui.QVBoxLayout()
tabWidget = QtGui.QTabWidget(self)
self.general_ctl = GeneralWidget(self.settings, tabWidget)
self.log_ctl = LogWidget(tabWidget)
dlg = QtGui.QDialog(self)
about = AboutWidget(dlg)
about_layout = QtGui.QVBoxLayout()
about_layout.addWidget(about)
about_layout.addStretch(1)
dlg.setLayout(about_layout)
tabWidget.addTab(self.general_ctl, self.tr('General'))
tabWidget.addTab(self.log_ctl, self.tr('Log'))
tabWidget.addTab(dlg, self.tr('About'))
self.buttonBox = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Apply | QtGui.QDialogButtonBox.Close)
self.buttonBox.clicked.connect(self.applyCommand)
main_layout.addWidget(tabWidget)
main_layout.addWidget(self.buttonBox)
return main_layout
def getMenu(self):
exit_action = QtGui.QAction(
self.tr('&Exit'), self, triggered=self.quit)
about_action = QtGui.QAction(
self.tr('&About'), self, triggered=self.about)
show_action = QtGui.QAction(
self.tr('&Show Alltray'),
self,
triggered=partial(self.trayActivated, QtGui.QSystemTrayIcon.Trigger)
)
menu = QtGui.QMenu(self)
menu.addAction(show_action)
menu.addAction(about_action)
menu.addSeparator()
menu.addAction(exit_action)
return menu
def trayActivated(self, reason):
if reason in (
QtGui.QSystemTrayIcon.Trigger,
QtGui.QSystemTrayIcon.DoubleClick):
self.show()
def closeEvent(self, event):
self.saveSettings()
if self.tray.isVisible():
self.hide()
event.ignore()
def applyCommand(self, button):
if self.buttonBox.buttonRole(button) == QtGui.QDialogButtonBox.RejectRole:
self.close()
elif self.buttonBox.buttonRole(button) == QtGui.QDialogButtonBox.ApplyRole:
icon_path = self.general_ctl.icon_path.text()
app_cmd = self.general_ctl.app_path.toPlainText()
tooltip = self.general_ctl.tooltip.text()
self.saveSettings()
icon = QtGui.QFileIconProvider().icon(
QtCore.QFileInfo(icon_path))
self.setWindowIcon(icon)
self.tray.setToolTip(tooltip)
self.tray.show()
self.hide()
self.runCommand(app_cmd)
def saveSettings(self):
icon_path = self.general_ctl.icon_path.text()
app_cmd = self.general_ctl.app_path.toPlainText()
tooltip = self.general_ctl.tooltip.text()
app_run = self.general_ctl.app_run.isChecked()
self.settings.setValue('icon_path', icon_path)
self.settings.setValue('app_cmd', app_cmd)
self.settings.setValue('tooltip', tooltip)
self.settings.setValue('app_run', app_run)
def runCommand(self, app_cmd):
if not app_cmd:
self.log_ctl.append('no cmd')
return
cmd = shlex.split(unicode(app_cmd.toUtf8(), encoding='utf8'))
if os.path.dirname(cmd[0]):
cmd[0] = os.path.realpath(cmd[0])
if self.logThread:
self.process.kill()
self.logThread.kill()
self.logThread.join()
self.log_ctl.append(' '.join(cmd))
kwargs = {}
kwargs['stdout'] = subprocess.PIPE
kwargs['stderr'] = subprocess.PIPE
kwargs['shell'] = False
if sys.platform == 'win32':
si = subprocess.STARTUPINFO()
si.dwFlags |= subprocess.STARTF_USESHOWWINDOW
si.dwFlags |= subprocess.STARTF_USESTDHANDLES
kwargs['startupinfo'] = si
self.process = subprocess.Popen(
cmd,
**kwargs
)
self.logThread = LogThread(self.process, self)
self.logThread.start()
def killCommand(self):
if self.logThread:
self.process.kill()
self.logThread.kill()
self.logThread.join()
self.logThread = None
def about(self):
dlg = AboutDialog(self)
dlg.show()
def quit(self):
self.tray.hide()
self.killCommand()
QtGui.qApp.quit()
class GeneralWidget(QtGui.QWidget):
def __init__(self, settings, parent=None):
super(GeneralWidget, self).__init__(parent)
main_layout = QtGui.QVBoxLayout()
icon_folder = QtGui.QFileIconProvider().icon(QtGui.QFileIconProvider.Folder)
# command line applications don't have an icon, so you may set any one.
iconGroup = QtGui.QGroupBox(self.tr('Icon'))
hlayout = QtGui.QHBoxLayout()
self.icon_ctl = QtGui.QLabel()
self.icon_ctl.setPixmap(self.windowIcon().pixmap(32, 32))
self.icon_ctl.setFrameStyle(QtGui.QFrame.StyledPanel | QtGui.QFrame.Plain)
hlayout.addWidget(self.icon_ctl)
vlayout = QtGui.QVBoxLayout()
icon_label = QtGui.QLabel(self.tr('path:'))
vlayout.addWidget(icon_label)
self.icon_path = QtGui.QLineEdit()
icon_browser = QtGui.QPushButton(icon_folder, '')
icon_browser.setFlat(True)
icon_browser.clicked.connect(self.iconBrowser)
h_layout = QtGui.QHBoxLayout()
h_layout.addWidget(self.icon_path)
h_layout.addWidget(icon_browser)
h_layout.setStretch(0, 1)
vlayout.addLayout(h_layout)
hlayout.addLayout(vlayout)
iconGroup.setLayout(hlayout)
appGroup = QtGui.QGroupBox(self.tr('Application'))
vlayout = QtGui.QVBoxLayout()
hlayout = QtGui.QHBoxLayout()
app_label = QtGui.QLabel(self.tr('path:'))
self.app_path = QtGui.QPlainTextEdit()
app_browser = QtGui.QPushButton(icon_folder, '')
app_browser.setFlat(True)
app_browser.clicked.connect(self.applicationBrowser)
hlayout.addWidget(app_label)
hlayout.addWidget(app_browser)
hlayout.setStretch(0, 1)
vlayout.addLayout(hlayout)
vlayout.addWidget(self.app_path)
vlayout.setStretch(1, 1)
appGroup.setLayout(vlayout)
tooltipGroup = QtGui.QGroupBox(self.tr('Tooltip'))
vlayout = QtGui.QVBoxLayout()
self.tooltip = QtGui.QLineEdit()
vlayout.addWidget(self.tooltip)
tooltipGroup.setLayout(vlayout)
self.app_run = QtGui.QCheckBox(self.tr('Run directly, not show dialog.'))
self.app_run.setChecked(False)
main_layout.addWidget(iconGroup)
main_layout.addWidget(appGroup)
main_layout.addWidget(tooltipGroup)
main_layout.addWidget(self.app_run)
main_layout.addStretch(1)
self.setLayout(main_layout)
def getFilePath(self):
file_path = QtGui.QFileDialog.getOpenFileName(
self,
self.tr('Selected file'),
'',
"All Files (*)",
)
return file_path
def iconBrowser(self):
path = self.getFilePath()
self.icon_path.setText(path)
icon = QtGui.QFileIconProvider().icon(
QtCore.QFileInfo(path))
self.icon_ctl.setPixmap(icon.pixmap(32, 32))
def applicationBrowser(self):
path = self.getFilePath()
self.app_path.appendPlainText(path)
class LogWidget(QtGui.QWidget):
appended = QtCore.pyqtSignal(str)
def __init__(self, parent=None):
super(LogWidget, self).__init__(parent)
self.mono_font = QtGui.QFont('Monospace')
main_layout = QtGui.QVBoxLayout()
self.text_ctl = QtGui.QPlainTextEdit(self)
self.text_ctl.setFont(self.mono_font)
self.text_ctl.setLineWrapMode(QtGui.QPlainTextEdit.NoWrap)
self.text_ctl.setReadOnly(True)
main_layout.addWidget(self.text_ctl)
self.setLayout(main_layout)
self.appended.connect(self.append)
def sizeHint(self):
# width = QtGui.QFontMetrics(self.mono_font).width('=' * 80)
width = 500
return QtCore.QSize(width, -1)
@QtCore.pyqtSlot(str)
def append(self, line):
self.text_ctl.moveCursor(QtGui.QTextCursor.End)
self.text_ctl.moveCursor(QtGui.QTextCursor.StartOfLine)
self.text_ctl.appendPlainText(line)
class AboutWidget(QtGui.QWidget):
def __init__(self, parent=None):
super(AboutWidget, self).__init__(parent)
hlayout = QtGui.QHBoxLayout()
hlayout.addStretch(1)
icon_ctl = QtGui.QLabel()
icon_ctl.setPixmap(self.windowIcon().pixmap(32, 32))
hlayout.addWidget(icon_ctl)
vlayout = QtGui.QVBoxLayout()
vlayout.addWidget(QtGui.QLabel(
self.tr('All Tray v%s' % __version__)
))
vlayout.addWidget(QtGui.QLabel(self.tr('Tray all application.')))
hlayout.addLayout(vlayout)
hlayout.addStretch(1)
self.setLayout(hlayout)
class AboutDialog(QtGui.QDialog):
def __init__(self, parent=None):
super(AboutDialog, self).__init__(parent)
self.setWindowTitle(self.tr('All Tray'))
main_layout = QtGui.QVBoxLayout()
main_layout.addWidget(AboutWidget(self))
buttonBox = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Ok)
buttonBox.accepted.connect(self.close)
main_layout.addWidget(buttonBox)
self.setLayout(main_layout)
def closeEvent(self, event):
self.hide()
event.ignore()
class LogThread():
process = None
t1 = None
def __init__(self, process, logWin):
self.logWin = logWin
self.process = process
self.system_encoding = locale.getpreferredencoding()
self.log('System encoding: %s' % self.system_encoding, force=True)
def kill(self):
pass
def join(self):
self.t1.join()
self.t2.join()
def log(self, text, force=False):
if force or not self.logWin.isHidden():
self.logWin.log_ctl.appended.emit(text)
def start(self):
"""
It will get log information if the application flushes its buffer.
Known bug:
sys.stdout and sys.stderr sometimes block the stream
"""
def tee_pipe(pipe, log):
for line in iter(pipe.readline, ''):
line = unicode(line, self.system_encoding)
log(line.rstrip('\n\r'))
self.t1 = threading.Thread(target=tee_pipe, args=(self.process.stdout, self.log))
self.t1.start()
self.t2 = threading.Thread(target=tee_pipe, args=(self.process.stderr, self.log))
self.t2.start()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--config', default='default', help='command group')
parser.add_argument('--tooltip', help='tray tool tip')
parser.add_argument('--icon', help='command icon')
parser.add_argument('command', nargs='?', help='To run command')
args = parser.parse_args()
if sys.platform == 'win32':
if args.config == 'default':
args.config = 'alltray.ini'
settings = QtCore.QSettings(args.config, QtCore.QSettings.IniFormat)
else:
settings = QtCore.QSettings('alltray', args.config)
if args.icon:
settings.setValue('icon_path', args.icon)
if args.command:
settings.setValue('app_cmd', args.command)
if args.tooltip:
settings.setValue('tooltip', args.tooltip)
app = QtGui.QApplication(sys.argv)
if not QtGui.QSystemTrayIcon.isSystemTrayAvailable():
QtGui.QMessageBox.critical(
None,
"Sorry",
"I couldn't detect any system tray on this system."
)
sys.exit(1)
win = TrayDialog(settings)
app_cmd = settings.value('app_cmd').toString()
app_run = settings.value('app_run').toBool()
if app_run:
win.hide()
win.runCommand(app_cmd)
else:
win.show()
sys.exit(app.exec_())
|
AllTray
|
/AllTray-0.1.1.tar.gz/AllTray-0.1.1/alltray/tray.py
|
tray.py
|
# Allagash
[](https://github.com/apulverizer/allagash/actions)
[](LICENSE)
[](https://anaconda.org/conda-forge/allagash)
A spatial optimization library for covering problems. Full documentation is available [here](https://apulverizer.github.io/allagash)
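A minimal quick-start sketch (the GeoDataFrames `demand_df` and `supply_df` and their id column names are placeholders; see the install options below):

```python
import pulp
from allagash.coverage import Coverage
from allagash.problem import Problem

# Binary coverage: which supply geometries cover which demand locations
coverage = Coverage.from_geodataframes(demand_df, supply_df, "Demand_Id", "Supply_Id")

# Location Set Covering Problem: cover every demand location with as few supply locations as possible
problem = Problem.lscp(coverage)
problem.solve(pulp.PULP_CBC_CMD())

selected_ids = problem.selected_supply(coverage)  # ids of the chosen supply locations
```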
----
### Installing with conda
To install with geopandas run:
`conda install -c conda-forge allagash geopandas`
To install with arcgis run:
`conda install -c conda-forge -c esri allagash arcgis`
To install without a spatial library run:
`conda install -c conda-forge allagash`
----
### Installing with pip
To install with geopandas run:
`pip install allagash[geopandas]`
To install with arcgis run:
`pip install allagash[arcgis]`
To install without a spatial library run:
`pip install allagash`
----
### Running Locally
1. Clone the repo `git clone [email protected]:apulverizer/allagash.git`
2. Create the conda environment `conda env create --file environment.yml`
3. Activate the new environment `conda activate allagash`
4. Install pre-commit hooks `pre-commit install`
5. Install allagash locally `pip install -e . --no-deps`
6. Launch jupyter notebook `jupyter notebook`
You should now be able to run the example notebooks.
You can choose to install and use another solver that is supported by [Pulp](https://github.com/coin-or/pulp):
- [GLPK](https://www.gnu.org/software/glpk/) (included in conda environment)
- [COIN-OR CBC](https://github.com/coin-or/Cbc)
- [CPLEX](https://www.ibm.com/analytics/cplex-optimizer)
- [Gurobi](https://www.gurobi.com/)
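Any of these can be passed directly to `Problem.solve()`; a minimal sketch, assuming GLPK is installed and a `problem` has already been built:

```python
import pulp

problem.solve(pulp.GLPK())  # or swap in pulp.PULP_CBC_CMD(), pulp.CPLEX_CMD(), pulp.GUROBI_CMD(), ...
```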
----
### Running Tests Locally
1. Run tests `pytest --nbval`
----
### Building Documentation
1. From the repo directory run `sphinx-build -b html ./src-doc ./docs -a`
This will deploy html documentation to the docs folder.
----
### Running with Docker
You can build the local docker image that includes Allagash, Python, Jupyter, GLPK, and COIN-OR CBC.
1. Build the docker image `docker build . -t apulverizer/allagash:latest`
2. Launch Jupyter notebook `docker run -i -t --user=allagash -p 8888:8888 apulverizer/allagash:latest /bin/bash -c "jupyter notebook --ip='*' --port=8888 --no-browser"`
You should now be able to run the example notebooks.
You can test the notebooks as well by running `docker run --user=allagash apulverizer/allagash:latest /bin/bash -c "py.test --nbval"`
If you'd like to mount a directory of local data/files into the container, you can add `-v <your-local-dir>:/home/allagash/<dir-name>` when running `docker run`
----
### Running Tests with Docker
You can build a docker container that will run the tests (mounted into the container)
1. `docker build . --file build.Dockerfile --tag apulverizer/allagash:build`
2. `docker run --user=allagash -v $PWD/tests:/home/allagash/tests -v $PWD/src-doc:/home/allagash/src-doc apulverizer/allagash:build /bin/bash -c "py.test --nbval"`
|
Allagash
|
/Allagash-0.4.3.tar.gz/Allagash-0.4.3/README.md
|
README.md
|
import operator
import pulp
from .coverage import Coverage
from pandas.api.types import is_string_dtype
class Problem:
_problem_types = ["lscp", "mclp", "bclp"]
_delineator = "$"
def __init__(self, pulp_problem, coverages, problem_type):
"""
A representation of the linear programming problem that can be solved.
This is not intended to be created on its own but rather from one of the factory methods
:meth:`~allagash.problem.Problem.lscp` or :meth:`~allagash.problem.Problem.mclp`
.. code-block:: python
Problem.lscp(coverage)
Problem.lscp([coverage1, coverage2])
:param ~pulp.LpProblem pulp_problem: The pulp problem that will be solved
:param list[~allagash.coverage.Coverage] coverages: The coverages that were used to build the problem
:param str problem_type: The type of problem that was generated
"""
self._validate(pulp_problem, coverages, problem_type)
self._pulp_problem = pulp_problem
if isinstance(coverages, Coverage):
self._coverages = [coverages]
else:
self._coverages = coverages
self._problem_type = problem_type.lower()
def _validate(self, problem, coverages, problem_type):
if not isinstance(problem, pulp.LpProblem):
raise TypeError(
f"Expected 'LpProblem' type for problem, got '{type(problem)}'"
)
if not isinstance(problem_type, str):
raise TypeError(
f"Expected 'str' type for problem_type, got '{type(problem_type)}'"
)
if problem_type.lower() not in self._problem_types:
raise ValueError(f"Invalid problem_type: '{problem_type}'")
if not isinstance(coverages, (list, Coverage)):
raise TypeError(
f"Expected 'Coverage' or 'list' type for coverages, got '{type(coverages)}'"
)
@property
def pulp_problem(self):
"""
:return: The pulp problem
:rtype: ~pulp.LpProblem
"""
return self._pulp_problem
@property
def coverages(self):
"""
:return: The coverage used to create the problem
:rtype: list[~allagash.coverage.Coverage]
"""
return self._coverages
@property
def problem_type(self):
"""
:return: The type of problem that this is
:rtype: str
"""
return self._problem_type
def solve(self, solver):
"""
:param ~pulp.solvers.LpSolver solver: The solver to use for this problem
:return: The solution for this problem
:rtype: ~allagash.problem.Problem
"""
if not isinstance(solver, pulp.LpSolver):
raise TypeError(
f"Expected 'LpSolver' type for solver, got '{type(solver)}'"
)
self._pulp_problem.solve(solver)
if self._pulp_problem.status == 0:
raise NotSolvedException("Unable to solve the problem")
elif self._pulp_problem.status == -1:
raise InfeasibleException("Infeasible problem")
elif self._pulp_problem.status == -2:
raise UnboundedException("Unbounded problem")
elif self._pulp_problem.status == -3:
raise UndefinedException("Undefined problem")
return self
@classmethod
def lscp(cls, coverages):
"""
Creates a new :class:`~allagash.problem.Problem` object representing the Location Set Covering Problem (LSCP)
:param list[~allagash.coverage.Coverage] coverages: The coverages to be used to create the problem
:return: The created problem
:rtype: ~allagash.problem.Problem
"""
if not isinstance(coverages, (Coverage, list)):
raise TypeError(
f"Expected 'Coverage' or 'list' type for coverages, got '{type(coverages)}'"
)
if isinstance(coverages, Coverage):
coverages = [coverages]
if not all([c.coverage_type == coverages[0].coverage_type for c in coverages]):
raise ValueError(
"Invalid coverages. Coverages must have the same coverage type."
)
if coverages[0].coverage_type != "binary":
raise ValueError("LSCP can only be generated from binary coverage.")
if not all(x.demand_name == coverages[0].demand_name for x in coverages):
raise ValueError("All Coverages must have the same 'demand_name'")
prob = cls._generate_lscp_problem(coverages)
return Problem(prob, coverages, problem_type="lscp")
@classmethod
def bclp(cls, coverages, max_supply):
"""
Creates a new :class:`~allagash.problem.Problem` object representing the Backup Covering Location Problem
:param list[~allagash.coverage.Coverage] coverages: The coverages to be used to create the problem
:param dict[~allagash.coverage.Coverage,int] max_supply: The maximum number of supply locations to allow
:return: The created problem
:rtype: ~allagash.problem.Problem
"""
if not isinstance(coverages, (Coverage, list)):
raise TypeError(
f"Expected 'Coverage' or 'list' type for coverages, got '{type(coverages)}'"
)
if isinstance(coverages, Coverage):
coverages = [coverages]
if not all([c.coverage_type == coverages[0].coverage_type for c in coverages]):
raise ValueError(
"Invalid coverages. Coverages must have the same coverage type."
)
if coverages[0].coverage_type != "binary":
raise ValueError("BCLP can only be generated from binary coverage.")
if not isinstance(max_supply, dict):
raise TypeError(
f"Expected 'dict' type for max_supply, got '{type(max_supply)}'"
)
for k, v in max_supply.items():
if not isinstance(k, Coverage):
raise TypeError(
f"Expected 'Coverage' type as key in max_supply, got '{type(k)}'"
)
if k.demand_col is None:
raise TypeError("Coverages used in BCLP must have 'demand_col'")
if not isinstance(v, int):
raise TypeError(
f"Expected 'int' type as value in max_supply, got '{type(v)}'"
)
if not all(x.demand_name == coverages[0].demand_name for x in coverages):
raise ValueError("All Coverages must have the same 'demand_name'")
prob = cls._generate_bclp_problem(coverages, max_supply)
return Problem(prob, coverages, problem_type="bclp")
@classmethod
def mclp(cls, coverages, max_supply):
"""
Creates a new :class:`~allagash.problem.Problem` object representing the Maximum Covering Location Problem
:param list[~allagash.coverage.Coverage] coverages: The coverages to be used to create the problem
:param dict[~allagash.coverage.Coverage,int] max_supply: The maximum number of supply locations to allow
:return: The created problem
:rtype: ~allagash.problem.Problem
"""
if not isinstance(coverages, (Coverage, list)):
raise TypeError(
f"Expected 'Coverage' or 'list' type for coverages, got '{type(coverages)}'"
)
if isinstance(coverages, Coverage):
coverages = [coverages]
if not all([c.coverage_type == coverages[0].coverage_type for c in coverages]):
raise ValueError(
"Invalid coverages. Coverages must have the same coverage type."
)
if coverages[0].coverage_type != "binary":
raise ValueError("MCLP can only be generated from binary coverage.")
if not isinstance(max_supply, dict):
raise TypeError(
f"Expected 'dict' type for max_supply, got '{type(max_supply)}'"
)
for k, v in max_supply.items():
if not isinstance(k, Coverage):
raise TypeError(
f"Expected 'Coverage' type as key in max_supply, got '{type(k)}'"
)
if k.demand_col is None:
raise TypeError("Coverages used in MCLP must have 'demand_col'")
if not isinstance(v, int):
raise TypeError(
f"Expected 'int' type as value in max_supply, got '{type(v)}'"
)
if not all(x.demand_name == coverages[0].demand_name for x in coverages):
raise ValueError("All Coverages must have the same 'demand_name'")
prob = cls._generate_mclp_problem(coverages, max_supply)
return Problem(prob, coverages, problem_type="mclp")
@staticmethod
def _generate_lscp_problem(coverages): # noqa: C901
demand_vars = {}
for c in coverages:
if c.demand_name not in demand_vars:
demand_vars[c.demand_name] = {}
for index, _ in c.df.iterrows():
name = f"{c.demand_name}{Problem._delineator}{index}"
demand_vars[c.demand_name][index] = pulp.LpVariable(
name, 0, 1, pulp.LpInteger
)
supply_vars = {}
for c in coverages:
if c.demand_col:
df = c.df.drop(columns=c.demand_col)
else:
df = c.df
if c.supply_name not in supply_vars:
supply_vars[c.supply_name] = {}
for s in df.columns.to_list():
name = f"{c.supply_name}{Problem._delineator}{s}"
supply_vars[c.supply_name][s] = pulp.LpVariable(
name, 0, 1, pulp.LpInteger
)
prob = pulp.LpProblem("LSCP", pulp.LpMinimize)
to_sum = []
for _, v in supply_vars.items():
to_sum.append(v)
prob += pulp.lpSum(to_sum)
sums = {}
for c in coverages:
if c.demand_name not in sums:
sums[c.demand_name] = {}
if c.demand_col:
df = c.df.drop(columns=c.demand_col)
else:
df = c.df
for index, demand in df.iterrows():
if index not in sums[c.demand_name]:
sums[c.demand_name][index] = []
cov = demand.T
for i, value in cov.iteritems():
if value is True:
sums[c.demand_name][index].append(supply_vars[c.supply_name][i])
for c in coverages:
for k, v in demand_vars[c.demand_name].items():
if not to_sum:
sums[c.demand_name][v] = [
pulp.LpVariable(
f"__dummy{Problem._delineator}{v}", 0, 0, pulp.LpInteger
)
]
for demand_name, v in sums.items():
for i, to_sum in v.items():
prob += pulp.lpSum(to_sum) >= 1, f"D{demand_name}{i}"
return prob
@staticmethod
def _generate_bclp_problem(coverages, max_supply): # noqa: C901
demand_vars = {}
for c in coverages:
if c.demand_name not in demand_vars:
demand_vars[c.demand_name] = {}
for index, _ in c.df.iterrows():
name = f"{c.demand_name}{Problem._delineator}{index}"
demand_vars[c.demand_name][index] = pulp.LpVariable(
name, 0, 1, pulp.LpInteger
)
supply_vars = {}
for c in coverages:
if c.demand_col:
df = c.df.drop(columns=c.demand_col)
else:
df = c.df
if c.supply_name not in supply_vars:
supply_vars[c.supply_name] = {}
for s in df.columns.to_list():
name = f"{c.supply_name}{Problem._delineator}{s}"
supply_vars[c.supply_name][s] = pulp.LpVariable(
name, 0, 1, pulp.LpInteger
)
# add objective
prob = pulp.LpProblem("BCLP", pulp.LpMaximize)
demands = {}
for c in coverages:
for _, demand_var in demand_vars[c.demand_name].items():
d = demand_var.name.split(Problem._delineator)[1]
if d not in demands:
if is_string_dtype(coverages[0].df.index.dtype):
query = f"{coverages[0].df.index.name} == '{d}'"
else:
query = f"{coverages[0].df.index.name} == {d}"
v = c.df.query(query)[c.demand_col].tolist()[0]
demands[d] = v * demand_var
to_sum = []
for k, v in demands.items():
to_sum.append(v)
prob += pulp.lpSum(to_sum)
# coverage constraints
sums = {}
for c in coverages:
if c.demand_name not in sums:
sums[c.demand_name] = {}
if c.demand_col:
df = c.df.drop(columns=c.demand_col)
else:
df = c.df
for index, demand in df.iterrows():
if index not in sums[c.demand_name]:
sums[c.demand_name][index] = [
-demand_vars[c.demand_name][index],
-1,
]
cov = demand.T
for i, value in cov.iteritems():
if value is True:
sums[c.demand_name][index].append(supply_vars[c.supply_name][i])
for k, v in sums.items():
for index, to_sum in v.items():
prob += pulp.lpSum(to_sum) >= 0, f"D{index}"
# Number of supply locations
for c in coverages:
to_sum = []
for k, v in supply_vars[c.supply_name].items():
to_sum.append(v)
prob += (
pulp.lpSum(to_sum) <= max_supply[c],
f"Num{Problem._delineator}{c.supply_name}",
)
return prob
@staticmethod
def _generate_mclp_problem(coverages, max_supply): # noqa: C901
demand_vars = {}
for c in coverages:
if c.demand_name not in demand_vars:
demand_vars[c.demand_name] = {}
for index, _ in c.df.iterrows():
name = f"{c.demand_name}{Problem._delineator}{index}"
demand_vars[c.demand_name][index] = pulp.LpVariable(
name, 0, 1, pulp.LpInteger
)
supply_vars = {}
for c in coverages:
if c.demand_col:
df = c.df.drop(columns=c.demand_col)
else:
df = c.df
if c.supply_name not in supply_vars:
supply_vars[c.supply_name] = {}
for s in df.columns.to_list():
name = f"{c.supply_name}{Problem._delineator}{s}"
supply_vars[c.supply_name][s] = pulp.LpVariable(
name, 0, 1, pulp.LpInteger
)
# add objective
prob = pulp.LpProblem("MCLP", pulp.LpMaximize)
demands = {}
for c in coverages:
for _, demand_var in demand_vars[c.demand_name].items():
d = demand_var.name.split(Problem._delineator)[1]
if d not in demands:
if is_string_dtype(coverages[0].df.index.dtype):
query = f"{coverages[0].df.index.name} == '{d}'"
else:
query = f"{coverages[0].df.index.name} == {d}"
v = c.df.query(query)[c.demand_col].tolist()[0]
demands[d] = v * demand_var
to_sum = []
for k, v in demands.items():
to_sum.append(v)
prob += pulp.lpSum(to_sum)
# coverage constraints
sums = {}
for c in coverages:
if c.demand_name not in sums:
sums[c.demand_name] = {}
if c.demand_col:
df = c.df.drop(columns=c.demand_col)
else:
df = c.df
for index, demand in df.iterrows():
if index not in sums[c.demand_name]:
sums[c.demand_name][index] = [-demand_vars[c.demand_name][index]]
cov = demand.T
for i, value in cov.iteritems():
if value is True:
sums[c.demand_name][index].append(supply_vars[c.supply_name][i])
for k, v in sums.items():
for index, to_sum in v.items():
prob += pulp.lpSum(to_sum) >= 0, f"D{index}"
# Number of supply locations
for c in coverages:
to_sum = []
for k, v in supply_vars[c.supply_name].items():
to_sum.append(v)
prob += (
pulp.lpSum(to_sum) <= max_supply[c],
f"Num{Problem._delineator}{c.supply_name}",
)
return prob
def selected_supply(self, coverage, operation=operator.eq, value=1):
"""
Gets the list of the supply locations that were selected when the optimization problem was solved.
:param ~allagash.coverage.Coverage coverage: The coverage that selected locations may be found in.
:param function operation: The operation to use when determining whether a location was selected
:param int value: The value to apply the operation to
:return: The list of location ids of the selected locations
:rtype: list
"""
if self._pulp_problem.status != 1:
raise RuntimeError("Problem not optimally solved yet")
from allagash.coverage import Coverage
if not isinstance(coverage, Coverage):
raise TypeError(
f"Expected 'Coverage' type for coverage, got '{type(coverage)}'"
)
if not callable(operation):
raise TypeError(f"Expected callable for operation, got '{type(operation)}'")
if not isinstance(value, (int, float)):
raise TypeError(f"Expected 'int' or 'float' for value, got '{type(value)}'")
ids = []
for var in self._pulp_problem.variables():
if var.name.split(self._delineator)[0] == coverage.supply_name:
if operation(var.varValue, value):
ids.append(var.name.split(self._delineator)[1])
return ids
def selected_demand(self, coverage):
"""
Gets the list of the demand locations that were selected when the optimization problem was solved.
:param ~allagash.coverage.Coverage coverage: The coverage that the demand locations may be found in. If multiple
coverages were used that have the same demand, locations covered by any other coverages will also
be returned.
:return: The list of location ids of the covered locations
:rtype: list
"""
if self._pulp_problem.status != 1:
raise RuntimeError("Problem not optimally solved yet")
from allagash.coverage import Coverage
if not isinstance(coverage, Coverage):
raise TypeError(
f"Expected 'Coverage' type for coverage, got '{type(coverage)}'"
)
if self.problem_type in ["lscp", "bclp"]:
for c in self.coverages:
if c.demand_name == coverage.demand_name:
return c.df.index.tolist()
else:
raise ValueError(
f"Unable to find demand named '{coverage.demand_name}'"
)
else:
ids = []
for var in self._pulp_problem.variables():
if var.name.split(self._delineator)[0] == coverage.demand_name:
if var.varValue >= 1:
ids.append(var.name.split(self._delineator)[1])
return ids
class NotSolvedException(Exception):
def __init__(self, message):
"""
An exception indicating the problem was not solved
:param str message: A descriptive message about the exception
"""
super().__init__(message)
class InfeasibleException(Exception):
def __init__(self, message):
"""
An exception indicating the problem has an infeasible solution
:param str message: A descriptive message about the exception
"""
super().__init__(message)
class UnboundedException(Exception):
def __init__(self, message):
"""
An exception indicating the solution is unbounded
:param str message: A descriptive message about the exception
"""
super().__init__(message)
class UndefinedException(Exception):
def __init__(self, message):
"""
An exception indicating the problem was not solved for an undefined reason
:param str message: A descriptive message about the exception
"""
super().__init__(message)
|
Allagash
|
/Allagash-0.4.3.tar.gz/Allagash-0.4.3/allagash/problem.py
|
problem.py
|
import random
import string
import pandas as pd
class Coverage:
def __init__(
self,
dataframe,
demand_col=None,
demand_name="demand",
supply_name=None,
coverage_type="binary",
):
"""
An object that stores the relationship between a set of demand locations and a set of supply locations.
Use this initializer if the coverage matrix has already been created, otherwise this can be created from two
geodataframes using the :meth:`~allagash.coverage.Coverage.from_geodataframes` or
:meth:`~allagash.coverage.Coverage.from_spatially_enabled_dataframes` factory methods.
.. code-block:: python
Coverage.from_geodataframes(df1, df2, "Demand_Id", "Supply_Id")
:param ~pandas.DataFrame dataframe: A dataframe containing a matrix of demand (rows) and supply (columns).
An additional column containing the demand values can optionally be provided.
:param str demand_col: (optional) The name of the column storing the demand value.
:param str demand_name: (optional) The name of the demand to use. If not supplied, 'demand' is used.
:param str supply_name: (optional) The name of the supply to use. If not supplied, a random name is generated.
:param str coverage_type: (optional) The type of coverage this represents. If not supplied, the default is
"binary". Options are "binary" and "partial".
"""
self._validate_init(
coverage_type, dataframe, demand_col, demand_name, supply_name
)
self._demand_col = demand_col
self._dataframe = dataframe
if not demand_name:
self._demand_name = "".join(random.choices(string.ascii_uppercase, k=6))
else:
self._demand_name = demand_name
if not supply_name:
self._supply_name = "".join(random.choices(string.ascii_uppercase, k=6))
else:
self._supply_name = supply_name
self._coverage_type = coverage_type.lower()
@staticmethod
def _validate_init(coverage_type, dataframe, demand_col, demand_name, supply_name):
if not isinstance(dataframe, pd.DataFrame):
raise TypeError(
f"Expected 'Dataframe' type for dataframe, got '{type(dataframe)}'"
)
if not isinstance(demand_col, str) and demand_col is not None:
raise TypeError(
f"Expected 'str' type for demand_col, got '{type(demand_col)}'"
)
if not isinstance(demand_name, str) and demand_name is not None:
raise TypeError(
f"Expected 'str' type for demand_name, got '{type(demand_name)}'"
)
if not isinstance(supply_name, str) and supply_name is not None:
raise TypeError(
f"Expected 'str' type for supply_name, got '{type(supply_name)}'"
)
if not isinstance(coverage_type, str):
raise TypeError(
f"Expected 'str' type for coverage_type, got '{type(coverage_type)}'"
)
if demand_col and demand_col not in dataframe.columns:
raise ValueError(f"'{demand_col}' not in dataframe")
if coverage_type.lower() not in ("binary", "partial"):
raise ValueError(f"Invalid coverage type '{coverage_type}'")
if coverage_type.lower() == "partial" and demand_col is None:
raise ValueError(
"'demand_col' is required when generating partial coverage"
)
@property
def df(self):
"""
:return: The geodataframe the dataset is based on
:rtype: ~geopandas.GeoDataFrame
"""
return self._dataframe
@property
def demand_name(self):
"""
:return: The name of the demand
:rtype: str
"""
return self._demand_name
@property
def supply_name(self):
"""
:return: The name of the supply
:rtype: str
"""
return self._supply_name
@property
def coverage_type(self):
"""
:return: The type of coverage
:rtype: str
"""
return self._coverage_type
@property
def demand_col(self):
"""
:return: The name of the demand column in the underlying dataframe
:rtype: str or None
"""
return self._demand_col
@classmethod
def from_geodataframes(
cls,
demand_df,
supply_df,
demand_id_col,
supply_id_col,
demand_name="demand",
supply_name=None,
demand_col=None,
coverage_type="binary",
):
"""
Creates a new Coverage from two GeoDataFrames representing the demand and supply locations. The coverage
is determined by intersecting the two dataframes.
:param ~geopandas.GeoDataFrame demand_df: The GeoDataFrame containing the demand locations
:param ~geopandas.GeoDataFrame supply_df: The GeoDataFrame containing the supply locations
:param str demand_id_col: The name of the column that has unique identifiers for the demand locations
:param str supply_id_col: The name of the column that has unique identifiers for the supply locations
:param str demand_name: (optional) The name of the demand to use. If not supplied, 'demand' is used.
:param str supply_name: (optional) The name of the supply to use. If not supplied, a random name is generated.
:param str demand_col: (optional) The name of the column that stores the amount of demand for the demand
locations. Required if generating partial coverage.
:param str coverage_type: (optional) The type of coverage this represents. If not supplied, the default is
"binary". Options are "binary" and "partial".
:return: The coverage
:rtype: ~allagash.coverage.Coverage
"""
cls._validate_from_geodataframes(
coverage_type,
demand_col,
demand_df,
demand_id_col,
demand_name,
supply_df,
supply_id_col,
)
data = []
if coverage_type.lower() == "binary":
for index, row in demand_df.iterrows():
contains = supply_df.geometry.contains(row.geometry).tolist()
if demand_col:
contains.insert(0, row[demand_col])
# Add the id column to the end, it will be used as index and removed later
contains.append(row[demand_id_col])
data.append(contains)
elif coverage_type.lower() == "partial":
for index, row in demand_df.iterrows():
demand_area = row.geometry.area
intersection_area = supply_df.geometry.intersection(
row.geometry
).geometry.area
partial_coverage = (
(intersection_area / demand_area) * row[demand_col]
).tolist()
if demand_col:
partial_coverage.insert(0, row[demand_col])
partial_coverage.insert(0, row[demand_id_col])
data.append(partial_coverage)
else:
raise ValueError(f"Invalid coverage type '{coverage_type}'")
columns = supply_df[supply_id_col].tolist()
if demand_col:
columns.insert(0, demand_col)
# id column will be used as index when dataframe is created
columns.append(demand_id_col)
# Set index after to avoid issue with multiindex being created
df = pd.DataFrame.from_records(data, columns=columns).set_index(demand_id_col)
return Coverage(
df,
demand_col=demand_col,
demand_name=demand_name,
supply_name=supply_name,
coverage_type=coverage_type,
)
@classmethod
def from_spatially_enabled_dataframes(
cls,
demand_df,
supply_df,
demand_id_col,
supply_id_col,
demand_name="demand",
supply_name=None,
demand_col=None,
coverage_type="binary",
demand_geometry_col="SHAPE",
supply_geometry_col="SHAPE",
):
"""
Creates a new Coverage from two spatially enabled (arcgis) dataframes representing the demand and supply locations.
The coverage is determined by intersecting the two dataframes.
:param ~pandas.DataFrame demand_df: The spatially enabled dataframe containing the demand locations
:param ~pandas.DataFrame supply_df: The spatially enabled dataframe containing the supply locations
:param str demand_id_col: The name of the column that has unique identifiers for the demand locations
:param str supply_id_col: The name of the column that has unique identifiers for the supply locations
:param str demand_name: (optional) The name of the demand to use. If not supplied, 'demand' is used.
:param str supply_name: (optional) The name of the supply to use. If not supplied, a random name is generated.
:param str demand_col: (optional) The name of the column that stores the amount of demand for the demand
locations. Required if generating partial coverage.
:param str coverage_type: (optional) The type of coverage this represents. If not supplied, the default is
"binary". Options are "binary" and "partial".
:param str demand_geometry_col: (optional) The name of the field storing the geometry in the demand dataframe.
If not supplied, the default is "SHAPE".
:param str supply_geometry_col: (optional) The name of the field storing the geometry in the supply dataframe.
If not supplied, the default is "SHAPE".
:return: The coverage
:rtype: ~allagash.coverage.Coverage
"""
cls._validate_from_spatially_enabled_dataframes(
coverage_type,
demand_col,
demand_df,
demand_id_col,
demand_name,
supply_df,
supply_id_col,
demand_geometry_col,
supply_geometry_col,
)
data = []
if coverage_type.lower() == "binary":
for index, row in demand_df.iterrows():
contains = (
supply_df[supply_geometry_col]
.geom.contains(row[demand_geometry_col])
.tolist()
)
if demand_col:
contains.insert(0, row[demand_col])
# Add the id column to the end, it will be used as index and removed later
contains.append(row[demand_id_col])
data.append(contains)
elif coverage_type.lower() == "partial":
for index, row in demand_df.iterrows():
partial_coverage = []
demand_area = row[demand_geometry_col].area
# Cannot vectorize this because, if the intersection returns an empty polygon with rings,
# the conversion to shapely fails when trying to get the area
for _, s_row in supply_df.iterrows():
intersection = s_row[supply_geometry_col].intersect(
row[demand_geometry_col]
)
area = intersection.area if not intersection.is_empty else 0
partial_coverage.append((area / demand_area) * row[demand_col])
if demand_col:
partial_coverage.insert(0, row[demand_col])
# Add the id column to the end, it will be used as index and removed later
partial_coverage.append(row[demand_id_col])
data.append(partial_coverage)
else:
raise ValueError(f"Invalid coverage type '{coverage_type}'")
columns = supply_df[supply_id_col].tolist()
if demand_col:
columns.insert(0, demand_col)
columns.append(demand_id_col)
df = pd.DataFrame.from_records(data, columns=columns).set_index(demand_id_col)
return Coverage(
df,
demand_col=demand_col,
demand_name=demand_name,
supply_name=supply_name,
coverage_type=coverage_type,
)
@classmethod
def _validate_from_geodataframes(
cls,
coverage_type,
demand_col,
demand_df,
demand_id_col,
demand_name,
supply_df,
supply_id_col,
):
if not isinstance(demand_df, pd.DataFrame):
raise TypeError(
f"Expected 'Dataframe' type for demand_df, got '{type(demand_df)}'"
)
if not isinstance(supply_df, pd.DataFrame):
raise TypeError(
f"Expected 'Dataframe' type for supply_df, got '{type(supply_df)}'"
)
if not isinstance(demand_id_col, str):
raise TypeError(
f"Expected 'str' type for demand_id_col, got '{type(demand_id_col)}'"
)
if not isinstance(supply_id_col, str):
raise TypeError(
f"Expected 'str' type for demand_id_col, got '{type(supply_id_col)}'"
)
if not isinstance(demand_name, str) and demand_name is not None:
raise TypeError(
f"Expected 'str' type for demand_name, got '{type(demand_name)}'"
)
if not isinstance(coverage_type, str):
raise TypeError(
f"Expected 'str' type for coverage_type, got '{type(coverage_type)}'"
)
if demand_col and demand_col not in demand_df.columns:
raise ValueError(f"'{demand_col}' not in dataframe")
if demand_id_col and demand_id_col not in demand_df.columns:
raise ValueError(f"'{demand_id_col}' not in dataframe")
if supply_id_col and supply_id_col not in supply_df.columns:
raise ValueError(f"'{supply_id_col}' not in dataframe")
if coverage_type.lower() not in ("binary", "partial"):
raise ValueError(f"Invalid coverage type '{coverage_type}'")
if coverage_type.lower() == "partial" and demand_col is None:
raise ValueError("demand_col is required when generating partial coverage")
@classmethod
def _validate_from_spatially_enabled_dataframes(
cls,
coverage_type,
demand_col,
demand_df,
demand_id_col,
demand_name,
supply_df,
supply_id_col,
demand_geometry_col,
supply_geometry_col,
):
if not isinstance(demand_df, pd.DataFrame):
raise TypeError(
f"Expected 'Dataframe' type for demand_df, got '{type(demand_df)}'"
)
if not isinstance(supply_df, pd.DataFrame):
raise TypeError(
f"Expected 'Dataframe' type for supply_df, got '{type(supply_df)}'"
)
if not isinstance(demand_id_col, str):
raise TypeError(
f"Expected 'str' type for demand_id_col, got '{type(demand_id_col)}'"
)
if not isinstance(supply_id_col, str):
raise TypeError(
f"Expected 'str' type for demand_id_col, got '{type(supply_id_col)}'"
)
if not isinstance(demand_name, str) and demand_name is not None:
raise TypeError(
f"Expected 'str' type for demand_name, got '{type(demand_name)}'"
)
if not isinstance(coverage_type, str):
raise TypeError(
f"Expected 'str' type for coverage_type, got '{type(coverage_type)}'"
)
if demand_col and demand_col not in demand_df.columns:
raise ValueError(f"'{demand_col}' not in dataframe")
if demand_id_col and demand_id_col not in demand_df.columns:
raise ValueError(f"'{demand_id_col}' not in dataframe")
if supply_id_col and supply_id_col not in supply_df.columns:
raise ValueError(f"'{supply_id_col}' not in dataframe")
if demand_geometry_col and demand_geometry_col not in demand_df.columns:
raise ValueError(f"'{demand_geometry_col}' not in dataframe")
if supply_geometry_col and supply_geometry_col not in supply_df.columns:
raise ValueError(f"'{supply_geometry_col}' not in dataframe")
if coverage_type.lower() not in ("binary", "partial"):
raise ValueError(f"Invalid coverage type '{coverage_type}'")
if coverage_type.lower() == "partial" and demand_col is None:
raise ValueError("demand_col is required when generating partial coverage")
|
Allagash
|
/Allagash-0.4.3.tar.gz/Allagash-0.4.3/allagash/coverage.py
|
coverage.py
|
AllanTools
==========
.. image:: https://badge.fury.io/py/AllanTools.svg
:target: https://badge.fury.io/py/AllanTools
.. image:: https://travis-ci.org/aewallin/allantools.svg?branch=master
:target: https://travis-ci.org/aewallin/allantools
.. image:: http://readthedocs.org/projects/allantools/badge/?version=latest
:target: http://allantools.readthedocs.io/en/latest/?badge=latest
:alt: Documentation Status
.. image:: https://coveralls.io/repos/github/aewallin/allantools/badge.svg?branch=master
:target: https://coveralls.io/github/aewallin/allantools?branch=master
.. image:: https://app.fossa.io/api/projects/git%2Bgithub.com%2Faewallin%2Fallantools.svg?type=shield
:target: https://app.fossa.io/projects/git%2Bgithub.com%2Faewallin%2Fallantools?ref=badge_shield
A python library for calculating Allan deviation and related
time & frequency statistics. `LGPL v3+ license <https://www.gnu.org/licenses/lgpl.html>`_.
* Development at https://github.com/aewallin/allantools
* Installation package at https://pypi.python.org/pypi/AllanTools
* Discussion group at https://groups.google.com/d/forum/allantools
* Documentation available at https://allantools.readthedocs.org
Input data should be evenly spaced observations of either fractional frequency,
or phase in seconds. Deviations are calculated for given tau values in seconds.
===================================== ====================================================
Function Description
===================================== ====================================================
``adev()`` Allan deviation
``oadev()`` Overlapping Allan deviation
``mdev()`` Modified Allan deviation
``tdev()`` Time deviation
``hdev()`` Hadamard deviation
``ohdev()`` Overlapping Hadamard deviation
``totdev()`` Total deviation
``mtotdev()`` Modified total deviation
``ttotdev()`` Time total deviation
``htotdev()`` Hadamard total deviation
``theo1()`` Theo1 deviation
``mtie()`` Maximum Time Interval Error
``tierms()`` Time Interval Error RMS
``gradev()`` Gap resistant overlapping Allan deviation
===================================== ====================================================
Noise generators for creating synthetic datasets are also included:
* violet noise with f^2 PSD
* white noise with f^0 PSD
* pink noise with f^-1 PSD
* Brownian or random walk noise with f^-2 PSD
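A short synthetic-data sketch using these generators (the function names follow the list above; exact keyword arguments may differ between versions)::
>>> import allantools
>>> wh = allantools.noise.white(10000)   # f^0 PSD
>>> pk = allantools.noise.pink(10000)    # f^-1 PSD
>>> rw = allantools.noise.brown(10000)   # f^-2 PSD, random walk
>>> (taus, devs, errs, ns) = allantools.oadev(wh)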
More details on available statistics and noise generators: `full list of available functions <functions.html>`_
see /tests for tests that compare allantools output to other
(e.g. Stable32) programs. More test data, benchmarks, ipython notebooks,
and comparisons to known-good algorithms are welcome!
Installation
------------
Install from pypi::
pip install allantools
Latest version + examples, tests, test data, IPython notebooks: clone from GitHub, then install::
python setup.py install
(see `python setup.py --help install` for install options)
These commands should be run as root for system-wide installation, or
you can use the `--user` option to install for your account only.
Exact command names may vary depending on your OS / package manager / target python version.
Basic usage
-----------
Minimal example, phase data
~~~~~~~~~~~~~~~~~~~~~~~~~~~
We can call allantools with only one parameter - an array of phase data.
This is suitable for time-interval measurements at 1 Hz, for example
from a time-interval-counter measuring the 1PPS output of two clocks.
::
>>> import allantools
>>> x = allantools.noise.white(10000) # Generate some phase data, in seconds.
>>> (taus, adevs, errors, ns) = allantools.oadev(x)
when only one input parameter is given, phase data in seconds is assumed
when no rate parameter is given, rate=1.0 is the default
when no taus parameter is given, taus='octave' is the default
Frequency data example
~~~~~~~~~~~~~~~~~~~~~~
Note that allantools assumes non-dimensional frequency data input.
Normalization, by e.g. dividing all data points with the average
frequency, is left to the user.
::
>>> import allantools
>>> import pylab as plt
>>> import numpy as np
>>> t = np.logspace(0, 3, 50) # tau values from 1 to 1000
>>> y = allantools.noise.white(10000) # Generate some frequency data
>>> r = 12.3 # sample rate in Hz of the input data
>>> (t2, ad, ade, adn) = allantools.oadev(y, rate=r, data_type="freq", taus=t) # Compute the overlapping ADEV
>>> fig = plt.loglog(t2, ad) # Plot the results
>>> # plt.show()
*New in 2016.11* : simple top-level `API <api.html>`_, using dedicated classes for data handling and plotting.
::
import allantools # https://github.com/aewallin/allantools/
import numpy as np
# Compute a deviation using the Dataset class
a = allantools.Dataset(data=np.random.rand(1000))
a.compute("mdev")
# New in 2019.7 : write results to file
a.write_result("output.dat")
# Plot it using the Plot class
b = allantools.Plot()
# New in 2019.7 : additional keyword arguments are passed to
# matplotlib.pyplot.plot()
b.plot(a, errorbars=True, grid=True)
# You can override defaults before "show" if needed
b.ax.set_xlabel("Tau (s)")
b.show()
Jupyter notebooks with examples
-------------------------------
Jupyter notebooks are interactive python scripts, embedded in a browser,
allowing you to manipulate data and display plots easily. For guidance
on installing jupyter, please refer to https://jupyter.org/install.
See /examples for some examples in notebook format.
GitHub renders the notebooks as nice web pages, for example:
* https://github.com/aewallin/allantools/blob/master/examples/noise-color-demo.ipynb
* https://github.com/aewallin/allantools/blob/master/examples/three-cornered-hat-demo.ipynb
Authors
-------
* Anders E.E. Wallin, anders.e.e.wallin "at" gmail.com , https://github.com/aewallin
* Danny Price, https://github.com/telegraphic
* Cantwell G. Carson, carsonc "at" gmail.com
* Frédéric Meynadier, https://github.com/fmeynadier
* Yan Xie, https://github.com/yxie-git
* Erik Benkler, https://github.com/EBenkler
|
AllanTools
|
/AllanTools-2019.9.tar.gz/AllanTools-2019.9/README.rst
|
README.rst
|
import os
import json
import numpy as np
#import scipy.stats # used in confidence_intervals()
#import scipy.signal # decimation in lag-1 acf
from . import ci # edf, confidence intervals
# Get version number from json metadata
pkginfo_path = os.path.join(os.path.dirname(__file__),
'allantools_info.json')
with open(pkginfo_path) as fp:
pkginfo = json.load(fp)
__version__ = pkginfo["version"]
def tdev(data, rate=1.0, data_type="phase", taus=None):
""" Time deviation.
Based on modified Allan variance.
.. math::
\\sigma^2_{TDEV}( \\tau ) = { \\tau^2 \\over 3 }
\\sigma^2_{MDEV}( \\tau )
Note that TDEV has a unit of seconds.
NIST [SP1065]_ eqn (15), page 18.
Parameters
----------
data: np.array
Input data. Provide either phase or frequency (fractional,
adimensional).
rate: float
The sampling rate for data, in Hz. Defaults to 1.0
data_type: {'phase', 'freq'}
Data type, i.e. phase or frequency. Defaults to "phase".
taus: np.array
Array of tau values, in seconds, for which to compute statistic.
Optionally set taus=["all"|"octave"|"decade"] for automatic
tau-list generation.
Returns
-------
(taus, tdev, tdev_error, ns): tuple
Tuple of values
taus: np.array
Tau values for which td computed
tdev: np.array
Computed time deviations (in seconds) for each tau value
tdev_errors: np.array
Time deviation errors
ns: np.array
Values of N used in mdev_phase()
Notes
-----
http://en.wikipedia.org/wiki/Time_deviation
"""
phase = input_to_phase(data, rate, data_type)
(taus, md, mde, ns) = mdev(phase, rate=rate, taus=taus)
td = taus * md / np.sqrt(3.0)
tde = td / np.sqrt(ns)
return taus, td, tde, ns
def mdev(data, rate=1.0, data_type="phase", taus=None):
""" Modified Allan deviation.
Used to distinguish between White and Flicker Phase Modulation.
.. math::
\\sigma^2_{MDEV}(m\\tau_0) = { 1 \\over 2 (m \\tau_0 )^2 (N-3m+1) }
\\sum_{j=1}^{N-3m+1} \\lbrace
\\sum_{i=j}^{j+m-1} {x}_{i+2m} - 2x_{i+m} + x_{i} \\rbrace^2
see http://www.leapsecond.com/tools/adev_lib.c
NIST [SP1065]_ eqn (14), page 17.
Parameters
----------
data: np.array
Input data. Provide either phase or frequency (fractional,
adimensional).
rate: float
The sampling rate for data, in Hz. Defaults to 1.0
data_type: {'phase', 'freq'}
Data type, i.e. phase or frequency. Defaults to "phase".
taus: np.array
Array of tau values, in seconds, for which to compute statistic.
Optionally set taus=["all"|"octave"|"decade"] for automatic
tau-list generation.
Returns
-------
(taus2, md, mde, ns): tuple
Tuple of values
taus2: np.array
Tau values for which td computed
md: np.array
Computed mdev for each tau value
mde: np.array
mdev errors
ns: np.array
Values of N used in each mdev calculation
"""
phase = input_to_phase(data, rate, data_type)
(phase, ms, taus_used) = tau_generator(phase, rate, taus=taus)
data, taus = np.array(phase), np.array(taus)
md = np.zeros_like(ms)
mderr = np.zeros_like(ms)
ns = np.zeros_like(ms)
# this is a 'loop-unrolled' algorithm following
# http://www.leapsecond.com/tools/adev_lib.c
for idx, m in enumerate(ms):
m = int(m) # without this we get: VisibleDeprecationWarning:
# using a non-integer number instead of an integer
# will result in an error in the future
tau = taus_used[idx]
# First loop sum
d0 = phase[0:m]
d1 = phase[m:2*m]
d2 = phase[2*m:3*m]
e = min(len(d0), len(d1), len(d2))
v = np.sum(d2[:e] - 2* d1[:e] + d0[:e])
s = v * v
# Second part of sum
d3 = phase[3*m:]
d2 = phase[2*m:]
d1 = phase[1*m:]
d0 = phase[0:]
e = min(len(d0), len(d1), len(d2), len(d3))
n = e + 1
v_arr = v + np.cumsum(d3[:e] - 3 * d2[:e] + 3 * d1[:e] - d0[:e])
s = s + np.sum(v_arr * v_arr)
s /= 2.0 * m * m * tau * tau * n
s = np.sqrt(s)
md[idx] = s
mderr[idx] = (s / np.sqrt(n))
ns[idx] = n
return remove_small_ns(taus_used, md, mderr, ns)
def adev(data, rate=1.0, data_type="phase", taus=None):
""" Allan deviation.
Classic - use only if required - relatively poor confidence.
.. math::
\\sigma^2_{ADEV}(\\tau) = { 1 \\over 2 \\tau^2 }
\\langle ( {x}_{n+2} - 2x_{n+1} + x_{n} )^2 \\rangle
= { 1 \\over 2 (N-2) \\tau^2 }
\\sum_{n=1}^{N-2} ( {x}_{n+2} - 2x_{n+1} + x_{n} )^2
where :math:`x_n` is the time-series of phase observations, spaced
by the measurement interval :math:`\\tau`, and with length :math:`N`.
Or alternatively calculated from a time-series of fractional frequency:
.. math::
\\sigma^{2}_{ADEV}(\\tau) = { 1 \\over 2 }
\\langle ( \\bar{y}_{n+1} - \\bar{y}_n )^2 \\rangle
where :math:`\\bar{y}_n` is the time-series of fractional frequency
at averaging time :math:`\\tau`
NIST [SP1065]_ eqn (6) and (7), pages 14 and 15.
Parameters
----------
data: np.array
Input data. Provide either phase or frequency (fractional,
adimensional).
rate: float
The sampling rate for data, in Hz. Defaults to 1.0
data_type: {'phase', 'freq'}
Data type, i.e. phase or frequency. Defaults to "phase".
taus: np.array
Array of tau values, in seconds, for which to compute statistic.
Optionally set taus=["all"|"octave"|"decade"] for automatic
tau-list generation.
Returns
-------
(taus2, ad, ade, ns): tuple
Tuple of values
taus2: np.array
Tau values for which td computed
ad: np.array
Computed adev for each tau value
ade: np.array
adev errors
ns: np.array
Values of N used in each adev calculation
"""
phase = input_to_phase(data, rate, data_type)
(phase, m, taus_used) = tau_generator(phase, rate, taus)
ad = np.zeros_like(taus_used)
ade = np.zeros_like(taus_used)
adn = np.zeros_like(taus_used)
for idx, mj in enumerate(m): # loop through each tau value m(j)
(ad[idx], ade[idx], adn[idx]) = calc_adev_phase(phase, rate, mj, mj)
return remove_small_ns(taus_used, ad, ade, adn)
def calc_adev_phase(phase, rate, mj, stride):
""" Main algorithm for adev() (stride=mj) and oadev() (stride=1)
see http://www.leapsecond.com/tools/adev_lib.c
stride = mj for nonoverlapping allan deviation
Parameters
----------
phase: np.array
Phase data in seconds.
rate: float
The sampling rate for phase or frequency, in Hz
mj: int
M index value for stride
stride: int
Size of stride
Returns
-------
(dev, deverr, n): tuple
Array of computed values.
Notes
-----
stride = mj for nonoverlapping Allan deviation
stride = 1 for overlapping Allan deviation
References
----------
* http://en.wikipedia.org/wiki/Allan_variance
* http://www.leapsecond.com/tools/adev_lib.c
NIST [SP1065]_ eqn (7) and (11) page 16
"""
mj = int(mj)
stride = int(stride)
d2 = phase[2 * mj::stride]
d1 = phase[1 * mj::stride]
d0 = phase[::stride]
n = min(len(d0), len(d1), len(d2))
if n == 0:
RuntimeWarning("Data array length is too small: %i" % len(phase))
n = 1
v_arr = d2[:n] - 2 * d1[:n] + d0[:n]
s = np.sum(v_arr * v_arr)
dev = np.sqrt(s / (2.0 * n)) / mj * rate
deverr = dev / np.sqrt(n)
return dev, deverr, n
def oadev(data, rate=1.0, data_type="phase", taus=None):
""" overlapping Allan deviation.
General purpose - most widely used - first choice
.. math::
\\sigma^2_{OADEV}(m\\tau_0) = { 1 \\over 2 (m \\tau_0 )^2 (N-2m) }
\\sum_{n=1}^{N-2m} ( {x}_{n+2m} - 2x_{n+m} + x_{n} )^2
where :math:`\\sigma^2_x(m\\tau_0)` is the overlapping Allan
deviation at an averaging time of :math:`\\tau=m\\tau_0`, and
:math:`x_n` is the time-series of phase observations, spaced by the
measurement interval :math:`\\tau_0`, with length :math:`N`.
NIST [SP1065]_ eqn (11), page 16.
Parameters
----------
data: np.array
Input data. Provide either phase or frequency (fractional,
adimensional).
rate: float
The sampling rate for data, in Hz. Defaults to 1.0
data_type: {'phase', 'freq'}
Data type, i.e. phase or frequency. Defaults to "phase".
taus: np.array
Array of tau values, in seconds, for which to compute statistic.
Optionally set taus=["all"|"octave"|"decade"] for automatic
tau-list generation.
Returns
-------
(taus2, ad, ade, ns): tuple
Tuple of values
taus2: np.array
        Tau values for which the deviation was computed
ad: np.array
Computed oadev for each tau value
ade: np.array
oadev errors
ns: np.array
Values of N used in each oadev calculation
"""
phase = input_to_phase(data, rate, data_type)
(phase, m, taus_used) = tau_generator(phase, rate, taus)
ad = np.zeros_like(taus_used)
ade = np.zeros_like(taus_used)
adn = np.zeros_like(taus_used)
for idx, mj in enumerate(m): # stride=1 for overlapping ADEV
(ad[idx], ade[idx], adn[idx]) = calc_adev_phase(phase, rate, mj, 1)
return remove_small_ns(taus_used, ad, ade, adn)
def ohdev(data, rate=1.0, data_type="phase", taus=None):
""" Overlapping Hadamard deviation.
Better confidence than normal Hadamard.
.. math::
\\sigma^2_{OHDEV}(m\\tau_0) = { 1 \\over 6 (m \\tau_0 )^2 (N-3m) }
\\sum_{i=1}^{N-3m} ( {x}_{i+3m} - 3x_{i+2m} + 3x_{i+m} - x_{i} )^2
where :math:`x_i` is the time-series of phase observations, spaced
by the measurement interval :math:`\\tau_0`, and with length :math:`N`.
Parameters
----------
data: np.array
Input data. Provide either phase or frequency (fractional,
adimensional).
rate: float
The sampling rate for data, in Hz. Defaults to 1.0
data_type: {'phase', 'freq'}
Data type, i.e. phase or frequency. Defaults to "phase".
taus: np.array
Array of tau values, in seconds, for which to compute statistic.
Optionally set taus=["all"|"octave"|"decade"] for automatic
tau-list generation.
Returns
-------
(taus2, hd, hde, ns): tuple
Tuple of values
taus2: np.array
        Tau values for which the deviation was computed
hd: np.array
Computed hdev for each tau value
hde: np.array
hdev errors
ns: np.array
Values of N used in each hdev calculation
"""
phase = input_to_phase(data, rate, data_type)
(phase, m, taus_used) = tau_generator(phase, rate, taus)
hdevs = np.zeros_like(taus_used)
hdeverrs = np.zeros_like(taus_used)
ns = np.zeros_like(taus_used)
for idx, mj in enumerate(m):
(hdevs[idx],
hdeverrs[idx],
ns[idx]) = calc_hdev_phase(phase, rate, mj, 1)
return remove_small_ns(taus_used, hdevs, hdeverrs, ns)
def hdev(data, rate=1.0, data_type="phase", taus=None):
""" Hadamard deviation.
Rejects frequency drift, and handles divergent noise.
.. math::
\\sigma^2_{HDEV}( \\tau ) = { 1 \\over 6 \\tau^2 (N-3) }
\\sum_{i=1}^{N-3} ( {x}_{i+3} - 3x_{i+2} + 3x_{i+1} - x_{i} )^2
where :math:`x_i` is the time-series of phase observations, spaced
by the measurement interval :math:`\\tau`, and with length :math:`N`.
NIST [SP1065]_ eqn (17) and (18), page 20
Parameters
----------
data: np.array
Input data. Provide either phase or frequency (fractional,
adimensional).
rate: float
The sampling rate for data, in Hz. Defaults to 1.0
data_type: {'phase', 'freq'}
Data type, i.e. phase or frequency. Defaults to "phase".
taus: np.array
Array of tau values, in seconds, for which to compute statistic.
Optionally set taus=["all"|"octave"|"decade"] for automatic
tau-list generation.
"""
phase = input_to_phase(data, rate, data_type)
(phase, m, taus_used) = tau_generator(phase, rate, taus)
hdevs = np.zeros_like(taus_used)
hdeverrs = np.zeros_like(taus_used)
ns = np.zeros_like(taus_used)
for idx, mj in enumerate(m):
(hdevs[idx],
hdeverrs[idx],
ns[idx]) = calc_hdev_phase(phase, rate, mj, mj) # stride = mj
return remove_small_ns(taus_used, hdevs, hdeverrs, ns)
def calc_hdev_phase(phase, rate, mj, stride):
""" main calculation fungtion for HDEV and OHDEV
Parameters
----------
phase: np.array
Phase data in seconds.
rate: float
The sampling rate for phase or frequency, in Hz
mj: int
M index value for stride
stride: int
Size of stride
Returns
-------
(dev, deverr, n): tuple
Array of computed values.
Notes
-----
http://www.leapsecond.com/tools/adev_lib.c
        s2y(tau) = 1 / ( 6 * tau^2 * (N-3m) )
                   * sum_{i=1}^{N-3m} [ x(i+3m) - 3*x(i+2m) + 3*x(i+m) - x(i) ]^2
        where tau = m*tau0
N=M+1 phase measurements
m is averaging factor
NIST [SP1065]_ eqn (18) and (20) pages 20 and 21
"""
tau0 = 1.0 / float(rate)
mj = int(mj)
stride = int(stride)
d3 = phase[3 * mj::stride]
d2 = phase[2 * mj::stride]
d1 = phase[1 * mj::stride]
d0 = phase[::stride]
n = min(len(d0), len(d1), len(d2), len(d3))
v_arr = d3[:n] - 3 * d2[:n] + 3 * d1[:n] - d0[:n]
s = np.sum(v_arr * v_arr)
if n == 0:
n = 1
h = np.sqrt(s / 6.0 / float(n)) / float(tau0 * mj)
e = h / np.sqrt(n)
return h, e, n
def totdev(data, rate=1.0, data_type="phase", taus=None):
""" Total deviation.
Better confidence at long averages for Allan deviation.
.. math::
\\sigma^2_{TOTDEV}( m\\tau_0 ) = { 1 \\over 2 (m\\tau_0)^2 (N-2) }
\\sum_{i=2}^{N-1} ( {x}^*_{i-m} - 2x^*_{i} + x^*_{i+m} )^2
Where :math:`x^*_i` is a new time-series of length :math:`3N-4`
derived from the original phase time-series :math:`x_n` of
length :math:`N` by reflection at both ends.
    The original data x is placed in the center of the extended series x*,
    with reflection at both ends:

        x*(1-j) = 2x(1) - x(1+j)   for j=1..N-2
        x*(i)   = x(i)             for i=1..N
        x*(N+j) = 2x(N) - x(N-j)   for j=1..N-2

    so that x* has length 3N-4, and the deviation is evaluated at
    tau = m*tau0.
NIST [SP1065]_ eqn (25) page 23
FIXME: bias correction http://www.wriley.com/CI2.pdf page 5
Parameters
----------
    data: np.array
        Input data. Provide either phase or frequency (fractional,
        adimensional).
    rate: float
        The sampling rate for data, in Hz. Defaults to 1.0
    data_type: {'phase', 'freq'}
        Data type, i.e. phase or frequency. Defaults to "phase".
    taus: np.array
        Array of tau values, in seconds, for which to compute statistic.
        Optionally set taus=["all"|"octave"|"decade"] for automatic
        tau-list generation.
"""
phase = input_to_phase(data, rate, data_type)
(phase, m, taus_used) = tau_generator(phase, rate, taus)
N = len(phase)
# totdev requires a new dataset
# Begin by adding reflected data before dataset
x1 = 2.0 * phase[0] * np.ones((N - 2,))
x1 = x1 - phase[1:-1]
x1 = x1[::-1]
# Reflected data at end of dataset
x2 = 2.0 * phase[-1] * np.ones((N - 2,))
x2 = x2 - phase[1:-1][::-1]
# check length of new dataset
assert len(x1)+len(phase)+len(x2) == 3*N - 4
# Combine into a single array
x = np.zeros((3*N - 4))
x[0:N-2] = x1
x[N-2:2*(N-2)+2] = phase # original data in the middle
x[2*(N-2)+2:] = x2
devs = np.zeros_like(taus_used)
deverrs = np.zeros_like(taus_used)
ns = np.zeros_like(taus_used)
mid = len(x1)
for idx, mj in enumerate(m):
mj = int(mj)
d0 = x[mid + 1:]
d1 = x[mid + mj + 1:]
d1n = x[mid - mj + 1:]
e = min(len(d0), len(d1), len(d1n))
v_arr = d1n[:e] - 2.0 * d0[:e] + d1[:e]
dev = np.sum(v_arr[:mid] * v_arr[:mid])
dev /= float(2 * pow(mj / rate, 2) * (N - 2))
dev = np.sqrt(dev)
devs[idx] = dev
deverrs[idx] = dev / np.sqrt(mid)
ns[idx] = mid
return remove_small_ns(taus_used, devs, deverrs, ns)
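# Added illustrative sketch (not part of the original source): a small worked
# example of the end-reflection used by totdev() above, with made-up values.
def _totdev_reflection_sketch():
    """ Example only: build the 3N-4 point reflected series for N=4. """
    import numpy as np
    phase = np.array([1.0, 2.0, 3.0, 4.0])                  # x1..x4, N=4
    start = (2.0*phase[0]*np.ones(2) - phase[1:-1])[::-1]   # [2*x1-x3, 2*x1-x2]
    end = 2.0*phase[-1]*np.ones(2) - phase[1:-1][::-1]      # [2*x4-x3, 2*x4-x2]
    xstar = np.concatenate((start, phase, end))
    assert len(xstar) == 3*len(phase) - 4                   # 3N-4 = 8 points
    return xstar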
def ttotdev(data, rate=1.0, data_type="phase", taus=None):
""" Time Total Deviation
Modified total variance scaled by tau^2 / 3
NIST [SP1065]_ eqn (28) page 26. Note that [SP1065]_ erroneously has tau-cubed here (!).
"""
(taus, mtotdevs, mde, ns) = mtotdev(data, data_type=data_type,
rate=rate, taus=taus)
td = taus*mtotdevs / np.sqrt(3.0)
tde = td / np.sqrt(ns)
return taus, td, tde, ns
def mtotdev(data, rate=1.0, data_type="phase", taus=None):
""" PRELIMINARY - REQUIRES FURTHER TESTING.
Modified Total deviation.
    Better confidence at long averages for modified Allan deviation.
    FIXME: bias-correction http://www.wriley.com/CI2.pdf page 6
    The variance is scaled up (divided by this number) based on the
    noise type identified:

        WPM   0.94
        FPM   0.83
        WFM   0.73
        FFM   0.70
        RWFM  0.69
Parameters
----------
data: np.array
Input data. Provide either phase or frequency (fractional,
adimensional).
rate: float
The sampling rate for data, in Hz. Defaults to 1.0
data_type: {'phase', 'freq'}
Data type, i.e. phase or frequency. Defaults to "phase".
taus: np.array
Array of tau values, in seconds, for which to compute statistic.
Optionally set taus=["all"|"octave"|"decade"] for automatic
tau-list generation.
NIST [SP1065]_ eqn (27) page 25
"""
phase = input_to_phase(data, rate, data_type)
(phase, ms, taus_used) = tau_generator(phase, rate, taus,
maximum_m=float(len(phase))/3.0)
devs = np.zeros_like(taus_used)
deverrs = np.zeros_like(taus_used)
ns = np.zeros_like(taus_used)
for idx, mj in enumerate(ms):
devs[idx], deverrs[idx], ns[idx] = calc_mtotdev_phase(phase, rate, mj)
return remove_small_ns(taus_used, devs, deverrs, ns)
def calc_mtotdev_phase(phase, rate, m):
""" PRELIMINARY - REQUIRES FURTHER TESTING.
calculation of mtotdev for one averaging factor m
tau = m*tau0
NIST [SP1065]_ Eqn (27), page 25.
Computed from a set of N - 3m + 1 subsequences of 3m points.
1. A linear trend (frequency offset) is removed from the subsequence
by averaging the first and last halves of the subsequence and
dividing by half the interval.
2. The offset-removed subsequence is extended at both ends
by uninverted, even reflection.
[Howe1999]_
D.A. Howe and F. Vernotte, "Generalization of the Total Variance
Approach to the Modified Allan Variance," Proc.
31 st PTTI Meeting, pp. 267-276, Dec. 1999.
"""
tau0 = 1.0/rate
N = len(phase) # phase data, N points
m = int(m)
n = 0 # number of terms in the sum, for error estimation
dev = 0.0 # the deviation we are computing
err = 0.0 # the error in the deviation
#print('calc_mtotdev N=%d m=%d' % (N,m) )
for i in range(0, N-3*m+1):
# subsequence of length 3m, from the original phase data
xs = phase[i:i+3*m]
assert len(xs) == 3*m
# Step 1.
# remove linear trend. by averaging first/last half,
# computing slope, and subtracting
half1_idx = int(np.floor(3*m/2.0))
half2_idx = int(np.ceil(3*m/2.0))
# m
# 1 0:1 2:2
mean1 = np.mean(xs[:half1_idx])
mean2 = np.mean(xs[half2_idx:])
if int(3*m)%2 == 1: # m is odd
# 3m = 2k+1 is odd, with the averages at both ends over k points
# the distance between the averages is then k+1 = (3m-1)/2 +1
slope = (mean2-mean1) / ((0.5*(3*m-1)+1)*tau0)
else: # m is even
            # 3m = 2k is even, so distance between averages is k = 3m/2
slope = (mean2-mean1) / (0.5*3*m*tau0)
# remove the linear trend
x0 = [x - slope*idx*tau0 for (idx, x) in enumerate(xs)]
x0_flip = x0[::-1] # left-right flipped version of array
# Step 2.
# extend sequence, by uninverted even reflection
# extended sequence xstar, of length 9m,
xstar = np.concatenate((x0_flip, x0, x0_flip))
assert len(xstar) == 9*m
# now compute mdev on these 9m points
# 6m unique groups of m-point averages,
# use all possible overlapping second differences
# one term in the 6m sum: [ x_i - 2 x_i+m + x_i+2m ]^2
squaresum = 0.0
#print('m=%d 9m=%d maxj+3*m=%d' %( m, len(xstar), 6*int(m)+3*int(m)) )
# below we want the following sums (averages, see squaresum where we divide by m)
# xmean1=np.sum(xstar[j : j+m])
# xmean2=np.sum(xstar[j+m : j+2*m])
# xmean3=np.sum(xstar[j+2*m : j+3*m])
# for speed these are not computed with np.sum or np.mean in each loop
# instead they are initialized at m=0, and then just updated
for j in range(0, 6*m): # summation of the 6m terms.
# faster inner sum, based on Stable32 MTC.c code
if j == 0:
                # initialize the sum
xmean1 = np.sum(xstar[0:m])
xmean2 = np.sum(xstar[m:2*m])
xmean3 = np.sum(xstar[2*m:3*m])
else:
# j>=1, subtract old point, add new point
xmean1 = xmean1 - xstar[j-1] + xstar[j+m-1] #
xmean2 = xmean2 - xstar[m+j-1] + xstar[j+2*m-1] #
xmean3 = xmean3 - xstar[2*m+j-1] + xstar[j+3*m-1] #
squaresum += pow((xmean1 - 2.0*xmean2 + xmean3)/float(m), 2)
squaresum = (1.0/(6.0*m)) * squaresum
dev += squaresum
n = n+1
# scaling in front of double-sum
assert n == N-3*m+1 # sanity check on the number of terms n
dev = dev * 1.0/ (2.0*pow(m*tau0, 2)*(N-3*m+1))
dev = np.sqrt(dev)
error = dev / np.sqrt(n)
return (dev, error, n)
def htotdev(data, rate=1.0, data_type="phase", taus=None):
""" PRELIMINARY - REQUIRES FURTHER TESTING.
Hadamard Total deviation.
Better confidence at long averages for Hadamard deviation
Computed for N fractional frequency points y_i with sampling
period tau0, analyzed at tau = m*tau0
1. remove linear trend by averaging first and last half and divide by interval
2. extend sequence by uninverted even reflection
3. compute Hadamard for extended, length 9m, sequence.
    FIXME: bias corrections from http://www.wriley.com/CI2.pdf

        W FM    0.995   alpha =  0
        F FM    0.851   alpha = -1
        RW FM   0.771   alpha = -2
        FW FM   0.717   alpha = -3
        RR FM   0.679   alpha = -4
Parameters
----------
data: np.array
Input data. Provide either phase or frequency (fractional,
adimensional).
rate: float
The sampling rate for data, in Hz. Defaults to 1.0
data_type: {'phase', 'freq'}
Data type, i.e. phase or frequency. Defaults to "phase".
taus: np.array
Array of tau values, in seconds, for which to compute statistic.
Optionally set taus=["all"|"octave"|"decade"] for automatic
tau-list generation.
"""
if data_type == "phase":
phase = data
freq = phase2frequency(phase, rate)
elif data_type == "freq":
phase = frequency2phase(data, rate)
freq = data
else:
raise Exception("unknown data_type: " + data_type)
rate = float(rate)
(freq, ms, taus_used) = tau_generator(freq, rate, taus,
maximum_m=float(len(freq))/3.0)
phase = np.array(phase)
freq = np.array(freq)
devs = np.zeros_like(taus_used)
deverrs = np.zeros_like(taus_used)
ns = np.zeros_like(taus_used)
# NOTE at mj==1 we use ohdev(), based on comment from here:
# http://www.wriley.com/paper4ht.htm
# "For best consistency, the overlapping Hadamard variance is used
# instead of the Hadamard total variance at m=1"
    # FIXME: this uses both freq and phase datasets, roughly doubling the memory actually needed
for idx, mj in enumerate(ms):
if int(mj) == 1:
(devs[idx],
deverrs[idx],
ns[idx]) = calc_hdev_phase(phase, rate, mj, 1)
else:
(devs[idx],
deverrs[idx],
ns[idx]) = calc_htotdev_freq(freq, mj)
return remove_small_ns(taus_used, devs, deverrs, ns)
def calc_htotdev_freq(freq, m):
""" PRELIMINARY - REQUIRES FURTHER TESTING.
calculation of htotdev for one averaging factor m
tau = m*tau0
Parameters
----------
frequency: np.array
Fractional frequency data (nondimensional).
m: int
Averaging factor. tau = m*tau0, where tau0=1/rate.
"""
N = int(len(freq)) # frequency data, N points
m = int(m)
n = 0 # number of terms in the sum, for error estimation
dev = 0.0 # the deviation we are computing
for i in range(0, N-3*m+1):
# subsequence of length 3m, from the original phase data
xs = freq[i:i+3*m]
assert len(xs) == 3*m
# remove linear trend. by averaging first/last half,
# computing slope, and subtracting
half1_idx = int(np.floor(3*m/2.0))
half2_idx = int(np.ceil(3*m/2.0))
# m
# 1 0:1 2:2
mean1 = np.mean(xs[:half1_idx])
mean2 = np.mean(xs[half2_idx:])
if int(3*m)%2 == 1: # m is odd
# 3m = 2k+1 is odd, with the averages at both ends over k points
# the distance between the averages is then k+1 = (3m-1)/2 +1
slope = (mean2-mean1) / ((0.5*(3*m-1)+1))
else: # m is even
# 3m = 2k is even, so distance between averages is k=3m/2
slope = (mean2-mean1) / (0.5*3*m)
# remove the linear trend
x0 = [x - slope*(idx-np.floor(3*m/2)) for (idx, x) in enumerate(xs)]
x0_flip = x0[::-1] # left-right flipped version of array
# extended sequence, to length 9m, by uninverted even reflection
xstar = np.concatenate((x0_flip, x0, x0_flip))
assert len(xstar) == 9*m
# now compute totdev on these 9m points
# 6m unique groups of m-point averages,
# all possible overlapping second differences
# one term in the 6m sum: [ x_i - 2 x_i+m + x_i+2m ]^2
squaresum = 0.0
k = 0
for j in range(0, 6*int(m)): # summation of the 6m terms.
# old naive code
# xmean1 = np.mean(xstar[j+0*m : j+1*m])
# xmean2 = np.mean(xstar[j+1*m : j+2*m])
# xmean3 = np.mean(xstar[j+2*m : j+3*m])
# squaresum += pow(xmean1 - 2.0*xmean2 + xmean3, 2)
# new faster way of doing the sums
if j == 0:
                # initialize the sum
xmean1 = np.sum(xstar[0:m])
xmean2 = np.sum(xstar[m:2*m])
xmean3 = np.sum(xstar[2*m:3*m])
else:
# j>=1, subtract old point, add new point
xmean1 = xmean1 - xstar[j-1] + xstar[j+m-1] #
xmean2 = xmean2 - xstar[m+j-1] + xstar[j+2*m-1] #
xmean3 = xmean3 - xstar[2*m+j-1] + xstar[j+3*m-1] #
squaresum += pow((xmean1 - 2.0*xmean2 + xmean3)/float(m), 2)
k = k+1
assert k == 6*m # check number of terms in the sum
squaresum = (1.0/(6.0*k)) * squaresum
dev += squaresum
n = n+1
# scaling in front of double-sum
assert n == N-3*m+1 # sanity check on the number of terms n
dev = dev * 1.0/(N-3*m+1)
dev = np.sqrt(dev)
error = dev / np.sqrt(n)
return (dev, error, n)
def theo1(data, rate=1.0, data_type="phase", taus=None):
""" PRELIMINARY - REQUIRES FURTHER TESTING.
Theo1 is a two-sample variance with improved confidence and
extended averaging factor range.
.. math::
\\sigma^2_{THEO1}(m\\tau_0) = { 1 \\over (m \\tau_0 )^2 (N-m) }
\\sum_{i=1}^{N-m} \\sum_{\\delta=0}^{m/2-1}
{1\\over m/2-\\delta}\\lbrace
({x}_{i} - x_{i-\\delta +m/2}) +
(x_{i+m}- x_{i+\\delta +m/2}) \\rbrace^2
    where :math:`m` is even and :math:`10 \\le m \\le N-1`.
FIXME: bias correction
NIST [SP1065]_ eq (30) page 29
Parameters
----------
data: np.array
Input data. Provide either phase or frequency (fractional,
adimensional).
rate: float
The sampling rate for data, in Hz. Defaults to 1.0
data_type: {'phase', 'freq'}
Data type, i.e. phase or frequency. Defaults to "phase".
taus: np.array
Array of tau values, in seconds, for which to compute statistic.
Optionally set taus=["all"|"octave"|"decade"] for automatic
tau-list generation.
"""
phase = input_to_phase(data, rate, data_type)
tau0 = 1.0/rate
(phase, ms, taus_used) = tau_generator(phase, rate, taus, even=True)
devs = np.zeros_like(taus_used)
deverrs = np.zeros_like(taus_used)
ns = np.zeros_like(taus_used)
N = len(phase)
for idx, m in enumerate(ms):
m = int(m) # to avoid: VisibleDeprecationWarning: using a
# non-integer number instead of an integer will
# result in an error in the future
assert m % 2 == 0 # m must be even
dev = 0
n = 0
for i in range(int(N-m)):
s = 0
for d in range(int(m/2)): # inner sum
pre = 1.0 / (float(m)/2 - float(d))
s += pre*pow(phase[i]-phase[i-d+int(m/2)] +
phase[i+m]-phase[i+d+int(m/2)], 2)
n = n+1
dev += s
assert n == (N-m)*m/2 # N-m outer sums, m/2 inner sums
dev = dev/(0.75*(N-m)*pow(m*tau0, 2))
# factor 0.75 used here? http://tf.nist.gov/general/pdf/1990.pdf
# but not here? http://tf.nist.gov/timefreq/general/pdf/2220.pdf page 29
devs[idx] = np.sqrt(dev)
deverrs[idx] = devs[idx] / np.sqrt(N-m)
ns[idx] = n
return remove_small_ns(taus_used, devs, deverrs, ns)
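# Added illustrative sketch (not part of the original source): theo1() usage;
# note that only even averaging factors are evaluated (even=True above).
# numpy and the input values are assumptions here.
def _theo1_usage_sketch():
    """ Example only: Theo1 of simulated white PM noise. """
    import numpy as np
    x = 1e-9 * np.random.randn(1000)      # phase data, in seconds
    (taus, devs, errs, ns) = theo1(x, rate=1.0, data_type="phase",
                                   taus="octave")
    return taus, devs, errs, ns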
def tierms(data, rate=1.0, data_type="phase", taus=None):
""" Time Interval Error RMS.
Parameters
----------
data: np.array
Input data. Provide either phase or frequency (fractional,
adimensional).
rate: float
The sampling rate for data, in Hz. Defaults to 1.0
data_type: {'phase', 'freq'}
Data type, i.e. phase or frequency. Defaults to "phase".
taus: np.array
Array of tau values, in seconds, for which to compute statistic.
Optionally set taus=["all"|"octave"|"decade"] for automatic
tau-list generation.
"""
phase = input_to_phase(data, rate, data_type)
(data, m, taus_used) = tau_generator(phase, rate, taus)
count = len(phase)
devs = np.zeros_like(taus_used)
deverrs = np.zeros_like(taus_used)
ns = np.zeros_like(taus_used)
for idx, mj in enumerate(m):
mj = int(mj)
        # This seems like an unusual way to compute abs(phase[mj:] - phase[:-mj])
phases = np.column_stack((phase[:-mj], phase[mj:]))
p_max = np.max(phases, axis=1)
p_min = np.min(phases, axis=1)
phases = p_max - p_min
tie = np.sqrt(np.mean(phases * phases))
ncount = count - mj
devs[idx] = tie
deverrs[idx] = 0 / np.sqrt(ncount) # TODO! I THINK THIS IS WRONG!
ns[idx] = ncount
return remove_small_ns(taus_used, devs, deverrs, ns)
def mtie_rolling_window(a, window):
"""
Make an ndarray with a rolling window of the last dimension, from
http://mail.scipy.org/pipermail/numpy-discussion/2011-January/054401.html
Parameters
----------
a : array_like
Array to add rolling window to
window : int
Size of rolling window
Returns
-------
Array that is a view of the original array with a added dimension
of size window.
Note
----
This may consume large amounts of memory. See discussion:
https://mail.python.org/pipermail/numpy-discussion/2011-January/054364.html
https://mail.python.org/pipermail/numpy-discussion/2011-January/054370.html
"""
if window < 1:
raise ValueError("`window` must be at least 1.")
if window > a.shape[-1]:
raise ValueError("`window` is too long.")
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
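# Added illustrative sketch (not part of the original source): what the
# rolling-window view looks like for a short, made-up array.
def _mtie_rolling_window_sketch():
    """ Example only: 3-point rolling window over a 5-point array. """
    import numpy as np
    a = np.array([1., 2., 3., 4., 5.])
    rw = mtie_rolling_window(a, 3)
    # rw -> [[1,2,3], [2,3,4], [3,4,5]]; mtie() below takes max - min along
    # the last axis of this view to get the TIE within each window
    return rw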
def mtie(data, rate=1.0, data_type="phase", taus=None):
""" Maximum Time Interval Error.
Parameters
----------
data: np.array
Input data. Provide either phase or frequency (fractional,
adimensional).
rate: float
The sampling rate for data, in Hz. Defaults to 1.0
data_type: {'phase', 'freq'}
Data type, i.e. phase or frequency. Defaults to "phase".
taus: np.array
Array of tau values, in seconds, for which to compute statistic.
Optionally set taus=["all"|"octave"|"decade"] for automatic
tau-list generation.
Notes
-----
this seems to correspond to Stable32 setting "Fast(u)"
Stable32 also has "Decade" and "Octave" modes where the
dataset is extended somehow?
"""
phase = input_to_phase(data, rate, data_type)
(phase, m, taus_used) = tau_generator(phase, rate, taus)
devs = np.zeros_like(taus_used)
deverrs = np.zeros_like(taus_used)
ns = np.zeros_like(taus_used)
for idx, mj in enumerate(m):
try:
# the older algorithm uses a lot of memory
# but can be used for short datasets.
rw = mtie_rolling_window(phase, int(mj + 1))
win_max = np.max(rw, axis=1)
win_min = np.min(rw, axis=1)
tie = win_max - win_min
dev = np.max(tie)
        except Exception:
if int(mj + 1) < 1:
raise ValueError("`window` must be at least 1.")
if int(mj + 1) > phase.shape[-1]:
raise ValueError("`window` is too long.")
mj = int(mj)
currMax = np.max(phase[0:mj])
currMin = np.min(phase[0:mj])
dev = currMax - currMin
for winStartIdx in range(1, int(phase.shape[0] - mj)):
winEndIdx = mj + winStartIdx
if currMax == phase[winStartIdx - 1]:
currMax = np.max(phase[winStartIdx:winEndIdx])
elif currMax < phase[winEndIdx]:
currMax = phase[winEndIdx]
if currMin == phase[winStartIdx - 1]:
currMin = np.min(phase[winStartIdx:winEndIdx])
elif currMin > phase[winEndIdx]:
currMin = phase[winEndIdx]
if dev < currMax - currMin:
dev = currMax - currMin
ncount = phase.shape[0] - mj
devs[idx] = dev
deverrs[idx] = dev / np.sqrt(ncount)
ns[idx] = ncount
return remove_small_ns(taus_used, devs, deverrs, ns)
#
# !!!!!!!
# FIXME: mtie_phase_fast() is incomplete.
# !!!!!!!
#
def mtie_phase_fast(phase, rate=1.0, data_type="phase", taus=None):
""" fast binary decomposition algorithm for MTIE
See: [Bregni2001]_ STEFANO BREGNI "Fast Algorithms for TVAR and MTIE Computation in
Characterization of Network Synchronization Performance"
"""
rate = float(rate)
phase = np.asarray(phase)
k_max = int(np.floor(np.log2(len(phase))))
phase = phase[0:pow(2, k_max)] # truncate data to 2**k_max datapoints
assert len(phase) == pow(2, k_max)
#k = 1
taus = [pow(2, k) for k in range(k_max)]
#while k <= k_max:
# tau = pow(2, k)
# taus.append(tau)
#print tau
# k += 1
print("taus N=", len(taus), " ", taus)
devs = np.zeros(len(taus))
deverrs = np.zeros(len(taus))
ns = np.zeros(len(taus))
taus_used = np.array(taus) # [(1.0/rate)*t for t in taus]
# matrices to store results
mtie_max = np.zeros((len(phase)-1, k_max))
mtie_min = np.zeros((len(phase)-1, k_max))
for kidx in range(k_max):
k = kidx+1
imax = len(phase)-pow(2, k)+1
#print k, imax
tie = np.zeros(imax)
ns[kidx] = imax
#print np.max( tie )
for i in range(imax):
if k == 1:
mtie_max[i, kidx] = max(phase[i], phase[i+1])
mtie_min[i, kidx] = min(phase[i], phase[i+1])
else:
p = int(pow(2, k-1))
mtie_max[i, kidx] = max(mtie_max[i, kidx-1],
mtie_max[i+p, kidx-1])
mtie_min[i, kidx] = min(mtie_min[i, kidx-1],
mtie_min[i+p, kidx-1])
#for i in range(imax):
tie[i] = mtie_max[i, kidx] - mtie_min[i, kidx]
#print tie[i]
devs[kidx] = np.amax(tie) # maximum along axis
#print "maximum %2.4f" % devs[kidx]
#print np.amax( tie )
#for tau in taus:
#for
devs = np.array(devs)
print("devs N=", len(devs), " ", devs)
print("taus N=", len(taus_used), " ", taus_used)
return remove_small_ns(taus_used, devs, deverrs, ns)
########################################################################
#
# gap resistant Allan deviation
#
def gradev(data, rate=1.0, data_type="phase", taus=None,
ci=0.9, noisetype='wp'):
""" gap resistant overlapping Allan deviation
Parameters
----------
data: np.array
Input data. Provide either phase or frequency (fractional,
        adimensional). Warning: phase data works better (frequency data is
        first transformed into phase using the numpy.cumsum() function,
        which can lead to poor results).
rate: float
The sampling rate for data, in Hz. Defaults to 1.0
data_type: {'phase', 'freq'}
Data type, i.e. phase or frequency. Defaults to "phase".
taus: np.array
Array of tau values, in seconds, for which to compute statistic.
Optionally set taus=["all"|"octave"|"decade"] for automatic
tau-list generation.
ci: float
the total confidence interval desired, i.e. if ci = 0.9, the bounds
will be at 0.05 and 0.95.
noisetype: string
the type of noise desired:
'wp' returns white phase noise.
'wf' returns white frequency noise.
'fp' returns flicker phase noise.
'ff' returns flicker frequency noise.
'rf' returns random walk frequency noise.
If the input is not recognized, it defaults to idealized, uncorrelated
noise with (N-1) degrees of freedom.
Returns
-------
taus: np.array
        list of tau values in seconds
adev: np.array
deviations
[err_l, err_h] : list of len()==2, np.array
        the upper and lower bounds of the confidence interval, taken as
        distances from the estimated two-sample variance.
ns: np.array
        number of terms n in the adev estimate.
"""
if data_type == "freq":
print("Warning : phase data is preferred as input to gradev()")
phase = input_to_phase(data, rate, data_type)
(data, m, taus_used) = tau_generator(phase, rate, taus)
ad = np.zeros_like(taus_used)
ade_l = np.zeros_like(taus_used)
ade_h = np.zeros_like(taus_used)
adn = np.zeros_like(taus_used)
for idx, mj in enumerate(m):
(dev, deverr, n) = calc_gradev_phase(data,
rate,
mj,
1,
ci,
noisetype)
# stride=1 for overlapping ADEV
ad[idx] = dev
ade_l[idx] = deverr[0]
ade_h[idx] = deverr[1]
adn[idx] = n
# Note that errors are split in 2 arrays
return remove_small_ns(taus_used, ad, [ade_l, ade_h], adn)
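# Added illustrative sketch (not part of the original source): gap-resistant
# ADEV on phase data containing NaN gaps; assumes the ci module used by
# calc_gradev_phase() is available, as elsewhere in this file, and the values
# are made up.
def _gradev_usage_sketch():
    """ Example only: gradev() on phase data with a simulated gap. """
    import numpy as np
    x = 1e-9 * np.random.randn(1000)
    x[200:220] = np.nan                   # simulate a measurement gap
    (taus, devs, [err_lo, err_hi], ns) = gradev(x, rate=1.0,
                                                data_type="phase",
                                                taus="octave")
    return taus, devs, err_lo, err_hi, ns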
def calc_gradev_phase(data, rate, mj, stride, confidence, noisetype):
""" see http://www.leapsecond.com/tools/adev_lib.c
stride = mj for nonoverlapping allan deviation
stride = 1 for overlapping allan deviation
see http://en.wikipedia.org/wiki/Allan_variance
1 1
s2y(t) = --------- sum [x(i+2) - 2x(i+1) + x(i) ]^2
2*tau^2
"""
d2 = data[2 * int(mj)::int(stride)]
d1 = data[1 * int(mj)::int(stride)]
d0 = data[::int(stride)]
n = min(len(d0), len(d1), len(d2))
v_arr = d2[:n] - 2 * d1[:n] + d0[:n]
n = len(np.where(np.isnan(v_arr) == False)[0]) # only average for non-nans
    if n == 0:
        import warnings  # local import, in case warnings is not imported above
        warnings.warn("Data array length is too small: %i" % len(data),
                      RuntimeWarning)
        n = 1
N = len(np.where(np.isnan(data) == False)[0])
s = np.nansum(v_arr * v_arr) # a summation robust to nans
dev = np.sqrt(s / (2.0 * n)) / mj * rate
#deverr = dev / np.sqrt(n) # old simple errorbars
if noisetype == 'wp':
alpha = 2
elif noisetype == 'wf':
alpha = 0
elif noisetype == 'fp':
alpha = -2
else:
alpha = None
if n > 1:
edf = ci.edf_simple(N, mj, alpha)
deverr = ci.confidence_interval(dev, confidence, edf)
else:
deverr = [0, 0]
return dev, deverr, n
########################################################################
#
# Various helper functions and utilities
#
def input_to_phase(data, rate, data_type):
""" Take either phase or frequency as input and return phase
"""
if data_type == "phase":
return data
elif data_type == "freq":
return frequency2phase(data, rate)
else:
raise Exception("unknown data_type: " + data_type)
def tau_generator(data, rate, taus=None, v=False, even=False, maximum_m=-1):
""" pre-processing of the tau-list given by the user (Helper function)
Does sanity checks, sorts data, removes duplicates and invalid values.
Generates a tau-list based on keywords 'all', 'decade', 'octave'.
Uses 'octave' by default if no taus= argument is given.
Parameters
----------
data: np.array
data array
rate: float
Sample rate of data in Hz. Time interval between measurements
is 1/rate seconds.
taus: np.array
Array of tau values for which to compute measurement.
Alternatively one of the keywords: "all", "octave", "decade".
Defaults to "octave" if omitted.
v:
verbose output if True
even:
require even m, where tau=m*tau0, for Theo1 statistic
maximum_m:
limit m, where tau=m*tau0, to this value.
used by mtotdev() and htotdev() to limit maximum tau.
Returns
-------
(data, m, taus): tuple
List of computed values
data: np.array
Data
m: np.array
Tau in units of data points
taus: np.array
Cleaned up list of tau values
"""
if rate == 0:
raise RuntimeError("Warning! rate==0")
if taus is None: # empty or no tau-list supplied
taus = "octave" # default to octave
elif isinstance(taus, list) and taus == []:
taus = "octave"
if taus is "all":
taus = (1.0/rate)*np.linspace(1.0, len(data), len(data))
elif taus is "octave":
maxn = np.floor(np.log2(len(data)))
taus = (1.0/rate)*np.logspace(0, int(maxn), int(maxn+1), base=2.0)
elif taus is "decade": # 1, 2, 4, 10, 20, 40, spacing similar to Stable32
maxn = np.floor(np.log10(len(data)))
taus = []
for k in range(int(maxn+1)):
taus.append(1.0*(1.0/rate)*pow(10.0, k))
taus.append(2.0*(1.0/rate)*pow(10.0, k))
taus.append(4.0*(1.0/rate)*pow(10.0, k))
data, taus = np.array(data), np.array(taus)
rate = float(rate)
m = [] # integer averaging factor. tau = m*tau0
if maximum_m == -1: # if no limit given
maximum_m = len(data)
# FIXME: should we use a "stop-ratio" like Stable32
# found in Table III, page 9 of "Evolution of frequency stability analysis software"
# max(AF) = len(phase)/stop_ratio, where
# function stop_ratio
# adev 5
# oadev 4
# mdev 4
# tdev 4
# hdev 5
# ohdev 4
# totdev 2
# tierms 4
# htotdev 3
# mtie 2
# theo1 1
# theoH 1
# mtotdev 2
# ttotdev 2
taus_valid1 = taus < (1 / float(rate)) * float(len(data))
taus_valid2 = taus > 0
taus_valid3 = taus <= (1 / float(rate)) * float(maximum_m)
taus_valid = taus_valid1 & taus_valid2 & taus_valid3
m = np.floor(taus[taus_valid] * rate)
m = m[m != 0] # m is tau in units of datapoints
m = np.unique(m) # remove duplicates and sort
if v:
print("tau_generator: ", m)
if len(m) == 0:
print("Warning: sanity-check on tau failed!")
print(" len(data)=", len(data), " rate=", rate, "taus= ", taus)
taus2 = m / float(rate)
if even: # used by Theo1
m_even_mask = ((m % 2) == 0)
m = m[m_even_mask]
taus2 = taus2[m_even_mask]
return data, m, taus2
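# Added illustrative sketch (not part of the original source): what the
# "octave" keyword produces; numpy and the input length are assumptions here.
def _tau_generator_sketch():
    """ Example only: octave-spaced averaging factors for 1000 points. """
    import numpy as np
    phase = np.random.randn(1000)
    data, m, taus2 = tau_generator(phase, rate=1.0, taus="octave")
    # m     -> [1, 2, 4, ..., 512] (powers of two up to 2**floor(log2(1000)))
    # taus2 -> the same values expressed in seconds, since rate = 1.0 Hz
    return m, taus2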
def tau_reduction(ms, rate, n_per_decade):
"""Reduce the number of taus to maximum of n per decade (Helper function)
takes in a tau list and reduces the number of taus to a maximum amount per
decade. This is only useful if more than the "decade" and "octave" but
less than the "all" taus are wanted. E.g. to show certain features of
the data one might want 100 points per decade.
NOTE: The algorithm is slightly inaccurate for ms under n_per_decade, and
will also remove some points in this range, which is usually fine.
Typical use would be something like:
(data,m,taus)=tau_generator(data,rate,taus="all")
(m,taus)=tau_reduction(m,rate,n_per_decade)
Parameters
----------
ms: array of integers
List of m values (assumed to be an "all" list) to remove points from.
rate: float
Sample rate of data in Hz. Time interval between measurements
is 1/rate seconds. Used to convert to taus.
n_per_decade: int
Number of ms/taus to keep per decade.
Returns
-------
m: np.array
Reduced list of m values
taus: np.array
Reduced list of tau values
"""
ms = np.int64(ms)
keep = np.bool8(np.rint(n_per_decade*np.log10(ms[1:])) -
np.rint(n_per_decade*np.log10(ms[:-1])))
# Adjust ms size to fit above-defined mask
ms = ms[:-1]
assert len(ms) == len(keep)
ms = ms[keep]
taus = ms/float(rate)
return ms, taus
def remove_small_ns(taus, devs, deverrs, ns):
""" Remove results with small number of samples.
If n is small (==1), reject the result
Parameters
----------
taus: array
List of tau values for which deviation were computed
devs: array
List of deviations
deverrs: array or list of arrays
List of estimated errors (possibly a list containing two arrays :
upper and lower values)
ns: array
Number of samples for each point
Returns
-------
(taus, devs, deverrs, ns): tuple
Identical to input, except that values with low ns have been removed.
"""
ns_big_enough = ns > 1
o_taus = taus[ns_big_enough]
o_devs = devs[ns_big_enough]
o_ns = ns[ns_big_enough]
if isinstance(deverrs, list):
assert len(deverrs) < 3
o_deverrs = [deverrs[0][ns_big_enough], deverrs[1][ns_big_enough]]
else:
o_deverrs = deverrs[ns_big_enough]
if len(o_devs) == 0:
print("remove_small_ns() nothing remains!?")
raise UserWarning
return o_taus, o_devs, o_deverrs, o_ns
def trim_data(x):
"""
Trim leading and trailing NaNs from dataset
    This is done by scanning the array from each end, storing the index of
    the first non-NaN value in each case, and then returning the
    corresponding slice of the array.
"""
# Find indices for first and last valid data
first = 0
while np.isnan(x[first]):
first += 1
last = len(x)
while np.isnan(x[last - 1]):
last -= 1
return x[first:last]
def three_cornered_hat_phase(phasedata_ab, phasedata_bc,
phasedata_ca, rate, taus, function):
"""
Three Cornered Hat Method
Given three clocks A, B, C, we seek to find their variances
:math:`\\sigma^2_A`, :math:`\\sigma^2_B`, :math:`\\sigma^2_C`.
We measure three phase differences, assuming no correlation between
the clocks, the measurements have variances:
.. math::
\\sigma^2_{AB} = \\sigma^2_{A} + \\sigma^2_{B}
\\sigma^2_{BC} = \\sigma^2_{B} + \\sigma^2_{C}
\\sigma^2_{CA} = \\sigma^2_{C} + \\sigma^2_{A}
Which allows solving for the variance of one clock as:
.. math::
\\sigma^2_{A} = {1 \\over 2} ( \\sigma^2_{AB} +
\\sigma^2_{CA} - \\sigma^2_{BC} )
and similarly cyclic permutations for :math:`\\sigma^2_B` and
:math:`\\sigma^2_C`
Parameters
----------
phasedata_ab: np.array
phase measurements between clock A and B, in seconds
phasedata_bc: np.array
phase measurements between clock B and C, in seconds
phasedata_ca: np.array
phase measurements between clock C and A, in seconds
rate: float
The sampling rate for phase, in Hz
taus: np.array
The tau values for deviations, in seconds
function: allantools deviation function
The type of statistic to compute, e.g. allantools.oadev
Returns
-------
tau_ab: np.array
Tau values corresponding to output deviations
dev_a: np.array
List of computed values for clock A
References
----------
http://www.wriley.com/3-CornHat.htm
"""
(tau_ab, dev_ab, err_ab, ns_ab) = function(phasedata_ab,
data_type='phase',
rate=rate, taus=taus)
(tau_bc, dev_bc, err_bc, ns_bc) = function(phasedata_bc,
data_type='phase',
rate=rate, taus=taus)
(tau_ca, dev_ca, err_ca, ns_ca) = function(phasedata_ca,
data_type='phase',
rate=rate, taus=taus)
var_ab = dev_ab * dev_ab
var_bc = dev_bc * dev_bc
var_ca = dev_ca * dev_ca
assert len(var_ab) == len(var_bc) == len(var_ca)
var_a = 0.5 * (var_ab + var_ca - var_bc)
var_a[var_a < 0] = 0 # don't return imaginary deviations (?)
dev_a = np.sqrt(var_a)
err_a = [d/np.sqrt(nn) for (d, nn) in zip(dev_a, ns_ab)]
return tau_ab, dev_a, err_a, ns_ab
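# Added illustrative sketch (not part of the original source): three-cornered
# hat with oadev(); the three inputs below are made-up placeholders standing
# in for measured phase differences.
def _three_cornered_hat_sketch():
    """ Example only: estimate clock A deviation from pairwise measurements. """
    import numpy as np
    x_ab = 1e-9 * np.cumsum(np.random.randn(1000))   # placeholder A-B phase
    x_bc = 1e-9 * np.cumsum(np.random.randn(1000))   # placeholder B-C phase
    x_ca = 1e-9 * np.cumsum(np.random.randn(1000))   # placeholder C-A phase
    (taus, dev_a, err_a, ns) = three_cornered_hat_phase(
        x_ab, x_bc, x_ca, rate=1.0, taus="octave", function=oadev)
    # dev_a is the estimated deviation of clock A alone
    return taus, dev_a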
########################################################################
#
# simple conversions between frequency, phase(seconds), phase(radians)
#
def frequency2phase(freqdata, rate):
""" integrate fractional frequency data and output phase data
Parameters
----------
freqdata: np.array
Data array of fractional frequency measurements (nondimensional)
rate: float
The sampling rate for phase or frequency, in Hz
Returns
-------
phasedata: np.array
Time integral of fractional frequency data, i.e. phase (time) data
in units of seconds.
For phase in units of radians, see phase2radians()
"""
dt = 1.0 / float(rate)
# Protect against NaN values in input array (issue #60)
# Reintroduces data trimming as in commit 503cb82
freqdata = trim_data(freqdata)
# Erik Benkler (PTB): Subtract mean value before cumsum in order to
# avoid precision issues when we have small frequency fluctuations on
# a large average frequency
freqdata = freqdata - np.nanmean(freqdata)
phasedata = np.cumsum(freqdata) * dt
    # prepend a zero so that the phase series starts at zero and
    # len(phase) == len(freq) + 1
    phasedata = np.insert(phasedata, 0, 0)
return phasedata
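# Added illustrative sketch (not part of the original source): round-trip
# between fractional frequency and phase; numpy and the values are assumptions.
def _frequency2phase_sketch():
    """ Example only: freq -> phase -> freq recovers the de-meaned input. """
    import numpy as np
    y = 1e-12 * np.random.randn(100)      # fractional frequency samples
    x = frequency2phase(y, rate=1.0)      # len(x) == len(y) + 1, x[0] == 0
    y2 = phase2frequency(x, rate=1.0)     # equals y minus its mean value,
    assert np.allclose(y2, y - np.mean(y))  # since the mean is subtracted above
    return x, y2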
def phase2radians(phasedata, v0):
""" Convert phase in seconds to phase in radians
Parameters
----------
phasedata: np.array
Data array of phase in seconds
v0: float
Nominal oscillator frequency in Hz
Returns
-------
fi:
phase data in radians
"""
fi = [2*np.pi*v0*xx for xx in phasedata]
return fi
def phase2frequency(phase, rate):
""" Convert phase in seconds to fractional frequency
Parameters
----------
phase: np.array
Data array of phase in seconds
rate: float
The sampling rate for phase, in Hz
Returns
-------
y:
Data array of fractional frequency
"""
y = rate*np.diff(phase)
return y
def frequency2fractional(frequency, mean_frequency=-1):
""" Convert frequency in Hz to fractional frequency
Parameters
----------
frequency: np.array
Data array of frequency in Hz
mean_frequency: float
(optional) The nominal mean frequency, in Hz
if omitted, defaults to mean frequency=np.mean(frequency)
Returns
-------
y:
Data array of fractional frequency
"""
if mean_frequency == -1:
mu = np.mean(frequency)
else:
mu = mean_frequency
y = [(x-mu)/mu for x in frequency]
return y
# end of file allantools.py
# ===== end of allantools/allantools.py (AllanTools-2019.9) =====
import math
import numpy
import scipy.signal # for welch PSD
def numpy_psd(x, f_sample=1.0):
""" calculate power spectral density of input signal x
x = signal
f_sample = sampling frequency in Hz. i.e. 1/fs is the time-interval
in seconds between datapoints
scale fft so that output corresponds to 1-sided PSD
output has units of [X^2/Hz] where X is the unit of x
"""
psd_of_x = ((2.0 / (float(len(x)) * f_sample))
* numpy.abs(numpy.fft.rfft(x))**2)
f_axis = numpy.linspace(0, f_sample/2.0, len(psd_of_x)) # frequency axis
return f_axis, psd_of_x
def scipy_psd(x, f_sample=1.0, nr_segments=4):
""" PSD routine from scipy
we can compare our own numpy result against this one
"""
f_axis, psd_of_x = scipy.signal.welch(x,
f_sample,
nperseg=len(x)/nr_segments)
return f_axis, psd_of_x
def white(num_points=1024, b0=1.0, fs=1.0):
""" White noise generator
Generate time series with white noise that has constant PSD = b0,
up to the nyquist frequency fs/2.
The PSD is at 'height' b0 and extends from 0 Hz up to the nyquist
frequency fs/2 (prefactor math.sqrt(b0*fs/2.0))
Parameters
----------
num_points: int, optional
number of samples
b0: float, optional
desired power-spectral density in [X^2/Hz] where X is the unit of x
fs: float, optional
sampling frequency, i.e. 1/fs is the time-interval between
datapoints
Returns
-------
White noise sample: numpy.array
"""
return math.sqrt(b0*fs/2.0)*numpy.random.randn(num_points)
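# Added illustrative sketch (not part of the original source): checking the
# generated white noise against its requested PSD level; values are made up.
def _white_noise_sketch():
    """ Example only: PSD of white() output should scatter around b0. """
    x = white(num_points=4096, b0=1e-20, fs=1.0)
    f, psd = numpy_psd(x, f_sample=1.0)
    # the PSD estimate should scatter around the flat level b0 = 1e-20
    # between 0 Hz and the Nyquist frequency fs/2
    return f, psd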
def brown(num_points=1024, b2=1.0, fs=1.0):
""" Brownian or random walk (diffusion) noise with 1/f^2 PSD
Not really a color... rather Brownian or random-walk.
Obtained by integrating white-noise.
Parameters
----------
num_points: int, optional
number of samples
b2: float, optional
desired power-spectral density is b2*f^-2
fs: float, optional
sampling frequency, i.e. 1/fs is the time-interval between
datapoints
Returns
-------
Random walk sample: numpy.array
"""
return (1.0/float(fs))*numpy.cumsum(white(num_points,
b0=b2*(4.0*math.pi*math.pi),
fs=fs))
def violet(num_points=1024):
""" Violet noise with /f^2 PSD
Obtained by differentiating white noise
Parameters
----------
num_points: int, optional
number of samples
Returns
-------
Violet noise sample: numpy.array
"""
# diff() reduces number of points by one.
return numpy.diff(numpy.random.randn(num_points+1))
def pink(num_points=1024, depth=80):
""" Pink noise (approximation) with 1/f PSD
Fills a sample with results from a pink noise generator
from http://pydoc.net/Python/lmj.sound/0.1.1/lmj.sound.noise/,
based on the Voss-McCartney algorithm, discussion and code examples at
http://www.firstpr.com.au/dsp/pink-noise/
Parameters
----------
num_points: int, optional
number of samples
depth: int, optional
        number of iterations for each point. Higher numbers are slower but
        generate a more accurate spectrum at the low-frequency end.
Returns
-------
Pink noise sample: numpy.array
"""
a = []
s = iterpink(depth)
    for n in range(num_points):  # note: the loop variable n itself is unused
a.append(next(s))
return numpy.array(a)
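# Added illustrative sketch (not part of the original source): rough check of
# the 1/f character of pink() output; parameter values are made up.
def _pink_noise_sketch():
    """ Example only: PSD of pink() output falls roughly as 1/f. """
    x = pink(num_points=4096, depth=80)
    f, psd = numpy_psd(x, f_sample=1.0)
    # away from the lowest frequencies the PSD should fall roughly as 1/f
    return f, psd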
def iterpink(depth=20):
"""Generate a sequence of samples of pink noise.
pink noise generator
from http://pydoc.net/Python/lmj.sound/0.1.1/lmj.sound.noise/
Based on the Voss-McCartney algorithm, discussion and code examples at
http://www.firstpr.com.au/dsp/pink-noise/
depth: Use this many samples of white noise to calculate the output. A
higher number is slower to run, but renders low frequencies with more
correct power spectra.
Generates a never-ending sequence of floating-point values. Any continuous
set of these samples will tend to have a 1/f power spectrum.
"""
values = numpy.random.randn(depth)
smooth = numpy.random.randn(depth)
source = numpy.random.randn(depth)
sumvals = values.sum()
i = 0
while True:
yield sumvals + smooth[i]
# advance the index by 1. if the index wraps, generate noise to use in
# the calculations, but do not update any of the pink noise values.
i += 1
if i == depth:
i = 0
smooth = numpy.random.randn(depth)
source = numpy.random.randn(depth)
continue
# count trailing zeros in i
c = 0
while not (i >> c) & 1:
c += 1
# replace value c with a new source element
sumvals += source[i] - values[c]
values[c] = source[i]
# ===== end of allantools/noise.py (AllanTools-2019.9) =====
from . import allantools
class Dataset(object):
""" Dataset class for Allantools
:Example:
::
import numpy as np
# Load random data
a = allantools.Dataset(data=np.random.rand(1000))
# compute mdev
a.compute("mdev")
print(a.out["stat"])
compute() returns the result of the computation and also stores it in the
object's ``out`` member.
"""
def __init__(self, data=None, rate=1.0, data_type="phase", taus=None):
""" Initialize object with input data
Parameters
----------
data: np.array
Input data. Provide either phase or frequency (fractional,
adimensional)
rate: float
The sampling rate for data, in Hz. Defaults to 1.0
data_type: {'phase', 'freq'}
Data type, i.e. phase or frequency. Defaults to "phase".
taus: np.array
Array of tau values, in seconds, for which to compute statistic.
Optionally set taus=["all"|"octave"|"decade"] for automatic
calculation of taus list
Returns
-------
Dataset()
A Dataset() instance
"""
#: input data Dict,
self.inp = {"data": None,
"rate": None,
"data_type": None,
"taus": None}
#: output data Dict, to be populated by compute()
self.out = {"taus": None,
"stat": None,
"stat_err": None,
"stat_n": None,
"stat_unc": None,
"stat_id": None}
self.inp["data"] = data
self.inp["rate"] = rate
self.inp["data_type"] = data_type
self.inp["taus"] = taus
def set_input(self, data,
rate=1.0, data_type="phase", taus=None):
""" Optionnal method if you chose not to set inputs on init
Parameters
----------
data: np.array
Input data. Provide either phase or frequency (fractional,
adimensional)
rate: float
The sampling rate for data, in Hz. Defaults to 1.0
data_type: {'phase', 'freq'}
Data type, i.e. phase or frequency. Defaults to "phase".
taus: np.array
Array of tau values, in seconds, for which to compute statistic.
            Optionally set taus=["all"|"octave"|"decade"] for automatic
            tau-list generation.
"""
self.inp["data"] = data
self.inp["rate"] = rate
self.inp["data_type"] = data_type
self.inp["taus"] = taus
def compute(self, function):
"""Evaluate the passed function with the supplied data.
Stores result in self.out.
Parameters
----------
function: str
Name of the :mod:`allantools` function to evaluate
Returns
-------
result: dict
The results of the calculation.
"""
try:
func = getattr(allantools, function)
except AttributeError:
raise AttributeError("function must be defined in allantools")
whitelisted = ["theo1", "mtie", "tierms"]
if function[-3:] != "dev" and function not in whitelisted:
# this should probably raise a custom exception type so
# it's easier to distinguish from other bad things
raise RuntimeError("function must be one of the 'dev' functions")
result = func(self.inp["data"], rate=self.inp["rate"],
data_type=self.inp["data_type"], taus=self.inp["taus"])
keys = ["taus", "stat", "stat_err", "stat_n"]
result = {key: result[i] for i, key in enumerate(keys)}
self.out = result.copy()
self.out["stat_id"] = function
return result
def write_results(self, filename, digits=5, header_params={}):
""" Output result to text
Save calculation results to disk. Will overwrite any existing file.
Parameters
----------
filename: str
Path to the output file
digits: int
Number of significant digits in output
header_params: dict
Arbitrary dict of params to be included in header
Returns
-------
None
"""
with open(filename, 'w') as fp:
fp.write("# Generated by Allantools {}\n".format(
allantools.__version__))
fp.write("# Input data type: {}\n".format(self.inp["data_type"]))
fp.write("# Input data rate: {}\n".format(self.inp["rate"]))
for key, val in header_params.items():
fp.write("# {}: {}\n".format(key, val))
# Fields
fp.write(("{af:>5s} {tau:>{width}s} {n:>10s} {alpha:>5s} "
"{minsigma:>{width}} "
"{sigma:>{width}} "
"{maxsigma:>{width}} "
"\n").format(
af="AF",
tau="Tau",
n="N",
alpha="alpha",
minsigma="min_" + self.out["stat_id"],
sigma=self.out["stat_id"],
maxsigma="max_" + self.out["stat_id"],
width=digits + 5
)
)
out_fmt = ("{af:5d} {tau:.{prec}e} {n:10d} {alpha:5s} "
"{minsigma:.{prec}e} "
"{sigma:.{prec}e} "
"{maxsigma:.{prec}e} "
"\n")
for i in range(len(self.out["taus"])):
fp.write(out_fmt.format(
af=int(self.out["taus"][i] / self.out["taus"][0]),
tau=self.out["taus"][i],
n=int(self.out["stat_n"][i]),
alpha="NaN", # Not implemented yet
minsigma=self.out["stat"][i] - self.out["stat_err"][i]/2,
sigma=self.out["stat"][i],
maxsigma=(self.out["stat"][i] +
self.out["stat_err"][i]/2),
prec=digits-1,
))
# ===== end of allantools/dataset.py (AllanTools-2019.9) =====
class Plot(object):
""" A class for plotting data once computed by Allantools
:Example:
::
import allantools
import numpy as np
a = allantools.Dataset(data=np.random.rand(1000))
a.compute("mdev")
b = allantools.Plot()
b.plot(a)
b.show()
    Uses matplotlib. self.fig and self.ax store the return values of
    matplotlib.pyplot.subplots(). plot() sets various defaults, but you
    can change them by calling standard matplotlib methods on self.fig and self.ax.
"""
def __init__(self, no_display=False):
""" set ``no_display`` to ``True`` when we don't have an X-window
(e.g. for tests)
"""
try:
import matplotlib
if no_display:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
self.plt = plt
except ImportError:
raise RuntimeError("Matplotlib is required for plotting")
self.fig, self.ax = plt.subplots()
self.ax.set_xscale("log")
self.ax.set_yscale("log")
def plot(self, atDataset,
errorbars=False,
grid=False,
**kwargs
):
""" Use matplotlib methods for plotting
        Additional keyword arguments are passed to
:py:func:`matplotlib.pyplot.plot`.
Parameters
----------
atDataset : allantools.Dataset()
a dataset with computed data
errorbars : boolean
Plot errorbars. Defaults to False
grid : boolean
Plot grid. Defaults to False
"""
if errorbars:
self.ax.errorbar(atDataset.out["taus"],
atDataset.out["stat"],
yerr=atDataset.out["stat_err"],
**kwargs
)
else:
self.ax.plot(atDataset.out["taus"],
atDataset.out["stat"],
**kwargs
)
self.ax.set_xlabel("Tau")
self.ax.set_ylabel(atDataset.out["stat_id"])
self.ax.grid(grid, which="minor", ls="-", color='0.65')
self.ax.grid(grid, which="major", ls="-", color='0.25')
def show(self):
"""Calls matplotlib.pyplot.show()
        Keeping this separate from ``plot()`` allows the display to be
        tweaked before rendering
"""
self.plt.show()
def save(self, f):
"""Save figure to file
"""
self.plt.savefig(f)
# ===== end of allantools/plot.py (AllanTools-2019.9) =====
import numpy
class dev_realtime(object):
""" Base-class for real-time statistics """
def __init__(self, afs=[1], tau0=1.0, auto_afs=False, pts_per_decade=4):
self.x = [] # phase time-series
self.afs = afs # averaging factor, tau = af*tau0
self.auto_afs = auto_afs
self.af_taus = numpy.logspace(0, 1, pts_per_decade+1)[:-1] # will fail at >=6 (?), need to remove duplicates?
self.af_idx = 0 # logspace index, to keep track of afs in auto-af mode
self.af_decade = 0 # logspace decade
if auto_afs:
self.afs = numpy.array([1])
self.dev = numpy.zeros(len(afs)) # resulting xDEV
self.tau0 = tau0 # time-interval between points
def update_af(self):
""" used in auto-AF mode,
- check if we can add another AF
- if yes, add it.
"""
next_idx = self.af_idx+1
next_decade = self.af_decade
if next_idx == len(self.af_taus):
next_idx = 0
next_decade = self.af_decade + 1
next_af = int(numpy.round(pow(10.0, next_decade) * self.af_taus[next_idx])) # next possible AF
if len(self.x) >= (2*next_af+1): # can compute next AF
self.afs = numpy.append(self.afs, next_af) # new AF
self.add_af() # tell subclass to update internal variables
#self.S = numpy.append(self.S, 0) # new S, FIXME: S defined in subclass!
self.dev = numpy.append(self.dev, 0) # new dev
self.af_idx = next_idx
self.af_decade = next_decade
else:
pass
#print "no new AF "
def add_frequency(self, f):
""" add new frequency point, in units of Hz """
if not self.x: # empty sequence
self.add_phase(0) # initialize
self.add_phase(self.x[-1] + f) # integration
def taus(self):
""" return taus, in unit of seconds """
return self.tau0*numpy.array(self.afs)
def add_af(self):
pass # define in subclass!
def devs(self):
""" return deviation """
return self.dev
class oadev_realtime(dev_realtime):
""" Overlapping Allan deviation in real-time from a stream of phase/frequency samples.
Dobrogowski & Kasznia
https://doi.org/10.1109/FREQ.2007.4319204
"""
def __init__(self, afs=[1], tau0=1.0, auto_afs=False, pts_per_decade=4):
super(oadev_realtime, self).__init__(afs=afs, tau0=tau0, auto_afs=auto_afs, pts_per_decade=pts_per_decade)
self.S = numpy.zeros(len(afs)) # sum-of-squares
def add_phase(self, xnew):
""" add new phase point, in units of seconds """
self.x.append(xnew)
for idx, af in enumerate(self.afs):
if len(self.x) >= (2*af+1):
self.update_S(idx)
if self.auto_afs:
self.update_af()
def update_S(self, idx):
""" update S, sum-of-squares """
af = self.afs[idx]
i = len(self.x)-1 # last pt
S_new = pow(self.x[i] - 2*self.x[i-af] + self.x[i-2*af], 2)
self.S[idx] = self.S[idx] + S_new
self.dev[idx] = numpy.sqrt((1.0/(2*pow(af*self.tau0, 2)*(i+1-2*af))) * self.S[idx])
def add_af(self):
self.S = numpy.append(self.S, 0)
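# Added illustrative sketch (not part of the original source): feeding a
# stream of phase readings into oadev_realtime; numpy and the made-up
# placeholder stream are assumptions here.
def _oadev_realtime_sketch():
    """ Example only: real-time overlapping ADEV from streamed phase data. """
    import numpy as np
    phase_stream = 1e-9 * np.random.randn(1000)   # placeholder phase readings, in s
    dev = oadev_realtime(afs=[1, 2, 4, 8], tau0=1.0)
    for x in phase_stream:
        dev.add_phase(x)          # deviations update after every sample
    return dev.taus(), dev.devs()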
class ohdev_realtime(dev_realtime):
""" Overlapping Hadamard deviation in real-time from a stream of phase/frequency samples.
[Dobrogowski2007]_
Dobrogowski & Kasznia
https://doi.org/10.1109/FREQ.2007.4319204
"""
def __init__(self, afs=[1], tau0=1.0, auto_afs=False, pts_per_decade=4):
super(ohdev_realtime, self).__init__(afs=afs, tau0=tau0, auto_afs=auto_afs, pts_per_decade=pts_per_decade)
self.S = numpy.zeros(len(afs)) # sum-of-squares
def add_af(self):
self.S = numpy.append(self.S, 0)
def add_phase(self, xnew):
""" add new phase point """
self.x.append(xnew)
for idx, af in enumerate(self.afs):
if len(self.x) > 3*af:
self.update_S(idx)
if self.auto_afs:
self.update_af()
def update_S(self, idx):
""" update S, sum-of-squares """
af = self.afs[idx]
i = len(self.x)-1 # last pt
#print i,self.x
S_new = pow(self.x[i] - 3*self.x[i-af] + 3*self.x[i-2*af] - self.x[i-3*af], 2)
self.S[idx] = self.S[idx] + S_new
self.dev[idx] = numpy.sqrt((1.0/(6.0*pow(af*self.tau0, 2)*(i+1.0-3*af))) * self.S[idx])
class tdev_realtime(dev_realtime):
""" Time deviation and Modified Allan deviation in real-time from a stream of phase/frequency samples.
Dobrogowski & Kasznia
https://doi.org/10.1109/FREQ.2007.4319204
"""
def __init__(self, afs=[1], tau0=1.0, auto_afs=False, pts_per_decade=4):
super(tdev_realtime, self).__init__(afs=afs, tau0=tau0, auto_afs=auto_afs, pts_per_decade=pts_per_decade)
self.S = numpy.zeros(len(afs)) # sum-of-squares
self.So = numpy.zeros(len(afs)) # overall sum-of-squares
def add_phase(self, xnew):
""" add new phase point """
self.x.append(xnew)
for idx, af in enumerate(self.afs):
if len(self.x) >= 3*af+1: # 3n+1 samples measured
self.update_S(idx)
elif len(self.x) >= 2*af+1: # 2n+1 samples measured
self.update_S3n(idx)
if self.auto_afs:
self.update_af()
def add_af(self):
self.S = numpy.append(self.S, 0)
self.So = numpy.append(self.So, 0)
def update_S3n(self, idx):
""" eqn (13) of paper """
af = self.afs[idx]
j = len(self.x)-1 # last pt
self.S[idx] = self.S[idx] + self.x[j] - 2*self.x[j-af] + self.x[j-2*af]
if len(self.x) == 3*af:
# last call to this fctn
self.So[idx] = pow(self.S[idx], 2)
self.update_dev(idx)
def update_dev(self, idx):
# Eqn (14)
num_pts = len(self.x)
af = self.afs[idx]
self.dev[idx] = numpy.sqrt((1.0/6.0)*(1.0/(num_pts-3*af+1.0))*(1.0/pow(af, 2))*(self.So[idx]))
def update_S(self, idx):
""" update S, sum-of-squares """
af = self.afs[idx]
assert(len(self.x) >= 3*af+1)
i = len(self.x)-1 # last pt
# Eqn (12)
S_new = -1*self.x[i-3*af] + 3*self.x[i-2*af] - 3*self.x[i-af] + self.x[i]
self.S[idx] = self.S[idx] + S_new
# Eqn (11)
self.So[idx] = self.So[idx] + pow(self.S[idx], 2) #??? S_(i-1) in paper for TDEV-sqrt?
self.update_dev(idx)
def mdev(self):
""" scale tdev to output mdev """
mdev = self.dev.copy()
for idx, af in enumerate(self.afs):
mdev[idx] = mdev[idx]*numpy.sqrt(3)/(af*self.tau0)
return mdev
# end of file realtime.py
# ===== end of allantools/realtime.py (AllanTools-2019.9) =====
import numpy as np
class Noise(object):
""" Generate discrete colored noise
Python / Numpy implementation of:
Kasdin, N.J., Walter, T., "Discrete simulation of power law noise [for
oscillator stability evaluation]," Frequency Control Symposium, 1992.
46th., Proceedings of the 1992 IEEE, pp.274,283, 27-29 May 1992
http://dx.doi.org/10.1109/FREQ.1992.270003
:Example:
::
import numpy as np
noise = allantools.Noise(nr=2*8, qd=1.0e-20, b=-1)
noise.generateNoise()
            print(noise.time_series)
"""
def __init__(self, nr=2, qd=1, b=0):
""" Initialize object with input data
Parameters
-------
nr: integer
length of generated time-series
must be power of two
qd: float
discrete variance
b: float
noise type:
0 : White Phase Modulation (WPM)
-1 : Flicker Phase Modulation (FPM)
-2 : White Frequency Modulation (WFM)
-3 : Flicker Frequency Modulation (FFM)
-4 : Random Walk Frequency Modulation (RWFM)
Returns
-------
Noise()
A Noise() instance
"""
self.nr = nr
self.qd = qd
self.b = b
self.time_series = np.array([])
def set_input(self, nr=2, qd=1, b=0):
""" Set inputs after initialization
Parameters
-------
nr: integer
length of generated time-series
number must be power of two
qd: float
discrete variance
b: float
noise type:
0 : White Phase Modulation (WPM)
-1 : Flicker Phase Modulation (FPM)
-2 : White Frequency Modulation (WFM)
-3 : Flicker Frequency Modulation (FFM)
-4 : Random Walk Frequency Modulation (RWFM)
"""
self.nr = nr
self.qd = qd
self.b = b
def generateNoise(self):
""" Generate noise time series based on input parameters
Returns
-------
time_series: np.array
Time series with colored noise.
len(time_series) == nr
"""
# Fill wfb array with white noise based on given discrete variance
wfb = np.zeros(self.nr*2)
wfb[:self.nr] = np.random.normal(0, np.sqrt(self.qd), self.nr)
# Generate the hfb coefficients based on the noise type
mhb = -self.b/2.0
        hfb = np.zeros(self.nr*2)
hfb[0] = 1.0
indices = np.arange(self.nr-1)
hfb[1:self.nr] = (mhb+indices)/(indices+1.0)
hfb[:self.nr] = np.multiply.accumulate(hfb[:self.nr])
# Perform discrete Fourier transform of wfb and hfb time series
wfb_fft = np.fft.rfft(wfb)
hfb_fft = np.fft.rfft(hfb)
# Perform inverse Fourier transform of the product of wfb and hfb FFTs
time_series = np.fft.irfft(wfb_fft*hfb_fft)[:self.nr]
self.time_series = time_series
def phase_psd_from_qd(self, tau0=1.0):
""" return phase power spectral density coefficient g_b
for noise-type defined by (qd, b, tau0)
where tau0 is the interval between data points
Colored noise generated with (qd, b, tau0) parameters will
show a phase power spectral density of
S_x(f) = Phase_PSD(f) = g_b * f^b
Kasdin & Walter eqn (39)
"""
return self.qd*2.0*pow(2.0*np.pi, self.b)*pow(tau0, self.b+1.0)
def frequency_psd_from_qd(self, tau0=1.0):
""" return frequency power spectral density coefficient h_a
for the noise type defined by (qd, b, tau0)
Colored noise generated with (qd, b, tau0) parameters will
show a frequency power spectral density of
S_y(f) = Frequency_PSD(f) = h_a * f^a
where the slope a comes from the phase PSD slope b:
a = b + 2
Kasdin & Walter eqn (39)
"""
a = self.b + 2.0
return self.qd*2.0*pow(2.0*np.pi, a)*pow(tau0, a-1.0)
def adev(self, tau0, tau):
""" return predicted ADEV of noise-type at given tau
"""
prefactor = self.adev_from_qd(tau0=tau0, tau=tau)
c = self.c_avar()
avar = pow(prefactor, 2)*pow(tau, c)
return np.sqrt(avar)
def mdev(self, tau0, tau):
""" return predicted MDEV of noise-type at given tau
"""
prefactor = self.mdev_from_qd(tau0=tau0, tau=tau)
c = self.c_mvar()
mvar = pow(prefactor, 2)*pow(tau, c)
return np.sqrt(mvar)
def c_avar(self):
""" return tau exponent "c" for noise type.
AVAR = prefactor * h_a * tau^c
"""
if self.b == -4:
return 1.0
elif self.b == -3:
return 0.0
elif self.b == -2:
return -1.0
elif self.b == -1:
return -2.0
elif self.b == 0:
return -2.0
def c_mvar(self):
""" return tau exponent "c" for noise type.
MVAR = prefactor * h_a * tau^c
"""
if self.b == -4:
return 1.0
elif self.b == -3:
return 0.0
elif self.b == -2:
return -1.0
elif self.b == -1:
return -2.0
elif self.b == 0:
return -3.0
def adev_from_qd(self, tau0=1.0, tau=1.0):
""" prefactor for Allan deviation for noise
type defined by (qd, b, tau0)
Colored noise generated with (qd, b, tau0) parameters will
show an Allan variance of:
AVAR = prefactor * h_a * tau^c
where a = b + 2 is the slope of the frequency PSD.
and h_a is the frequency PSD prefactor S_y(f) = h_a * f^a
The relation between a, b, c is:
a b c(AVAR) c(MVAR)
-----------------------
-2 -4 1 1
-1 -3 0 0
0 -2 -1 -1
+1 -1 -2 -2
+2 0 -2 -3
Coefficients from:
S. T. Dawkins, J. J. McFerran and A. N. Luiten, "Considerations on
the measurement of the stability of oscillators with frequency
counters," in IEEE Transactions on Ultrasonics, Ferroelectrics, and
Frequency Control, vol. 54, no. 5, pp. 918-925, May 2007.
doi: 10.1109/TUFFC.2007.337
"""
g_b = self.phase_psd_from_qd(tau0)
f_h = 0.5/tau0
if self.b == 0:
coeff = 3.0*f_h / (4.0*pow(np.pi, 2)) # E, White PM, tau^-1
elif self.b == -1:
coeff = (1.038+3*np.log(2.0*np.pi*f_h*tau))/(4.0*pow(np.pi, 2))# D, Flicker PM, tau^-1
elif self.b == -2:
coeff = 0.5 # C, white FM, 1/sqrt(tau)
elif self.b == -3:
coeff = 2*np.log(2) # B, flicker FM, constant ADEV
elif self.b == -4:
coeff = 2.0*pow(np.pi, 2)/3.0 # A, RW FM, sqrt(tau)
return np.sqrt(coeff*g_b*pow(2.0*np.pi, 2))
def mdev_from_qd(self, tau0=1.0, tau=1.0):
# FIXME: tau is unused here - can we remove it?
""" prefactor for Modified Allan deviation for noise
type defined by (qd, b, tau0)
Colored noise generated with (qd, b, tau0) parameters will
show an Modified Allan variance of:
MVAR = prefactor * h_a * tau^c
where a = b + 2 is the slope of the frequency PSD.
and h_a is the frequency PSD prefactor S_y(f) = h_a * f^a
The relation between a, b, c is:
a b c(AVAR) c(MVAR)
-----------------------
-2 -4 1 1
-1 -3 0 0
0 -2 -1 -1
+1 -1 -2 -2
+2 0 -2 -3
Coefficients from:
S. T. Dawkins, J. J. McFerran and A. N. Luiten, "Considerations on
the measurement of the stability of oscillators with frequency
counters," in IEEE Transactions on Ultrasonics, Ferroelectrics, and
Frequency Control, vol. 54, no. 5, pp. 918-925, May 2007.
doi: 10.1109/TUFFC.2007.337
"""
g_b = self.phase_psd_from_qd(tau0)
#f_h = 0.5/tau0 #unused!?
if self.b == 0:
coeff = 3.0/(8.0*pow(np.pi, 2)) # E, White PM, tau^-{3/2}
elif self.b == -1:
coeff = (24.0*np.log(2)-9.0*np.log(3))/8.0/pow(np.pi, 2) # D, Flicker PM, tau^-1
elif self.b == -2:
coeff = 0.25 # C, white FM, 1/sqrt(tau)
elif self.b == -3:
coeff = 2.0*np.log(3.0*pow(3.0, 11.0/16.0)/4.0) # B, flicker FM, constant MDEV
elif self.b == -4:
coeff = 11.0/20.0*pow(np.pi, 2) # A, RW FM, sqrt(tau)
return np.sqrt(coeff*g_b*pow(2.0*np.pi, 2))
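# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal example of the Kasdin & Walter generator defined above: produce
# flicker-FM noise (b=-3) and query the predicted ADEV at one tau. All
# parameter values below are assumptions chosen only for demonstration.
if __name__ == "__main__":
    _demo = Noise(nr=2**14, qd=1.0e-20, b=-3)   # flicker FM, 2**14 samples
    _demo.generateNoise()
    print(len(_demo.time_series))               # == nr
    print(_demo.adev(tau0=1.0, tau=16.0))       # flicker FM: ADEV is tau-independent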
# end of file noise_kasdin.py
|
AllanTools
|
/AllanTools-2019.9.tar.gz/AllanTools-2019.9/allantools/noise_kasdin.py
|
noise_kasdin.py
|
import numpy as np
import scipy.stats # used in confidence_intervals()
import scipy.signal # decimation in lag-1 acf
import scipy.special # erf() for the one-sigma confidence level, binom() in edf_greenhall()
########################################################################
# Confidence Intervals
ONE_SIGMA_CI = scipy.special.erf(1/np.sqrt(2))
# = 0.68268949213708585
def confidence_interval(dev, edf, ci=ONE_SIGMA_CI):
""" returns confidence interval (dev_min, dev_max)
for a given deviation dev, equivalent degrees of freedom edf,
and degree of confidence ci.
Parameters
----------
dev: float
Mean value (e.g. adev) around which we produce the confidence interval
edf: float
Equivalent degrees of freedom
ci: float, defaults to scipy.special.erf(1/math.sqrt(2))
for 1-sigma standard error set
ci = scipy.special.erf(1/math.sqrt(2))
= 0.68268949213708585
Returns
-------
(dev_min, dev_max): (float, float)
Confidence interval
"""
ci_l = min(np.abs(ci), np.abs((ci-1))) / 2
ci_h = 1 - ci_l
# function from scipy, works OK, but scipy is large and slow to build
chi2_l = scipy.stats.chi2.ppf(ci_l, edf)
chi2_h = scipy.stats.chi2.ppf(ci_h, edf)
variance = dev*dev
var_l = float(edf) * variance / chi2_h # NIST SP1065 eqn (45)
var_h = float(edf) * variance / chi2_l
return (np.sqrt(var_l), np.sqrt(var_h))
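# Usage sketch (illustrative helper, not part of the original ci.py): bracket a
# sample ADEV estimate with the chi-squared interval computed above. The dev
# and edf values are assumptions chosen only for demonstration.
def _confidence_interval_example(dev=1e-11, edf=25.0):
    """Return (dev_min, dev_max) enclosing dev for the given EDF."""
    return confidence_interval(dev, edf, ci=ONE_SIGMA_CI)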
def confidence_interval_noiseID(x, dev, af, dev_type="adev", data_type="phase", ci=ONE_SIGMA_CI):
""" returns confidence interval (dev_min, dev_max)
for a given deviation dev = Xdev( x, tau = af*(1/rate) )
steps:
1) identify noise type
2) compute EDF
3) compute confidence interval
Parameters
----------
x: numpy.array
time-series
dev: float
Mean value (e.g. adev) around which we produce the confidence interval
af: int
averaging factor
dev_type: string
adev, oadev, mdev, tdev, hdev, ohdev
data_type:
"phase" or "freq"
ci: float, defaults to scipy.special.erf(1/math.sqrt(2))
for 1-sigma standard error set
ci = scipy.special.erf(1/math.sqrt(2))
= 0.68268949213708585
Returns
-------
(dev_min, dev_max): (float, float)
Confidence interval
"""
# 1) noise ID
dmax = 2
if (dev_type == "hdev") or (dev_type == "ohdev"):
dmax = 3
alpha_int = autocorr_noise_id(x, int(af), data_type=data_type, dmin=0, dmax=dmax)[0]
# 2) EDF
if dev_type is "adev":
edf = edf_greenhall(alpha=alpha_int, d=2, m=af, N=len(x),
overlapping=False, modified=False)
elif dev_type is "oadev":
edf = edf_greenhall(alpha=alpha_int, d=2, m=af, N=len(x),
overlapping=True, modified=False)
elif (dev_type is "mdev") or (dev_type is "tdev"):
edf = edf_greenhall(alpha=alpha_int, d=2, m=af, N=len(x),
overlapping=True, modified=True)
elif dev_type is "hdev":
edf = edf_greenhall(alpha=alpha_int, d=3, m=af, N=len(x),
overlapping=False, modified=False)
elif dev_type is "ohdev":
edf = edf_greenhall(alpha=alpha_int, d=3, m=af, N=len(x),
overlapping=True, modified=False)
else:
raise NotImplementedError
# 3) confidence interval
(low, high) = confidence_interval(dev, edf, ci)
return (low, high)
########################################################################
# Noise Identification using R(n)
def rn(x, af, rate):
""" R(n) ratio for noise identification
ratio of MVAR to AVAR
"""
# adev() and mdev() are the allantools deviation functions (same convention as b1() below)
(taus, devs, errs, ns) = adev(x, taus=[af*rate], data_type='phase', rate=rate)
adev_x = devs[0]
(mtaus, mdevs, errs, ns) = mdev(x, taus=[af*rate], data_type='phase', rate=rate)
mdev_x = mdevs[0]
return pow(mdev_x/adev_x, 2)
def rn_theory(af, b):
""" R(n) ratio expected from theory for given noise type
alpha = b + 2
"""
# From IEEE1139-2008
# alpha beta ADEV_mu MDEV_mu Rn_mu
# -2 -4 1 1 0 Random Walk FM
# -1 -3 0 0 0 Flicker FM
# 0 -2 -1 -1 0 White FM
# 1 -1 -2 -2 0 Flicker PM
# 2 0 -2 -3 -1 White PM
# (a=-3 flicker walk FM)
# (a=-4 random run FM)
if b == 0:
return pow(af, -1)
elif b == -1:
# f_h = 0.5/tau0 (assumed!)
# af = tau/tau0
# so f_h*tau = 0.5/tau0 * af*tau0 = 0.5*af
avar = (1.038+3*np.log(2*np.pi*0.5*af)) / (4.0*pow(np.pi, 2))
mvar = 3*np.log(256.0/27.0)/(8.0*pow(np.pi, 2))
return mvar/avar
else:
return pow(af, 0)
def rn_boundary(af, b_hi):
"""
R(n) ratio boundary for selecting between [b_hi-1, b_hi]
alpha = b + 2
"""
return np.sqrt(rn_theory(af, b_hi)*rn_theory(af, b_hi-1)) # geometric mean
########################################################################
# Noise Identification using B1
def b1(x, af, rate):
""" B1 ratio for noise identification
(and bias correction?)
ratio of Standard Variance to AVAR
Howe, Beard, Greenhall, Riley,
A TOTAL ESTIMATOR OF THE HADAMARD FUNCTION USED FOR GPS OPERATIONS
32nd PTTI, 2000
https://apps.dtic.mil/dtic/tr/fulltext/u2/a484835.pdf
Barnes, 1974
https://tf.nist.gov/general/pdf/11.pdf
"""
(taus, devs, errs, ns) = adev(x, taus=[af*rate], data_type="phase", rate=rate)
oadev_x = devs[0]
avar = pow(oadev_x, 2.0)
# variance of y, at given af
y = np.diff(x)
y_cut = np.array(y[:len(y)-(len(y)%af)]) # cut to length
assert len(y_cut)%af == 0
y_shaped = y_cut.reshape((int(len(y_cut)/af), af))
y_averaged = np.average(y_shaped, axis=1) # average
var = np.var(y_averaged, ddof=1)
return var/avar
def b1_theory(N, mu):
""" Expected B1 ratio for given time-series length N and exponent mu
FIXME: add reference (paper & link)
The exponents are defined as
S_y(f) = h_a f^alpha (power spectrum of y)
S_x(f) = g_b f^b (power spectrum of x)
bias = const * tau^mu
and (b, alpha, mu) relate to each other by:
b alpha mu
0 +2 -2
-1 +1 -2 resolve between -2 cases with R(n)
-2 0 -1
-3 -1 0
-4 -2 +1
-5 -3 +2
-6 -4 +3 for HDEV, by applying B1 to frequency data, and add +2 to resulting mu
"""
# see Table 3 of Howe 2000
if mu == 2:
return float(N)*(float(N)+1.0)/6.0
elif mu == 1:
return float(N)/2.0
elif mu == 0:
return N*np.log(N)/(2.0*(N-1.0)*np.log(2))
elif mu == -1:
return 1
elif mu == -2:
return (pow(N, 2)-1.0)/(1.5*N*(N-1.0))
else:
up = N*(1.0-pow(N, mu))
down = 2*(N-1.0)*(1-pow(2.0, mu))
return up/down
assert False # we should never get here
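# Usage sketch (illustrative, not part of the original file): the expected B1
# ratio for white FM noise (mu = -1) is 1, per Table 3 of Howe 2000 cited above.
def _b1_theory_example():
    return b1_theory(N=1024, mu=-1)   # == 1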
def b1_boundary(b_hi, N):
"""
B1 ratio boundary for selecting between [b_hi-1, b_hi]
alpha = b + 2
"""
b_lo = b_hi-1
b1_lo = b1_theory(N, b_to_mu(b_lo))
b1_hi = b1_theory(N, b_to_mu(b_hi))
if b1_lo >= -4:
return np.sqrt(b1_lo*b1_hi) # geometric mean
else:
return 0.5*(b1_lo+b1_hi) # arithmetic mean
def b_to_mu(b):
"""
return mu, parameter needed for B1 ratio function b1()
alpha = b + 2
"""
a = b + 2
if a == +2:
return -2
elif a == +1:
return -2
elif a == 0:
return -1
elif a == -1:
return 0
elif a == -2:
return 1
elif a == -3:
return 2
elif a == -4:
return 3
assert False
########################################################################
# Noise Identification using ACF
def lag1_acf(x, detrend_deg=1):
""" Lag-1 autocorrelation function
as defined in Riley 2004, Eqn (2)
used by autocorr_noise_id()
Parameters
----------
x: numpy.array
time-series
Returns
-------
ACF: float
Lag-1 autocorrelation for input time-series x
Notes
-----
* a faster algorithm based on FFT might be better!?
* numpy.corrcoeff() gives similar but not identical results.
#c = np.corrcoef( np.array(x[:-lag]), np.array(x[lag:]) )
#r1 = c[0,1] # lag-1 autocorrelation of x
"""
mu = np.mean(x)
a = 0
b = 0
for n in range(len(x)-1):
a = a + (x[n]-mu)*(x[n+1]-mu)
#for n in range(len(x)):
for xn in x:
b = b+pow(xn-mu, 2)
return a/b
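# Usage sketch (illustrative, not part of the original file): lag-1
# autocorrelation of a synthetic uncorrelated series; the series length is an
# arbitrary assumption chosen only for demonstration.
def _lag1_acf_example():
    x = np.random.normal(0.0, 1.0, 256)   # white noise samples
    return lag1_acf(x)                     # expected to be close to 0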
def autocorr_noise_id(x, af, data_type="phase", dmin=0, dmax=2):
""" Lag-1 autocorrelation based noise identification
Parameters
----------
x: numpy.array
phase or fractional frequency time-series data
minimum recommended length is len(x)>30 roughly.
af: int
averaging factor
data_type: string {'phase', 'freq'}
"phase" for phase data in seconds
"freq" for fractional frequency data
dmin: int
minimum required number of differentiations in the algorithm
dmax: int
maximum number of differentiations
defaults to 2 for ADEV
set to 3 for HDEV
Returns
-------
alpha_int: int
noise-slope as integer
alpha: float
noise-slope as float
d: int
number of differentiations of the time-series performed
Notes
-----
http://www.stable32.com/Auto.pdf
http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.503.9864&rep=rep1&type=pdf
Power law noise identification using the lag 1 autocorrelation
Riley,W.J. et al.
18th European Frequency and Time Forum (EFTF 2004)
https://ieeexplore.ieee.org/document/5075021
"""
d = 0 # number of differentiations
lag = 1
if data_type is "phase":
if af > 1:
#x = scipy.signal.decimate(x, af, n=1, ftype='fir')
x = x[0:len(x):af] # decimate by averaging factor
x = detrend(x, deg=2) # remove quadratic trend (frequency offset and drift)
elif data_type is "freq":
# average by averaging factor
y_cut = np.array(x[:len(x)-(len(x)%af)]) # cut to length
assert len(y_cut)%af == 0
y_shaped = y_cut.reshape((int(len(y_cut)/af), af))
x = np.average(y_shaped, axis=1) # average
x = detrend(x, deg=1) # remove frequency drift
# require minimum length for time-series
if len(x) < 30:
print("autocorr_noise_id() Don't know how to do noise-ID for time-series length= %d"%len(x))
raise NotImplementedError
while True:
r1 = lag1_acf(x)
rho = r1/(1.0+r1)
if d >= dmin and (rho < 0.25 or d >= dmax):
p = -2*(rho+d)
#print r1
#assert r1 < 0
#assert r1 > -1.0/2.0
phase_add2 = 0
if data_type is "phase":
phase_add2 = 2
alpha = p+phase_add2
alpha_int = int(-1.0*np.round(2*rho) - 2.0*d) + phase_add2
#print "d=",d,"alpha=",p+2
return alpha_int, alpha, d, rho
else:
x = np.diff(x)
d = d + 1
assert False # we should not get here ever.
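# Usage sketch (illustrative, not part of the original file): identify the
# noise slope of a synthetic white-phase time-series. Length and parameters
# are assumptions chosen only for demonstration.
def _autocorr_noise_id_example():
    x = np.random.normal(0.0, 1.0, 1024)   # white PM phase data, alpha = +2
    alpha_int, alpha, d, rho = autocorr_noise_id(x, af=1, data_type="phase")
    return alpha_int                         # expected to be close to +2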
def detrend(x, deg=1):
"""
remove polynomial from data.
used by autocorr_noise_id()
Parameters
----------
x: numpy.array
time-series
deg: int
degree of polynomial to remove from x
Returns
-------
x_detrended: numpy.array
detrended time-series
"""
t = range(len(x))
p = np.polyfit(t, x, deg)
residual = x - np.polyval(p, t)
return residual
########################################################################
# Equivalent Degrees of Freedom
def edf_greenhall_simple(alpha, d, m, S, F, N):
""" Eqn (13) from Greenhall2004 """
L = m/F+m*d # length of filter applied to phase samples
M = 1 + np.floor(S*(N-L) / m)
J = min(M, (d+1)*S)
inv_edf = (1.0/(pow(greenhall_sz(0, F, alpha, d), 2)*M))* \
greenhall_BasicSum(J, M, S, F, alpha, d)
return 1.0/inv_edf
def edf_greenhall(alpha, d, m, N, overlapping=False, modified=False, verbose=False):
""" returns Equivalent degrees of freedom
Parameters
----------
alpha: int
noise type, +2...-4
d: int
1 first-difference variance
2 Allan variance
3 Hadamard variance
require alpha+2*d>1
m: int
averaging factor
tau = m*tau0 = m*(1/rate)
N: int
number of phase observations (length of time-series)
overlapping: bool
True for oadev, ohdev
modified: bool
True for mdev, tdev
Returns
-------
edf: float
Equivalent degrees of freedom
Greenhall, Riley, 2004
https://ntrs.nasa.gov/archive/nasa/casi.ntrs.nasa.gov/20050061319.pdf
UNCERTAINTY OF STABILITY VARIANCES BASED ON FINITE DIFFERENCES
Notes
-----
Used for the following deviations (see http://www.wriley.com/CI2.pdf page 8)
adev()
oadev()
mdev()
tdev()
hdev()
ohdev()
"""
if modified:
F = 1 # F filter factor, 1 modified variance, m unmodified variance
else:
F = int(m)
if overlapping: # S stride factor, 1 nonoverlapped estimator,
S = int(m) # m overlapped estimator (estimator stride = tau/S )
else:
S = 1
assert(alpha+2*d > 1.0)
L = m/F+m*d # length of filter applied to phase samples
M = 1 + np.floor(S*(N-L) / m)
J = min(M, (d+1)*S)
J_max = 100
r = M/S
if int(F) == 1 and modified: # case 1, modified variances, all alpha
if J <= J_max:
inv_edf = (1.0/(pow(greenhall_sz(0, 1, alpha, d), 2)*M))* \
greenhall_BasicSum(J, M, S, 1, alpha, d)
if verbose:
print("case 1.1 edf= %3f" % float(1.0/inv_edf))
return 1.0/inv_edf
elif r > d+1:
(a0, a1) = greenhall_table1(alpha, d)
inv_edf = (1.0/r)*(a0-a1/r)
if verbose:
print("case 1.2 edf= %3f" % float(1.0/inv_edf))
return 1.0/inv_edf
else:
m_prime = J_max/r
inv_edf = (1.0/(pow(greenhall_sz(0, F, alpha, d), 2)*J_max))* \
greenhall_BasicSum(J_max, J_max, m_prime, 1, alpha, d)
if verbose:
print("case 1.3 edf= %3f" % float(1.0/inv_edf))
return 1.0/inv_edf
elif int(F) == int(m) and int(alpha) <= 0 and not modified:
# case 2, unmodified variances, alpha <= 0
if J <= J_max:
if m*(d+1) <= J_max:
m_prime = m
variant = "a"
else:
m_prime = float('inf')
variant = "b"
inv_edf = (1.0/(pow(greenhall_sz(0, m_prime, alpha, d), 2)*M))* \
greenhall_BasicSum(J, M, S, m_prime, alpha, d)
if verbose:
print("case 2.1%s edf= %3f" % (variant, float(1.0/inv_edf)))
return 1.0/inv_edf
elif r > d+1:
(a0, a1) = greenhall_table2(alpha, d)
inv_edf = (1.0/r)*(a0-a1/r)
if verbose:
print("case 2.2 edf= %3f" % float(1.0/inv_edf))
return 1.0/inv_edf
else:
m_prime = J_max/r
inv_edf = (1.0/(pow(greenhall_sz(0, float('inf'), alpha, d), 2)*J_max))* \
greenhall_BasicSum(J_max, J_max, m_prime, float('inf'), alpha, d)
if verbose:
print("case 2.3 edf= %3f" % float(1.0/inv_edf))
return 1.0/inv_edf
elif int(F) == int(m) and int(alpha) == 1 and not modified:
# case 3, unmodified variances, alpha=1
if J <= J_max:
inv_edf = (1.0/(pow(greenhall_sz(0, m, 1, d), 2)*M))* \
greenhall_BasicSum(J, M, S, m, 1, d) # note: m<1e6 to avoid roundoff
if verbose:
print("case 3.1 edf= %3f" % float(1.0/inv_edf))
return 1.0/inv_edf
elif r > d+1:
(a0, a1) = greenhall_table2(alpha, d)
(b0, b1) = greenhall_table3(alpha, d)
inv_edf = (1.0/(pow(b0+b1*np.log(m), 2)*r))*(a0-a1/r)
if verbose:
print("case 3.2 edf= %3f" % float(1.0/inv_edf))
return 1.0/inv_edf
else:
m_prime = J_max/r
(b0, b1) = greenhall_table3(alpha, d)
inv_edf = (1.0/(pow(b0+b1*np.log(m), 2)*J_max))* \
greenhall_BasicSum(J_max, J_max, m_prime, m_prime, 1, d)
if verbose:
print("case 3.3 edf= %3f" % float(1.0/inv_edf))
return 1.0/inv_edf
elif int(F) == int(m) and int(alpha) == 2 and not modified:
# case 4, unmodified variances, alpha=2
K = np.ceil(r)
if K <= d:
raise NotImplementedError # FIXME: add formula from the paper here!
else:
a0 = scipy.special.binom(4*d, 2*d) / pow(scipy.special.binom(2*d, d), 2)
a1 = d/2.0
inv_edf = (1.0/M)*(a0-a1/r)
if verbose:
print("case 4.2 edf= %3f" % float(1.0/inv_edf))
return 1.0/inv_edf
print("greenhall_edf() no matching case!")
raise NotImplementedError
#assert(0) # ERROR
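# Usage sketch (illustrative, not part of the original file): EDF of an
# overlapping ADEV estimate at averaging factor m=8 on N=1024 phase points of
# white FM noise (alpha=0). The numbers are assumptions for demonstration.
def _edf_greenhall_example():
    return edf_greenhall(alpha=0, d=2, m=8, N=1024,
                         overlapping=True, modified=False)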
def greenhall_BasicSum(J, M, S, F, alpha, d):
""" Eqn (10) from Greenhall2004 """
first = pow(greenhall_sz(0, F, alpha, d), 2)
second = (1-float(J)/float(M))*pow(greenhall_sz(float(J)/float(S), F, alpha, d), 2)
third = 0
for j in range(1, int(J)):
third += 2*(1.0-float(j)/float(M))*pow(greenhall_sz(float(j)/float(S), F, alpha, d), 2)
return first+second+third
def greenhall_sz(t, F, alpha, d):
""" Eqn (9) from Greenhall2004 """
if d == 1:
a = 2*greenhall_sx(t, F, alpha)
b = greenhall_sx(t-1.0, F, alpha)
c = greenhall_sx(t+1.0, F, alpha)
return a-b-c
elif d == 2:
a = 6*greenhall_sx(t, F, alpha)
b = 4*greenhall_sx(t-1.0, F, alpha)
c = 4*greenhall_sx(t+1.0, F, alpha)
dd = greenhall_sx(t-2.0, F, alpha)
e = greenhall_sx(t+2.0, F, alpha)
return a-b-c+dd+e
elif d == 3:
a = 20.0*greenhall_sx(t, F, alpha)
b = 15.0*greenhall_sx(t-1.0, F, alpha)
c = 15.0*greenhall_sx(t+1.0, F, alpha)
dd = 6.0*greenhall_sx(t-2.0, F, alpha)
e = 6.0*greenhall_sx(t+2.0, F, alpha)
f = greenhall_sx(t-3.0, F, alpha)
g = greenhall_sx(t+3.0, F, alpha)
return a-b-c+dd+e-f-g
assert(0) # ERROR
def greenhall_sx(t, F, alpha):
""" Eqn (8) from Greenhall2004
"""
if F == float('inf'):
return greenhall_sw(t, alpha+2)
a = 2*greenhall_sw(t, alpha)
b = greenhall_sw(t-1.0/float(F), alpha)
c = greenhall_sw(t+1.0/float(F), alpha)
return pow(F, 2)*(a-b-c)
def greenhall_sw(t, alpha):
""" Eqn (7) from Greenhall2004
"""
alpha = int(alpha)
if alpha == 2:
return -np.abs(t)
elif alpha == 1:
if t == 0:
return 0
else:
return pow(t, 2)*np.log(np.abs(t))
elif alpha == 0:
return np.abs(pow(t, 3))
elif alpha == -1:
if t == 0:
return 0
else:
return pow(t, 4)*np.log(np.abs(t))
elif alpha == -2:
return np.abs(pow(t, 5))
elif alpha == -3:
if t == 0:
return 0
else:
return pow(t, 6)*np.log(np.abs(t))
elif alpha == -4:
return np.abs(pow(t, 7))
assert(0) # ERROR
def greenhall_table3(alpha, d):
""" Table 3 from Greenhall 2004 """
assert(alpha == 1)
idx = d-1
table3 = [(6.0, 4.0), (15.23, 12.0), (47.8, 40.0)]
return table3[idx]
def greenhall_table2(alpha, d):
""" Table 2 from Greenhall 2004 """
row_idx = int(-alpha+2) # map 2-> row0 and -4-> row6
assert(row_idx in [0, 1, 2, 3, 4, 5])
col_idx = int(d-1)
table2 = [[(3.0/2.0, 1.0/2.0), (35.0/18.0, 1.0), (231.0/100.0, 3.0/2.0)], # alpha=+2
[(78.6, 25.2), (790.0, 410.0), (9950.0, 6520.0)],
[(2.0/3.0, 1.0/6.0), (2.0/3.0, 1.0/3.0), (7.0/9.0, 1.0/2.0)], # alpha=0
[(-1, -1), (0.852, 0.375), (0.997, 0.617)], # -1
[(-1, -1), (1.079, 0.368), (1.033, 0.607)], #-2
[(-1, -1), (-1, -1), (1.053, 0.553)], #-3
[(-1, -1), (-1, -1), (1.302, 0.535)], # alpha=-4
]
#print("table2 = ", table2[row_idx][col_idx])
return table2[row_idx][col_idx]
def greenhall_table1(alpha, d):
""" Table 1 from Greenhall 2004 """
row_idx = int(-alpha+2) # map 2-> row0 and -4-> row6
col_idx = int(d-1)
table1 = [[(2.0/3.0, 1.0/3.0), (7.0/9.0, 1.0/2.0), (22.0/25.0, 2.0/3.0)], # alpha=+2
[(0.840, 0.345), (0.997, 0.616), (1.141, 0.843)],
[(1.079, 0.368), (1.033, 0.607), (1.184, 0.848)],
[(-1, -1), (1.048, 0.534), (1.180, 0.816)], # -1
[(-1, -1), (1.302, 0.535), (1.175, 0.777)], #-2
[(-1, -1), (-1, -1), (1.194, 0.703)], #-3
[(-1, -1), (-1, -1), (1.489, 0.702)], # alpha=-4
]
#print("table1 = ", table1[row_idx][col_idx])
return table1[row_idx][col_idx]
def edf_totdev(N, m, alpha):
""" Equivalent degrees of freedom for Total Deviation
FIXME: what is the right behavior for alpha outside 0,-1,-2?
NIST SP1065 page 41, Table 7
"""
alpha = int(alpha)
if alpha in [0, -1, -2]:
# alpha 0 WFM
# alpha -1 FFM
# alpha -2 RWFM
NIST_SP1065_table7 = [(1.50, 0.0), (1.17, 0.22), (0.93, 0.36)]
(b, c) = NIST_SP1065_table7[int(abs(alpha))]
return b*(float(N)/float(m))-c
# alpha outside 0, -1, -2:
return edf_simple(N, m, alpha)
def edf_mtotdev(N, m, alpha):
""" Equivalent degrees of freedom for Modified Total Deviation
NIST SP1065 page 41, Table 8
"""
assert(alpha in [2, 1, 0, -1, -2])
NIST_SP1065_table8 = [(1.90, 2.1), (1.20, 1.40), (1.10, 1.2), (0.85, 0.50), (0.75, 0.31)]
#(b, c) = NIST_SP1065_table8[ abs(alpha-2) ]
(b, c) = NIST_SP1065_table8[abs(alpha-2)]
edf = b*(float(N)/float(m))-c
print("mtotdev b,c= ", (b, c), " edf=", edf)
return edf
def edf_simple(N, m, alpha):
"""Equivalent degrees of freedom.
Simple approximate formulae.
Parameters
----------
N : int
the number of phase samples
m : int
averaging factor, tau = m * tau0
alpha: int
exponent of f for the frequency PSD:
'wp' returns white phase noise. alpha=+2
'wf' returns white frequency noise. alpha= 0
'fp' returns flicker phase noise. alpha=+1
'ff' returns flicker frequency noise. alpha=-1
'rf' returns random walk frequency noise. alpha=-2
If the input is not recognized, it defaults to idealized, uncorrelated
noise with (N-1) degrees of freedom.
Notes
-----
S. Stein, Frequency and Time - Their Measurement and
Characterization. Precision Frequency Control Vol 2, 1985, pp 191-416.
http://tf.boulder.nist.gov/general/pdf/666.pdf
Returns
-------
edf : float
Equivalent degrees of freedom
"""
N = float(N)
m = float(m)
if alpha in [2, 1, 0, -1, -2]:
# NIST SP 1065, Table 5
if alpha == +2:
edf = (N + 1) * (N - 2*m) / (2 * (N - m))
if alpha == 0:
edf = (((3 * (N - 1) / (2 * m)) - (2 * (N - 2) / N)) *
((4*pow(m, 2)) / ((4*pow(m, 2)) + 5)))
if alpha == 1:
a = (N - 1)/(2 * m)
b = (2 * m + 1) * (N - 1) / 4
edf = np.exp(np.sqrt(np.log(a) * np.log(b)))
if alpha == -1:
if m == 1:
edf = 2 * (N - 2) /(2.3 * N - 4.9)
if m >= 2:
edf = 5 * N**2 / (4 * m * (N + (3 * m)))
if alpha == -2:
a = (N - 2) / (m * (N - 3)**2)
b = (N - 1)**2
c = 3 * m * (N - 1)
d = 4 * m **2
edf = a * (b - c + d)
else:
edf = (N - 1)
print("Noise type not recognized. Defaulting to N - 1 degrees of freedom.")
return edf
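# Usage sketch (illustrative, not part of the original file): Stein's simple
# EDF approximation for white FM noise (alpha=0) with assumed N and m.
def _edf_simple_example():
    return edf_simple(N=1024, m=8, alpha=0)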
########################################################################
# end of ci.py
|
AllanTools
|
/AllanTools-2019.9.tar.gz/AllanTools-2019.9/allantools/ci.py
|
ci.py
|
__all__ = [
'__version__',
'adev',
'oadev',
'mdev',
'hdev',
'ohdev',
'calc_hdev_phase',
'tdev',
'totdev',
'mtotdev',
'calc_mtotdev_phase',
'ttotdev',
'htotdev',
'calc_htotdev_freq',
'theo1',
'mtie',
'mtie_phase_fast',
'tierms',
'frequency2phase',
'phase2frequency',
'phase2radians',
'frequency2fractional',
'three_cornered_hat_phase',
'noise',
'gradev',
'trim_data',
'edf_simple',
'edf_greenhall',
'edf_totdev',
'edf_mtotdev',
'confidence_interval',
'confidence_interval_noiseID',
'autocorr_noise_id',
'uncertainty_estimate',
'Dataset',
'Noise',
'Plot'
]
from .allantools import __version__
from .allantools import frequency2phase
from .allantools import phase2frequency
from .allantools import phase2radians
from .allantools import frequency2fractional
from .allantools import three_cornered_hat_phase
from .allantools import adev
from .allantools import oadev
from .allantools import mdev
from .allantools import hdev
from .allantools import ohdev
from .allantools import calc_hdev_phase
from .allantools import tdev
from .allantools import totdev
from .allantools import ttotdev
from .allantools import mtotdev
from .allantools import calc_mtotdev_phase
from .allantools import htotdev
from .allantools import calc_htotdev_freq
from .allantools import theo1
from .allantools import mtie
from .allantools import mtie_phase_fast
from .allantools import tierms
from .allantools import gradev
from .allantools import trim_data
# ci.py contains functions for confidence intervals
from .ci import edf_simple
from .ci import edf_greenhall
from .ci import edf_totdev
from .ci import edf_mtotdev
from .ci import confidence_interval
from .ci import autocorr_noise_id
from .ci import confidence_interval_noiseID
# noise generation
from . import noise
from .dataset import Dataset
from .plot import Plot
from .noise_kasdin import Noise
# realtime statistics
from .realtime import oadev_realtime
from .realtime import ohdev_realtime
from .realtime import tdev_realtime
# end of file __init__.py
|
AllanTools
|
/AllanTools-2019.9.tar.gz/AllanTools-2019.9/allantools/__init__.py
|
__init__.py
|
.. contents::
Introduction
============
Let's say that you want to access a slow streaming site to see something (obviously: something not
protected by copyright).
The streaming site uses URLs in this format:
http://legal-streaming-site.org/program-name/season5/episode4/
Every page contains some HTML code like the following::
....
<div id="video-container">
...
<embed src="http://someotherurl.org/qwerty.flv" ...
...
<div>
...
Let's say this is the URL for episode 4 of the fifth season of your program.
You know that this program has 6 seasons with 22 episodes each.
As said before: this site is very slow, so you prefer downloading episodes in the background
and watching them later.
To download them you need to inspect the HTML inside the page and grab some resources
(commonly: an FLV file).
The best approach would be to download *all* episodes in a single (long-running) operation instead of
doing it manually.
**Allanon** will help you exactly in such tasks.
You simply need to provide it:
* a simple URL or a *dynamic URL pattern*
* a *query selector* for resources inside the page
Quick example (you can keep it single lined)::
$ allanon --search="#movie-container embed" \
> "http://legal-streaming-site.org/program-name/season{1:6}/episode{1:22}"
Documentation
=============
Installation
------------
You can use `distribute`__ or `pip`__ to install the utility in your Python environment.
__ http://pypi.python.org/pypi/distribute
__ http://pypi.python.org/pypi/pip
::
$ easy_install Allanon
or alternately::
$ pip install Allanon
Invocation
----------
After installing you will be able to run the ``allanon`` script from the command line.
For example, run the following to access the utility help::
$ allanon --help
Basic usage (you probably don't need Allanon at all for this)
-------------------------------------------------------------
The ``allanon`` script accepts a URL (or a list of URLs) to be downloaded::
$ allanon http://myhost/folder/image1.jpg http://myhost/folder/image2.jpg ...
Every command-line URL given to Allanon can be a simple URL or a *URL model* like the following::
$ allanon "http://myhost/folder/image{1:50}.jpg"
This will crawl 50 different URLs automatically.
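For instance, the model above expands (in order) to::
    http://myhost/folder/image1.jpg
    http://myhost/folder/image2.jpg
    ...
    http://myhost/folder/image50.jpg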
Main usage (things become interesting now)
------------------------------------------
The ``allanon`` script takes an additional ``--search`` parameter (see the first example given
above).
When you provide it, you are meaning:
"*I don't want to download those URLs directly, but those URLs contain links to
files that I really want*".
The search parameter format must be CSS 3 compatible, like the selectors supported by the famous
`jQuery library`__, and it's based on the `pyquery`__ library.
See its documentation for more details about what you can look for.
__ http://api.jquery.com/category/selectors/
__ http://packages.python.org/pyquery/
Extreme usage
-------------
The ``--search`` parameter can be provided multiple times::
$ allanon --search="ul.image-repos a" \
> --search="div.image-containers img" \
> "http://image-repository-sites.org/category{1:30}.html"
When you provide (for example) two different search parameters, you are meaning:
"*I don't want to download resources at given URLs. Those URLs contain links to secondary pages,
and inside those pages there're links to resources I want to download*"
Filters are applied in the given order, so:
* Allanon will search inside 30 pages named *category1.html*, *category2.html*, ...
* inside those pages, Allanon will look for all links inside ``ul`` tags with CSS class
*image-repos* and recursively crawl them.
* inside those pages, Allanon will look for images inside ``div`` with class *image-containers*.
* images will be downloaded.
Potentially you can continue this way, providing a third level of filters, and so on.
Naming and storing downloaded resources
---------------------------------------
By default Allanon downloads all files in the current directory, so filename conflicts
are possible.
You can control how and where files are downloaded, dynamically changing the filename with the
``--filename`` option and/or changing the directory where files are stored with the
``--directory`` option.
An example::
$ allanon --filename="%HOST-%INDEX-section%1-version%3-%FULLNAME" \
> "http://foo.org/pdf-repo-{1:10}/file{1:50}.pdf?version={0:3}"
As you have seen, ``--filename`` accepts some *markers* that can be used to better organize
resources:
``%HOST``
Will be replaced with the hostname used in the URL.
``%INDEX``
A progressive number from 1 to the number of downloaded resources.
``%X``
When using dynamic URL models you can refer to the current number of a URL
section.
In this case "%1" is the current "pdf-repo-*x*" number and "%3" is the "version"
parameter value.
``%FULLNAME``
The original filename (the one used if ``--filename`` is not provided).
You can also use the ``%NAME`` and ``%EXTENSION`` to get only the name of the file
(without extension) or simply the extension.
The ``--directory`` option can be a simple directory name or a directory path (in unix-like
format, for example "``foo/bar/baz``").
An example::
$ allanon --directory="/home/keul/%HOST/%1" \
> "http://foo.org/pdf-repo-{1:10}/file{1:50}.pdf" \
> "http://baz.net/pdf-repo-{1:10}/file{1:50}.pdf"
The ``--directory`` option also supports some of the markers: you can use ``%HOST``, ``%INDEX`` and ``%X``
with the same meanings given above.
TODO
====
This utility is in alpha stage: a lot of things can go wrong while downloading and many features
are missing:
* verbosity controls
* bandwidth control
* multi-thread (let's look at `grequests`__)
* Python 3
__ https://github.com/kennethreitz/grequests
If you find other bugs or want to ask for missing features, use the `product's issue tracker`__.
__ https://github.com/keul/Allanon/issues
|
Allanon
|
/Allanon-0.2.zip/Allanon-0.2/README.rst
|
README.rst
|
import sys
import os.path
import time
from optparse import OptionParser, OptionGroup
from allanon import config
from allanon.url_generator import get_dynamic_urls
from allanon.url_generator import search_resources
from allanon.resouce_grabber import ResourceGrabber
VERSION = open(os.path.join(os.path.dirname(os.path.abspath(__file__)), "version.txt")).read().strip()
DESCRIPTION = """
Crawl a replicable set of URLs, then download resources from them.
URLs can be composed by a variable range(s) like:
http://foo.org/{1:10}/page?section={1:4}
This will make this utility to crawl through a set of URLs like this:
http://foo.org/1/page?section=1
http://foo.org/1/page?section=2
...
http://foo.org/2/page?section=1
...
http://foo.org/10/page?section=4
""".strip()
parser = OptionParser(usage="Usage: %prog [option, ...] url_model [url_model, ...]",
version="%prog " + VERSION,
description=DESCRIPTION,
prog="allanon")
parser.remove_option("--help")
parser.add_option('--help', '-h',
action="store_true", default=False,
help='show this help message and exit')
parser.add_option('--search', '-s', dest="search_queries", default=[], action="append",
metavar="QUERY",
help="Query for other URLs inside every argument URLs and download them instead "
"of the URL itself.\n"
"See the pyquery documentation for more info about the query "
"format (http://packages.python.org/pyquery/).\n"
"Can be provided multiple times to recursively search for links to "
"pages until resources are found (last search filter must always "
"points to the final resource to download).")
parser.add_option('--directory', '-d', dest="destination_directory", default=os.getcwd(),
metavar="TARGET_DIR",
help="Directory where to store all resources that will be downloaded.\n"
"Default is the current directory.\n"
"Can be also a directory path string in nix format (like \"foo/bar\"), "
"in that case all intermediate directories will be created.\n"
"You can use some markers for creating a dynamic name.\n"
"Use %x (%1, %2, ...) to include the current URLs range "
"(if any). Use %1 for the first range in the URL, %2 for "
"the second, and so on.\n"
"Use %HOST for include the original host where the resource has "
"been downloaded.\n"
"Use %INDEX for include a progressive number of downloaded resources.\n"
)
parser.add_option('--filename', '-f', dest="filename_model", default=None, metavar="FILENAME",
help="Download resources with a custom, dynamic, filename.\n"
"You can use some markers for creating a dynamic name.\n"
"Use %x (%1, %2, ...) to include the current URLs range "
"(if any). Use %1 for the first range in the URL, %2 for "
"the second, and so on.\n"
"Use %HOST for include the original host where the resource has "
"been downloaded.\n"
"Use %INDEX for include a progressive number of downloaded resources.\n"
"Use %NAME for include the original filename (without extension).\n"
"Use %EXTENSION for include the original file extensions.\n"
"Use %FULLNAME for include the original filename (with extension).\n"
"Default is \"%FULLNAME\"")
parser.add_option("--check-duplicate", '-c', action="store_true", dest="duplicate_check", default=False,
help="When finding a duplicate filename check they are duplicates. "
"In this case, do not save the new file. Default action is to keep all "
"resources handling filename collision, without checking files content.")
group = OptionGroup(parser, "Request options",
"This set of options control how Allanon connect to remote servers."
)
group.add_option('--user-agent', '-u', dest="user_agent", default=None, metavar="USER_AGENT",
help="Change the User-Agent header sent with every request.\n"
"Default is \"Allanon Crawler %s\"." % VERSION)
group.add_option('--timeout', '-t', dest="timeout", default=60.0, type="float",
help="Number of seconds to wait for server response before giving up.\n"
"Default is 60. Use 0 for disable timeout.")
group.add_option('--sleep-time', dest="sleep", default=1.0, type="float",
help="Number of seconds to wait after each downloaded resource.\n"
"Use this to not overload a server or being banned.\n"
"Default is 1.")
parser.add_option_group(group)
def main(options=None, *args):
if not options:
# invocation from command line
options, args = parser.parse_args()
if len(args)<1 or options.help:
# personal version of the help, to be able to keep \n in the description
result = ['Allanon: a crawler to visit a predictable set of URLs, '
'and download resources from them\n']
result.append(parser.get_usage())
result.append(DESCRIPTION+"\n")
result.append(parser.format_option_help(parser.formatter))
result.append('By Luca Fabbri - luca<at>keul.it\n')
result.append('See https://github.com/keul/Allanon for detailed documentation or '
'provide bug report.')
print "\n".join(result)
sys.exit(0)
if options.user_agent:
config.USER_AGENT = options.user_agent
if options.timeout:
config.TIMEOUT = options.timeout
if options.sleep:
config.SLEEP_TIME = options.sleep
# first, command line URLs sequence
try:
urls = get_dynamic_urls(args)
index_digit_len = 0
# optimization: we don't need to count all the URLs in that case
if options.filename_model and '%INDEX' in options.filename_model:
urls = tuple(urls)
index_digit_len = len(str(len(urls)))
# in case we are not directly downloading, we need to look for inner resources
if options.search_queries:
urls = search_resources(urls, options.search_queries)
for index, urls_data in enumerate(urls):
url, ids, max_ids = urls_data
rg = ResourceGrabber(url)
rg.download(options.destination_directory, options.filename_model, ids, index+1,
ids_digit_len=max_ids,
index_digit_len=index_digit_len,
duplicate_check=options.duplicate_check)
time.sleep(options.sleep)
except KeyboardInterrupt:
print "\nTerminated by user action"
sys.exit(1)
if __name__ == '__main__':
main()
|
Allanon
|
/Allanon-0.2.zip/Allanon-0.2/src/allanon/main.py
|
main.py
|
import re
import hashlib
import tempfile
import sys
import os.path
import traceback
import urllib
from shutil import copyfile
from urlparse import urlparse
import requests
from progress.bar import Bar
from progress.spinner import PieSpinner
from allanon import config
from allanon.html_crawler import search_in_html
CONTENT_DISPOSITION_MODEL = r"""^.*filename\s*="?\s*(?P<filename>.*?)"?;?$"""
cdre = re.compile(CONTENT_DISPOSITION_MODEL, re.IGNORECASE)
DYNA_ID_MODEL = r"""(\%\d+)"""
dynaid_re = re.compile(DYNA_ID_MODEL)
EXTENSION_MODEL = r"""^(?P<name>.+?)(?P<index>_\d+)?(?P<extension>\.[a-zA-Z]{2,4})?$"""
def _int_format(i, ilen):
if not ilen:
return str(i)
return ("%%0%dd" % ilen) % i
def _try_new_filename(filename):
"""
Getting a filename in the form foo_X.ext where X,
it generate a new filename as foo_Y.ext, where Y is X+1
In the case that _X is missing (like foo.ext), Y=1 i used
Extension is optional
"""
match = re.match(EXTENSION_MODEL, filename)
if match:
name, version, extension = match.groups()
if version:
version = "_%d" % (int(version[1:])+1)
else:
version = "_1"
filename = name + version + (extension if extension else '')
return filename
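# Usage sketch (illustrative, not part of the original module): the renaming
# scheme implemented above, shown on two assumed filenames.
def _try_new_filename_example():
    assert _try_new_filename("foo.ext") == "foo_1.ext"
    assert _try_new_filename("foo_1.ext") == "foo_2.ext"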
class ResourceGrabber(object):
def __init__(self, url):
self.url = url
self.url_info = urlparse(url)
self.request = None
self.timeout = config.TIMEOUT
@property
def html(self):
self._open()
return self.request.text if self.request else None
def _open(self):
if self.request is None:
print "Getting %s" % self.url
try:
self.request = requests.get(self.url, headers=config.headers(), stream=True,
timeout=self.timeout)
except requests.exceptions.Timeout:
print "Can't get resource at %s. Request timed out" % self.url
return
if self.request.status_code>=200 and self.request.status_code<300:
print "Done"
else:
print "Can't get resource at %s. HTTP error %d" % (self.url,
self.request.status_code)
def _get_filename(self, filename_model=None, ids=[], index=0,
ids_digit_len=0, index_digit_len=0):
content_disposition = self.request.headers.get('content-disposition', '')
filename_re = cdre.match(content_disposition)
filename = ""
if filename_re:
filename = filename_re.groupdict().get('filename')
else:
path = self.url_info.path
if path and path!='/':
if path.endswith('/'):
path = path[:-1]
filename = path.split('/')[-1]
else:
# let's use hostname
filename = self.url_info.hostname
filename = urllib.unquote(filename)
if filename_model:
filename = self._generate_filename_from_model(filename,
filename_model=filename_model,
ids=ids,
index=index,
ids_digit_len=ids_digit_len,
index_digit_len=index_digit_len)
return filename
def _string_interpolation(self, model, ids=[], index=0,
ids_digit_len=[], index_digit_len=0):
# replace %x with proper ids
cnt = 0
while dynaid_re.search(model):
match = dynaid_re.search(model)
dynaid = match.group()
model = model.replace(dynaid, _int_format(ids[cnt],
ids_digit_len[cnt]), 1)
cnt+=1
# replace %INDEX with the progressive
if model.find("%INDEX")>-1:
model = model.replace("%INDEX", _int_format(index, index_digit_len))
# replace %HOST with current host
if model.find("%HOST")>-1:
model = model.replace("%HOST", self.url_info.hostname)
return model
def _generate_filename_from_model(self, original, filename_model, ids=[], index=0,
ids_digit_len=[], index_digit_len=0):
filename = self._string_interpolation(filename_model, ids, index, ids_digit_len, index_digit_len)
# *** Other interpolation (only file's specific) ***
# replace %NAME with original filename
if filename.find("%NAME")>-1:
filename = filename.replace("%NAME", original[:original.rfind('.')])
# replace %EXTENSION with original extension
if filename.find("%EXTENSION")>-1:
filename = filename.replace("%EXTENSION", original[original.rfind('.')+1:])
# replace %FULLNAME with original filename
if filename.find("%FULLNAME")>-1:
filename = filename.replace("%FULLNAME", original)
return filename
def _create_subdirs(self, directory, ids=[], index=0,
ids_digit_len=[], index_digit_len=0):
"""Given a directory name, or a directory path string in nix format
(e.g: foo/bar), create all intermediate directories.
Return the new (existing) final directory absolute path
"""
directory = self._string_interpolation(directory, ids, index, ids_digit_len, index_digit_len)
if not os.path.exists(directory):
os.makedirs(directory)
return directory
def _get_resource_content(self, file_out, filename):
"""Save data stored in the current request object in a file"""
content_length = self.request.headers.get('content-length', '')
size = int(content_length) if content_length else 0
if size:
progress = Bar("Getting %s" % filename, fill='#', suffix='%(percent)d%%', max=size)
else:
progress = PieSpinner("Getting %s " % filename)
try:
for chunk in self.request.iter_content(config.CHUNK_SIZE):
file_out.write(chunk)
progress.next(config.CHUNK_SIZE if size else 1)
except:
print "Error while getting %s" % self.url
traceback.print_exc(file=sys.stdout)
return None
finally:
progress.finish()
return file_out.name
def download(self, directory, filename_model=None, ids=[], index=0,
ids_digit_len=[], index_digit_len=0, duplicate_check=False):
"""Download a remote resource. Return the new path or None if no resource has been created"""
self._open()
if not self.request:
return
directory = self._create_subdirs(directory, ids=ids, index=index,
ids_digit_len=ids_digit_len,
index_digit_len=index_digit_len)
filename = self._get_filename(filename_model=filename_model, ids=ids, index=index,
ids_digit_len=ids_digit_len,
index_digit_len=index_digit_len)
path = os.path.join(directory, filename)
cache = None
if duplicate_check and os.path.exists(path):
# Before trying to find a free filename, check is this file is a duplicate
with open(path, 'rb') as saved:
md5_saved = hashlib.md5(saved.read()).digest()
with tempfile.NamedTemporaryFile(delete=False) as tmp:
cache = self._get_resource_content(tmp, filename)
tmp.seek(0)
md5_remote = hashlib.md5(tmp.read()).digest()
if md5_saved==md5_remote:
# same file
print "Resource at %s is a duplicate of %s" % (self.url,
path)
return
while os.path.exists(path):
# continue trying until we get a good filename
filename = _try_new_filename(filename)
path = os.path.join(directory, filename)
if self.request.status_code>=200 and self.request.status_code<300:
if cache:
# re-use file in temp directory, used for md5 checksum
copyfile(cache, path)
os.remove(cache)
else:
with open(path, 'wb') as f:
print "Writing resource to %s" % path
self._get_resource_content(f, filename)
return path
def download_resources(self, query, directory, filename_model=None, ids=[], index=0,
ids_digit_len=[], index_digit_len=0, duplicate_check=False):
self._open()
if not self.request:
return
resources = search_in_html(self.html, query, self.url)
for url in resources:
rg = ResourceGrabber(url)
rg.download(directory, filename_model=filename_model, ids=ids, index=index,
ids_digit_len=ids_digit_len, index_digit_len=index_digit_len,
duplicate_check=duplicate_check)
def get_internal_links(self, *args, **kwargs):
self._open()
if not self.request:
return
level = kwargs.get('level', 0)
if self.request.status_code >=200 and self.request.status_code<300:
links = search_in_html(self.html, args[level], self.url)
for link in links:
rg = ResourceGrabber(link)
if len(args)>level+1:
for inner_link in rg.get_internal_links(*args, level=level+1):
yield inner_link
else:
yield link
|
Allanon
|
/Allanon-0.2.zip/Allanon-0.2/src/allanon/resouce_grabber.py
|
resouce_grabber.py
|
import re
from allanon.resouce_grabber import ResourceGrabber
SPREAD_MODEL = r"""\{(?P<start>\d+)\:(?P<end>\d+)\}"""
spre = re.compile(SPREAD_MODEL)
def generate_urls(url, level=0):
"""
Using a string (commonly an URL) that contains a range section like this:
foo {n:m} bar
This will iterate through a set of results like those:
foo n bar
foo n+1 bar
foo n+2 bar
...
foo m bar
This will also work when n>m:
foo n bar
foo n-1 bar
foo n-2 bar
...
foo m bar
The range section can be used also multiple times:
foo {n:m} bar {x:y} baz
This will generate:
foo n bar x baz
foo n bar x+1 baz
...
foo n bar y baz
foo n+1 bar x baz
...
foo m bar y baz
"""
match = spre.search(url)
if match:
start, end = match.groups()
start = int(start); end = int(end)
step = start<=end and 1 or -1
for x in xrange(start, end+step, step):
ids = [x]
max_ids = [len(str(max(start, end+step)))]
new_url = spre.sub(str(x), url, 1)
if new_url.find("{")==-1:
yield new_url, ids, max_ids
for y, inner_ids, inner_max_ids in generate_urls(new_url, level+1):
yield y, ids + inner_ids, max_ids + inner_max_ids
elif level==0:
# first attempt doesn't match: then I'll return original URL
yield url, [], []
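# Usage sketch (illustrative, not part of the original module): expand a URL
# model with two ranges into the full list of concrete URLs. The model string
# is an assumption chosen only for demonstration.
def _generate_urls_example():
    return [url for url, ids, max_ids in
            generate_urls("http://foo.org/{1:2}/page?section={1:3}")]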
def get_dynamic_urls(raw_urls, outer_ids=[], outer_max_ids=[]):
for raw_url in raw_urls:
for url, ids, max_ids in generate_urls(raw_url):
ids = ids or outer_ids
max_ids = max_ids or outer_max_ids
yield url, ids, max_ids
def search_resources(urls, search_queries):
for generated_url in urls:
url, ids, max_ids = generated_url
rg = ResourceGrabber(url)
inner_urls = rg.get_internal_links(*search_queries)
for url in inner_urls:
yield url, ids, max_ids
|
Allanon
|
/Allanon-0.2.zip/Allanon-0.2/src/allanon/url_generator.py
|
url_generator.py
|
Allegra - Copyright (C) 2006 Laurent A.V. Szyster | Copyleft GPL 2.0
Allegra is an innovative library for web peer applications development.
It provides a Python framework for asynchronous network peer programming,
a simple stack of Internet standards implementations, and two new network
applications: a new metabase peer and a practical web peer.
http://laurentszyster.be/blog/allegra/
REQUIREMENTS
You need CPython 2.4 to run Allegra. If you don't have it, get it first.
INSTALL
Extract this archive and change directory to the extracted root
cd allegra
Then install Allegra's library
python setup.py install
|
Allegra
|
/Allegra-0.63.zip/Allegra-0.63/README.txt
|
README.txt
|
"http://laurentszyster.be/blog/finalization/"
from allegra import loginfo, async_loop
# Finalize
class Finalization (object):
finalization = None
async_finalized = async_loop._finalized
def __del__ (self):
if self.finalization != None:
self.async_finalized.append (self)
def collect ():
import gc
collected = gc.collect ()
if collected == 0:
return
assert None == loginfo.log ('%d' % collected, 'collected')
for cycle in gc.garbage:
try:
cycle.finalization = None
except:
pass
assert None == loginfo.log ('%r' % cycle, 'garbage')
# Branch
class Branch (Finalization):
def __init__ (self, finalizations):
self.finalizations = finalizations
def __call__ (self, finalized):
for finalization in self.finalizations:
finalization (finalized)
def branch (branched, finalization):
try:
branched.finalization.finalizations.append (finalization)
except:
branched.finalization = Branch ([
branched.finalization, finalization
])
# Continue
class Continuation (object):
finalization = None
async_finalized = async_loop._finalized
def __call__ (self): pass
def __del__ (self):
if self.finalization != None:
self.async_finalized.append (self)
def continuation (finalizations):
"combines continuations into one execution path"
i = iter (finalizations)
first = continued = i.next ()
try:
while True:
continued.finalization = i.next ()
continued = continued.finalization
except StopIteration:
pass
return first, continued
class Continue (Continuation):
def __init__ (self, finalizations):
self.__call__, self.continued = continuation (
finalizations
)
# Join
#
# the equivalent of "thread joining" with finalization does not really need
# a specific interface because it is enough to set the "joining" finalized
# as the finalization of all "joined" finalized.
#
# joined.finalization = joining
#
# how simple ...
def join (finalizations, continuation):
def finalize (finalized):
for joined in finalizations:
joined.finalization = continuation
return finalize
class Join (Continuation):
def __init__ (self, finalizations):
self.finalizations = finalizations
def __call__ (self, finalized):
if self.finalizations:
# start
for joined in self.finalizations:
joined.finalization = self
self.finalizations = None
#
# join
|
Allegra
|
/Allegra-0.63.zip/Allegra-0.63/lib/finalization.py
|
finalization.py
|
"http://laurentszyster.be/blog/thread_loop/"
import threading, collections
from allegra import netstring, loginfo, finalization, select_trigger
class Trunked_deque (object):
"a deque implementation to trunk protected deque safely"
def __len__ (self):
"return 1, a trunked deque has allways one item"
return 1
def __getitem__ (self, index):
"return None or raise IndexError"
if index == 0:
return None
raise IndexError, 'trunked deque'
def append (self, item):
"drop any item appended to the deque"
pass
def popleft (self):
"return None, the closing item for a deque consumer"
return None
pop = popleft
appendleft = append
class Protected_deque (object):
"a thread-safe wrapper for a deque"
def __init__ (self, queue=None):
self.deque = collections.deque (queue or [])
self.mon = threading.RLock ()
self.cv = threading.Condition (self.mon)
def __repr__ (self):
"return a safe netstring representation of the deque"
r = []
try:
self.cv.acquire ()
l = len (self.deque)
for item in self.deque:
try:
r.append ('%r' % (item,))
except:
r.append (
'item id="%x"' % id (item)
)
finally:
self.cv.release ()
return netstring.encode ((
'protected_deque queued="%d"' % l,
netstring.encode (r)
))
def __len__ (self):
"return the queue's length"
try:
self.cv.acquire ()
l = len (self.deque)
finally:
self.cv.release ()
return l
def __getitem__ (self, index):
"return the queue's length"
try:
self.cv.acquire ()
return self.deque[index]
finally:
self.cv.release ()
def append (self, item):
"push an item at the end the deque and notify"
try:
self.cv.acquire ()
self.deque.append (item)
self.cv.notify ()
finally:
self.cv.release ()
__call__ = append
def popleft (self):
"wait for a first item in the deque and pop it"
try:
self.cv.acquire ()
while len (self.deque) == 0:
self.cv.wait ()
item = self.deque.popleft ()
finally:
self.cv.release ()
return item
def appendleft (self, item):
"push an item of items in front of the deque"
try:
self.cv.acquire ()
self.deque.appendleft (item)
self.cv.notify ()
finally:
self.cv.release ()
def pop (self):
"wait for a last item in the deque and pop it"
try:
self.cv.acquire ()
while len (self.deque) == 0:
self.cv.wait ()
item = self.deque.pop ()
finally:
self.cv.release ()
return item
def trunk (self):
"""Replace the deque with a trunked deque implementation
and return the replaced deque instance."""
try:
self.cv.acquire ()
trunked = self.deque
self.deque = Trunked_deque ()
finally:
self.cv.release ()
return trunked
#
# In effect, trunking implies closing a running thread loop
# and dropping any item queued thereafter, which is precisely
# The Right Thing To Do when a thread loop queue is stopped:
# prevent accessors to push references that won't be popped
# out and leak.
class Thread_loop (threading.Thread, select_trigger.Select_trigger):
"a thread loop, with thread-safe asynchronous logging"
def __init__ (self, queue=None):
self.thread_loop_queue = queue or Protected_deque ()
select_trigger.Select_trigger.__init__ (self)
threading.Thread.__init__ (self)
self.setDaemon (1)
def __repr__ (self):
return 'thread-loop id="%x"' % id (self)
def run (self):
"""The Thread Loop
If thread_loop_init() is True, call queued instances until
None is popped or an exception is raised and not caught
by thread_loop_throw. Finally, if thread_loop_delete() is
True, trunk the thread loop queue.
"""
if self.thread_loop_init ():
next = self.thread_loop_queue.popleft # ? maybe safer
while True:
queued = next () # ... sure faster
if queued == None:
break
try:
queued[0] (*queued[1])
except:
if self.thread_loop_throw ():
del queued
break
else:
del queued
#
# note that I make sure to delete the tuple which
# would otherwise hold a reference to the method and
# arguments of the call threaded, preventing garbage
# collection hence finalization and was the source
# of subtle bugs ...
#
if self.thread_loop_delete ():
trunked = self.thread_loop_queue.trunk ()
if trunked:
assert None == self.select_trigger_log (
netstring.encode ([
'%r' % (i,) for i in trunked
]), 'debug'
)
#
# ... continue with the Select_trigger.finalization, unless
# there are circular references for this instance, caveat!
def thread_loop (self, queued):
"assert debug log and push a simple callable in the queue"
assert None == self.log ('%r %r' % queued, 'queued')
self.thread_loop_queue (queued)
def thread_loop_stop (self):
"assert debug log and push the stop item None in the queue"
assert None == self.log ('stop-when-done', 'debug')
self.thread_loop_queue (None)
def thread_loop_init (self):
"return True, assert a debug log of the thread loop start"
assert None == self.log ('start', 'debug')
return True
def thread_loop_throw (self):
"return False, log a compact traceback via the select trigger"
self.select_trigger_traceback ()
return False
def thread_loop_delete (self):
"return True, assert a debug log of the thread loop start"
assert None == self.log ('stop', 'debug')
return True
class Synchronizer (loginfo.Loginfo):
def __init__ (self, size=2):
self.synchronizer_size = size
self.synchronized_thread_loops = []
self.synchronized_instance_count = []
self.synchronized_count = 0
def __repr__ (self):
return 'synchronizer pid="%x" count="%d"' % (
id (self), self.synchronized_count
)
def synchronizer_append (self):
assert None == self.log (
'append %d' % len (self.synchronized_thread_loops),
'synchronizer'
)
t = Thread_loop ()
t.thread_loop_queue.synchronizer_index = len (
self.synchronized_thread_loops
)
self.synchronized_thread_loops.append (t)
self.synchronized_instance_count.append (0)
t.start ()
def synchronize (self, instance):
assert not hasattr (instance, 'synchronized')
if self.synchronized_count == len (
self.synchronized_thread_loops
) < self.synchronizer_size:
self.synchronizer_append ()
index = self.synchronized_instance_count.index (
min (self.synchronized_instance_count)
)
t = self.synchronized_thread_loops[index]
instance.synchronized = t.thread_loop_queue
instance.select_trigger = t.select_trigger
self.synchronized_instance_count[index] += 1
self.synchronized_count += 1
assert None == self.log ('%r' % instance, 'synchronized')
def desynchronize (self, instance):
assert hasattr (instance, 'synchronized')
i = instance.synchronized.synchronizer_index
count = self.synchronized_instance_count[i]
self.synchronized_count += -1
self.synchronized_instance_count[i] += -1
instance.select_trigger = instance.synchronized = None
if self.synchronized_count == 0:
assert None == self.log ('stop %d threads' % len (
self.synchronized_thread_loops
), 'synchronizer')
for t in self.synchronized_thread_loops:
t.thread_loop_queue (None)
self.synchronized_thread_loops = []
assert None == self.log ('%r' % instance, 'desynchronized')
def synchronize (instance):
if instance.synchronizer == None:
instance.__class__.synchronizer = Synchronizer (
instance.synchronizer_size
)
instance.synchronizer.synchronize (instance)
def desynchronize (instance):
instance.synchronizer.desynchronize (instance)
def synchronized (instance):
assert isinstance (instance, finalization.Finalization)
if instance.synchronizer == None:
instance.__class__.synchronizer = Synchronizer (
instance.synchronizer_size
)
instance.synchronizer.synchronize (instance)
instance.finalization = instance.synchronizer.desynchronize
# Notes about the Synchronizer
#
# The purpose is to deliver non-blocking interfaces to synchronous APIs.
#
# The synchronizer is a resizable array of thread loop queues. Synchronized
# instances are attached to one of these queues. When a synchronized instance
# is finalized, that reference is released and the array is notified. When no
# more instances are attached to a thread loop queue, its thread exits. If the
# limit set on the array size is not reached, a new thread loop is created for
# each new synchronized instance. The default limit is the Synchronizer's
# ``size`` argument (2 in the constructor above).
#
# This interface is purely asynchronous: methods synchronized should be able
# to access the select_trigger to manipulate the Synchronizer, or more
# mundanely to push data to asynchat ...
#
#
# Limits
#
# There is no easy way to prevent an instance from stalling its thread loop
# queue and all the other instances' methods synchronized to it. The only
# practical algorithm to detect a stalled method (and "fix" it) is to set a
# limit on the size of the synchronized queue and, when that limit is reached,
# to replace the stalled thread loop with a new one. However, this would leave
# the stalled thread hanging forever if the stalling method is running amok or
# blocking forever too. Setting a timeout on each synchronized method is
# impossible, since there is no way to reliably infer a maximum execution
# time, certainly in the case of concurrent processes.
#
# Basically, there is no practical and effective way to fix a thread broken by
# an infinite loop or a stalled-forever wait state. So, this implementation
# does not even attempt to correct the effects of such bugs on the other
# synchronized instances' methods.
#
#
# Beware!
#
# Synchronized methods must be tested separately. Yet that is trivial, because
# you may either test them asynchronously from within an async_loop host or,
# since they are synchronous, directly from the Python prompt.
#
# My advice is to use synchronized methods in two cases. Either you don't want
# to learn asynchronous programming (and don't have time for that). Or you
# know how, but need to access a blocking API that happens to be thread safe
# and releases the Python GIL.
#
# For instance:
#
# open (...).read ()
#
# or
#
# bsddb.db.DB ().open (...)
#
# may be blocking and should be synchronized.
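#
# A minimal synopsis, with a hypothetical Opener class (not part of
# the library): its blocking read is queued to a thread loop and the
# result is pushed back via select_trigger, assuming an async_loop
# host is dispatching.
#
# >>> from allegra import loginfo, finalization, thread_loop
# >>> class Opener (finalization.Finalization):
# ...     synchronizer = None
# ...     synchronizer_size = 2
# ...     def read (self, filename):
# ...         self.synchronized ((self.sync_read, (filename,)))
# ...     def sync_read (self, filename):
# ...         data = open (filename).read ()
# ...         self.select_trigger ((self.async_read, (data,)))
# ...     def async_read (self, data):
# ...         loginfo.log ('%d bytes read' % len (data), 'info')
# ...
# >>> opener = Opener ()
# >>> thread_loop.synchronized (opener)
# >>> opener.read ('README')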
/Allegra-0.63.zip/Allegra-0.63/lib/thread_loop.py
# Copyright (C) 2005 Laurent A.V. Szyster
#
# This library is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.
#
# http://www.gnu.org/copyleft/gpl.html
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# You should have received a copy of the GNU General Public License
# along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
"http://laurentszyster.be/blog/async_loop/"
import gc, select, errno, time, collections, heapq
from allegra import loginfo
Exit = KeyboardInterrupt
# Poll I/O
def _io_select (map, timeout, limit):
"poll for I/O a limited number of writable/readable dispatchers"
r = []
w = []
concurrent = map.items ()
rest = limit - len (concurrent)
if rest < 0:
concurrent = concurrent[:limit]
else:
rest = 0
for fd, dispatcher in concurrent:
if dispatcher.readable ():
r.append (fd)
if dispatcher.writable ():
w.append (fd)
if len (r) + len (w) == 0:
time.sleep (timeout)
return limit - rest, 0
try:
r, w, e = select.select (r, w, [], timeout)
except select.error, err:
if err[0] != errno.EINTR:
raise
else:
return limit - rest, 0
for fd in r:
try:
dispatcher = map[fd]
except KeyError:
continue
try:
dispatcher.handle_read_event ()
except Exit:
raise
except:
dispatcher.handle_error ()
for fd in w:
try:
dispatcher = map[fd]
except KeyError:
continue
try:
dispatcher.handle_write_event ()
except Exit:
raise
except:
dispatcher.handle_error ()
return limit - rest, len (r) + len (w)
#
# note that the number of distinct active dispatchers may actually
# be lower than the one reported: to get an exact count would
# require to use sets, something like: len (set (r) | set (w))
def _io_poll (map, timeout, limit):
"poll for I/O a limited number of writable/readable dispatchers"
timeout = int (timeout*1000)
pollster = select.poll ()
R = select.POLLIN | select.POLLPRI
W = select.POLLOUT
RW = R | W
concurrent = map.items ()
rest = limit - len (concurrent)
if rest < 0:
concurrent = concurrent[:limit]
else:
rest = 0
for fd, dispatcher in concurrent:
if dispatcher.readable ():
if dispatcher.writable ():
pollster.register (fd, RW)
else:
pollster.register (fd, R)
elif dispatcher.writable ():
pollster.register (fd, W)
p = [] # poll may be interrupted by EINTR, default to no events
try:
p = pollster.poll (timeout)
except select.error, err:
if err[0] != errno.EINTR:
raise
else:
for fd, flags in p:
try:
dispatcher = map[fd]
except KeyError:
continue
try:
if flags & R:
dispatcher.handle_read_event ()
if flags & W:
dispatcher.handle_write_event ()
except Exit:
raise
except:
dispatcher.handle_error()
return limit - rest, len (p)
# select the best I/O poll function available for this system
if hasattr (select, 'poll'):
_io = _io_poll
else:
_io = _io_select
# Poll Memory (Finalizations, ie: CPython __del__ decoupled)
_finalized = collections.deque ()
def _finalize ():
"call all finalizations queued"
while True:
try:
finalized = _finalized.popleft ()
except IndexError:
break
try:
finalized.finalization = finalized.finalization (
finalized
) # finalize and maybe continue ...
except Exit:
finalized.finalization = None
raise
except:
finalized.finalization = None
loginfo.traceback () # log exception
# Poll Time (Scheduled Events)
precision = 0.1
_scheduled = []
def _clock ():
"call all events scheduled before now, maybe recurr in the future"
now = time.time ()
future = now + precision
while _scheduled:
# get the next deferred ...
event = heapq.heappop (_scheduled)
if event[0] > now:
heapq.heappush (_scheduled, event)
break # ... nothing to defer now.
try:
# ... do defer and ...
continued = event[1] (event[0])
except Exit:
raise
except:
loginfo.traceback ()
else:
if continued != None:
# ... maybe recur in the future
if continued[0] < future:
continued = (future, continued[1])
heapq.heappush (_scheduled, continued)
# Poll Signals (Exceptions Handler)
_catchers = []
def _catched ():
"call async_loop.Exit exception catchers"
assert None == loginfo.log ('async_catch', 'debug')
if _catchers:
for catcher in tuple (_catchers):
if catcher ():
_catchers.remove (catcher)
return True
if __debug__:
for dispatcher in _dispatched.values ():
loginfo.log (
'%r' % dispatcher, 'undispatched'
)
for event in _scheduled:
loginfo.log (
'%r' % (event, ), 'unscheduled'
)
for finalized in _finalized:
loginfo.log (
'%r' % (finalized, ), 'unfinalized'
)
return False
# Application Programming Interfaces
def schedule (when, scheduled):
"schedule a call to scheduled after when"
heapq.heappush (_scheduled, (when, scheduled))
def catch (catcher):
"register an catcher for the Exit exception"
_catchers.append (catcher)
concurrency = 512
_dispatched = {}
def dispatch ():
"dispatch I/O, time and finalization events"
assert None == loginfo.log ('async_dispatch_start', 'debug')
while _dispatched or _scheduled or _finalized or gc.collect () > 0:
try:
_io (_dispatched, precision, concurrency)
_clock ()
_finalize ()
except Exit:
if not _catched ():
break
except:
loginfo.traceback ()
assert None == loginfo.log ('async_dispatch_stop', 'debug')
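#
# A minimal synopsis of the scheduling API above; the recurring tick
# function and its three seconds period are hypothetical:
#
# >>> import time
# >>> from allegra import loginfo, async_loop
# >>> def tick (when):
# ...     loginfo.log ('tick', 'info')
# ...     return (when + 3.0, tick) # recur three seconds later
# ...
# >>> async_loop.schedule (time.time () + 3.0, tick)
# >>> async_loop.dispatch () # press CTRL+C to exit the loop
#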
def io_meter (loop, when=None):
"decorate the loop module's I/O poll function with meters, log info"
loop._io_when = when or time.time ()
loop._io_run = 0
loop._io_load = loop._io_concurrency = loop._io_activity = 0.0
def _io_metering (map, timeout, limit):
loop._io_run += 1
dispatched = len (loop._dispatched)
# call the original poll function saved as loop._io_metered below,
# so that metering does not recurse once loop._io is rebound
concurrent, polled = loop._io_metered (map, timeout, limit)
if concurrent > 0:
loop._io_activity += float (polled) / concurrent
concurrent = float (concurrent)
loop._io_load += concurrent / limit
loop._io_concurrency += concurrent / dispatched
return concurrent, polled
loop._io_metered = loop._io
loop._io = _io_metering
loginfo.log ('io-metered', 'info')
def io_meters (loop):
"return statistics about a metered I/O loop"
return (
loop._io_when, loop._io_run,
loop._io_run / (time.time () - loop._io_when),
loop._io_load / loop._io_run,
loop._io_concurrency / loop._io_run,
loop._io_activity / loop._io_run
)
def io_unmeter (loop):
"log meter statistics and remove the metering decoration"
meters = io_meters (loop)
loginfo.log (
'io-unmetered'
' seconds="%f" run="%d"'
' rps="%f" load="%f"'
' concurrency="%f" activity="%f"' % meters, 'info'
)
del (
loop._io_when, loop._io_run,
loop._io_load, loop._io_concurrency, loop._io_activity
)
loop._io = loop._io_metered
return meters
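#
# A minimal synopsis, metering the module's own poll function around
# one run of the dispatcher (assuming dispatchers or events were
# added before):
#
# >>> from allegra import async_loop
# >>> async_loop.io_meter (async_loop)
# >>> async_loop.dispatch ()
# >>> async_loop.io_unmeter (async_loop)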
/Allegra-0.63.zip/Allegra-0.63/lib/async_loop.py
"http://laurentszyster.be/blog/async_limits/"
import time
from allegra import async_loop
# Metering for stream and datagram sockets
def meter_recv (dispatcher, when):
"decorate a stream transport with an input meter"
dispatcher.ac_in_meter = 0
dispatcher.ac_in_when = when
metered_recv = dispatcher.recv
def recv (buffer_size):
data = metered_recv (buffer_size)
dispatcher.ac_in_meter += len (data)
dispatcher.ac_in_when = time.time ()
return data
dispatcher.recv = recv
def meter_send (dispatcher, when):
"decorate a stream transport with an output meter"
dispatcher.ac_out_meter = 0
dispatcher.ac_out_when = when
metered_send = dispatcher.send
def send (data):
sent = metered_send (data)
dispatcher.ac_out_meter += sent
dispatcher.ac_out_when = time.time ()
return sent
dispatcher.send = send
def meter_recvfrom (dispatcher, when):
"decorate a datagram transport with an input meter"
dispatcher.ac_in_meter = 0
dispatcher.ac_in_when = when
metered_recvfrom = dispatcher.recvfrom
def recvfrom (datagram_size):
data, peer = metered_recvfrom (datagram_size)
dispatcher.ac_in_meter += len (data)
dispatcher.ac_in_when = time.time ()
return data, peer
dispatcher.recvfrom = recvfrom
def meter_sendto (dispatcher, when):
"decorate a datagram transport with an output meter"
dispatcher.ac_out_meter = 0
dispatcher.ac_out_when = when
metered_sendto = dispatcher.sendto
def sendto (data, peer):
sent = metered_sendto (data, peer)
dispatcher.ac_out_meter += sent
dispatcher.ac_out_when = time.time ()
return sent
dispatcher.sendto = sendto
# Inactivity Limits
def inactive_in (dispatcher, when):
"overflow if connected, not closing and input is inactive"
return not dispatcher.closing and dispatcher.connected and (
when - dispatcher.ac_in_when
) > dispatcher.limit_inactive
def inactive_out (dispatcher, when):
"overflow if connected, not closing and output is inactive"
return not dispatcher.closing and dispatcher.connected and (
when - dispatcher.ac_out_when
) > dispatcher.limit_inactive
def inactive (dispatcher, when):
"overflow if connected, not closing and I/O is inactive"
return not dispatcher.closing and dispatcher.connected and (
when - max (
dispatcher.ac_in_when, dispatcher.ac_out_when
)
) > dispatcher.limit_inactive
# Throttling Decorators
def throttle_readable (dispatcher, when, Bps):
"decorate a metered dispatcher with an input throttle"
dispatcher.ac_in_throttle = Bps ()
dispatcher.ac_in_throttle_when = when
dispatcher.ac_in_throttle_Bps = Bps
throttled_readable = dispatcher.readable
def readable ():
return (
dispatcher.ac_in_meter < dispatcher.ac_in_throttle
and throttled_readable ()
)
dispatcher.readable = readable
def throttle_writable (dispatcher, when, Bps):
"decorate a metered dispatcher with an output throttle"
dispatcher.ac_out_throttle = Bps ()
dispatcher.ac_out_throttle_when = when
dispatcher.ac_out_throttle_Bps = Bps
throttled_writable = dispatcher.writable
def writable ():
return (
dispatcher.ac_out_meter < dispatcher.ac_out_throttle
and throttled_writable ()
)
dispatcher.writable = writable
# Throttling limits
def throttle_in (dispatcher, when):
"allocate input bandiwth to a throttled dispatcher"
if dispatcher.ac_in_meter >= dispatcher.ac_in_throttle:
dispatcher.ac_in_throttle += int ((
when - max (
dispatcher.ac_in_when,
dispatcher.ac_in_throttle_when
)
) * dispatcher.ac_in_throttle_Bps ())
dispatcher.ac_in_throttle_when = when
return False
#
# when the dispatcher has exceeded its limit, allocate bandwidth at a given
# rate for the period between "when" - approximately but steadily
# "now" - and the last I/O or the last allocation, whichever comes
# later. in effect it grants the dispatcher the bandwidth it is entitled
# to for the immediate past.
#
# the throttle_in function is supposed to be called by a
# periodic deferred. for peers with long-lived dispatchers it is
# faster to periodically allocate bandwidth than to do it whenever
# we send or receive, or every time we check for readability or
# writability.
def throttle_out (dispatcher, when):
"allocate output bandiwth to a throttled dispatcher"
if dispatcher.ac_out_meter >= dispatcher.ac_out_throttle:
dispatcher.ac_out_throttle += int ((
when - max (
dispatcher.ac_out_when,
dispatcher.ac_out_throttle_when
)
) * dispatcher.ac_out_throttle_Bps ())
dispatcher.ac_out_throttle_when = when
return False
def throttle (dispatcher, when):
"allocate I/O bandiwth to a throttled dispatcher"
throttle_in (dispatcher, when)
throttle_out (dispatcher, when)
return False
# Limit recurrence factory
def limit_schedule (dispatcher, when, interval, limit, unlimit):
"instanciate and schedule a limit recurrence"
# set the limit flag down
dispatcher.limit_stop = False
def scheduled (when):
if dispatcher.closing or dispatcher.limit_stop:
# closing or limit flag raised, remove
unlimit (dispatcher)
return
if limit (dispatcher, when):
# limit overflowed, remove and handle close
unlimit (dispatcher)
dispatcher.handle_close ()
return
# recur at interval
return (when + interval, scheduled)
async_loop.schedule (when + interval, scheduled)
# I like that one (nested namespaces rule ,-)
# Conveniences: ready-made metering, inactivity check and throttling
def limit_in (dispatcher, when):
"overflow if input is inactive, throttle it otherwise"
return (
inactive_in (dispatcher, when) or
throttle_in (dispatcher, when)
)
def limit_out (dispatcher, when):
"overflow if output is inactive, throttle it otherwise"
return (
inactive_out (dispatcher, when) or
throttle_out (dispatcher, when)
)
def limit (dispatcher, when):
"overflow if I/O are inactive, throttle them otherwise"
return (
inactive (dispatcher, when) or
throttle (dispatcher, when)
)
# for stream transport
def limit_recv (dispatcher, interval, timeout, Bps):
"meter recv and throttle readable, schedule throttling in"
when = time.time ()
dispatcher.limit_inactive = timeout
meter_recv (dispatcher, when)
throttle_readable (dispatcher, when, Bps)
limit_schedule (
dispatcher, when, interval, limit_in, unlimit_recv
)
def unlimit_recv (dispatcher):
"unmeter recv and unthrottle readable"
del (
dispatcher.recv,
dispatcher.readable,
dispatcher.ac_in_throttle_Bps
)
def limit_send (dispatcher, interval, timeout, Bps):
"meter send and throttle writable, schedule throttling out"
when = time.time ()
dispatcher.limit_inactive = timeout
meter_send (dispatcher, when)
throttle_writable (dispatcher, when, Bps)
limit_schedule (
dispatcher, when, interval, limit_out, unlimit_send
)
def unlimit_send (dispatcher):
"unmeter send and unthrottle writable"
del (
dispatcher.send,
dispatcher.writable,
dispatcher.ac_out_throttle_Bps
)
def limit_stream (dispatcher, interval, timeout, inBps, outBps):
"meter and throttle stream I/O, schedule throttling"
when = time.time ()
dispatcher.limit_inactive = timeout
meter_recv (dispatcher, when)
throttle_readable (dispatcher, when, inBps)
meter_send (dispatcher, when)
throttle_writable (dispatcher, when, outBps)
limit_schedule (
dispatcher, when, interval, limit, unlimit_stream
)
def unlimit_stream (dispatcher):
"unmeter and unthrottle stream I/O"
del (
dispatcher.recv,
dispatcher.readable,
dispatcher.ac_in_throttle_Bps,
dispatcher.send,
dispatcher.writable,
dispatcher.ac_out_throttle_Bps
)
# for datagram transport
def limit_recvfrom (dispatcher, interval, timeout, Bps):
"meter recvfrom and throttle readable, schedule throttling in"
when = time.time ()
dispatcher.limit_inactive = timeout
meter_recvfrom (dispatcher, when)
throttle_readable (dispatcher, when, Bps)
limit_schedule (
dispatcher, when, interval, limit_in, unlimit_recvfrom
)
def unlimit_recvfrom (dispatcher):
"unmeter recvfrom and unthrottle readable"
del (
dispatcher.recvfrom,
dispatcher.readable,
dispatcher.ac_in_throttle_Bps
)
def limit_sendto (dispatcher, interval, timeout, Bps):
"meter sendto and throttle writable, schedule throttling out"
when = time.time ()
dispatcher.limit_inactive = timeout
meter_sendto (dispatcher, when)
throttle_writable (dispatcher, when, Bps)
limit_schedule (
dispatcher, when, interval, limit_out, unlimit_sendto
)
def unlimit_sendto (dispatcher):
"unmeter sendto and unthrottle writable"
del (
dispatcher.sendto,
dispatcher.writable,
dispatcher.ac_out_throttle_Bps
)
def limit_datagram (dispatcher, interval, timeout, inBps, outBps):
"meter and throttle datagram I/O, schedule throttling"
when = time.time ()
dispatcher.limit_inactive = timeout
meter_recvfrom (dispatcher, when)
throttle_readable (dispatcher, when, inBps)
meter_sendto (dispatcher, when)
throttle_writable (dispatcher, when, outBps)
limit_schedule (
dispatcher, when, interval, limit, unlimit_datagram
)
def unlimit_datagram (dispatcher):
"unmeter and unthrottle datagram I/O"
del (
dispatcher.recvfrom,
dispatcher.readable,
dispatcher.ac_in_throttle_Bps,
dispatcher.sendto,
dispatcher.writable,
dispatcher.ac_out_throttle_Bps
)
# Note about this implementation
#
# other kind of limits - like an absolute limit on the maximum i/o or
# duration per dispatcher - should be implemented in the final class.
#
#
# The Case for Throttling
#
# Asynchat makes it possible to save a server's resources by limiting the I/O
# buffers, but for a peer on the edges of the network the bottleneck is
# bandwidth, not memory. Compare the 16KBps upload limit of a low-end
# connection with the 512MB of RAM available in most PCs these days ... it
# would take nine hours to upload that much data through such a small pipe.
#
# It is a basic requirement for a peer to throttle its traffic to a fraction
# of the bandwidth generally available, because there *are* other applications
# and system functions that need a bit of bandwidth, and peer applications
# tend to exhaust network resources pretty fast.
#
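# A minimal synopsis, assuming a connected async_core dispatcher set
# up elsewhere: throttle its stream I/O to 16KBps each way, check the
# limits every 3 seconds and drop it after 60 seconds of inactivity.
#
# >>> from allegra import async_limits
# >>> async_limits.limit_stream (
# ...     dispatcher, 3.0, 60.0, (lambda: 16384), (lambda: 16384)
# ...     )
#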
/Allegra-0.63.zip/Allegra-0.63/lib/async_limits.py
"http://laurentszyster.be/blog/netstring/"
class NetstringsError (Exception): pass
def encode (strings):
"encode an sequence of 8-bit byte strings as netstrings"
return ''.join (['%d:%s,' % (len (s), s) for s in strings])
def decode (buffer):
"decode the netstrings found in the buffer, trunk garbage"
size = len (buffer)
prev = 0
while prev < size:
pos = buffer.find (':', prev)
if pos < 1:
break
try:
next = pos + int (buffer[prev:pos]) + 1
except:
break
if next >= size:
break
if buffer[next] == ',':
yield buffer[pos+1:next]
else:
break
prev = next + 1
def validate (buffer, length):
"decode the netstrings, but keep garbage and fit to size"
size = len (buffer)
prev = 0
while prev < size and length:
pos = buffer.find (':', prev)
if pos < 1:
if prev == 0:
raise StopIteration # not a netstring!
break
try:
next = pos + int (buffer[prev:pos]) + 1
except:
break
if next >= size:
break
if buffer[next] == ',':
length -= 1
yield buffer[pos+1:next]
else:
break
prev = next + 1
if length:
length -= 1
yield buffer[max (prev, pos+1):]
while length:
length -= 1
yield ''
def outline (encoded, format, indent):
"recursively format nested netstrings as a CRLF outline"
n = tuple (decode (encoded))
if len (n) > 0:
return ''.join ((outline (
e, indent + format, indent
) for e in n))
return format % encoded
def netstrings (instance):
"encode a tree of instances as nested netstrings"
t = type (instance)
if t == str:
return instance
if t in (tuple, list, set, frozenset):
return encode ((netstrings (i) for i in instance))
if t == dict:
return encode ((netstrings (i) for i in instance.items ()))
try:
return '%s' % instance
except:
return '%r' % instance
def netlist (encoded):
"return a list of strings or [encoded] if no netstrings found"
return list (decode (encoded)) or [encoded]
def nettree (encoded):
"decode the nested encoded strings in a tree of lists"
leaves = [nettree (s) for s in decode (encoded)]
if len (leaves) > 0:
return leaves
return encoded
def netlines (encoded, format='%s\n', indent=' '):
"beautify a netstring as an outline ready to log"
n = tuple (decode (encoded))
if len (n) > 0:
return format % ''.join ((outline (
e, format, indent
) for e in n))
return format % encoded
def netoutline (encoded, indent=''):
"recursively format nested netstrings as an outline with length"
n = tuple (decode (encoded))
if len (n) > 0:
return '%s%d:\n%s%s,\n' % (
indent, len (encoded), ''.join ((netoutline (
e, indent + ' '
) for e in n)), indent)
return '%s%d:%s,\n' % (indent, len (encoded), encoded)
def netpipe (more, BUFFER_MAX=0):
"""A practical netstrings pipe generator
Decode the stream of netstrings produced by more (), raise
a NetstringsError exception on protocol failure or StopIteration
when the producer is exhausted.
If specified, the BUFFER_MAX size must be more than twice as
big as the largest netstring piped through (although netstrings
strictly smaller than BUFFER_MAX may pass through without raising
an exception).
"""
buffer = more ()
while buffer:
pos = buffer.find (':')
if pos < 0:
raise NetstringsError, '1 not a netstring'
try:
next = pos + int (buffer[:pos]) + 1
except:
raise NetstringsError, '2 not a valid length'
if 0 < BUFFER_MAX < next:
raise NetstringsError, '4 buffer overflow (%d bytes)' % BUFFER_MAX
while next >= len (buffer):
data = more ()
if data:
buffer += data
else:
raise NetstringsError, '5 end of pipe'
if buffer[next] == ',':
yield buffer[pos+1:next]
else:
raise NetstringsError, '3 missing comma'
buffer = buffer[next+1:]
if buffer == '' or buffer.isdigit ():
buffer += more ()
#
# Note also that the first call to more must return at least the
# encoded length of the first netstring, which practically is (or
# should be) always the case (for instance, piping in a netstring
# sequence from a file will be done by blocks of pages, typically
# between 512 and 4096 bytes, maybe more, certainly not less).
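#
# A minimal synopsis of the encoders and decoders above:
#
# >>> from allegra import netstring
# >>> netstring.encode (('hello', 'world'))
# '5:hello,5:world,'
# >>> list (netstring.decode ('5:hello,5:world,'))
# ['hello', 'world']
# >>> netstring.netstrings (['hello', ('nested', 'strings')])
# '5:hello,19:6:nested,7:strings,,'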
if __name__ == '__main__':
import sys
assert None == sys.stderr.write (
'Allegra Netstrings'
' - Copyright 2005 Laurent A.V. Szyster'
' | Copyleft GPL 2.0\n\n'
)
if len (sys.argv) > 1:
command = sys.argv[1]
else:
command = 'outline'
if command in ('outline', 'decode'):
if len (sys.argv) > 2:
if len (sys.argv) > 3:
try:
buffer_max = int (sys.argv[3])
except:
sys.stderr.write (
'3 invalid buffer max\n'
)
sys.exit (3)
else:
buffer_max = 0
try:
buffer_more = int (sys.argv[2])
except:
sys.stderr.write ('2 invalid buffer size\n')
sys.exit (2)
else:
buffer_max = 0
buffer_more = 4096
def more ():
return sys.stdin.read (buffer_more)
count = 0
try:
if command == 'outline':
write = sys.stdout.write
for n in netpipe (more, buffer_max):
write (netlines (n))
count += 1
else:
for n in netpipe (more, buffer_max):
count += 1
finally:
assert None == sys.stderr.write ('%d' % count)
elif command == 'encode':
write = sys.stdout.write
for line in sys.stdin.xreadlines ():
write ('%d:%s,' % (len (line)-1, line[:-1]))
else:
sys.stderr.write ('1 invalid command\n')
sys.exit (1)
sys.exit (0)
/Allegra-0.63.zip/Allegra-0.63/lib/netstring.py
"http://laurentszyster.be/blog/prompt/"
import sys, types
def compact_traceback (exc_info=None):
"""return a compact traceback tuple from sys.exc_info(), like:
(['error name',
('filename', 'lineno', 'function'),
...
], 'error message')
a compact traceback is a simple data structure made of 8-bit byte
strings, ready to be serialized."""
t, v, tb = exc_info or sys.exc_info ()
if type (t) == types.ClassType:
t = t.__name__
elif type (t) != str:
t = str (t)
tbinfo = []
assert tb # Must have a traceback ?
while tb:
tbinfo.append ((
tb.tb_frame.f_code.co_filename,
tb.tb_frame.f_code.co_name,
str (tb.tb_lineno)
))
tb = tb.tb_next
del tb # just to be safe ?
return t, str (v), tbinfo
def python_eval (co, env):
"""try to eval the compiled co in the environement env
return either ('eval', result) or ('excp', traceback)"""
try:
return ('eval', eval (co, env))
except:
return ('excp', compact_traceback ())
def python_exec (co, env):
"""try to exec the compiled co in the environement env
return either ('exec', None) or ('excp', traceback)"""
try:
exec co in env
except:
return ('excp', compact_traceback ())
else:
return ('exec', None)
def python_prompt (line, env):
"""try eval first, if that fails try exec, return ('eval', result)
('exec', None) or ('excp', traceback)"""
try:
try:
co = compile (line, 'python_line', 'eval')
except SyntaxError:
co = compile (line, 'python_line', 'exec')
method, result = python_exec (co, env)
else:
method, result = python_eval (co, env)
except:
return ('excp', compact_traceback ())
else:
return (method, result)
# Synopsis
#
# >>> from allegra import prompt
# >>> env = {}
# >>> prompt.python_prompt ('1+1', env)
# ('eval', 2)
# >>> prompt.python_prompt ('a=1+1', env)
# ('exec', None)
# >>> env['a']
# 2
# >>> prompt.python_prompt ('foobar', env)
# ('excp', (
# 'exceptions.NameError',
# "name 'foobar' is not defined",
# [
# ('prompt.py', 'python_eval', '53'),
# ('python_line', '?', '0')
# ]
# ))
# >>> try:
# ... foobar
# ... except:
# ... prompt.compact_traceback ()
# ...
# (
# 'exceptions.NameError',
# "name 'foobar' is not defined",
# [('<stdin>', '?', '2')]
# )
/Allegra-0.63.zip/Allegra-0.63/lib/prompt.py
"http://laurentszyster.be/blog/producer/"
import types
class File (object):
"producer wrapper for file[-like] objects"
def __init__ (self, file, chunk=1<<14): # 16KB buffer
self.file = file
self.chunk = chunk
def more (self):
return self.file.read (self.chunk)
def producer_stalled (self):
return False
class Simple (object):
"scanning producer for a large string"
def __init__ (self, data, chunk=1<<14): # 16KB buffer
lb = len (data)
self.content_length = lambda: lb
self.more = self.produce (data, chunk).next
def produce (self, data, chunk):
lb = len (data)
start = 0
while start < lb:
end = start + chunk
yield data[start:end]
start = end
del data, self.content_length, self.more
yield ''
def producer_stalled (self):
return False
class Stalled_generator (object):
# the simplest stallable generator, a useful construct for any
# generator-based producer that is set as a finalization or as a
# handler of diverse asynchronous or synchronized callbacks ...
def __call__ (self, *args):
self.generator = iter ((
'Stalled_generator.__call__ not implemented',
))
generator = None
def more (self):
try:
return self.generator.next ()
except StopIteration:
return ''
def producer_stalled (self):
return self.generator == None
class Composite (object):
# This is a more "modern" composite producer than the original
# one, with support for stalled producers and generators. it is the
# bread & butter of Allegra's PRESTo! with the Buffer.
def __init__ (self, head, body, glob=1<<14): # 16KB globber
assert (
type (head) == types.StringType and
type (body) == types.GeneratorType
)
self.current = head
self.generator = body
self.glob = glob
def more (self):
if self.current == '':
return ''
buffer = ''
limit = self.glob
while True:
if type (self.current) == str:
buffer += self.current
try:
self.current = self.generator.next ()
except StopIteration:
self.current = ''
break
if len (buffer) > limit:
break
elif self.current.producer_stalled ():
assert buffer != '' # watch this!
break
else:
data = self.current.more ()
if data:
buffer += data
if len (buffer) > limit:
break
else:
continue
try:
self.current = self.generator.next ()
except StopIteration:
self.current = ''
break
return buffer
def producer_stalled (self):
try:
return self.current.producer_stalled ()
except:
return False
# Note that this class also makes the original Medusa's lines, buffer
# and globbing producers redundant. What this class does is to glob
# as many strings as possible from a MIME-like data structure:
#
# head = 'string'
# body = (generator of 'string' or producer ())
#
# It's a practical producer for asynchronous REST responses composed
# of simple strings and maybe-stalling producers. The overhead of
# another loop buys globbing and helps the peer fill its channel's
# buffers more efficiently for TCP/IP.
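#
# A minimal synopsis, composing a response from a string head and a
# hypothetical generator body that mixes strings and producers:
#
# >>> from allegra import producer
# >>> def body ():
# ...     yield 'Hello '
# ...     yield producer.Simple ('World')
# ...     yield '!'
# ...
# >>> composite = producer.Composite ('HEAD ', body ())
# >>> composite.more ()
# 'HEAD Hello World!'
# >>> composite.more ()
# ''
#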
class Tee (object):
def __init__ (self, producer):
self.index = -1
self.producer = producer
try:
self.buffers = producer.tee_buffers
except AttributeError:
self.buffers = producer.tee_buffers = []
def more (self):
self.index += 1
try:
return self.buffers[self.index]
except IndexError:
data = self.producer.more ()
self.buffers.append (data)
return data
def producer_stalled (self):
if self.index + 1 < len (self.buffers):
return False
return self.producer.producer_stalled ()
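#
# A minimal synopsis: two Tee readers replay the output of a single
# Simple producer, each at its own pace (the sizes are arbitrary):
#
# >>> from allegra import producer
# >>> p = producer.Simple ('spam', 2)
# >>> a = producer.Tee (p)
# >>> b = producer.Tee (p)
# >>> a.more (), a.more ()
# ('sp', 'am')
# >>> b.more (), b.more ()
# ('sp', 'am')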
/Allegra-0.63.zip/Allegra-0.63/lib/producer.py
# Copyright (C) 2005 Laurent A.V. Szyster
#
# This library is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.
#
# http://www.gnu.org/copyleft/gpl.html
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# You should have received a copy of the GNU General Public License
# along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
"http://laurentszyster.be/blog/async_chat/"
import collections, socket
from allegra import async_core
def find_prefix_at_end (haystack, needle):
"given 'haystack', see if any prefix of 'needle' is at its end."
l = len (needle) - 1
while l and not haystack.endswith (needle[:l]):
l -= 1
return l
def collect_chat (c, buffer):
"collect a buffer for a channel or collector"
lb = len (buffer)
while lb:
terminator = c.get_terminator ()
if terminator is None or terminator == '':
c.collect_incoming_data (buffer)
buffer = ''
elif isinstance (terminator, int):
if lb < terminator:
c.collect_incoming_data (buffer)
buffer = ''
c.set_terminator (terminator - lb)
else:
c.collect_incoming_data (buffer[:terminator])
buffer = buffer[terminator:]
c.set_terminator (0)
if c.found_terminator ():
c.collector_stalled = True
break
else:
tl = len (terminator)
index = buffer.find (terminator)
if index != -1:
if index > 0:
c.collect_incoming_data (
buffer[:index]
)
buffer = buffer[index+tl:]
if c.found_terminator ():
c.collector_stalled = True
break
else:
index = find_prefix_at_end (
buffer, terminator
)
if index:
if index != lb:
c.collect_incoming_data (
buffer[:-index]
)
buffer = buffer[-index:]
break
else:
c.collect_incoming_data (buffer)
buffer = ''
lb = len (buffer)
return buffer
class Dispatcher (async_core.Dispatcher):
ac_in_buffer_size = ac_out_buffer_size = 1 << 14
terminator = None
collector_stalled = False
collector_is_simple = False
collector_depth = 32
def __init__ (self):
self.ac_in_buffer = ''
self.ac_out_buffer = ''
self.output_fifo = collections.deque ()
def __repr__ (self):
return 'async-chat id="%x"' % id (self)
def readable (self):
"predicate for inclusion in the poll loop for input"
return not (
self.collector_stalled or
len (self.ac_in_buffer) > self.ac_in_buffer_size
)
def writable (self):
"predicate for inclusion in the poll loop for output"
try:
return not (
self.output_fifo[
0
].producer_stalled () and
self.connected
)
except:
return not (
(self.ac_out_buffer == '') and
not self.output_fifo and
self.connected
)
def handle_read (self):
"try to refill the input buffer and collect it"
try:
data = self.recv (self.ac_in_buffer_size)
except socket.error, why:
self.handle_error ()
return
self.ac_in_buffer = collect_chat (
self, self.ac_in_buffer + data
)
def handle_write (self):
"maybe refill the output buffer and try to send it"
obs = self.ac_out_buffer_size
buffer = self.ac_out_buffer
if len (buffer) < obs:
fifo = self.output_fifo
while fifo:
p = fifo[0]
if p == None:
if buffer == '':
fifo.popleft ()
self.handle_close ()
return
break
elif type (p) == str:
fifo.popleft ()
buffer += p
if len (buffer) < obs:
continue
break
if p.producer_stalled ():
break
data = p.more ()
if data:
buffer += data
break
fifo.popleft ()
if buffer:
sent = self.send (buffer[:obs])
if sent:
self.ac_out_buffer = buffer[sent:]
else:
self.ac_out_buffer = buffer
else:
self.ac_out_buffer = ''
def close (self):
"close the dispatcher and maybe terminate the collector"
async_core.Dispatcher.close (self)
if not self.collector_stalled:
depth = self.collector_depth
while depth and not self.found_terminator ():
depth -= 1
if depth < 1:
self.log (
'%d' % self.collector_depth,
'collector-leak'
)
def close_when_done (self):
"""automatically close this channel once the outgoing queue
is empty, or handle close now if it is already empty"""
if self.output_fifo:
self.output_fifo.append (None)
else:
self.handle_close () # when done is now!
def async_chat_push (self, p):
"push a string or producer on the output deque"
assert type (p) == str or hasattr (p, 'more')
self.output_fifo.append (p)
# push_with_producer = push = async_chat_push
def async_chat_pull (self):
"stall no more and collect the input buffer"
self.collector_stalled = False
if self.ac_in_buffer:
self.ac_in_buffer = collect_chat (
self, self.ac_in_buffer
)
def set_terminator (self, terminator):
"set the channel's terminator"
self.terminator = terminator
def get_terminator (self):
"get the channel's terminator"
return self.terminator
def collect_incoming_data (self, data):
"assert debug log of collected data"
assert None == self.log (data, 'collect-incoming-data')
def found_terminator (self):
"assert debug log of terminator found"
assert None == self.log (
self.get_terminator (), 'found-terminator'
)
return True # do not pipeline
# Note about this implementation
#
# This is a refactored version of asynchat.py as found in Python 2.4, and
# modified as to support stallable producers and collectors, loginfo and
# finalization.
#
# Stallable Producer and Collector
#
# In order to support non-blocking asynchronous and synchronized peers,
# the async_chat module introduces stallable collectors and generalizes
# the stallable producer of Medusa's proxy.
#
# Besides the fact that stallable reactors are a requirement for peers
# that do not block, they have other practical benefits. For instance,
# a channel with a stalled collector and an empty output_fifo will not
# be polled for I/O.
#
# This implementation uses collections.deque for output FIFO queues instead
# of a class wrapper, and the push () method actually does what it is
# supposed to do: it pushes a string at the end of that output queue, not a
# Simple instance.
#
# The channel's method collect_incoming_data is called to collect data
# between terminators. Its found_terminator method is called whenever
# the current terminator is found, and if that method returns True, then
# no more buffer will be consumed until the channel's collector_stalled
# is set back to False by a call to async_chat_pull.
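#
# A minimal synopsis of that collector protocol, feeding collect_chat
# directly with a buffer instead of a socket; the Lines collector is
# hypothetical:
#
# >>> from allegra import async_chat
# >>> class Lines (async_chat.Dispatcher):
# ...     terminator = '\r\n'
# ...     def __init__ (self):
# ...         async_chat.Dispatcher.__init__ (self)
# ...         self.lines = ['']
# ...     def collect_incoming_data (self, data):
# ...         self.lines[-1] = self.lines[-1] + data
# ...     def found_terminator (self):
# ...         self.lines.append ('')
# ...         return False # do not stall, keep collecting
# ...
# >>> channel = Lines ()
# >>> async_chat.collect_chat (channel, 'hello\r\nworld\r\n')
# ''
# >>> channel.lines
# ['hello', 'world', '']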
/Allegra-0.63.zip/Allegra-0.63/lib/async_chat.py
"http://laurentszyster.be/blog/async_core/"
import exceptions, sys, os, time, socket, collections
from errno import (
EALREADY, EINPROGRESS, EWOULDBLOCK, ECONNRESET,
ENOTCONN, ESHUTDOWN, EINTR, EISCONN
)
from allegra import loginfo, async_loop, finalization
class Dispatcher (loginfo.Loginfo, finalization.Finalization):
connected = accepting = closing = False
socket = addr = family_and_type = _fileno = None
def create_socket (self, family, type):
"create a socket and add the dispatcher to the I/O map"
self.family_and_type = family, type
self.socket = socket.socket (family, type)
self.socket.setblocking (0)
self._fileno = self.socket.fileno ()
self.add_channel ()
def set_connection (self, conn, addr):
"set a connected socket and add the dispatcher to the I/O map"
conn.setblocking (0)
self.socket = conn
self.addr = addr
self._fileno = conn.fileno ()
self.add_channel ()
self.connected = True
def set_reuse_addr (self):
"try to re-use a server port if possible"
try:
self.socket.setsockopt (
socket.SOL_SOCKET,
socket.SO_REUSEADDR,
self.socket.getsockopt (
socket.SOL_SOCKET,
socket.SO_REUSEADDR
) | 1
)
except socket.error:
pass
def listen (self, num):
"listen and set the dispatcher's accepting state"
self.accepting = True
if os.name == 'nt' and num > 5:
num = 1
return self.socket.listen (num)
def bind (self, addr):
"bind to addr and set the dispatcher's addr property"
self.addr = addr
return self.socket.bind (addr)
def connect (self, address):
"try to connect and set the dispatcher's connected state"
err = self.socket.connect_ex (address)
if err in (EINPROGRESS, EALREADY, EWOULDBLOCK):
return
if err in (0, EISCONN):
self.addr = address
self.connected = True
self.handle_connect ()
else:
raise socket.error, err
def accept (self):
"try to accept a connection"
try:
conn, addr = self.socket.accept()
return conn, addr
except socket.error, why:
if why[0] == EWOULDBLOCK:
pass
else:
raise
def close (self):
"close the socket and remove the dispatcher from the I/O map"
try:
self.socket.close ()
except:
pass # closing an already closed socket
self.socket = None
self.del_channel ()
self.connected = False
self.closing = True # == (self.socket == None)
assert None == self.log ('close', 'debug')
# The transport API for stream and datagram sockets
def send (self, data):
"try to send data through a stream socket"
try:
result = self.socket.send (data)
return result
except socket.error, why:
if why[0] == EWOULDBLOCK:
return 0
else:
raise
return 0
def recv (self, buffer_size):
"try to receive bytes from a stream socket or handle close"
try:
data = self.socket.recv (buffer_size)
if not data:
# a closed connection is indicated by signaling
# a read condition, and having recv() return 0.
self.handle_close()
return ''
else:
return data
except MemoryError:
# according to Sam Rushing, this is a place where
# MemoryError tend to be raised by Medusa. the rational
# is that under high load, like a DDoS (or the /.
# effect :-), recv is the function that will be called
# most *and* allocate the more memory.
#
sys.exit ("Out of Memory!") # do not even try to log!
except socket.error, why:
# winsock sometimes throws ENOTCONN
if why[0] in [ECONNRESET, ENOTCONN, ESHUTDOWN]:
self.handle_close()
return ''
else:
raise
def sendto (self, data, peer):
"try to send data through a datagram socket"
try:
return self.socket.sendto (data, peer)
except socket.error, why:
if why[0] == EWOULDBLOCK:
return 0
else:
raise
return 0
def recvfrom (self, datagram_size):
"try to receive from a datagram socket, maybe handle close"
try:
return self.socket.recvfrom (datagram_size)
except socket.error, why:
if why[0] in [ECONNRESET, ENOTCONN, ESHUTDOWN]:
self.handle_close()
return '', None
else:
raise
# The "iner" API applied in async_loop
async_map = async_loop._dispatched
def add_channel (self):
"add the dispatcher to the asynchronous I/O map"
self.async_map[self._fileno] = self
def del_channel (self):
"removes the dispatcher from the asynchronous I/O map"
fd = self._fileno
try:
del self.async_map[fd]
except KeyError:
pass
def readable (self):
"predicate for inclusion as readable in the poll loop"
return True
def writable (self):
"predicate for inclusion as writable in the poll loop"
return True
def handle_read_event (self):
"articulate read event as accept, connect or read."
if self.accepting:
# for an accepting socket, getting a read implies
# that we are connected
#
# TODO: is this actually useful? I mean, a listener
# is not a stream connection, not really ...
#
if not self.connected:
self.connected = True
self.handle_accept ()
elif not self.connected:
self.handle_connect ()
self.connected = True
self.handle_read ()
else:
self.handle_read ()
def handle_write_event (self):
"articulate write event as connect or write."
if not self.connected:
# getting a write implies that we are connected
self.handle_connect ()
self.connected = True
self.handle_write ()
# The protocol API for stream and datagram sockets
def handle_error (self):
"log a traceback and close, or raise SystemExit again"
t, v = sys.exc_info ()[:2]
if t is SystemExit:
raise t, v
self.loginfo_traceback ()
self.close () # self.handle_close () ... or nothing?
def handle_close (self):
self.close ()
def handle_read (self):
"to subclass: assert unhandled read event debug log"
assert None == self.log ('unhandled read event', 'debug')
def handle_write (self):
"to subclass: assert unhandled write event debug log"
assert None == self.log ('unhandled write event', 'debug')
def handle_connect (self):
"to subclass: assert unhandled connect event debug log"
assert None == self.log ('unhandled connect event', 'debug')
def handle_accept (self):
"to subclass: assert unhandled accept event debug log"
assert None == self.log ('unhandled accept event', 'debug')
# and finaly ...
def finalization (self, finalized):
"assert debug log of the instance finalization"
assert None == self.log ('finalized', 'debug')
# Asynchronous File I/O: UNIX pipe and stdio only
#
# What follows is the original comments by Sam Rushing:
#
# After a little research (reading man pages on various unixen, and
# digging through the linux kernel), I've determined that select()
# isn't meant for doing asynchronous file i/o.
# Heartening, though - reading linux/mm/filemap.c shows that linux
# supports asynchronous read-ahead. So _MOST_ of the time, the data
# will be sitting in memory for us already when we go to read it.
#
# What other OS's (besides NT) support async file i/o? [VMS?]
#
# Regardless, this is useful for pipes, and stdin/stdout...
if os.name == 'posix':
import fcntl
class File_wrapper (object):
"wrap a file with enough of a socket like interface"
def __init__ (self, fd):
self.fd = fd
def recv (self, *args):
return os.read (self.fd, *args)
def send (self, *args):
return os.write (self.fd, *args)
read = recv
write = send
def close (self):
return os.close (self.fd)
def fileno (self):
return self.fd
def set_file (self, fd):
"set a file descriptor and add the dispatcher (POSIX only)"
flags = fcntl.fcntl (fd, fcntl.F_GETFL, 0) | os.O_NONBLOCK
fcntl.fcntl (fd, fcntl.F_SETFL, flags)
self._fileno = fd
self.socket = File_wrapper (fd)
self.add_channel ()
self.connected = True
Dispatcher.set_file = set_file
# Conveniences
class Dispatcher_with_send (Dispatcher):
ac_out_buffer_size = 1 << 14 # sweet sixteen kilobytes
def __init__ (self):
self.ac_out_buffer = ''
def writable (self):
"writable when there is output buffered or queued"
return not (
(self.ac_out_buffer == '') and self.connected
)
def handle_write (self):
"try to send a chunk of buffered output or handle error"
buffer = self.ac_out_buffer
sent = self.send (buffer[:self.ac_out_buffer_size])
if sent:
self.ac_out_buffer = buffer[sent:]
else:
self.ac_out_buffer = buffer
class Dispatcher_with_fifo (Dispatcher):
ac_out_buffer_size = 1 << 14 # sweet sixteen kilobytes
def __init__ (self):
self.ac_out_buffer = ''
self.output_fifo = collections.deque ()
def writable (self):
"writable when there is output buffered or queued"
return not (
(self.ac_out_buffer == '') and
not self.output_fifo and self.connected
)
def handle_write (self):
"""pull an output fifo of single strings into the buffer,
send a chunk of it or close if done"""
obs = self.ac_out_buffer_size
buffer = self.ac_out_buffer
fifo = self.output_fifo
while len (buffer) < obs and fifo:
s = fifo.popleft ()
if s == None:
if buffer == '':
self.handle_close ()
return
else:
fifo.append (None)
break
buffer += s
if buffer:
sent = self.send (buffer[:obs])
if sent:
self.ac_out_buffer = buffer[sent:]
else:
self.ac_out_buffer = buffer
else:
self.ac_out_buffer = ''
def close_when_done (self):
"queue None if there is output queued, or handle close now"
if self.output_fifo:
self.output_fifo.append (None)
else:
self.handle_close ()
# Note about this implementation
#
# This is a refactored version of the asyncore's original dispatcher class,
# with a new logging facility (loginfo). The poll functions and the
# asynchronous loop have been moved to async_loop, in a single module that
# integrates a non-blocking I/O loop, a heapq scheduler loop and a loop
# through a deque of finalizations.
#
# I also :
#
# 1. added set_connection (conn, addr), moving that logic out of __init__
# 2. refactored the File_dispatcher as a set_file (fd) method
# 3. removed the now redundant set_socket (sock) method.
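#
# A minimal synopsis, a hypothetical HTTP/1.0 GET request built on
# Dispatcher_with_send; the host name is an assumption and network
# access is required to actually run it:
#
# >>> import socket
# >>> from allegra import async_loop, async_core
# >>> class Request (async_core.Dispatcher_with_send):
# ...     def handle_connect (self):
# ...         self.ac_out_buffer = 'GET / HTTP/1.0\r\n\r\n'
# ...     def handle_read (self):
# ...         self.log (self.recv (4096), 'response')
# ...
# >>> request = Request ()
# >>> request.create_socket (socket.AF_INET, socket.SOCK_STREAM)
# >>> request.connect (('www.example.com', 80))
# >>> async_loop.dispatch ()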
/Allegra-0.63.zip/Allegra-0.63/lib/async_core.py
"http://laurentszyster.be/blog/sync_stdio/"
import sys
from allegra import prompt, loginfo, async_loop, finalization, thread_loop
logger = loginfo.logger
class Sync_stdio (thread_loop.Thread_loop):
def __init__ (self):
self.async_loop_catch = async_loop._catched
async_loop._catched = self.async_prompt_catch
self.sync_stdout = logger.loginfo_stdout
self.sync_stderr = logger.loginfo_stderr
logger.loginfo_stdout = self.async_stdout
logger.loginfo_stderr = self.async_stderr
thread_loop.Thread_loop.__init__ (self)
def __repr__ (self):
return 'sync-stdio'
def async_stdout (self, data):
self.thread_loop_queue ((self.sync_stdout, (data,)))
def async_stderr (self, data):
self.thread_loop_queue ((self.sync_stderr, (data,)))
def async_stdio_stop (self):
async_loop._catched = self.async_loop_catch
self.async_loop_catch = None
logger.loginfo_stdout = self.sync_stdout
logger.loginfo_stderr = self.sync_stderr
try:
del self.log
except:
pass
self.thread_loop_queue (None)
return True
async_prompt_catch = async_stdio_stop
def thread_loop_delete (self):
assert None == self.select_trigger_log (
'stdio-stop', 'debug'
)
# del self.sync_stdout, self.sync_stderr
return True
class Sync_stdoe (Sync_stdio):
def __repr__ (self):
return 'sync-stdoe'
def thread_loop_init (self):
self.select_trigger_log (
'Press CTRL+C to stop synchronous I/O', 'info'
)
self.thread_loop_queue ((self.sync_stdin_close, ()))
return True
def sync_stdin_close (self):
sys.stdin.close ()
assert None == self.select_trigger_log (
'stdin_close', 'debug'
)
class Sync_prompt (Sync_stdio):
sync_prompt_prefix = '>>> '
sync_prompt_comment = '#'
sync_prompt_ready = not __debug__
def thread_loop_init (self):
if self.sync_prompt_ready:
self.thread_loop_queue ((self.sync_stdin, ()))
else:
self.select_trigger_log (
'press CTRL+C to open and close the console',
'info'
)
return True
def async_prompt_catch (self):
if self.sync_prompt_ready:
self.thread_loop_queue ((
self.sync_stderr, ('[CTRL+C]\n',)
))
self.sync_prompt_ready = False
else:
self.thread_loop_queue ((self.sync_stdin, ()))
self.sync_prompt_ready = True
return True
def sync_stdin (self):
self.sync_stderr (self.sync_prompt_prefix)
line = sys.stdin.readline ()
if line == '':
if sys.stdin.closed:
self.select_trigger ((
self.async_stdio_stop, ()
))
else:
self.select_trigger ((self.async_prompt, ()))
elif line == '\n':
self.select_trigger ((self.async_prompt, ()))
else:
self.select_trigger ((
self.async_readline, (line[:-1],)
))
def sync_stdin_script (self):
line = sys.stdin.readline ()
if line == '':
self.select_trigger ((self.async_prompt, ()))
elif line == '\n' or line.startswith (
self.sync_prompt_comment
):
self.thread_loop_queue ((self.sync_stdin, ()))
else:
self.select_trigger ((
self.async_readline, (line[:-1],)
))
def async_prompt (self):
self.sync_prompt_ready = False
def async_readline (self, line):
assert None == self.log (line)
self.thread_loop_queue ((self.sync_stdin, ()))
class Python_prompt (Sync_prompt):
def __init__ (self, env=None, info=None):
self.loginfo_info = info
self.python_prompt_env = env or {'prompt': self}
loginfo.log ('Python %s on %s' % (
sys.version, sys.platform
), info)
Sync_prompt.__init__ (self)
def __repr__ (self):
return 'python-prompt id="%x"' % id (self)
def async_readline (self, line):
method, result = prompt.python_prompt (
line, self.python_prompt_env
)
if method == 'excp':
self.loginfo_traceback (result)
elif result != None:
if __debug__:
self.async_stderr ('%r\n' % (result,))
else:
self.select_trigger ((loginfo.log, (
'%r' % (result, ), self.loginfo_info
)))
if self.async_loop_catch != None:
self.thread_loop_queue ((self.sync_stdin, ()))
def async_stdio_stop (self):
self.python_prompt_env = None # break circular reference
return Sync_prompt.async_stdio_stop (self)
if __name__ == '__main__':
import sys
info = None # log prompt results to STDOUT by default
if '-d' in sys.argv:
sys.argv.remove ('-d')
loginfo.Logger.log = loginfo.Logger.loginfo_netlines
elif not __debug__:
Python_prompt.sync_stdin = Sync_prompt.sync_stdin_script
info = 'prompt'
assert None == loginfo.log (
'Allegra Prompt'
' - Copyright 2005 Laurent A.V. Szyster'
' | Copyleft GPL 2.0', 'info'
)
Python_prompt (None, info).start ()
async_loop.dispatch ()
assert None == finalization.collect ()
/Allegra-0.63.zip/Allegra-0.63/lib/sync_stdio.py
"http://laurentszyster.be/blog/async_client/"
import time, socket, collections
try:
SOCKET_FAMILIES = (socket.AF_INET, socket.AF_UNIX)
except:
SOCKET_FAMILIES = (socket.AF_INET, )
from allegra import loginfo, async_loop, async_limits
def connect (dispatcher, addr, timeout=3.0, family=socket.AF_INET):
"create a socket, try to connect it and schedule a timeout"
assert (
not dispatcher.connected and timeout > 0 and
family in SOCKET_FAMILIES
)
dispatcher.client_when = time.time ()
dispatcher.client_timeout = timeout
try:
dispatcher.create_socket (family, socket.SOCK_STREAM)
dispatcher.connect (addr)
except:
dispatcher.loginfo_traceback ()
dispatcher.handle_error ()
return False
assert None == dispatcher.log ('connect', 'debug')
def connect_timeout (when):
"if not connected and not closing yet, handle close"
if not dispatcher.connected and not dispatcher.closing:
assert None == dispatcher.log (
'connect-timeout %f seconds' % (
when - dispatcher.client_when
), 'debug'
)
dispatcher.handle_close ()
async_loop.schedule (
dispatcher.client_when + timeout, connect_timeout
)
return True
def reconnect (dispatcher):
if dispatcher.addr:
dispatcher.closing = False
return connect (
dispatcher,
dispatcher.addr,
dispatcher.client_timeout,
dispatcher.family_and_type[0]
)
return False
class Connections (loginfo.Loginfo):
"a connection manager for async_client.Dispatcher instances"
ac_in_meter = ac_out_meter = 0
client_errors = client_when = client_dispatched = 0
def __init__ (
self, timeout=3.0, precision=1.0, family=socket.AF_INET
):
"initialize a new client manager"
assert (
timeout > 0 and precision > 0 and
family in SOCKET_FAMILIES
)
self.client_managed = {}
self.client_timeout = timeout
self.client_precision = precision
self.client_family = family
resolved (self)
inactive (self, timeout)
def __call__ (self, dispatcher, name):
"registed, decorate and connect a new dispatcher"
if self.client_connect (dispatcher, name):
now = time.time ()
dispatcher.async_client = self
self.client_decorate (dispatcher, now)
key = id (dispatcher)
self.client_managed[key] = dispatcher
dispatcher.client_key = key
if len (self.client_managed) == 1:
self.client_start (now)
else:
self.client_errors += 1
return dispatcher
def client_connect (self, dispatcher, name):
"resolve and/or connect a dispatcher"
dispatcher.client_name = name
addr = self.client_resolved (name)
if addr != None:
return connect (
dispatcher, addr,
self.client_timeout, self.client_family
)
if self.client_resolve == None:
self.client_unresolved (dispatcher, name)
return False
def resolve (addr):
if addr == None:
self.client_unresolved (dispatcher, name)
return
if not connect (
dispatcher, addr,
self.client_timeout, self.client_family
):
self.client_errors += 1
self.client_resolve (name, resolve)
return True
def client_reconnect (self, dispatcher):
dispatcher.closing = False
self (dispatcher, dispatcher.client_name)
return dispatcher.closing
def client_unresolved (self, dispatcher, name):
"assert debug log and close an unresolved dispatcher"
assert None == dispatcher.log (
'%r unresolved' % (name, ), 'debug'
)
self.client_errors += 1
dispatcher.handle_close ()
def client_start (self, when):
"handle the client management startup"
self.client_when = when
async_loop.schedule (
when + self.client_precision, self.client_manage
)
assert None == self.log ('start', 'debug')
def client_manage (self, when):
"test limits overflow, recure or stop"
for dispatcher in self.client_dispatchers ():
if self.client_limit (dispatcher, when):
self.client_overflow (dispatcher)
if self.client_managed:
return (
when + self.client_precision,
self.client_manage
) # continue to defer
self.client_stop (when)
return None
def client_dispatchers (self):
"return a list of managed dispatchers"
return self.client_managed.values ()
def client_overflow (self, dispatcher):
"assert debug log and close an overflowed dispatcher"
assert None == dispatcher.log ('limit overflow', 'debug')
dispatcher.handle_close ()
def client_meter (self, dispatcher):
"assert debug log and account I/O meters of a dispatcher"
assert None == dispatcher.log (
'in="%d" out="%d"' % (
dispatcher.ac_in_meter,
dispatcher.ac_out_meter
), 'debug'
)
self.ac_in_meter += dispatcher.ac_in_meter
self.ac_out_meter += dispatcher.ac_out_meter
self.client_dispatched += 1
def client_close (self, dispatcher):
"remove the dispatcher from cache and meter dispatched"
del self.client_managed[dispatcher.client_key]
self.client_meter (dispatcher)
dispatcher.async_client = None
def client_stop (self, when):
"handle the client management stop"
assert None == self.log (
'stop errors="%d" dispatched="%d"'
' seconds="%f" in="%d" out="%d"' % (
self.client_errors,
self.client_dispatched,
(when - self.client_when),
self.ac_in_meter,
self.ac_out_meter
), 'debug')
self.client_errors = self.client_dispatched = \
self.ac_in_meter = self.ac_out_meter = 0
def close_when_done (self):
"close all client dispatchers when done"
for dispatcher in self.client_dispatchers ():
dispatcher.close_when_done ()
class Cache (Connections):
"a cache of managed connections"
def __init__ (
self, timeout=3.0, precision=1.0, family=socket.AF_INET
):
"initialize a new client cache"
assert (
timeout > 0 and precision > 0 and
family in SOCKET_FAMILIES
)
self.client_managed = {}
self.client_timeout = timeout
self.client_precision = precision
self.client_family = family
resolved (self)
inactive (self, timeout)
def __call__ (self, Dispatcher, name):
"""return a cached or a new dispatcher, maybe resolving and
connecting it first, closing it on connection error or if
it's socket address cannot be resolved"""
try:
return self.client_managed[name]
except KeyError:
pass
dispatcher = Dispatcher ()
if self.client_connect (dispatcher, name):
now = time.time ()
dispatcher.async_client = self
self.client_decorate (dispatcher, now)
self.client_managed[name] = dispatcher
dispatcher.client_key = name
if len (self.client_managed) == 1:
self.client_start (now)
else:
self.client_errors += 1
return dispatcher
class Pool (Connections):
"a pool of managed connections"
def __init__ (
self, Dispatcher, name,
size=2, timeout=3.0, precision=1.0, family=socket.AF_INET
):
"initialize a new client pool"
assert (
type (size) == int and size > 1 and
timeout > 0 and precision > 0 and
family in SOCKET_FAMILIES
)
self.client_managed = []
self.client_pool = size
self.client_name = name
self.client_called = 0
self.Client_dispatcher = Dispatcher
self.client_timeout = timeout
self.client_precision = precision
self.client_family = family
resolved (self)
inactive (self, timeout)
def __call__ (self):
"""return the next dispatcher pooled or instanciate a new
one, maybe resolving and connecting it first, closing it on
connection error or if it's socket address cannot be
resolved"""
size = len (self.client_managed)
if size >= self.client_pool:
self.client_called += 1
return self.client_managed[self.client_called % size]
now = time.time ()
dispatcher = self.Client_dispatcher ()
if self.client_connect (dispatcher, self.client_name):
dispatcher.async_client = self
self.client_decorate (dispatcher, now)
self.client_managed.append (dispatcher)
if len (self.client_managed) == 1:
self.client_start (now)
else:
self.client_errors += 1
return dispatcher
def client_reconnect (self, dispatcher):
return False # useless for a cached dispatcher!
def client_dispatchers (self):
"return a list of dispatchers pooled"
return list (self.client_managed)
def client_close (self, dispatcher):
"remove the dispatcher from pool and increment dispatched"
self.client_meter (dispatcher)
self.client_managed.remove (dispatcher)
dispatcher.async_client = None
def resolved (connections):
"allways resolved for unresolved dispatcher address"
connections.client_resolved = (lambda addr: addr)
connections.client_resolve = None
return connections
def meter (dispatcher, when):
"decorate a client dispatcher with stream meters"
async_limits.meter_recv (dispatcher, when)
async_limits.meter_send (dispatcher, when)
def close ():
del (
dispatcher.recv,
dispatcher.send,
dispatcher.close
)
dispatcher.close ()
dispatcher.async_client.client_close (dispatcher)
dispatcher.close = close
def no_limit (dispatcher, when):
return False
def unlimited (connections):
"meter I/O for unlimited client streams"
connections.client_decorate = meter
connections.client_limit = no_limit
return connections
def inactive (connections, timeout):
"meter I/O and limit inactivity for client streams"
assert timeout > 0
def decorate (dispatcher, when):
meter (dispatcher, when)
dispatcher.limit_inactive = connections.client_inactive
connections.client_decorate = decorate
connections.client_inactive = timeout
connections.client_limit = async_limits.inactive
return connections
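# A minimal sketch of the decoration helpers (Client_dispatcher and the
# timeout below are illustrative): Cache and Pool apply inactive () by
# default, unlimited () keeps the I/O meters but drops the inactivity limit.
#
# >>> cache = unlimited (Cache ())
# >>> pool = inactive (Pool (Client_dispatcher, ('127.0.0.1', 80)), 6.0)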
def limited (connections, timeout, inBps, outBps):
"throttle I/O and limit inactivity for managed client streams"
assert (
timeout > 0 and
type (inBps ()) == int and inBps () > 0 and
type (outBps ()) == int and outBps () > 0
)
def throttle (dispatcher, when):
"decorate a client dispatcher with stream limits"
async_limits.meter_recv (dispatcher, when)
async_limits.meter_send (dispatcher, when)
dispatcher.limit_inactive = timeout
async_limits.throttle_readable (
dispatcher, when, connections.ac_in_throttle_Bps
)
async_limits.throttle_writable (
dispatcher, when, connections.ac_out_throttle_Bps
)
def close ():
del (
dispatcher.recv,
dispatcher.send,
dispatcher.readable,
dispatcher.writable,
dispatcher.close
)
dispatcher.close ()
dispatcher.async_client.client_close (dispatcher)
dispatcher.close = close
connections.client_decorate = throttle
connections.ac_in_throttle_Bps = inBps
connections.ac_out_throttle_Bps = outBps
connections.client_limit = async_limits.limit
return connections
def rationed (connections, timeout, inBps, outBps):
"ration I/O and limit inactivity for managed client streams"
assert (
timeout > 0 and
type (inBps) == int and inBps > 0 and
type (outBps) == int and outBps > 0
)
connections.ac_in_ration_Bps = inBps
connections.ac_out_ration_Bps = outBps
def throttle_in ():
return int (connections.ac_in_ration_Bps / max (len (
connections.client_managed
), 1))
def throttle_out ():
return int (connections.ac_out_ration_Bps / max (len (
connections.client_managed
), 1))
return limited (connections, timeout, throttle_in, throttle_out)
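# A minimal sketch of throttling (Client_dispatcher and the figures are
# illustrative): limited () expects callables returning the current limit in
# bytes per second, rationed () expects integers that are divided by the
# number of managed dispatchers.
#
# >>> pool = limited (
# ...     Pool (Client_dispatcher, ('127.0.0.1', 80)), 3.0,
# ...     (lambda: 16384), (lambda: 16384)
# ...     )
# >>> cache = rationed (Cache (), 3.0, 16384, 16384)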
class Pipeline (object):
"a pipeline mix-in for dispatcher"
#pipeline_sleeping = False
pipeline_pipelining = False
pipeline_keep_alive = False
def pipeline_set (self, requests=None, responses=None):
"set new requests and responses deque"
self.pipeline_requests = requests or collections.deque ()
self.pipeline_responses = responses or collections.deque ()
#def pipeline (self, request):
# "pipeline a new request, wake up if sleeping"
# self.pipeline_requests.append (request)
# if self.pipeline_sleeping:
# self.pipeline_sleeping = False
# self.pipeline_wake_up ()
def pipeline_wake_up (self):
requests = self.pipeline_requests
if self.pipeline_pipelining and len (requests) > 1:
                        self.pipeline_requests = collections.deque ()
self.output_fifo.extend ((
request[0] for request in requests
))
self.pipeline_responses.extend (requests)
else:
request = self.pipeline_requests.popleft ()
self.output_fifo.append (request[0])
self.pipeline_responses.append (request)
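# A minimal sketch of the Pipeline mix-in, assuming Netstrings is an
# illustrative dispatcher class that owns an output_fifo deque (async_net's
# Dispatcher is one such class). The (strings, context) request shape below
# is only an assumption: pipeline_wake_up merely requires that request[0]
# can be appended to the output FIFO, while the whole request is kept in
# pipeline_responses to be matched with its response later.
#
# >>> class Pipelined (Netstrings, Pipeline): pass
# >>> p = Pipelined ()
# >>> p.pipeline_set ()
# >>> p.pipeline_requests.append ((('hello', ), None))
# >>> p.pipeline_wake_up ()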
Allegra | /Allegra-0.63.zip/Allegra-0.63/lib/async_client.py | async_client.py
"http://laurentszyster.be/blog/async_net/"
import collections, socket
from allegra import async_core
class NetstringError (Exception): pass
def collect_net (next, buffer, collect, terminate):
"consume a buffer of netstrings into a stallable collector sink"
lb = len (buffer)
if next > 0:
if next > lb:
collect (buffer)
return next - lb, '', False # buffer more ...
if buffer[next] == ',':
collect (buffer[:next])
if terminate (None):
return 0, buffer[next+1:], True # stop now!
else:
raise NetstringError, '3 missing comma'
prev = next + 1
else:
prev = 0
while prev < lb:
pos = buffer.find (':', prev)
if pos < 0:
if prev > 0:
buffer = buffer[prev:]
if not buffer.isdigit ():
raise NetstringError, '1 not a netstring'
return 0, buffer, False # buffer more ...
try:
next = pos + int (buffer[prev:pos]) + 1
except:
raise NetstringError, '2 not a length'
if next >= lb:
collect (buffer[pos+1:])
return next - lb, '', False # buffer more
elif buffer[next] == ',':
if terminate (buffer[pos+1:next]):
return 0, buffer[next+1:], True # stop now!
else:
raise NetstringError, '3 missing comma'
prev = next + 1 # continue ...
return 0, '', False # buffer consumed.
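# A minimal doctest-like sketch of collect_net (payloads and callbacks are
# illustrative): netstrings complete in the buffer are passed to terminate,
# collect is only called for chunks of a netstring that spans buffers, and a
# True return from terminate stalls the parse.
#
# >>> payloads = []
# >>> def terminate (bytes):
# ...     payloads.append (bytes)
# ...     return False # do not stall
# ...
# >>> collect_net (0, '5:hello,5:world,', payloads.append, terminate)
# (0, '', False)
# >>> payloads
# ['hello', 'world']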
class Dispatcher (async_core.Dispatcher):
ac_in_buffer_size = ac_out_buffer_size = 1 << 14 # sweet 16 kilobytes
terminator = 0
collector_stalled = False
def __init__ (self):
self.ac_in_buffer = ''
self.ac_out_buffer = ''
self.output_fifo = collections.deque ()
def __repr__ (self):
return 'async-net id="%x"' % id (self)
def readable (self):
"predicate for inclusion in the poll loop for input"
return not (
self.collector_stalled or
len (self.ac_in_buffer) > self.ac_in_buffer_size
)
def writable (self):
"predicate for inclusion in the poll loop for output"
return not (
(self.ac_out_buffer == '') and
not self.output_fifo and self.connected
)
def handle_read (self):
"try to buffer more input and parse netstrings"
try:
(
self.terminator,
self.ac_in_buffer,
self.collector_stalled
) = collect_net (
self.terminator,
self.ac_in_buffer + self.recv (
self.ac_in_buffer_size
),
self.async_net_collect,
self.async_net_terminate
)
except NetstringError, error:
self.async_net_error (error)
def handle_write (self):
"buffer out a fifo of strings, try to send or close if done"
obs = self.ac_out_buffer_size
buffer = self.ac_out_buffer
fifo = self.output_fifo
while len (buffer) < obs and fifo:
strings = fifo.popleft ()
if strings == None:
if buffer == '':
self.handle_close ()
return
else:
fifo.append (None)
break
buffer += ''.join ((
'%d:%s,' % (len (s), s) for s in strings
))
if buffer:
sent = self.send (buffer[:obs])
if sent:
self.ac_out_buffer = buffer[sent:]
else:
self.ac_out_buffer = buffer
else:
self.ac_out_buffer = ''
        # An interface compatible with Async_chat.close_when_done, used by
        # Allegra's TCP client and server implementations.
def close_when_done (self):
"""close this channel when previously queued strings have
been sent, or close now if the queue is empty."""
if self.output_fifo:
self.output_fifo.append (None)
else:
self.handle_close ()
# The Async_net Interface
def async_net_out (self, strings):
"buffer netstrings of an iterable of 8-bit byte strings"
self.ac_out_buffer += ''.join ((
'%d:%s,' % (len (s), s) for s in strings
))
def async_net_push (self, strings):
"push an iterable of 8-bit byte strings for output"
assert hasattr (strings, '__iter__')
self.output_fifo.append (strings)
def async_net_pull (self):
"try to consume the input netstrings buffered"
if not self.ac_in_buffer:
self.collector_stalled = False
return
try:
(
self.terminator,
self.ac_in_buffer,
self.collector_stalled
) = collect_net (
self.terminator,
self.ac_in_buffer,
self.async_net_collect,
self.async_net_terminate
)
except NetstringError, error:
self.async_net_error (error)
async_net_in = ''
def async_net_collect (self, bytes):
"collect an incomplete netstring chunk into a buffer"
self.async_net_in += bytes
def async_net_terminate (self, bytes):
"terminate a collected or buffered netstring and continue"
if bytes == None:
bytes = self.async_net_in
self.async_net_in = ''
return self.async_net_continue (bytes)
def async_net_continue (self, bytes):
"assert debug log of collected netstrings"
assert None == self.log (bytes, 'async-net-continue')
return False
def async_net_error (self, message):
"log netstrings error and close the channel"
self.log (message, 'async-net-error')
self.handle_close ()
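# A minimal subclassing sketch (the Echo name is illustrative): override
# async_net_continue to handle each netstring collected and use
# async_net_push to queue payload strings for output, netstring encoding
# being applied by handle_write.
#
# >>> class Echo (Dispatcher):
# ...     def async_net_continue (self, bytes):
# ...         self.async_net_push ((bytes, ))
# ...         return False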
Allegra | /Allegra-0.63.zip/Allegra-0.63/lib/async_net.py | async_net.py
"http://laurentszyster.be/blog/synchronized/"
import collections, subprocess
from allegra import loginfo, finalization, thread_loop
# File producer and collector
def sync_open (self, filename, mode, bufsize):
try:
self.sync_file = open (filename, mode, bufsize)
except:
self.select_trigger ((self.async_close, ('eo', )))
else:
self.select_trigger ((self.async_open, (mode, )))
def sync_write (self, data):
try:
self.sync_file.write (data)
except:
self.select_trigger ((self.async_close, ('ew', )))
def sync_read (self):
try:
data = self.sync_file.read (self.sync_buffer)
except:
self.select_trigger ((self.async_close, ('er', )))
else:
if data:
self.select_trigger ((self.async_read, (data, )))
else:
sync_close (self, 'r')
def sync_close (self, mode):
try:
self.sync_file.close ()
except:
self.select_trigger ((self.async_close, ('ec', )))
else:
self.select_trigger ((self.async_close, (mode, )))
self.sync_file = None
class File_producer (object):
synchronizer = None
synchronizer_size = 2
async_buffers = ()
async_closed = False
def __init__ (self, filename, mode='rb', bufsize=1<<14):
assert (
type (filename) == str and
mode.startswith ('r') and
(0 < len (mode) < 3) and
                        bufsize > 0
)
self.sync_buffer = bufsize
self.async_buffers = collections.deque([])
thread_loop.synchronize (self)
self.synchronized ((
sync_open, (self, filename, mode, bufsize)
))
def __repr__ (self):
return 'synchronized-file-producer id="%x"' % id (self)
def more (self):
try:
return self.async_buffers.popleft ()
except:
return ''
def producer_stalled (self):
return not (
self.async_closed or len (self.async_buffers) > 0
)
def async_open (self, mode):
self.synchronized ((sync_read, (self, )))
def async_read (self, data):
self.async_buffers.append (data)
self.synchronized ((sync_read, (self, )))
def async_close (self, mode):
self.async_closed = True
thread_loop.desynchronize (self)
class File_collector (object):
synchronizer = None
synchronizer_size = 2
collector_is_simple = True
async_closed = False
def __init__ (self, filename, mode='wb', bufsize=-1):
assert (
type (filename) == str and
not mode.startswith ('r') and
(0 < len (mode) < 3)
)
thread_loop.synchronize (self)
self.synchronized ((
sync_open, (self, filename, mode, bufsize)
))
def __repr__ (self):
return 'synchronized-file-collector id="%x"' % id (self)
def collect_incoming_data (self, data):
self.synchronized ((sync_write, (self, data,)))
def found_terminator (self):
self.synchronized ((sync_close, (self, 'w', )))
return True
def async_open (self, mode): pass
def async_close (self, mode):
self.async_closed = True
thread_loop.desynchronize (self)
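# A minimal usage sketch (the file names are illustrative, and both classes
# rely on allegra.thread_loop for their synchronized threads): File_producer
# reads a file in a worker thread and buffers chunks for more (),
# File_collector writes collected data and closes on found_terminator ().
#
# >>> producer = File_producer ('input.bin')
# >>> collector = File_collector ('output.bin')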
# Subprocess reactor
def sync_popen (self, args):
try:
self.subprocess = subprocess.Popen (*args)
except Exception, error:
self.select_trigger ((self.async_except, (error, )))
else:
self.select_trigger ((self.async_popen, ()))
def sync_stdin (self, data):
try:
self.subprocess.stdin.write (data)
except Exception, error:
self.select_trigger ((self.async_except, (error, )))
def sync_stdout (self):
exit = self.subprocess.poll ()
if exit != None:
sync_wait (self)
return
try:
data = self.subprocess.stdout.read (self.sync_buffer)
except Exception, error:
self.select_trigger ((self.async_except, (error, )))
else:
if data:
self.select_trigger ((self.async_stdout, (data, )))
else:
sync_wait (self)
def sync_wait (self):
if self.subprocess == None:
self.select_trigger ((self.async_return, (None, )))
return
sub = self.subprocess
self.subprocess = None
sub.stdin.close ()
sub.stdout.close ()
if sub.stderr:
sub.stderr.close ()
self.select_trigger ((self.async_return, (sub.wait (), )))
class Popen_producer (object):
synchronizer = None
synchronizer_size = 2
subprocess = async_code = None
sync_buffer = 1<<16
def __init__ (self):
self.async_buffers = collections.deque([])
thread_loop.synchronize (self)
def more (self):
try:
return self.async_buffers.popleft ()
except:
return ''
def producer_stalled (self):
return (
self.async_code == None and
len (self.async_buffers) == 0
)
def async_popen (self):
self.synchronized ((sync_stdout, (self, )))
def async_stdout (self, data):
self.async_buffers.append (data)
self.synchronized ((sync_stdout, (self, )))
def async_stderr (self, data):
assert None == loginfo.log (
'async_error', 'not implemented'
)
def async_except (self, error):
assert None == loginfo.log (str (error), 'debug')
sync_wait (self)
def async_return (self, code):
self.async_code = code
thread_loop.desynchronize (self)
assert None == loginfo.log ('exit (%r)' % code, 'debug')
def popen_producer (
args, bufsize=0, executable=None,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
preexec_fn=None, close_fds=False,
shell=False, cwd=None, env=None,
universal_newlines=False, startupinfo=None,
creationflags=0
):
assert (
stdin == subprocess.PIPE and
stdout == subprocess.PIPE and
stderr in (subprocess.PIPE, subprocess.STDOUT)
)
sp = Popen_producer ()
sp.synchronized ((sync_popen, (sp, (
args, bufsize, executable, stdin, stdout, stderr,
preexec_fn, close_fds, shell, cwd, env,
universal_newlines, startupinfo, creationflags
))))
return sp
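# A minimal usage sketch (the command is illustrative): popen_producer
# starts the subprocess from a synchronized thread; its standard output is
# then buffered asynchronously and drained with more ().
#
# >>> stdout = popen_producer (('echo', 'hello'))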
class Popen_collector (object):
synchronizer = None
synchronizer_size = 2
collector_is_simple = True
subprocess = async_code = None
def __init__ (self):
thread_loop.synchronize (self)
def collect_incoming_data (self, data):
self.synchronized ((sync_stdin, (self, data,)))
def found_terminator (self):
self.synchronized ((sync_wait, (self, )))
return True
def async_popen (self):
assert None == loginfo.log ('async_popen', 'debug')
def async_stderr (self, data):
assert None == loginfo.log (
'async_error', 'not implemented'
)
def async_except (self, error):
assert None == loginfo.log (str (error), 'debug')
sync_wait (self)
def async_return (self, code):
self.async_code = code
thread_loop.desynchronize (self)
assert None == loginfo.log ('%r' % code, 'debug')
def popen_collector (
args, bufsize=0, executable=None,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
preexec_fn=None, close_fds=False,
shell=False, cwd=None, env=None,
universal_newlines=False, startupinfo=None,
creationflags=0
):
assert (
stdin == subprocess.PIPE and
stdout == subprocess.PIPE and
stderr in (subprocess.PIPE, subprocess.STDOUT)
)
sc = Popen_collector ()
sc.synchronized ((sync_popen, (sc, (
args, bufsize, executable, stdin, stdout, stderr,
preexec_fn, close_fds, shell, cwd, env,
universal_newlines, startupinfo, creationflags
))))
return sc
class Popen_reactor (finalization.Finalization):
def __init__ (
self, args,
bufsize=0, executable=None,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
preexec_fn=None, close_fds=False,
shell=False, cwd=None, env=None,
universal_newlines=False, startupinfo=None,
creationflags=0
):
assert (
stdin == subprocess.PIPE and
stdout == subprocess.PIPE and
stderr in (subprocess.PIPE, subprocess.STDOUT)
)
self.collector = Popen_collector ()
self.producer = Popen_producer ()
self.collector.async_popen = self.async_popen
self.collector.found_terminator = self.found_terminator
self.producer.async_return = self.async_return
self.collector.synchronized ((sync_popen, (self.collector, (
args, bufsize, executable, stdin, stdout, stderr,
preexec_fn, close_fds, shell, cwd, env,
universal_newlines, startupinfo, creationflags
))))
def async_popen (self):
self.producer.subprocess = self.collector.subprocess
self.producer.synchronized ((
sync_stdout, (self.producer, )
))
self.collector.async_popen = None
def found_terminator (self):
self.collector.found_terminator = None
def async_return (self, code):
scin = self.collector
spout = self.producer
scin.async_code = spout.async_code = code
self.producer = self.collector = None
spout.async_return = None
thread_loop.desynchronize (scin)
thread_loop.desynchronize (spout)
assert None == loginfo.log ('%r' % code, 'debug')
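# A minimal usage sketch (the command is illustrative): a Popen_reactor
# couples a collector feeding the subprocess standard input with a producer
# buffering its standard output; data only shows up in more () once the
# synchronized thread and the asynchronous loop have run.
#
# >>> reactor = Popen_reactor (('cat', ))
# >>> reactor.collector.collect_incoming_data ('hello')
# >>> reactor.collector.found_terminator ()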
Allegra | /Allegra-0.63.zip/Allegra-0.63/lib/synchronized.py | synchronized.py
"http://laurentszyster.be/blog/loginfo/"
import sys
from allegra import netstring, prompt
def _write_maybe_flush (file):
assert hasattr (file, 'write')
if hasattr (file, 'flush'):
def write (data):
file.write (data)
file.flush ()
return
return write
return file.write
class Logger (object):
"Loginfo's log dispatcher implementation"
def __init__ (self):
self.loginfo_stdout = _write_maybe_flush (sys.stdout)
self.loginfo_stderr = _write_maybe_flush (sys.stderr)
self.loginfo_categories = {}
def loginfo_netstrings (self, data, info=None):
"log netstrings to STDOUT, a category handler or STDERR"
assert type (data) == str
if info == None:
self.loginfo_stdout ('%d:%s,' % (len (data), data))
elif self.loginfo_categories.has_key (info):
self.loginfo_categories[info] (
'%d:%s,' % (len (data), data)
)
else:
assert type (info) == str
encoded = netstring.encode ((info, data))
self.loginfo_stderr (
'%d:%s,' % (len (encoded), encoded)
)
def loginfo_netlines (self, data, info=None):
"log netoutlines to STDOUT, a category handler or STDERR"
assert type (data) == str
if info == None:
self.loginfo_stdout (netstring.netlines (data))
elif self.loginfo_categories.has_key (info):
self.loginfo_categories[info] (
netstring.netlines (data)
)
else:
assert type (info) == str
self.loginfo_stderr (netstring.netlines (
netstring.encode ((info, data))
))
if __debug__:
log = loginfo_netlines
else:
log = loginfo_netstrings
# the facility instance and module interfaces
logger = Logger ()
def log (*args):
logger.log (*args)
def compact_traceback (ctb):
"encode a compact traceback tuple as netstrings"
return netstring.encode ((
ctb[0],
netstring.encode ([' | '.join (x) for x in ctb[2]]),
ctb[1]
)), 'traceback'
def classic_traceback (ctb=None):
return netstring.encode ((
netstring.encode ([
'File "%s", line %s, in %s' % (
tb[0] or '<string>', tb[2], tb[1]
)
for tb in ctb[2]
]), '%s: %s' % ctb[:2]
)), 'Traceback (most recent call last):'
traceback_encode = compact_traceback
def traceback (ctb=None):
"return a compact traceback and log it in the 'traceback' category"
if ctb == None:
ctb = prompt.compact_traceback ()
logger.log (*traceback_encode (ctb))
return ctb
# redirection of all Python standard outputs to the logger
class _STDOUT (object):
def write (self, line):
logger.log (line)
sys.stdout = _STDOUT ()
def _displayhook (value):
if value != None:
logger.log ('%r' % (value,))
sys.displayhook = _displayhook
class _STDERR (object):
def write (self, line):
logger.log (line, 'stderr')
def _excepthook (*exc_info):
traceback (prompt.compact_traceback (exc_info))
sys.excepthook = _excepthook
# a class interface to mix in
class Loginfo (object):
loginfo_logger = logger
def __repr__ (self):
return '%s id="%x"' % (
self.__class__.__name__, id (self)
)
def loginfo_log (self, data, info=None):
"""log a message with this instance's __repr__ and an
optional category"""
self.loginfo_logger.log (netstring.encode ((
'%r' % self, '%s' % data
)), info)
log = loginfo_log
def loginfo_null (self, data, info=None):
"drop the message to log"
pass
def loginfo_logging (self):
"return True if the instance is logging"
return self.log != self.loginfo_null
def loginfo_toggle (self, logging=None):
"toggle logging on/off for this instance"
if logging == None:
if self.log == self.loginfo_null:
try:
del self.log
except:
self.log = self.loginfo_log
else:
try:
del self.log
except:
self.log = self.loginfo_null
return self.log != self.loginfo_null
if logging == True and self.log == self.loginfo_null:
self.log = self.loginfo_log
elif logging == False and self.log == self.loginfo_log:
self.log = self.loginfo_null
return logging
def loginfo_traceback (self, ctb=None):
"""return a compact traceback tuple and log it encoded as
netstrings, along with this instance's __repr__, in the
'traceback' category"""
if ctb == None:
ctb = prompt.compact_traceback ()
self.loginfo_log (*traceback_encode (ctb))
return ctb
def toggle (logging=None, Class=Loginfo):
"toggle logging on/off for the Class specified or Loginfo"
if logging == None:
if Class.log == Class.loginfo_null:
Class.log = Class.loginfo_log
return True
Class.log = Class.loginfo_null
return False
if logging == True and Class.log == Class.loginfo_null:
Class.log = Class.loginfo_log
elif logging == False and Class.log == Class.loginfo_log:
Class.log = Class.loginfo_null
return logging
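# A minimal sketch of toggling (MyChannel is an illustrative Loginfo
# subclass): silence every Loginfo instance, then let one class log again.
#
# >>> toggle (False)
# False
# >>> toggle (True, Class=MyChannel)
# True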
# SYNOPSIS
#
# >>> from allegra import loginfo
# >>> loginfo.log ('message')
# message
# >>> loginfo.log ('message', 'info')
# info
# message
#
# >>> try:
# ... foobar ()
# ... except:
# ... ctb = loginfo.traceback ()
# traceback
# exceptions.NameError
# name 'foobar' is not defined
# <stdin> | ? | 2
#
# >>> logged = loginfo.Loginfo ()
# >>> logged.log ('message')
# Loginfo id="8da4e0"
# message
#
# >>> logged.log ('message', 'info')
# info
# Loginfo id="8da4e0"
# message
#
# The Loginfo interface and implementation provide a simpler, yet more
# powerful and practical logging facility than the one currently integrated
# with Python.
#
# First, it uses netstrings instead of CR or CRLF delimited lines for I/O
# encoding, solving many of the log consumers' problems ahead of time. Second,
# it is well adapted to a practical development cycle and by default implements
# a simple scheme that suits a shell pipe well, like:
#
# pipe < input 1> output 2> errors
#
# However "slow" this Python facily is, it offers a next-best trade-off
# between performance in production and flexibility for both debugging and
# maintenance. All debug and information logging can be effectively disabled,
# either skipped in case of:
#
# assert None == loginfo.log (...)
#
# or simply dropped, yielding no access to a possibly blocking or buffering
# process handling the log output. The ability to filter logging out or in
# at the instance or class level is enough to satisfy the most demanding
# application administrator (as for categories, there are none defined, do
# as you please ;-).
#
# Last but not least, the loginfo_log is compatible with asyncore logging
# interfaces (which is no wonder, since it is inspired by Medusa's original
# logging facility module).
#
#
# CAVEAT!
#
# Since Loginfo instances update their own namespace with bound methods,
# they actually create a reference cycle, referencing themselves. So toggling
# off is a requirement if you need to finalize an instance whose logging has
# been explicitly toggled on or off.
#
# the trade-off is the following: most toggling happens at the class level
# in practice. From development to production, instance loginfo toggling
# will disappear. And if you ask to log an object at runtime, it's a practical
# way to *not* finalize it: you're observing something! And that's even more
# true if you have to manipulate instances in production ...
#
# Simple but not obvious :-)
#
#
# One Logger Only
#
# As time and applications go by, it made sense to do with only one logger
# facility, a root logger. That's the model of log4j, but with an extra
# simplification: STDOUT and STDERR are replaced by loginfo's interfaces
# and implementation.
#
# If you
#
# >>> print "something"
#
# in your application, it will actually be logged without category or
# context, directly to STDOUT. In the debug outline mode, this is exactly
# like writing "something" out with a newline at the end. However, in
# optimized mode, those print statements will be encoded as netstrings,
# without a newline.
#
# Also, when an uncaught exception is raised, it will be logged as an
# outline or a netstring (or a multiline classic traceback, for people
# who really need that at debug time).
#
# The rationale is that if your application needs to write to a file in any
# case, it either does not need a log or that file should not be STDOUT.
# Similarly, if your application does demand an error log, there is no
# reason not to have it written to STDERR, preferably in a format that is
# simple and safe to parse and present.
#
#
# Practicality beats purity
#
# Satisfying a demand of Andrew Dalke, loginfo gives the option of classic
# and compact tracebacks. The latter are encoded as netstrings and outlined
# at debug time; the former are always multiline strings (the original
# format) that can be parsed by existing Python development tools.
#
# If you do use those tools to lookup source code at development time, do
#
# if __debug__:
# Loginfo.loginfo_traceback = Loginfo.loginfo_classic_traceback
# loginfo.traceback = loginfo._classic_traceback
#
#
Allegra | /Allegra-0.63.zip/Allegra-0.63/lib/loginfo.py | loginfo.py