id
int64
0
458k
file_name
stringlengths
4
119
file_path
stringlengths
14
227
content
stringlengths
24
9.96M
size
int64
24
9.96M
language
stringclasses
1 value
extension
stringclasses
14 values
total_lines
int64
1
219k
avg_line_length
float64
2.52
4.63M
max_line_length
int64
5
9.91M
alphanum_fraction
float64
0
1
repo_name
stringlengths
7
101
repo_stars
int64
100
139k
repo_forks
int64
0
26.4k
repo_open_issues
int64
0
2.27k
repo_license
stringclasses
12 values
repo_extraction_date
stringclasses
433 values
2,287,000
serverperavg.cpython-39.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverperavg.cpython-39.pyc
a f¾`c‚ã@sHddlZddlZddlmZddlmZddlmZGdd„deƒZdS)éN)Ú clientPerAvg)ÚServer)ÚThreadcs,eZdZ‡fdd„Zdd„Zdd„Z‡ZS)ÚPerAvgcsFtƒ ||¡| ¡| |t¡td|j›d|j›�ƒtdƒdS)Nz Join ratio / total clients: z / z%Finished creating server and clients.)ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚprintÚ join_ratioÚ num_clients)ÚselfÚargsÚtimes©Ú __class__©úu/media/sim812/391e55df-b6f2-4fe9-a920-53434a8506fa/lgh/pdept/PFL-Non-IID-master/system/flcore/servers/serverperavg.pyr s  zPerAvg.__init__cCsªt|jdƒD]p}| ¡|_| ¡||jdkrRtd|›d�ƒtdƒ| ¡|jD]}| ¡| ¡qX|  ¡|  ¡qtdƒtt |j ƒƒ|  ¡| ¡dS)Nérz -------------Round number: z -------------z+ Evaluate global model with one step updatez Best global accuracy.)ÚrangeÚ global_roundsÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gapr Úevaluate_one_stepÚtrainÚreceive_modelsÚaggregate_parametersÚmaxÚ rs_test_accÚ save_resultsÚsave_global_model)r ÚiÚclientrrrrs     z PerAvg.traincCs’g}|jD]}| t |j¡¡| ¡q | ¡}t|jƒD]\}}| |||j¡q<t |dƒdt |dƒ}|j  |¡t d  |¡ƒdS)Négğ?rzAverage Test Accurancy: {:.4f}) ÚclientsÚappendÚcopyÚdeepcopyÚmodelZtrain_one_stepÚ test_metricsÚ enumerateÚ clone_modelÚsumr r Úformat)r Z models_tempÚcÚstatsr#Útest_accrrrr4s   zPerAvg.evaluate_one_step)Ú__name__Ú __module__Ú __qualname__rrrÚ __classcell__rrrrrs !r) r(ÚtorchZflcore.clients.clientperavgrÚflcore.servers.serverbaserÚ threadingrrrrrrÚ<module>s    
2,060
Python
.pyt
22
92.363636
347
0.491417
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,001
serverreppt.cpython-37.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverreppt.cpython-37.pyc
B Ðecýã@shddlmZddlmZddlmZddlZddlZddlZddl Z ddl Z ddlZGdd„deƒZ dS)é)Ú clientREPPT)ÚServer)ÚThreadNcs<eZdZ‡fdd„Zdd„Zdd„Zdd„Zd d „Z‡ZS) Ú PFedRepPTcsntƒ ||¡||_| ¡| |t¡t |jj ¡|_ g|_ g|_ t d|j›d|j›�ƒt dƒg|_dS)Nz Join ratio / total clients: z / z%Finished creating server and clients.)ÚsuperÚ__init__ÚargsÚset_slow_clientsÚ set_clientsrÚcopyÚdeepcopyÚmodelÚbaseÚ global_modelÚdiff_proÚclients_divergeÚprintÚ join_ratioÚ num_clientsÚBudget)ÚselfrÚtimes)Ú __class__©úH/root/autodl-tmp/PFL-Non-IID-master/system/flcore/servers/serverreppt.pyr s zPFedRepPT.__init__c CsÌ�xft|jdƒD�]R}t ¡}| ¡|_| ¡||jdkr`td|›d�ƒtdƒ| ¡d}x"|jD]}|  ¡}||  ¡}qlWd}xdt |j dj j ¡|j dj j ¡ƒD]:\}}||} t | dk| t | ¡| ¡} |t | ¡}q´Wtd |  ¡¡ƒ|j |  ¡¡td |¡ƒ|j |¡| ¡| ¡|j t ¡|¡tdd d|jd ƒqWtd ƒtt|jƒƒtd ƒtt|jdd…ƒt|jdd…ƒƒ| ¡| ¡|  ¡dS) Nérz -------------Round number: z -------------z Evaluate global modelz"0 and 1 clients difference: {:.4f}z"Averaged prompr difference: {:.4f}z-------------------------z time costéÿÿÿÿz Best global accuracy.z Average time cost per round.)!ÚrangeÚ global_roundsÚtimeÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gaprÚevaluateÚtrainÚitemÚzipÚclientsr Ú generatorÚ parametersÚtorchÚwhereÚ zeros_likeÚsumÚformatrÚappendrÚreceive_modelsÚaggregate_parametersrÚmaxÚ rs_test_accÚlenÚ save_resultsÚsave_global_modelÚsave_client_model) rÚiÚs_tÚ temp_diff_proÚclientÚtemp_diff_pro_clientÚdiverge_clentsÚ new_paramÚ old_paramrrrrr%s@  0 (zPFedRepPT.traincCsŒt|jƒdkst‚d}x|jD]}||j7}qWg|_g|_g|_xD|jD]:}|j |j|¡|j |j¡|j t   |j j ¡¡qJWdS)Nr) r5r!ÚAssertionErrorÚ train_samplesÚuploaded_weightsÚ uploaded_idsÚuploaded_modelsr0Úidr r r r)rÚactive_train_samplesr<rrrr1Ms  zPFedRepPT.receive_modelscCsÂtj d|jd¡}tj |¡s(t |¡x”t|jƒD]†\}}tj ||jdt |ƒdt |j j ƒdt |j j ƒdt |j j ƒdt |j jƒdt |j jƒd¡}t |j|¡q4WdS)NÚmodelsr<Ú_clientÚ_z.pt)ÚosÚpathÚjoinÚdatasetÚexistsÚmakedirsÚ enumerater(Ú algorithmÚstrrÚ num_promptrrÚ plocal_stepsrr+Úsaver )rÚ model_pathÚc_idxÚcÚmodel_path_saverrrr8[s   pzPFedRepPT.save_client_modelc Cs8|jd|j}d}tj 
|¡s*t |¡t|jƒ�r4|d|jdt |j ƒdt |j j ƒdt |j j ƒdt |j jƒdt |j jƒdt |j jƒ}|d |¡}td|ƒt |d¡�f}|jd|jd�|jd|jd�|jd |jd�|jd |jd�|jd |jd�|jd |jd�WdQRXdS) NrJz ../results/z{}.h5z File path: Úwr4)ÚdataÚrs_test_acc_stdÚ rs_test_aucÚ rs_train_lossrr)rNrRrKrLrOrPr5r4ÚgoalrSrrrTrrrUrr/rÚh5pyÚFileÚcreate_datasetr]r^r_rr)rÚalgoÚ result_pathÚ file_pathÚhfrrrr6cs   l zPFedRepPT.save_results) Ú__name__Ú __module__Ú __qualname__rr%r1r8r6Ú __classcell__rr)rrr s  0r) Z!system.flcore.clients.clientrepptrÚ system.flcore.servers.serverbaserÚ threadingrrr r+rKrarrrrrÚ<module>s   
4,071
Python
.pyt
35
115.057143
397
0.461977
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,002
servermtl.cpython-37.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/servermtl.cpython-37.pyc
B ·:cc ã@s@ddlZddlmZddlmZddlmZGdd„deƒZdS)éN)Ú clientMTL)ÚServer)ÚThreadcs4eZdZ‡fdd„Zdd„Zdd„Zdd„Z‡ZS) ÚFedMTLcsÈtƒ ||¡t| |j¡ƒ|_tj|j|jf|j d�|_ |j |_ t  |j|jf¡}t  |jdf¡}|d|j|  |j ¡d}| |j ¡|_| ¡| |t¡td|j›d|j›�ƒtdƒdS)N)Údeviceééz Join clients / total clients: z / z%Finished creating server and clients.)ÚsuperÚ__init__ÚlenÚflattenÚ global_modelÚdimÚtorchÚzerosÚ join_clientsrÚW_globÚonesÚmmÚTÚtoÚomegaÚset_slow_clientsÚ set_clientsrÚprintÚ num_clients)ÚselfÚargsÚtimesÚIÚir)Ú __class__©úF/root/autodl-tmp/PFL-Non-IID-master/system/flcore/servers/servermtl.pyr s zFedMTL.__init__cCs´xˆt|jdƒD]v}| ¡|_| ¡||jdkrTtd|›d�ƒtdƒ| ¡x0t|jƒD]"\}}|  |j |j |¡|  ¡q`WqWtdƒtt |jƒƒ| ¡| ¡dS)Nrrz -------------Round number: z -------------z Evaluate global modelz Best global accuracy.)ÚrangeÚ global_roundsÚselect_clientsÚselected_clientsÚaggregate_parametersÚeval_gaprÚevaluateÚ enumerateZreceive_valuesrrÚtrainÚmaxÚ rs_test_accÚ save_resultsÚsave_global_model)rr ÚidxÚclientr"r"r#r,s z FedMTL.traincs,| ¡‰ˆ ¡}‡fdd„|Dƒ}t |¡S)Ncsg|]}ˆ| ¡‘qSr")r )Ú.0Úkey)Ú state_dictr"r#ú <listcomp><sz"FedMTL.flatten.<locals>.<listcomp>)r5ÚkeysrÚcat)rÚmodelr7ÚWr")r5r#r 9szFedMTL.flattencCsPtj|j|jf|jd�|_x0t|jƒD]"\}}| |j ¡|jdd…|f<q&WdS)N)r) rrrrrrr+r'r r9)rr1r2r"r"r#r(?szFedMTL.aggregate_parameters)Ú__name__Ú __module__Ú __qualname__r r,r r(Ú __classcell__r"r")r!r#rs r)rZflcore.clients.clientmtlrÚflcore.servers.serverbaserÚ threadingrrr"r"r"r#Ú<module>s   
2,354
Python
.pyt
15
155.733333
367
0.440598
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,003
serverbase.cpython-39.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverbase.cpython-39.pyc
a ubc‹#ã@sXddlZddlZddlZddlZddlZddlZddlZddlm Z Gdd„de ƒZ dS)éN)Úread_client_datac@s°eZdZdd„Zdd„Zdd„Zdd„Zd d „Zd d „Zd d„Z dd„Z dd„Z dd„Z dd„Z dd„Zdd„Zdd„Zdd„Zdd „Zd!d"„Zd*d$d%„Zd&d'„Zd+d(d)„Zd#S),ÚServercCsô|j|_|j|_|j|_|j|_|j|_|j|_t |j ¡|_ |j |_ |j |_ t |j |j ƒ|_|j|_|j|_|j|_|j|_|j|_d|_g|_g|_g|_g|_g|_g|_g|_g|_g|_g|_||_|j |_ |j!|_!|j"|_"|j#|_#dS)Néd)$ÚdeviceÚdatasetÚ global_roundsÚ local_stepsÚ batch_sizeÚlocal_learning_rateÚ learning_rateÚcopyÚdeepcopyÚmodelÚ global_modelÚ num_clientsÚ join_ratioÚintÚ join_clientsÚ algorithmÚ time_selectÚgoalÚtime_thretholdÚsave_folder_nameÚtop_cntÚclientsÚselected_clientsÚtrain_slow_clientsÚsend_slow_clientsÚuploaded_weightsÚ uploaded_idsÚuploaded_modelsÚ rs_test_accÚ rs_test_aucÚ rs_train_lossÚtimesÚeval_gapÚclient_drop_rateÚtrain_slow_rateÚsend_slow_rate)ÚselfÚargsr$©r+ús/media/sim812/391e55df-b6f2-4fe9-a920-53434a8506fa/lgh/pdept/PFL-Non-IID-master/system/flcore/servers/serverbase.pyÚ__init__ s>zServer.__init__c Csntt|jƒ|j|jƒD]R\}}}t|j|dd�}t|j|dd�}|||t|ƒt|ƒ||d�}|j  |¡qdS)NT)Úis_trainF)ÚidÚ train_samplesÚ test_samplesÚ train_slowÚ send_slow) ÚzipÚrangerrrrrÚlenrÚappend) r)r*Z clientObjÚir2r3Ú train_dataÚ test_dataÚclientr+r+r,Ú set_clients4s ûzServer.set_clientscCsVdd„t|jƒDƒ}dd„t|jƒDƒ}tj |t||jƒ¡}|D] }d||<qD|S)NcSsg|]}d‘qS)Fr+©Ú.0r8r+r+r,Ú <listcomp>Eóz.Server.select_slow_clients.<locals>.<listcomp>cSsg|]}|‘qSr+r+r=r+r+r,r?Fr@T)r5rÚnpÚrandomÚchoicer)r)Z slow_rateZ slow_clientsÚidxÚidx_r8r+r+r,Úselect_slow_clientsDs  zServer.select_slow_clientscCs | |j¡|_| |j¡|_dS©N)rFr'rr(r)r)r+r+r,Úset_slow_clientsMs ÿÿzServer.set_slow_clientscCsttjj|j|jdd�ƒ}|S)NF)Úreplace)ÚlistrArBrCrr)r)rr+r+r,Úselect_clientsSszServer.select_clientscCs.t|jƒdksJ‚|jD]}| |j¡qdS©Nr)r6rÚset_parametersr)r)r;r+r+r,Ú send_modelsXs zServer.send_modelscCs�t|jƒdksJ‚g|_d}g|_g|_|jD]8}|j |j¡||j7}|j |j¡|j |j¡q.t |jƒD]\}}|||j|<qrdSrL) r6rrrr r7r0r/rÚ enumerate)r)Z tot_samplesr;r8Úwr+r+r,Úreceive_models`s  zServer.receive_modelscCsft|jƒdksJ‚t |jd¡|_|j ¡D]}|j ¡q.t|j |jƒD]\}}|  
||¡qLdSrL) r6r r r rÚ parametersÚdataÚzero_r4rÚadd_parameters)r)ÚparamrPÚ client_modelr+r+r,Úaggregate_parametersos  zServer.aggregate_parameterscCs:t|j ¡| ¡ƒD] \}}|j|j ¡|7_qdSrG)r4rrRrSÚclone)r)rPrWZ server_paramZ client_paramr+r+r,rUyszServer.add_parameterscCsPtj d|j¡}tj |¡s&t |¡tj ||jdd¡}t |j |¡dS©NÚmodelsÚ_serverú.pt) ÚosÚpathÚjoinrÚexistsÚmakedirsrÚtorchÚsaver©r)Z model_pathr+r+r,Úsave_global_model}s   zServer.save_global_modelcCsHtj d|j¡}tj ||jdd¡}tj |¡s8J‚t |¡|_dSrZ) r^r_r`rrrarcÚloadrrer+r+r,Ú load_model„szServer.load_modelcCs4tj d|j¡}tj ||jdd¡}tj |¡SrZ)r^r_r`rrrarer+r+r,Ú model_existsŠszServer.model_existscCsÊ|jd|j}d}tj |¡s*t |¡t|jƒrÆ|d|jdt |j ƒ}|d  |¡}t d|ƒt  |d¡�@}|jd|jd�|jd|jd�|jd |jd�Wdƒn1s¼0YdS) NÚ_z ../results/z{}.h5z File path: rPr!)rSr"r#)rrr^r_rarbr6r!rÚstrr$ÚformatÚprintÚh5pyÚFileZcreate_datasetr"r#)r)ÚalgoÚ result_pathÚ file_pathZhfr+r+r,Ú save_results�s    zServer.save_resultscCs>tj |j¡st |j¡t |tj |jd|d¡¡dS©NZserver_r])r^r_rarrbrcrdr`)r)ÚitemÚ item_namer+r+r,Ú save_itemŸs zServer.save_itemcCst tj |jd|d¡¡Srt)rcrgr^r_r`r)r)rvr+r+r,Ú load_item¤szServer.load_itemc Cshg}g}g}|jD]8}| ¡\}}}| |d¡| ||¡| |¡qdd„|jDƒ}||||fS)Nçğ?cSsg|] }|j‘qSr+©r/©r>Úcr+r+r,r?´r@z'Server.test_metrics.<locals>.<listcomp>)rÚ test_metricsr7) r)Ú num_samplesZ tot_correctZtot_aucr|ÚctÚnsÚaucÚidsr+r+r,r}§s  zServer.test_metricscCsRg}g}|jD](}| ¡\}}| |¡| |d¡qdd„|jDƒ}|||fS)NrycSsg|] }|j‘qSr+rzr{r+r+r,r?Àr@z(Server.train_metrics.<locals>.<listcomp>)rÚ train_metricsr7)r)r~Úlossesr|Úclr€r‚r+r+r,rƒ¸s   zServer.train_metricsNc Cs>| ¡}| ¡}t|dƒdt|dƒ}t|dƒdt|dƒ}t|dƒdt|dƒ}dd„t|d|dƒDƒ}dd„t|d|dƒDƒ} |dkr²|j |¡n | |¡|j |¡|dkrŞ|j |¡n | |¡td  |¡ƒtd   |¡ƒtd   |¡ƒtd   t   |¡¡ƒtd   t   | ¡¡ƒdS) NéryéécSsg|]\}}||‘qSr+r+©r>ÚaÚnr+r+r,r?Ír@z#Server.evaluate.<locals>.<listcomp>cSsg|]\}}||‘qSr+r+r‰r+r+r,r?Îr@zAveraged Train Loss: {:.4f}zAveraged Test Accurancy: {:.4f}zAveraged Test AUC: {:.4f}zStd Test Accurancy: {:.4f}zStd Test AUC: {:.4f}) r}rƒÚsumr4r!r7r"r#rmrlrAÚstd) r)ÚaccÚlossÚstatsZ 
stats_trainÚtest_accÚtest_aucÚ train_lossZaccsZaucsr+r+r,ÚevaluateÅs&   zServer.evaluatecCs.td |¡ƒtd |¡ƒtd |¡ƒdS)NzAverage Test Accurancy: {:.4f}zAverage Test AUC: {:.4f}zAverage Train Loss: {:.4f})rmrl)r)r‘r’r“r+r+r,Úprint_ász Server.print_cCsğ|D]æ}|dkrr|dkrrt|ƒt t |¡d¡jd|k}t|ƒdko^t || d…¡|k}|rj|rjqêdSq|dkr¬t|ƒt t |¡d¡jd|k}|r¤qêdSq|dkræt|ƒdkoÖt || d…¡|k}|rŞqêdSqt‚qdS)Nr‡rFT)r6rcÚtopkÚtensorÚindicesrAr�ÚNotImplementedError)r)Zacc_lssrÚ div_valueZacc_lsZfind_topZfind_divr+r+r,Ú check_doneæs&$$$$zServer.check_done)NN)NN)Ú__name__Ú __module__Ú __qualname__r-r<rFrHrKrNrQrXrUrfrhrirsrwrxr}rƒr”r•r›r+r+r+r,r s('   r) rcr^ÚnumpyrArnr ÚtimerBÚutils.data_utilsrÚobjectrr+r+r+r,Ú<module>s 
8,366
Python
.pyt
47
176.787234
760
0.4
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,004
serverbn.cpython-39.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverbn.cpython-39.pyc
a f¾`cãã@sLddlmZddlmZddlmZddlmZddlZGdd„deƒZ dS)é)ÚclientBN)ÚServer)Úread_client_data)ÚThreadNcs$eZdZ‡fdd„Zdd„Z‡ZS)ÚFedBNcsFtƒ ||¡| ¡| |t¡td|j›d|j›�ƒtdƒdS)Nz Join ratio / total clients: z / z%Finished creating server and clients.)ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚprintÚ join_ratioÚ num_clients)ÚselfÚargsÚtimes©Ú __class__©úq/media/sim812/391e55df-b6f2-4fe9-a920-53434a8506fa/lgh/pdept/PFL-Non-IID-master/system/flcore/servers/serverbn.pyr s  zFedBN.__init__cCs¢t|jdƒD]h}| ¡|_| ¡||jdkrRtd|›d�ƒtdƒ| ¡|jD] }| ¡qX|  ¡|  ¡qtdƒtt |j ƒƒ|  ¡| ¡dS)Nérz -------------Round number: z -------------z Evaluate global modelz Best global accuracy.)ÚrangeÚ global_roundsÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gapr ÚevaluateÚtrainÚreceive_modelsÚaggregate_parametersÚmaxÚ rs_test_accÚ save_resultsÚsave_global_model)rÚiÚclientrrrrs    z FedBN.train)Ú__name__Ú __module__Ú __qualname__rrÚ __classcell__rrrrrs r) Zflcore.clients.clientbnrÚflcore.servers.serverbaserÚutils.data_utilsrÚ threadingrÚtimerrrrrÚ<module>s    
1,562
Python
.pyt
16
96.375
358
0.505495
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,005
serverpfedpt.cpython-38.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverpfedpt.cpython-38.pyc
U Õ şcšã@shddlmZddlmZddlmZddlZddlZddlZddl Z ddl Z ddl Z Gdd„deƒZ dS)é)ÚclientPT)ÚServer)ÚThreadNcsNeZdZ‡fdd„Zdd„Zdd„Zdd„Zd d „Zdd d „Zdd„Z ‡Z S)ÚPFedPTcsntƒ ||¡| ¡||_| |t¡t |jj ¡|_ g|_ g|_ t d|j›d|j›�ƒt dƒg|_dS)Nz Join ratio / total clients: z / z%Finished creating server and clients.)ÚsuperÚ__init__Úset_slow_clientsÚargsÚ set_clientsrÚcopyÚdeepcopyÚmodelÚbaseÚ global_modelÚdiff_proÚclients_divergeÚprintÚ join_ratioÚ num_clientsÚBudget)Úselfr Útimes©Ú __class__©õdD:\京东\promot\第二次投稿\å®�验\æœ�务器\native - pro\system\flcore\servers\serverpfedpt.pyr s zPFedPT.__init__c Csúg}t|jdƒD�]n}t ¡}| ¡|_| ¡||jdkr`td|›d�ƒtdƒ| ¡d}|jD]}|  ¡}||  ¡}qjtd  |¡ƒ|j   |¡d}t|jdjj ¡|jdjj ¡ƒD]:\}} || } t | dk| t | ¡| ¡} |t | ¡}qÈtd  |  ¡¡ƒ|j  |  ¡¡||jdk�rJtdƒ|j|d �| ¡| ¡|j  t ¡|¡td d d |jd ƒqtd ƒtt|jƒƒtdƒtt|jdd…ƒt|jdd…ƒƒtdƒtt|ƒƒ| ¡| ¡|  ¡dS)Nérz -------------Round number: z -------------z Evaluate global modelz"Averaged prompr difference: {:.4f}z"0 and 1 clients difference: {:.4f}z Evaluate local model)Úaccz-------------------------z time costéÿÿÿÿz Best global accuracy.z Average time cost per round.z Best local accuracy.)!ÚrangeÚ global_roundsÚtimeÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gaprÚevaluateÚtrainÚitemÚformatrÚappendÚzipÚclientsr Ú generatorÚ parametersÚtorchÚwhereÚ zeros_likeÚsumrÚreceive_modelsÚaggregate_parametersrÚmaxÚ rs_test_accÚlenÚ save_resultsÚsave_global_modelÚsave_client_model) rÚ local_accÚiÚs_tÚ temp_diff_proÚclientÚtemp_diff_pro_clientÚdiverge_clentsÚ new_paramÚ old_paramrrrrr'sL   . 
( z PFedPT.traincCs„t|jƒdkst‚d}|jD]}||j7}qg|_g|_g|_|jD]:}|j |j|¡|j |j¡|j t   |j j ¡¡qDdS)Nr) r7r#ÚAssertionErrorÚ train_samplesÚuploaded_weightsÚ uploaded_idsÚuploaded_modelsr*Úidr r r r)rÚactive_train_samplesr?rrrr3Ss   zPFedPT.receive_modelscCstj d|jd|jjd|jjd|jjd|jjd|jj d|jj ¡}tj  |¡sjt  |¡t |jƒD]†\}}tj ||jdt|ƒdt|jjƒdt|jjƒdt|jjƒdt|jjƒdt|jjƒd¡}t |j|¡qtdS)NÚmodelsr?Ú*Ú_clientÚ_z.pt)ÚosÚpathÚjoinÚdatasetr Úarv1Úarv2Úarv3Úarv4Úarv5Úarv6ÚexistsÚmakedirsÚ enumerater,Ú algorithmÚstrÚ num_promptrrÚ plocal_stepsr r/Úsaver )rÚ model_pathÚc_idxÚcÚmodel_path_saverrrr:as T  pzPFedPT.save_client_modelc Cs€|jd|j}d|jjd|jjd|jjd|jjd|jjd|jjd}t j   |¡srt   |¡t |jƒ�r||d|jdt|jƒdt|jjƒdt|jjƒdt|jjƒdt|jjƒdt|jjƒ}|d |¡}td|ƒt |d¡�f}|jd|jd �|jd |jd �|jd |jd �|jd |jd �|jd |jd �|jd|j d �W5QRXdS)NrNz ../results/rLú/z{}.h5z File path: Úwr6)ÚdataÚrs_test_acc_stdÚ rs_test_aucÚ rs_train_lossrr)!rRr\r rSrTrUrVrWrXrOrPrYrZr7r6Úgoalr]rr^rrr_r r)rÚh5pyÚFileÚcreate_datasetrhrirjrr)rÚalgoÚ result_pathÚ file_pathÚhfrrrr8isL   l zPFedPT.save_resultsNc Cs|| ¡}| ¡}t|dƒdt|dƒ}t|dƒdt|dƒ}t|dƒdt|dƒ}t|dƒdt|dƒ}dd„t|d|dƒDƒ} dd„t|d|dƒDƒ} |dkrÎ|j |¡n | |¡|j |¡|j t  | ¡¡|dk�r|j  |¡n | |¡t d   |¡ƒt d   |¡ƒt d   |¡ƒt d   |¡ƒt d   t  | ¡¡ƒt d  t  | ¡¡ƒdS)Néçğ?réécSsg|]\}}||‘qSrr©Ú.0ÚaÚnrrrÚ <listcomp>…sz#PFedPT.evaluate.<locals>.<listcomp>cSsg|]\}}||‘qSrrrwrrrr{†szAveraged Train Loss: {:.4f}zAveraged Test Accurancy: {:.4f}z$Averaged Test oral Accurancy: {:.4f}zAveraged Test AUC: {:.4f}zStd Test Accurancy: {:.4f}zStd Test AUC: {:.4f}) Ú test_metricsÚ train_metricsr2r+r6r*rirhÚnpÚstdrjrr)) rrÚlossÚstatsÚ stats_trainÚtest_accÚ test_acc2Útest_aucÚ train_lossÚaccsÚaucsrrrr&|s,    zPFedPT.evaluatec Cs~g}g}g}g}|jD]H}| ¡\}}}} | |d¡| |d¡| | |¡| |¡qdd„|jDƒ} | ||||fS)NrtcSsg|] }|j‘qSr)rI)rxrcrrrr{¨sz'PFedPT.test_metrics.<locals>.<listcomp>)r,r|r*) rÚ num_samplesÚ tot_correctÚ tot_correct2Útot_aucrcÚctÚct2ÚnsÚaucÚidsrrrr|›s  zPFedPT.test_metrics)NN) Ú__name__Ú __module__Ú __qualname__rr'r3r:r8r&r|Ú __classcell__rrrrr s 4 
r)Zflcore.clients.clientptrÚflcore.servers.serverbaserÚ threadingrr!r/rOrlr Únumpyr~rrrrrÚ<module>s   
6,091
Python
.pyt
58
103.758621
494
0.426086
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,006
serverbabu.cpython-38.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverbabu.cpython-38.pyc
U [Ğıcr ã@s8ddlmZddlmZddlmZGdd„deƒZdS)é)Ú clientBABU)ÚServer)ÚThreadcs,eZdZ‡fdd„Zdd„Zdd„Z‡ZS)ÚFedBABUcsFtƒ ||¡| ¡| |t¡td|j›d|j›�ƒtdƒdS)Nz Join ratio / total clients: z / z%Finished creating server and clients.)ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚprintÚ join_ratioÚ num_clients)ÚselfÚargsÚtimes©Ú __class__©õXD:\京东\promot\第二次投稿\å®�验\native - pro\system\flcore\servers\serverbabu.pyrs  zFedBABU.__init__cCsg}t|jdƒD]Š}| ¡|_| ¡||jdkrVtd|›d�ƒtdƒ| ¡|jD] }| ¡q\||jdkrŒtdƒ|j|d�|  ¡|  ¡qtdƒtt |j ƒƒtd ƒtt |ƒƒ|j D] }| ¡qÎtd ƒ| ¡| ¡| ¡dS) Nérz -------------Round number: z -------------z Evaluate global modelz Evaluate local model)Úaccz Best global accuracy.z Best local accuracy.z4 -------------Evaluate fine-tuned model-------------)ÚrangeÚ global_roundsÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gapr ÚevaluateÚtrainÚreceive_modelsÚaggregate_parametersÚmaxÚ rs_test_accÚclientsZ fine_tuneÚ save_resultsÚsave_global_model)r Ú local_accÚiÚclientrrrrs2        z FedBABU.traincCs�t|jƒdkst‚g|_d}g|_g|_|jD]:}|j |j¡||j7}|j |j¡|j |j j ¡q.t |jƒD]\}}|||j|<qtdS)Nr) ÚlenrÚAssertionErrorÚuploaded_weightsÚ uploaded_idsÚuploaded_modelsÚappendÚ train_samplesÚidÚmodelÚbaseÚ enumerate)r Ú tot_samplesr'r&Úwrrrr;s  zFedBABU.receive_models)Ú__name__Ú __module__Ú __qualname__rrrÚ __classcell__rrrrrs 'rN)Zflcore.clients.clientbaburÚflcore.servers.serverbaserÚ threadingrrrrrrÚ<module>s   
2,164
Python
.pyt
25
85.4
254
0.477103
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,007
serverrep.cpython-38.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverrep.cpython-38.pyc
U ºĞıcí ã@sHddlmZddlmZddlmZddlZddlZGdd„deƒZdS)é)Ú clientRep)ÚServer)ÚThreadNcs,eZdZ‡fdd„Zdd„Zdd„Z‡ZS)ÚFedRepcsLtƒ ||¡| ¡| |t¡td|j›d|j›�ƒtdƒg|_dS)Nz Join ratio / total clients: z / z%Finished creating server and clients.) ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚprintÚ join_ratioÚ num_clientsÚBudget)ÚselfÚargsÚtimes©Ú __class__©õWD:\京东\promot\第二次投稿\å®�验\native - pro\system\flcore\servers\serverrep.pyr s  zFedRep.__init__cCs<g}t|jdƒD]º}t ¡}| ¡|_| ¡||jdkr^td|›d�ƒtdƒ| ¡|jD] }|  ¡qd||jdkr”tdƒ|j|d�|  ¡|  ¡|j   t ¡|¡tdd d|j d ƒqtd ƒtt|jƒƒtd ƒtt|j dd…ƒt|j dd…ƒƒtd ƒtt|ƒƒ| ¡| ¡dS)Nérz -------------Round number: z -------------z Evaluate global modelz Evaluate local model)Úaccz-------------------------z time costéÿÿÿÿz Best global accuracy.z Average time cost per round.z Best local accuracy.)ÚrangeÚ global_roundsÚtimeÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gapr ÚevaluateÚtrainÚreceive_modelsÚaggregate_parametersr ÚappendÚmaxÚ rs_test_accÚsumÚlenÚ save_resultsÚsave_global_model)rÚ local_accÚiÚs_tÚclientrrrr s4    ( z FedRep.traincCs„t|jƒdkst‚d}|jD]}||j7}qg|_g|_g|_|jD]:}|j |j|¡|j |j¡|j t   |j j ¡¡qDdS)Nr) r'rÚAssertionErrorÚ train_samplesÚuploaded_weightsÚ uploaded_idsÚuploaded_modelsr#ÚidÚcopyÚdeepcopyÚmodelÚbase)rÚactive_train_samplesr-rrrr!>s   zFedRep.receive_models)Ú__name__Ú __module__Ú __qualname__rr r!Ú __classcell__rrrrrs 'r) Zflcore.clients.clientreprÚflcore.servers.serverbaserÚ threadingrrr4rrrrrÚ<module>s   
2,288
Python
.pyt
21
107.619048
383
0.472222
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,008
serverrod.cpython-39.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverrod.cpython-39.pyc
a f¾`cçã@sLddlmZddlmZddlmZddlmZddlZGdd„deƒZ dS)é)Ú clientROD)ÚServer)Úread_client_data)ÚThreadNcs$eZdZ‡fdd„Zdd„Z‡ZS)ÚFedRODcsFtƒ ||¡| ¡| |t¡td|j›d|j›�ƒtdƒdS)Nz Join ratio / total clients: z / z%Finished creating server and clients.)ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚprintÚ join_ratioÚ num_clients)ÚselfÚargsÚtimes©Ú __class__©úr/media/sim812/391e55df-b6f2-4fe9-a920-53434a8506fa/lgh/pdept/PFL-Non-IID-master/system/flcore/servers/serverrod.pyr s  zFedROD.__init__cCs¢t|jdƒD]h}| ¡|_| ¡||jdkrRtd|›d�ƒtdƒ| ¡|jD] }| ¡qX|  ¡|  ¡qtdƒtt |j ƒƒ|  ¡| ¡dS)Nérz -------------Round number: z -------------z Evaluate global modelz Best global accuracy.)ÚrangeÚ global_roundsÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gapr ÚevaluateÚtrainÚreceive_modelsÚaggregate_parametersÚmaxÚ rs_test_accÚ save_resultsÚsave_global_model)rÚiÚclientrrrrs    z FedROD.train)Ú__name__Ú __module__Ú __qualname__rrÚ __classcell__rrrrrs r) Zflcore.clients.clientrodrÚflcore.servers.serverbaserÚutils.data_utilsrÚ threadingrÚtimerrrrrÚ<module>s    
1,568
Python
.pyt
16
96.75
360
0.507405
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,009
serveramp.cpython-38.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serveramp.cpython-38.pyc
U [Ğıc¢ ã@sdddlZddlZddlZddlZddlZddlmZmZddl m Z ddl m Z Gdd„de ƒZ dS)éN)Ú clientAMPÚweight_flatten)ÚServer)ÚThreadcs4eZdZ‡fdd„Zdd„Zdd„Zdd„Z‡ZS) ÚFedAMPcsVtƒ ||¡| ¡| |t¡|j|_|j|_td|j›d|j ›�ƒtdƒdS)Nz Join ratio / total clients: z / z%Finished creating server and clients.) ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚalphaKÚsigmaÚprintÚ join_ratioÚ num_clients)ÚselfÚargsÚtimes©Ú __class__©õWD:\京东\promot\第二次投稿\å®�验\native - pro\system\flcore\servers\serveramp.pyr s zFedAMP.__init__cCsÔg}t|jdƒD]‚}| ¡|_| ¡||jdkrVtd|›d�ƒtdƒ| ¡|jD] }| ¡q\||jdkrŒtdƒ|j|d�|  ¡qtdƒtt |j ƒƒtd ƒtt |ƒƒ|  ¡|  ¡dS) Nérz -------------Round number: z -------------z Evaluate global modelz Evaluate local model)Úaccz Best global accuracy.z Best local accuracy.)ÚrangeÚ global_roundsÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gapr ÚevaluateÚtrainÚreceive_modelsÚmaxÚ rs_test_accÚ save_resultsÚsave_global_model)rÚ local_accÚiÚclientrrrr s(      z FedAMP.trainc CsŒt|jƒdkst‚t|jƒdk�rˆ|jD�]\}t |j¡}| ¡D]}|j   ¡qBt   |j ¡}t|jƒD]b\}}|j|j|krÂt|jƒ}t|ƒ}|| d¡} t  | | ¡} |j| | ¡||<qhd||<qhdt  |¡} t|jƒD]:\}}t| ¡| ¡ƒD]\}} |j ||| 7_ qşqät ¡} |j�rJt dt tj ¡¡¡|  || ¡|j!dd7<|j!ddt ¡| 7<q(dS)Nréÿÿÿÿrgš™™™™™¹?Ú num_roundsÚ total_costé)"ÚlenrÚAssertionErrorÚuploaded_modelsÚclientsÚcopyÚdeepcopyÚ global_modelÚ parametersÚdataÚzero_ÚtorchÚzerosÚ join_clientsÚ enumerateÚidÚ uploaded_idsrÚmodelÚviewÚdotr ÚeÚsumÚzipÚtimeÚ send_slowÚsleepÚnpÚabsÚrandomÚrandÚset_parametersÚsend_time_cost) rÚcÚmuÚparamÚcoefÚjZmwZ weights_iZ weights_jÚsubZ coef_selfZparam_jÚ start_timerrrr;s2         zFedAMP.send_modelscCst | |j¡|jS)N)ÚmathÚexpr )rÚxrrrr@_szFedAMP.e)Ú__name__Ú __module__Ú __qualname__rr rr@Ú __classcell__rrrrr s "$r)r7r1rCÚnumpyrFrSZflcore.clients.clientamprrÚflcore.servers.serverbaserÚ threadingrrrrrrÚ<module>s  
2,831
Python
.pyt
30
93.2
387
0.467523
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,010
serverrodpt.cpython-38.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverrodpt.cpython-38.pyc
U Õ şcÒã@stddlmZddlmZddlmZddlmZddlZddl Z ddl Z ddl Z ddl Z ddl ZGdd„deƒZdS)é)Ú clientRODPT)ÚServer)Úread_client_data)ÚThreadNcsNeZdZ‡fdd„Zdd„Zdd„Zdd„Zd d „Zdd d „Zdd„Z ‡Z S)ÚFedRODPTcsftƒ ||¡||_| ¡| |t¡g|_t |j ¡|_ g|_ t d|j ›d|j›�ƒt dƒdS)Nz Join ratio / total clients: z / z%Finished creating server and clients.)ÚsuperÚ__init__ÚargsÚset_slow_clientsÚ set_clientsrÚclients_divergeÚcopyÚdeepcopyÚmodelÚ global_modelÚdiff_proÚprintÚ join_ratioÚ num_clients)Úselfr Útimes©Ú __class__©õcD:\京东\promot\第二次投稿\å®�验\æœ�务器\native - pro\system\flcore\servers\serverrodpt.pyr s zFedRODPT.__init__c Csšg}t|jdƒD�]>}| ¡|_| ¡||jdkrXtd|›d�ƒtdƒ| ¡d}|jD]}| ¡}||  ¡}qbtd  |¡ƒ|j   |¡d}t |jdjj ¡|jdjj ¡ƒD]:\}}||} t | dk| t | ¡| ¡} |t | ¡}qÀtd  |  ¡¡ƒ|j  |  ¡¡||jdk�rBtdƒ|j|d �| ¡| ¡qtd ƒtt|jƒƒtd ƒtt|ƒƒ| ¡| ¡| ¡dS) Nérz -------------Round number: z -------------z Evaluate global modelz"Averaged prompr difference: {:.4f}z"0 and 1 clients difference: {:.4f}z Evaluate local model)Úaccz Best global accuracy.z Best local accuracy.)ÚrangeÚ global_roundsÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gaprÚevaluateÚtrainÚitemÚformatrÚappendÚzipÚclientsrÚ generatorÚ parametersÚtorchÚwhereÚ zeros_likeÚsumr Úreceive_modelsÚaggregate_parametersÚmaxÚ rs_test_accÚ save_resultsÚsave_global_modelÚsave_client_model) rÚ local_accÚiÚ temp_diff_proÚclientÚtemp_diff_pro_clientÚdiverge_clentsÚ new_paramÚ old_paramrrrrr$sF   ÿ    zFedRODPT.traincCs‚t|jƒdkst‚d}|jD]}||j7}qg|_g|_g|_|jD]8}|j |j|¡|j |j¡|j t   |j ¡¡qDdS)Nr) Úlenr ÚAssertionErrorÚ train_samplesÚuploaded_weightsÚ uploaded_idsÚuploaded_modelsr'Úidr rr)rÚactive_train_samplesr:rrrr0Is   zFedRODPT.receive_modelscCstj d|jd|jjd|jjd|jjd|jjd|jj d|jj ¡}tj  |¡sjt  |¡t |jƒD]†\}}tj ||jdt|ƒdt|jjƒdt|jjƒdt|jjƒdt|jjƒdt|jjƒd¡}t |j|¡qtdS)NÚmodelsr:Ú*Ú_clientÚ_z.pt)ÚosÚpathÚjoinÚdatasetr Úarv1Úarv2Úarv3Úarv4Úarv5Úarv6ÚexistsÚmakedirsÚ enumerater)Ú algorithmÚstrÚ num_promptrrÚ plocal_stepsrr,Úsaver)rÚ model_pathÚc_idxÚcÚmodel_path_saverrrr6Ws T  pzFedRODPT.save_client_modelc 
Cs€|jd|j}d|jjd|jjd|jjd|jjd|jjd|jjd}t j   |¡srt   |¡t |jƒ�r||d|jdt|jƒdt|jjƒdt|jjƒdt|jjƒdt|jjƒdt|jjƒ}|d |¡}td|ƒt |d¡�f}|jd|jd �|jd |jd �|jd |jd �|jd |jd �|jd |jd �|jd|j d �W5QRXdS)NrJz ../results/rHú/z{}.h5z File path: Úwr3)ÚdataÚrs_test_acc_stdÚ rs_test_aucÚ rs_train_lossrr )!rNrXr rOrPrQrRrSrTrKrLrUrVr?r3ÚgoalrYrrZrrr[rr&rÚh5pyÚFileÚcreate_datasetrdrerfrr )rÚalgoÚ result_pathÚ file_pathÚhfrrrr4_sL   l zFedRODPT.save_resultsNc Cs|| ¡}| ¡}t|dƒdt|dƒ}t|dƒdt|dƒ}t|dƒdt|dƒ}t|dƒdt|dƒ}dd„t|d|dƒDƒ} dd„t|d|dƒDƒ} |dkrÎ|j |¡n | |¡|j |¡|j t  | ¡¡|dk�r|j  |¡n | |¡t d   |¡ƒt d   |¡ƒt d   |¡ƒt d   |¡ƒt d   t  | ¡¡ƒt d  t  | ¡¡ƒdS)Néçğ?réécSsg|]\}}||‘qSrr©Ú.0ÚaÚnrrrÚ <listcomp>{sz%FedRODPT.evaluate.<locals>.<listcomp>cSsg|]\}}||‘qSrrrsrrrrw|szAveraged Train Loss: {:.4f}zAveraged Test Accurancy: {:.4f}z$Averaged Test oral Accurancy: {:.4f}zAveraged Test AUC: {:.4f}zStd Test Accurancy: {:.4f}zStd Test AUC: {:.4f}) Ú test_metricsÚ train_metricsr/r(r3r'rerdÚnpÚstdrfrr&) rrÚlossÚstatsÚ stats_trainÚtest_accÚ test_acc2Útest_aucÚ train_lossÚaccsÚaucsrrrr#rs,    zFedRODPT.evaluatec Cs~g}g}g}g}|jD]H}| ¡\}}}} | |d¡| |d¡| | |¡| |¡qdd„|jDƒ} | ||||fS)NrpcSsg|] }|j‘qSr)rE)rtr_rrrrw�sz)FedRODPT.test_metrics.<locals>.<listcomp>)r)rxr') rÚ num_samplesÚ tot_correctÚ tot_correct2Útot_aucr_ÚctÚct2ÚnsÚaucÚidsrrrrx‘s  zFedRODPT.test_metrics)NN) Ú__name__Ú __module__Ú __qualname__rr$r0r6r4r#rxÚ __classcell__rrrrr s - r)Zflcore.clients.clientrodptrÚflcore.servers.serverbaserÚutils.data_utilsrÚ threadingrÚtimerhr rKr,ÚnumpyrzrrrrrÚ<module>s    
5,946
Python
.pyt
51
115.333333
496
0.433005
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,011
serverpfedpt.cpython-39.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverpfedpt.cpython-39.pyc
a @=bc_ ã@sXddlmZddlmZddlmZddlZddlZddlZddl Z Gdd„deƒZ dS)é)ÚclientPT)ÚServer)ÚThreadNcs4eZdZ‡fdd„Zdd„Zdd„Zdd„Z‡ZS) ÚPFedPTcs\tƒ ||¡| ¡| |t¡t |jj¡|_ t d|j ›d|j ›�ƒt dƒg|_ dS)Nz Join ratio / total clients: z / z%Finished creating server and clients.)ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚcopyÚdeepcopyÚmodelÚbaseÚ global_modelÚprintÚ join_ratioÚ num_clientsÚBudget)ÚselfÚargsÚtimes©Ú __class__©úu/media/sim812/391e55df-b6f2-4fe9-a920-53434a8506fa/lgh/pdept/PFL-Non-IID-master/system/flcore/servers/serverpfedpt.pyr s zPFedPT.__init__cCs t|jdƒD]˜}t ¡}| ¡|_| ¡||jdkrZtd|›d�ƒtdƒ| ¡|jD] }|  ¡q`|  ¡|  ¡|j   t ¡|¡tddd|j dƒqtd ƒtt|jƒƒtd ƒtt|j dd…ƒt|j dd…ƒƒ| ¡| ¡| ¡dS) Nérz -------------Round number: z -------------z Evaluate global modelz-------------------------z time costéÿÿÿÿz Best global accuracy.z Average time cost per round.)ÚrangeÚ global_roundsÚtimeÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gaprÚevaluateÚtrainÚreceive_modelsÚaggregate_parametersrÚappendÚmaxÚ rs_test_accÚsumÚlenÚ save_resultsÚsave_global_modelÚsave_client_model)rÚiÚs_tÚclientrrrr$s*   (z PFedPT.traincCs„t|jƒdksJ‚d}|jD]}||j7}qg|_g|_g|_|jD]:}|j |j|¡|j |j¡|j t  |j j ¡¡qDdS)Nr) r+r Ú train_samplesÚuploaded_weightsÚ uploaded_idsÚuploaded_modelsr'Úidr r r r )rÚactive_train_samplesr1rrrr%@s   zPFedPT.receive_modelscCsntj d|jd¡}tj |¡s(t |¡t|jƒD]6\}}tj ||jdt |ƒd¡}t   |j |¡q2dS)NÚmodelsr1Ú_serverz.pt) ÚosÚpathÚjoinÚdatasetÚexistsÚmakedirsÚ enumerateÚclientsÚ algorithmÚstrÚtorchÚsaver )rÚ model_pathÚc_idxÚcZmodel_path_saverrrr.Ns    zPFedPT.save_client_model)Ú__name__Ú __module__Ú __qualname__rr$r%r.Ú __classcell__rrrrr s &r) Zsystem.flcore.clients.clientptrÚ system.flcore.servers.serverbaserÚ threadingrrr rDr:rrrrrÚ<module>s   
2,685
Python
.pyt
27
98.185185
326
0.478751
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,012
servermoon.cpython-37.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/servermoon.cpython-37.pyc
B ¿:cc{ã@sLddlmZddlmZddlmZddlmZddlZGdd„deƒZ dS)é)Ú clientMOON)ÚServer)Úread_client_data)ÚThreadNcs$eZdZ‡fdd„Zdd„Z‡ZS)ÚMOONcsLtƒ ||¡| ¡| |t¡td|j›d|j›�ƒtdƒg|_dS)Nz Join ratio / total clients: z / z%Finished creating server and clients.) ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚprintÚ join_ratioÚ num_clientsÚBudget)ÚselfÚargsÚtimes)Ú __class__©úG/root/autodl-tmp/PFL-Non-IID-master/system/flcore/servers/servermoon.pyr s  z MOON.__init__cCs\g}d|_d}xŞ|jsìt ¡}| ¡|_| ¡||jdkr^td|›d�ƒtdƒ| ¡x|jD] }| ¡qfW||jdkr˜tdƒ|j|d�|  ¡|  ¡|j   t ¡|¡td|j d ƒ|j |jg|jd �|_|d 7}qWtd ƒtt|jƒƒtd ƒtt|ƒƒtdƒtt|j d d…ƒt|j d d…ƒƒ| ¡| ¡dS)NFrz -------------Round number: z -------------z Evaluate global modelz Evaluate local model)Úaccz2--------------------------------------------------éÿÿÿÿ)Úacc_lssÚtop_cntéz Best global accuracy.z Best local accuracy.z Averaged time per iteration.)ÚdoneÚtimeÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gapr ÚevaluateÚtrainÚreceive_modelsÚaggregate_parametersrÚappendÚ check_doneÚ rs_test_accrÚmaxÚsumÚlenÚ save_resultsÚsave_global_model)rÚ local_accÚiÚs_tÚclientrrrr!s<      (z MOON.train)Ú__name__Ú __module__Ú __qualname__rr!Ú __classcell__rr)rrrs r) Zflcore.clients.clientmoonrÚflcore.servers.serverbaserÚutils.data_utilsrÚ threadingrrrrrrrÚ<module>s    
1,946
Python
.pyt
19
101.157895
370
0.475622
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,013
serverapfl.cpython-37.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverapfl.cpython-37.pyc
B ¸:cc®ã@s8ddlmZddlmZddlmZGdd„deƒZdS)é)Ú clientAPFL)ÚServer)ÚThreadcs$eZdZ‡fdd„Zdd„Z‡ZS)ÚAPFLcsFtƒ ||¡| ¡| |t¡td|j›d|j›�ƒtdƒdS)Nz Join ratio / total clients: z / z%Finished creating server and clients.)ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚprintÚ join_ratioÚ num_clients)ÚselfÚargsÚtimes)Ú __class__©úG/root/autodl-tmp/PFL-Non-IID-master/system/flcore/servers/serverapfl.pyrs  z APFL.__init__cCsªx~t|jdƒD]l}| ¡|_| ¡||jdkrTtd|›d�ƒtdƒ| ¡x|jD] }| ¡q\W|  ¡|  ¡qWtdƒtt |j ƒƒ|  ¡| ¡dS)Nérz -------------Round number: z -------------z Evaluate global modelz Best global accuracy.)ÚrangeÚ global_roundsÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gapr ÚevaluateÚtrainÚreceive_modelsÚaggregate_parametersÚmaxÚ rs_test_accÚ save_resultsÚsave_global_model)r ÚiÚclientrrrrs    z APFL.train)Ú__name__Ú __module__Ú __qualname__rrÚ __classcell__rr)rrrs rN)Zflcore.clients.clientapflrÚflcore.servers.serverbaserÚ threadingrrrrrrÚ<module>s   
1,442
Python
.pyt
14
101.714286
227
0.492652
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,014
serverbabu.cpython-37.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverbabu.cpython-37.pyc
B ¸:cc•ã@s8ddlmZddlmZddlmZGdd„deƒZdS)é)Ú clientBABU)ÚServer)ÚThreadcs,eZdZ‡fdd„Zdd„Zdd„Z‡ZS)ÚFedBABUcsFtƒ ||¡| ¡| |t¡td|j›d|j›�ƒtdƒdS)Nz Join ratio / total clients: z / z%Finished creating server and clients.)ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚprintÚ join_ratioÚ num_clients)ÚselfÚargsÚtimes)Ú __class__©úG/root/autodl-tmp/PFL-Non-IID-master/system/flcore/servers/serverbabu.pyrs  zFedBABU.__init__cCsÒx~t|jdƒD]l}| ¡|_| ¡||jdkrTtd|›d�ƒtdƒ| ¡x|jD] }| ¡q\W|  ¡|  ¡qWtdƒtt |j ƒƒx|j D] }| ¡q�Wtdƒ| ¡| ¡| ¡dS)Nérz -------------Round number: z -------------z Evaluate global modelz Best global accuracy.z4 -------------Evaluate fine-tuned model-------------)ÚrangeÚ global_roundsÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gapr ÚevaluateÚtrainÚreceive_modelsÚaggregate_parametersÚmaxÚ rs_test_accÚclientsZ fine_tuneÚ save_resultsÚsave_global_model)r ÚiÚclientrrrrs&      z FedBABU.traincCs˜t|jƒdkst‚g|_d}g|_g|_xD|jD]:}|j |j¡||j7}|j |j¡|j |j j ¡q0Wx$t |jƒD]\}}|||j|<qzWdS)Nr) ÚlenrÚAssertionErrorÚuploaded_weightsÚ uploaded_idsÚuploaded_modelsÚappendÚ train_samplesÚidÚmodelÚbaseÚ enumerate)r Ú tot_samplesr$r#Úwrrrr7s  zFedBABU.receive_models)Ú__name__Ú __module__Ú __qualname__rrrÚ __classcell__rr)rrrs #rN)Zflcore.clients.clientbaburÚflcore.servers.serverbaserÚ threadingrrrrrrÚ<module>s   
2,012
Python
.pyt
16
124.5
264
0.48022
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,015
serverdyn.cpython-39.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverdyn.cpython-39.pyc
a »¢bcã@sPddlZddlZddlmZddlmZddlmZddlZGdd„deƒZ dS)éN)Ú clientDyn)ÚServer)ÚThreadcs<eZdZ‡fdd„Zdd„Zdd„Zdd„Zd d „Z‡ZS) ÚFedDyncs€tƒ ||¡| ¡| |t¡td|j›d|j›�ƒtdƒg|_|j |_ t   |j ¡|_ |j  ¡D]}t |j¡|_qhdS)Nz Join ratio / total clients: z / z%Finished creating server and clients.)ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚprintÚ join_ratioÚ num_clientsÚBudgetÚalphaÚcopyÚdeepcopyÚmodelÚ server_stateÚ parametersÚtorchÚ zeros_likeÚdata)ÚselfÚargsÚtimesÚparam©Ú __class__©úr/media/sim812/391e55df-b6f2-4fe9-a920-53434a8506fa/lgh/pdept/PFL-Non-IID-master/system/flcore/servers/serverdyn.pyr s zFedDyn.__init__cCshg}d|_d}t|jdƒD]Ü}t ¡}| ¡|_| ¡||jdkrhtd|›d�ƒtdƒ|  ¡|jD] }|  ¡qn||jdkržtdƒ|j |d�|  ¡|  ¡|  ¡|j t ¡|¡td |jd ƒ|j|jg|jd �|_|d7}qtd ƒtt|jƒƒtd ƒtt|ƒƒtdƒtt|jdd…ƒt|jdd…ƒƒ| ¡| ¡dS)NFréz -------------Round number: z -------------z Evaluate global modelz Evaluate local model)Úaccz2--------------------------------------------------éÿÿÿÿ)Úacc_lssÚtop_cntz Best global accuracy.z Best local accuracy.z Averaged time per iteration.)ÚdoneÚrangeÚ global_roundsÚtimeÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gapr ÚevaluateÚtrainÚreceive_modelsÚupdate_server_stateÚaggregate_parametersr ÚappendÚ check_doneÚ rs_test_accr#ÚmaxÚsumÚlenÚ save_resultsÚsave_global_model)rZ local_accÚiÚs_tÚclientrrrr-s>      (z FedDyn.traincCs<t|j ¡| ¡ƒD]"\}}|j|j ¡|j7_qdS)N)ÚzipÚ global_modelrrÚcloneÚ join_clients)rÚ client_modelÚ server_paramÚ client_paramrrrÚadd_parametersOszFedDyn.add_parameterscCs”t|jƒdksJ‚t |jd¡|_|j ¡D]}t |j¡|_q.|jD]}|  |¡qHt |j ¡|j  ¡ƒD] \}}|jd|j |8_qndS)Nrr) r6Úuploaded_modelsrrr=rrrrrCr<rr)rrr@rAÚ state_paramrrrr0Ss  zFedDyn.aggregate_parameterscCs¾t|jƒdksJ‚t |jd¡}| ¡D]}t |j¡|_q*|jD]B}t|j  ¡| ¡| ¡ƒD]"\}}}|j|||j 7_qbqDt|j  ¡| ¡ƒD]\}}|j|j |8_qœdS)Nr) r6rDrrrrrrr<r=r rr)rZ model_deltarr@rArBZ delta_paramrErrrr/`s  $zFedDyn.update_server_state) Ú__name__Ú __module__Ú __qualname__rr-rCr0r/Ú __classcell__rrrrr s  1 r) rrZsystem.flcore.clients.clientdynrÚ system.flcore.servers.serverbaserÚ 
threadingrr'rrrrrÚ<module>s    
3,312
Python
.pyt
30
109.166667
409
0.439842
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,016
servermoonpt.cpython-38.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/servermoonpt.cpython-38.pyc
U şcÿã@stddlmZddlmZddlmZddlmZddlZddl Z ddl Z ddl Z ddl Z ddl ZGdd„deƒZdS)é)Ú clientMOONPT)ÚServer)Úread_client_data)ÚThreadNcsNeZdZ‡fdd„Zdd„Zdd„Zdd„Zd d „Zdd d „Zdd„Z ‡Z S)ÚMOONPTcsltƒ ||¡||_| ¡| |t¡g|_g|_td|j ›d|j ›�ƒtdƒt   |j ¡|_g|_dS)Nz Join ratio / total clients: z / z%Finished creating server and clients.)ÚsuperÚ__init__ÚargsÚset_slow_clientsÚ set_clientsrÚclients_divergeÚdiff_proÚprintÚ join_ratioÚ num_clientsÚcopyÚdeepcopyÚmodelÚ global_modelÚBudget)Úselfr Útimes©Ú __class__©õdD:\京东\promot\第二次投稿\å®�验\æœ�务器\native - pro\system\flcore\servers\servermoonpt.pyr s zMOONPT.__init__c Csg}d|_d}t|jdƒD�]ˆ}t ¡}| ¡|_| ¡||jdkrjtd|›d�ƒtdƒ|  ¡d}|jD]}|  ¡}||  ¡}qttd  |¡ƒ|j  |¡d}t|jdjj ¡|jdjj ¡ƒD]:\}} || } t | dk| t | ¡| ¡} |t | ¡}qÒtd  |  ¡¡ƒ|j |  ¡¡||jdk�rTtd ƒ|j |d �| ¡| ¡|j t ¡|¡td |jd ƒ|j|jg|jd �|_|d7}qtdƒtt|jƒƒtdƒtt|ƒƒtdƒtt|jdd…ƒt |jdd…ƒƒ| !¡| "¡| #¡dS)NFréz -------------Round number: z -------------z Evaluate global modelz"Averaged prompr difference: {:.4f}z"0 and 1 clients difference: {:.4f}z Evaluate local model)Úaccz2--------------------------------------------------éÿÿÿÿ)Úacc_lssÚtop_cntz Best global accuracy.z Best local accuracy.z Averaged time per iteration.)$ÚdoneÚrangeÚ global_roundsÚtimeÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gaprÚevaluateÚtrainÚitemÚformatr ÚappendÚzipÚclientsrÚ generatorÚ parametersÚtorchÚwhereÚ zeros_likeÚsumr Úreceive_modelsÚaggregate_parametersrÚ check_doneÚ rs_test_accr ÚmaxÚlenÚ save_resultsÚsave_global_modelÚsave_client_model) rÚ local_accÚiÚs_tÚ temp_diff_proÚclientÚtemp_diff_pro_clientÚdiverge_clentsÚ new_paramÚ old_paramr rrrr*sX   ÿ    (z MOONPT.traincCs‚t|jƒdkst‚d}|jD]}||j7}qg|_g|_g|_|jD]8}|j |j|¡|j |j¡|j t   |j ¡¡qDdS)Nr) r;r&ÚAssertionErrorÚ train_samplesÚuploaded_weightsÚ uploaded_idsÚuploaded_modelsr-Úidrrr)rÚactive_train_samplesrCrrrr6Ys   zMOONPT.receive_modelscCstj d|jd|jjd|jjd|jjd|jjd|jj d|jj ¡}tj  |¡sjt  |¡t |jƒD]†\}}tj ||jdt|ƒdt|jjƒdt|jjƒdt|jjƒdt|jjƒdt|jjƒd¡}t 
|j|¡qtdS)NÚmodelsrCÚ*Ú_clientÚ_z.pt)ÚosÚpathÚjoinÚdatasetr Úarv1Úarv2Úarv3Úarv4Úarv5Úarv6ÚexistsÚmakedirsÚ enumerater/Ú algorithmÚstrÚ num_promptrrÚ plocal_stepsr#r2Úsaver)rÚ model_pathÚc_idxÚcÚmodel_path_saverrrr>hs<T  ÿÿ ÿÿÿÿş şşÿşızMOONPT.save_client_modelc Cs€|jd|j}d|jjd|jjd|jjd|jjd|jjd|jjd}t j   |¡srt   |¡t |jƒ�r||d|jdt|jƒdt|jjƒdt|jjƒdt|jjƒdt|jjƒdt|jjƒ}|d |¡}td|ƒt |d¡�f}|jd|jd �|jd |jd �|jd |jd �|jd |jd �|jd |jd �|jd|j d �W5QRXdS)NrRz ../results/rPú/z{}.h5z File path: Úwr9)ÚdataÚrs_test_acc_stdÚ rs_test_aucÚ rs_train_lossr r )!rVr`r rWrXrYrZr[r\rSrTr]r^r;r9Úgoalrarrbrrrcr#r,rÚh5pyÚFileÚcreate_datasetrlrmrnr r )rÚalgoÚ result_pathÚ file_pathÚhfrrrr<ts>L   0ÿÿ ÿÿÿÿş ş zMOONPT.save_resultsNc Cs|| ¡}| ¡}t|dƒdt|dƒ}t|dƒdt|dƒ}t|dƒdt|dƒ}t|dƒdt|dƒ}dd„t|d|dƒDƒ} dd„t|d|dƒDƒ} |dkrÎ|j |¡n | |¡|j |¡|j t  | ¡¡|dk�r|j  |¡n | |¡t d   |¡ƒt d   |¡ƒt d   |¡ƒt d   |¡ƒt d   t  | ¡¡ƒt d  t  | ¡¡ƒdS)Néçğ?réécSsg|]\}}||‘qSrr©Ú.0ÚaÚnrrrÚ <listcomp>’sz#MOONPT.evaluate.<locals>.<listcomp>cSsg|]\}}||‘qSrrr{rrrr“szAveraged Train Loss: {:.4f}zAveraged Test Accurancy: {:.4f}z$Averaged Test oral Accurancy: {:.4f}zAveraged Test AUC: {:.4f}zStd Test Accurancy: {:.4f}zStd Test AUC: {:.4f}) Ú test_metricsÚ train_metricsr5r.r9r-rmrlÚnpÚstdrnrr,) rrÚlossÚstatsÚ stats_trainÚtest_accÚ test_acc2Útest_aucÚ train_lossÚaccsÚaucsrrrr)‰s,    zMOONPT.evaluatec Cs~g}g}g}g}|jD]H}| ¡\}}}} | |d¡| |d¡| | |¡| |¡qdd„|jDƒ} | ||||fS)NrxcSsg|] }|j‘qSr)rM)r|rgrrrrµsz'MOONPT.test_metrics.<locals>.<listcomp>)r/r€r-) rÚ num_samplesÚ tot_correctÚ tot_correct2Útot_aucrgÚctÚct2ÚnsÚaucÚidsrrrr€¨s  zMOONPT.test_metrics)NN) Ú__name__Ú __module__Ú __qualname__rr*r6r>r<r)r€Ú __classcell__rrrrr s =  r)Zflcore.clients.clientmoonptrÚflcore.servers.serverbaserÚutils.data_utilsrÚ threadingrr$rprrSr2Únumpyr‚rrrrrÚ<module>s    
6,327
Python
.pyt
62
100.822581
494
0.423715
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,017
serverper.cpython-39.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverper.cpython-39.pyc
a f¾`cîã@s8ddlmZddlmZddlmZGdd„deƒZdS)é)Ú clientPer)ÚServer)ÚThreadcs,eZdZ‡fdd„Zdd„Zdd„Z‡ZS)ÚFedPercsFtƒ ||¡| ¡| |t¡td|j›d|j›�ƒtdƒdS)Nz Join ratio / total clients: z / z%Finished creating server and clients.)ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚprintÚ join_ratioÚ num_clients)ÚselfÚargsÚtimes©Ú __class__©úr/media/sim812/391e55df-b6f2-4fe9-a920-53434a8506fa/lgh/pdept/PFL-Non-IID-master/system/flcore/servers/serverper.pyrs  zFedPer.__init__cCs¢t|jdƒD]h}| ¡|_| ¡||jdkrRtd|›d�ƒtdƒ| ¡|jD] }| ¡qX|  ¡|  ¡qtdƒtt |j ƒƒ|  ¡| ¡dS)Nérz -------------Round number: z -------------z Evaluate global modelz Best global accuracy.)ÚrangeÚ global_roundsÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gapr ÚevaluateÚtrainÚreceive_modelsÚaggregate_parametersÚmaxÚ rs_test_accÚ save_resultsÚsave_global_model)r ÚiÚclientrrrrs    z FedPer.traincCs�t|jƒdksJ‚g|_d}g|_g|_|jD]:}|j |j¡||j7}|j |j¡|j |jj ¡q.t |jƒD]\}}|||j|<qtdS)Nr) ÚlenrÚuploaded_weightsÚ uploaded_idsÚuploaded_modelsÚappendÚ train_samplesÚidÚmodelÚbaseÚ enumerate)r Ú tot_samplesr$r#Úwrrrr2s  zFedPer.receive_models)Ú__name__Ú __module__Ú __qualname__rrrÚ __classcell__rrrrrs rN)Zflcore.clients.clientperrÚflcore.servers.serverbaserÚ threadingrrrrrrÚ<module>s   
1,913
Python
.pyt
18
105.055556
327
0.484177
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,018
serverapfl.cpython-38.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverapfl.cpython-38.pyc
U ”jfc®ã@s8ddlmZddlmZddlmZGdd„deƒZdS)é)Ú clientAPFL)ÚServer)ÚThreadcs$eZdZ‡fdd„Zdd„Z‡ZS)ÚAPFLcsFtƒ ||¡| ¡| |t¡td|j›d|j›�ƒtdƒdS)Nz Join ratio / total clients: z / z%Finished creating server and clients.)ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚprintÚ join_ratioÚ num_clients)ÚselfÚargsÚtimes©Ú __class__©õLD:\京东\promot\cifar\cifar\Cifar10_iid\system\flcore\servers\serverapfl.pyrs  z APFL.__init__cCs¢t|jdƒD]h}| ¡|_| ¡||jdkrRtd|›d�ƒtdƒ| ¡|jD] }| ¡qX|  ¡|  ¡qtdƒtt |j ƒƒ|  ¡| ¡dS)Nérz -------------Round number: z -------------z Evaluate global modelz Best global accuracy.)ÚrangeÚ global_roundsÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gapr ÚevaluateÚtrainÚreceive_modelsÚaggregate_parametersÚmaxÚ rs_test_accÚ save_resultsÚsave_global_model)r ÚiÚclientrrrrs    z APFL.train)Ú__name__Ú __module__Ú __qualname__rrÚ __classcell__rrrrrs rN)Zflcore.clients.clientapflrÚflcore.servers.serverbaserÚ threadingrrrrrrÚ<module>s   
1,456
Python
.pyt
17
84.411765
225
0.488889
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,019
serveravg.cpython-38.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serveravg.cpython-38.pyc
U [Ğıc·ã@sHddlZddlZddlmZddlmZddlmZGdd„deƒZdS)éN)Ú clientAVG)ÚServer)ÚThreadcs$eZdZ‡fdd„Zdd„Z‡ZS)ÚFedAvgcsLtƒ ||¡| ¡| |t¡td|j›d|j›�ƒtdƒg|_dS)Nz Join ratio / total clients: z / z%Finished creating server and clients.) ÚsuperÚ__init__Zset_slow_clientsZ set_clientsrÚprintÚ join_ratioÚ num_clientsÚBudget)ÚselfÚargsÚtimes©Ú __class__©õWD:\京东\promot\第二次投稿\å®�验\native - pro\system\flcore\servers\serveravg.pyr s  zFedAvg.__init__cCs<g}t|jdƒD]º}t ¡}| ¡|_| ¡||jdkr^td|›d�ƒtdƒ| ¡|jD] }|  ¡qd||jdkr”tdƒ|j|d�|  ¡|  ¡|j   t ¡|¡tdd d|j d ƒqtd ƒtt|jƒƒtd ƒtt|ƒƒtd ƒtt|j dd…ƒt|j dd…ƒƒ| ¡| ¡dS)Nérz -------------Round number: z -------------z Evaluate global modelz Evaluate local model)Úaccz-------------------------z time costéÿÿÿÿz Best global accuracy.z Best local accuracy.z Average time cost per round.)ÚrangeÚ global_roundsÚtimeZselect_clientsZselected_clientsZ send_modelsZeval_gaprZevaluateÚtrainZreceive_modelsZaggregate_parametersr ÚappendÚmaxZ rs_test_accÚsumÚlenZ save_resultsZsave_global_model)r Z local_accÚiZs_tÚclientrrrrs4     (z FedAvg.train)Ú__name__Ú __module__Ú __qualname__rrÚ __classcell__rrrrrs r) rÚtorchZflcore.clients.clientavgrZflcore.servers.serverbaserÚ threadingrrrrrrÚ<module>s    
1,867
Python
.pyt
17
108.529412
383
0.47866
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,020
serverditto.cpython-37.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverditto.cpython-37.pyc
B ¾:ccüã@sHddlZddlmZddlmZddlmZddlZGdd„deƒZdS)éN)Ú clientDitto)ÚServer)ÚThreadcs$eZdZ‡fdd„Zdd„Z‡ZS)ÚDittocsLtƒ ||¡| ¡| |t¡td|j›d|j›�ƒtdƒg|_dS)Nz Join ratio / total clients: z / z%Finished creating server and clients.) ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚprintÚ join_ratioÚ num_clientsÚBudget)ÚselfÚargsÚtimes)Ú __class__©úH/root/autodl-tmp/PFL-Non-IID-master/system/flcore/servers/serverditto.pyr s  zDitto.__init__cCsx¶t|jdƒD]¤}t ¡}| ¡|_| ¡||jdkr\td|›d�ƒtdƒ| ¡x|jD]}|  ¡|  ¡qdW|  ¡|  ¡|j  t ¡|¡tddd|j dƒqWtd ƒtt|jƒƒtd ƒtt|j dd…ƒt|j dd…ƒƒ| ¡| ¡dS) Nérz -------------Round number: z -------------z Evaluate global modelz-------------------------z time costéÿÿÿÿz Best global accuracy.z Average time cost per round.)ÚrangeÚ global_roundsÚtimeÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gapr ÚevaluateZptrainÚtrainÚreceive_modelsÚaggregate_parametersr ÚappendÚmaxÚ rs_test_accÚsumÚlenÚ save_resultsÚsave_global_model)rÚiÚs_tÚclientrrrrs*   (z Ditto.train)Ú__name__Ú __module__Ú __qualname__rrÚ __classcell__rr)rrrs r) ÚcopyZflcore.clients.clientdittorÚflcore.servers.serverbaserÚ threadingrrrrrrrÚ<module>s    
1,727
Python
.pyt
14
122
331
0.482497
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,021
serverpFedMe.cpython-37.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverpFedMe.cpython-37.pyc
B ¸:ccEã@sPddlZddlZddlZddlmZddlmZddlmZGdd„deƒZ dS)éN)Ú clientpFedMe)ÚServer)ÚThreadcsLeZdZ‡fdd„Zdd„Zdd„Zdd„Zd d „Zd d „Zd d„Z ‡Z S)ÚpFedMecs`tƒ ||¡| ¡| |t¡|j|_g|_g|_g|_t d|j ›d|j ›�ƒt dƒdS)Nz Join ratio / total clients: z / z%Finished creating server and clients.) ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚbetaÚrs_train_acc_perÚrs_train_loss_perÚrs_test_acc_perÚprintÚ join_ratioÚ num_clients)ÚselfÚargsÚtimes)Ú __class__©úI/root/autodl-tmp/PFL-Non-IID-master/system/flcore/servers/serverpFedMe.pyr s zpFedMe.__init__cCsÈxœt|jdƒD]Š}| ¡|_| ¡x|jD] }| ¡q.W||jdkrltd|›d�ƒtdƒ| ¡t   t |j   ¡ƒ¡|_| ¡| ¡| ¡qWtdƒtt|jƒƒ| ¡| ¡dS)Nérz -------------Round number: z -------------z Evaluate personalized modelz Best personalized results.)ÚrangeÚ global_roundsÚselect_clientsÚselected_clientsÚ send_modelsÚtrainÚeval_gaprÚevaluate_personalized_modelÚcopyÚdeepcopyÚlistÚ global_modelÚ parametersÚprevious_global_modelÚreceive_modelsÚaggregate_parametersÚbeta_aggregate_parametersÚmaxr Ú save_resultsÚsave_global_model)rÚiÚclientrrrrs"    z pFedMe.traincCsBx<t|j|j ¡ƒD]&\}}d|j|j|j|j|_qWdS)Nr)Úzipr%r#r$r Údata)rZ pre_paramÚparamrrrr(Dsz pFedMe.beta_aggregate_parameterscCsVg}g}x2|jD](}| ¡\}}| |d¡| |¡qWdd„|jDƒ}|||fS)Ngğ?cSsg|] }|j‘qSr)Úid)Ú.0Úcrrrú <listcomp>Psz4pFedMe.test_metrics_personalized.<locals>.<listcomp>)ÚclientsÚtest_metrics_personalizedÚappend)rÚ num_samplesÚ tot_correctr3ÚctÚnsÚidsrrrr6Is  z pFedMe.test_metrics_personalizedc Cslg}g}g}xB|jD]8}| ¡\}}}| |d¡| |¡| |d¡qWdd„|jDƒ}||||fS)Ngğ?cSsg|] }|j‘qSr)r1)r2r3rrrr4^sz5pFedMe.train_metrics_personalized.<locals>.<listcomp>)r5Útrain_metrics_personalizedr7) rr8r9Úlossesr3r:Úclr;r<rrrr=Ts  z!pFedMe.train_metrics_personalizedcCsB| ¡}t|dƒdt|dƒ}|j |¡td |¡ƒdS)Négğ?rz+Average Personalized Test Accurancy: {:.4f})r6Úsumr r7rÚformat)rÚstatsÚtest_accrrrrbs z"pFedMe.evaluate_personalized_modelc Cs¦|jd|j}d}tj |¡s*t |¡t|jƒr¢|d|jdt |j ƒ}t   |d  |¡d¡�6}|jd|jd�|jd|jd�|jd|jd�WdQRXdS) NÚ_z ../results/z{}.h5ÚwÚ rs_test_acc)r/Z rs_train_accÚ 
rs_train_loss)ÚdatasetÚ algorithmÚosÚpathÚexistsÚmakedirsÚlenr ÚgoalÚstrrÚh5pyÚFilerBÚcreate_datasetr r )rÚalgoÚ result_pathZalgo2Úhfrrrr*qs  zpFedMe.save_results) Ú__name__Ú __module__Ú __qualname__rrr(r6r=rr*Ú __classcell__rr)rrr s + r) rKr rRZflcore.clients.clientpFedMerZflcore.servers.serverbaserÚ threadingrrrrrrÚ<module>s    
3,763
Python
.pyt
23
162.26087
430
0.466167
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,022
serveravg.cpython-39.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serveravg.cpython-39.pyc
a Ú'bcçã@sHddlZddlZddlmZddlmZddlmZGdd„deƒZdS)éN)Ú clientAVG)ÚServer)ÚThreadcs$eZdZ‡fdd„Zdd„Z‡ZS)ÚFedAvgcsLtƒ ||¡| ¡| |t¡td|j›d|j›�ƒtdƒg|_dS)Nz Join ratio / total clients: z / z%Finished creating server and clients.) ÚsuperÚ__init__Zset_slow_clientsZ set_clientsrÚprintÚ join_ratioÚ num_clientsÚBudget)ÚselfÚargsÚtimes©Ú __class__©úr/media/sim812/391e55df-b6f2-4fe9-a920-53434a8506fa/lgh/pdept/PFL-Non-IID-master/system/flcore/servers/serveravg.pyr s  zFedAvg.__init__cCst|jdƒD]˜}t ¡}| ¡|_| ¡||jdkrZtd|›d�ƒtdƒ| ¡|jD] }|  ¡q`|  ¡|  ¡|j   t ¡|¡tddd|j dƒqtd ƒtt|jƒƒtd ƒtt|j dd…ƒt|j dd…ƒƒ| ¡| ¡dS) Nérz -------------Round number: z -------------z Evaluate global modelz-------------------------z time costéÿÿÿÿz Best global accuracy.z Average time cost per round.)ÚrangeÚ global_roundsÚtimeZselect_clientsZselected_clientsZ send_modelsÚeval_gaprZevaluateÚtrainZreceive_modelsZaggregate_parametersr ÚappendÚmaxZ rs_test_accÚsumÚlenZ save_resultsZsave_global_model)r ÚiZs_tÚclientrrrrs(   (z FedAvg.train)Ú__name__Ú __module__Ú __qualname__rrÚ __classcell__rrrrrs r) rÚtorchZsystem.flcore.clients.clientavgrZ system.flcore.servers.serverbaserÚ threadingrrrrrrÚ<module>s    
1,771
Python
.pyt
15
116.733333
369
0.491747
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,023
serverphp.cpython-38.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverphp.cpython-38.pyc
U •ic‚ã@sHddlmZddlmZddlmZddlZddlZGdd„deƒZdS)é)Ú clientPHP)ÚServer)ÚThreadNcs,eZdZ‡fdd„Zdd„Zdd„Z‡ZS)ÚFedPHPcsLtƒ ||¡| ¡| |t¡td|j›d|j›�ƒtdƒg|_dS)Nz Join ratio / total clients: z / z%Finished creating server and clients.) ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚprintÚ join_ratioÚ num_clientsÚBudget)ÚselfÚargsÚtimes©Ú __class__©õDD:\京东\promot\cifar\cifar\tiny\system\flcore\servers\serverphp.pyr s  zFedPHP.__init__cCs¤t|jdƒD]j}| ¡|_| |¡||jdkrTtd|›d�ƒtdƒ| ¡|jD] }| ¡qZ|  ¡|  ¡qtdƒtt |j ƒƒ|  ¡| ¡dS)Nérz -------------Round number: z -------------z Evaluate global modelz Best global accuracy.)ÚrangeÚ global_roundsÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gapr ÚevaluateÚtrainÚreceive_modelsÚaggregate_parametersÚmaxÚ rs_test_accÚ save_resultsÚsave_global_model)rÚiÚclientrrrrs     z FedPHP.traincCs0t|jƒdkst‚|jD]}| |j|¡qdS)Nr)ÚlenÚclientsÚAssertionErrorÚset_parametersÚ global_model)rÚRr%rrrr5s zFedPHP.send_models)Ú__name__Ú __module__Ú __qualname__rrrÚ __classcell__rrrrrs r) Zflcore.clients.clientphprÚflcore.servers.serverbaserÚ threadingrÚtimeÚcopyrrrrrÚ<module>s   
1,735
Python
.pyt
17
100.823529
350
0.482257
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,024
serverbn.cpython-37.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverbn.cpython-37.pyc
B ¿:ccãã@sLddlmZddlmZddlmZddlmZddlZGdd„deƒZ dS)é)ÚclientBN)ÚServer)Úread_client_data)ÚThreadNcs$eZdZ‡fdd„Zdd„Z‡ZS)ÚFedBNcsFtƒ ||¡| ¡| |t¡td|j›d|j›�ƒtdƒdS)Nz Join ratio / total clients: z / z%Finished creating server and clients.)ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚprintÚ join_ratioÚ num_clients)ÚselfÚargsÚtimes)Ú __class__©úE/root/autodl-tmp/PFL-Non-IID-master/system/flcore/servers/serverbn.pyr s  zFedBN.__init__cCsªx~t|jdƒD]l}| ¡|_| ¡||jdkrTtd|›d�ƒtdƒ| ¡x|jD] }| ¡q\W|  ¡|  ¡qWtdƒtt |j ƒƒ|  ¡| ¡dS)Nérz -------------Round number: z -------------z Evaluate global modelz Best global accuracy.)ÚrangeÚ global_roundsÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gapr ÚevaluateÚtrainÚreceive_modelsÚaggregate_parametersÚmaxÚ rs_test_accÚ save_resultsÚsave_global_model)rÚiÚclientrrrrs    z FedBN.train)Ú__name__Ú __module__Ú __qualname__rrÚ __classcell__rr)rrrs r) Zflcore.clients.clientbnrÚflcore.servers.serverbaserÚutils.data_utilsrÚ threadingrÚtimerrrrrÚ<module>s    
1,512
Python
.pyt
13
115
346
0.498
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,025
serverrod.cpython-37.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverrod.cpython-37.pyc
B ¸:ccçã@sLddlmZddlmZddlmZddlmZddlZGdd„deƒZ dS)é)Ú clientROD)ÚServer)Úread_client_data)ÚThreadNcs$eZdZ‡fdd„Zdd„Z‡ZS)ÚFedRODcsFtƒ ||¡| ¡| |t¡td|j›d|j›�ƒtdƒdS)Nz Join ratio / total clients: z / z%Finished creating server and clients.)ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚprintÚ join_ratioÚ num_clients)ÚselfÚargsÚtimes)Ú __class__©úF/root/autodl-tmp/PFL-Non-IID-master/system/flcore/servers/serverrod.pyr s  zFedROD.__init__cCsªx~t|jdƒD]l}| ¡|_| ¡||jdkrTtd|›d�ƒtdƒ| ¡x|jD] }| ¡q\W|  ¡|  ¡qWtdƒtt |j ƒƒ|  ¡| ¡dS)Nérz -------------Round number: z -------------z Evaluate global modelz Best global accuracy.)ÚrangeÚ global_roundsÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gapr ÚevaluateÚtrainÚreceive_modelsÚaggregate_parametersÚmaxÚ rs_test_accÚ save_resultsÚsave_global_model)rÚiÚclientrrrrs    z FedROD.train)Ú__name__Ú __module__Ú __qualname__rrÚ __classcell__rr)rrrs r) Zflcore.clients.clientrodrÚflcore.servers.serverbaserÚutils.data_utilsrÚ threadingrÚtimerrrrrÚ<module>s    
1,518
Python
.pyt
13
115.461538
348
0.5
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,026
serveramp.cpython-37.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serveramp.cpython-37.pyc
B ·:ccÍ ã@sdddlZddlZddlZddlZddlZddlmZmZddl m Z ddl m Z Gdd„de ƒZ dS)éN)Ú clientAMPÚweight_flatten)ÚServer)ÚThreadcs4eZdZ‡fdd„Zdd„Zdd„Zdd„Z‡ZS) ÚFedAMPcsVtƒ ||¡| ¡| |t¡|j|_|j|_td|j›d|j ›�ƒtdƒdS)Nz Join ratio / total clients: z / z%Finished creating server and clients.) ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚalphaKÚsigmaÚprintÚ join_ratioÚ num_clients)ÚselfÚargsÚtimes)Ú __class__©úF/root/autodl-tmp/PFL-Non-IID-master/system/flcore/servers/serveramp.pyr s zFedAMP.__init__cCs¢xvt|jdƒD]d}| ¡|_| ¡||jdkrTtd|›d�ƒtdƒ| ¡x|jD] }| ¡q\W|  ¡qWtdƒtt |j ƒƒ|  ¡|  ¡dS)Nérz -------------Round number: z -------------z Evaluate global modelz Best global accuracy.)ÚrangeÚ global_roundsÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gapr ÚevaluateÚtrainÚreceive_modelsÚmaxÚ rs_test_accÚ save_resultsÚsave_global_model)rÚiÚclientrrrrs    z FedAMP.trainc Cs¤t|jƒdkst‚t|jƒdk�r �xz|jD�]n}t |j¡}x| ¡D]}|j  ¡qHWt   |j ¡}xpt |jƒD]b\}}|j|j|krÌt|jƒ}t|ƒ}|| d¡} t  | | ¡} |j| | ¡||<qrd||<qrWdt  |¡} xNt |jƒD]@\}}x6t| ¡| ¡ƒD] \}} |j||| 7_�qWqòWt ¡} |j�r`t dt tj ¡¡¡| || ¡|j dd7<|j ddt ¡| 7<q,WdS)Nréÿÿÿÿrgš™™™™™¹?Ú num_roundsÚ total_costé)!ÚlenrÚAssertionErrorÚuploaded_modelsÚcopyÚdeepcopyÚ global_modelÚ parametersÚdataÚzero_ÚtorchÚzerosÚ join_clientsÚ enumerateÚidÚ uploaded_idsrÚmodelÚviewÚdotr ÚeÚsumÚzipÚtimeÚ send_slowÚsleepÚnpÚabsÚrandomÚrandÚset_parametersÚsend_time_cost) rÚcÚmuÚparamÚcoefÚjÚmwZ weights_iZ weights_jÚsubZ coef_selfZparam_jÚ start_timerrrr6s2       zFedAMP.send_modelscCst | |j¡|jS)N)ÚmathÚexpr )rÚxrrrr<ZszFedAMP.e)Ú__name__Ú __module__Ú __qualname__rrrr<Ú __classcell__rr)rrr s $r)r3r-r?ÚnumpyrBrPZflcore.clients.clientamprrÚflcore.servers.serverbaserÚ threadingrrrrrrÚ<module>s  
2,678
Python
.pyt
24
110.291667
333
0.469303
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,027
serverlocal.cpython-37.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverlocal.cpython-37.pyc
B ¿:cc…ã@s8ddlmZddlmZddlmZGdd„deƒZdS)é)Ú clientAVG)ÚServer)ÚThreadcs$eZdZ‡fdd„Zdd„Z‡ZS)ÚLocalcsFtƒ ||¡| ¡| |t¡td|j›d|j›�ƒtdƒdS)Nz Join ratio / total clients: z / z%Finished creating server and clients.)ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚprintÚ join_ratioÚ num_clients)ÚselfÚargsÚtimes)Ú __class__©úH/root/autodl-tmp/PFL-Non-IID-master/system/flcore/servers/serverlocal.pyrs  zLocal.__init__cCsœxpt|jdƒD]^}| ¡|_||jdkrLtd|›d�ƒtdƒ| ¡| ¡|_x|jD] }| ¡q^WqWtdƒtt|j ƒƒ|  ¡|  ¡dS)Nérz -------------Round number: z -------------z Evaluate global modelz Best global accuracy.) ÚrangeÚ global_roundsÚselect_clientsÚselected_clientsÚeval_gapr ÚevaluateÚtrainÚmaxÚ rs_test_accÚ save_resultsÚsave_global_model)r ÚiÚclientrrrrs   z Local.train)Ú__name__Ú __module__Ú __qualname__rrÚ __classcell__rr)rrrs rN)Zflcore.clients.clientavgrÚflcore.servers.serverbaserÚ threadingrrrrrrÚ<module>s   
1,375
Python
.pyt
13
104.461538
306
0.48496
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,028
serverperavg.cpython-37.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverperavg.cpython-37.pyc
B ¿:cc‚ã@sHddlZddlZddlmZddlmZddlmZGdd„deƒZdS)éN)Ú clientPerAvg)ÚServer)ÚThreadcs,eZdZ‡fdd„Zdd„Zdd„Z‡ZS)ÚPerAvgcsFtƒ ||¡| ¡| |t¡td|j›d|j›�ƒtdƒdS)Nz Join ratio / total clients: z / z%Finished creating server and clients.)ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚprintÚ join_ratioÚ num_clients)ÚselfÚargsÚtimes)Ú __class__©úI/root/autodl-tmp/PFL-Non-IID-master/system/flcore/servers/serverperavg.pyr s  zPerAvg.__init__cCs²x†t|jdƒD]t}| ¡|_| ¡||jdkrTtd|›d�ƒtdƒ| ¡x|jD]}| ¡| ¡q\W|  ¡|  ¡qWtdƒtt |j ƒƒ|  ¡| ¡dS)Nérz -------------Round number: z -------------z+ Evaluate global model with one step updatez Best global accuracy.)ÚrangeÚ global_roundsÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gapr Úevaluate_one_stepÚtrainÚreceive_modelsÚaggregate_parametersÚmaxÚ rs_test_accÚ save_resultsÚsave_global_model)r ÚiÚclientrrrrs     z PerAvg.traincCsšg}x(|jD]}| t |j¡¡| ¡q W| ¡}x(t|jƒD]\}}| |||j¡qBWt |dƒdt |dƒ}|j  |¡t d  |¡ƒdS)Négğ?rzAverage Test Accurancy: {:.4f}) ÚclientsÚappendÚcopyÚdeepcopyÚmodelZtrain_one_stepÚ test_metricsÚ enumerateÚ clone_modelÚsumrr Úformat)r Z models_tempÚcÚstatsr"Útest_accrrrr4s   zPerAvg.evaluate_one_step)Ú__name__Ú __module__Ú __qualname__rrrÚ __classcell__rr)rrrs !r) r'ÚtorchZflcore.clients.clientperavgrÚflcore.servers.serverbaserÚ threadingrrrrrrÚ<module>s    
2,014
Python
.pyt
16
124.5
335
0.486243
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,029
servermoon.cpython-38.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/servermoon.cpython-38.pyc
U [Ðýc’ã@sLddlmZddlmZddlmZddlmZddlZGdd„deƒZ dS)é)Ú clientMOON)ÚServer)Úread_client_data)ÚThreadNcs$eZdZ‡fdd„Zdd„Z‡ZS)ÚMOONcsLtƒ ||¡| ¡| |t¡td|j›d|j›�ƒtdƒg|_dS)Nz Join ratio / total clients: z / z%Finished creating server and clients.) ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚprintÚ join_ratioÚ num_clientsÚBudget)ÚselfÚargsÚtimes©Ú __class__©õXD:\京东\promot\第二次投稿\实验\native - pro\system\flcore\servers\servermoon.pyr s  z MOON.__init__cCsdg}g}d|_d}t|jdƒD]Ô}t ¡}| ¡|_| ¡||jdkrltd|›d�ƒtdƒ|  ¡|jD] }|  ¡qr||jdkr¢tdƒ|j |d�|  ¡|  ¡|j  t ¡|¡td |j d ƒ|j|jg|jd �|_|d7}q td ƒtt|jƒƒtd ƒtt|ƒƒtdƒtt|j dd…ƒt|j dd…ƒƒ| ¡| ¡dS)NFréz -------------Round number: z -------------z Evaluate global modelz Evaluate local model)Úaccz2--------------------------------------------------éÿÿÿÿ)Úacc_lssÚtop_cntz Best global accuracy.z Best local accuracy.z Averaged time per iteration.)ÚdoneÚrangeÚ global_roundsÚtimeÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gapr ÚevaluateÚtrainÚreceive_modelsÚaggregate_parametersrÚappendÚ check_doneÚ rs_test_accrÚmaxÚsumÚlenÚ save_resultsÚsave_global_model)rÚ local_accÚiÚs_tÚclientrrrr$s>      (z MOON.train)Ú__name__Ú __module__Ú __qualname__rr$Ú __classcell__rrrrrs r) Zflcore.clients.clientmoonrÚflcore.servers.serverbaserÚutils.data_utilsrÚ threadingrrrrrrrÚ<module>s    
2,012
Python
.pyt
22
90.227273
352
0.473631
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,030
serverbase.cpython-38.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverbase.cpython-38.pyc
U ¨ÿcÍ)ã@sXddlZddlZddlZddlZddlZddlZddlZddlm Z Gdd„de ƒZ dS)éN)Úread_client_datac@s°eZdZdd„Zdd„Zdd„Zdd„Zd d „Zd d „Zd d„Z dd„Z dd„Z dd„Z dd„Z dd„Zdd„Zdd„Zdd„Zdd „Zd!d"„Zd*d$d%„Zd&d'„Zd+d(d)„Zd#S),ÚServercCs|j|_|j|_|j|_|j|_|j|_|j|_t |j ¡|_ |j |_ |j |_ t |j |j ƒ|_|j|_|j|_|j|_|j|_|j|_|j|_||_|j|_d|_g|_g|_g|_g|_g|_g|_g|_g|_g|_ g|_!g|_"||_#|j$|_$|j%|_%|j&|_&|j'|_'dS)Néd)(ÚdeviceÚdatasetÚ global_roundsÚ local_stepsÚ batch_sizeÚlocal_learning_rateÚ learning_rateÚcopyÚdeepcopyÚmodelÚ global_modelÚ num_clientsÚ join_ratioÚintÚ join_clientsÚ algorithmÚ time_selectÚgoalÚtime_thretholdÚsave_folder_nameÚ num_promptÚargsÚ plocal_stepsÚtop_cntÚclientsÚselected_clientsÚtrain_slow_clientsÚsend_slow_clientsÚuploaded_weightsÚ uploaded_idsÚuploaded_modelsÚ rs_test_accÚrs_test_acc_stdÚ rs_test_aucÚ rs_train_lossÚtimesÚeval_gapÚclient_drop_rateÚtrain_slow_rateÚsend_slow_rate)Úselfrr(©r.õbD:\京东\promot\第二次投稿\å®�验\æœ�务器\native - pro\system\flcore\servers\serverbase.pyÚ__init__ sFzServer.__init__c Csrtt|jƒ|j|jƒD]V\}}}t|j||dd�}t|j||dd�}|||t|ƒt|ƒ||d�}|j  |¡qdS)NT)Úis_trainF)ÚidÚ train_samplesÚ test_samplesÚ train_slowÚ send_slow) ÚzipÚrangerrr rrÚlenrÚappend) r-rZ clientObjÚir5r6Ú train_dataÚ test_dataÚclientr.r.r/Ú set_clients8s ûzServer.set_clientscCsVdd„t|jƒDƒ}dd„t|jƒDƒ}tj |t||jƒ¡}|D] }d||<qD|S)NcSsg|]}d‘qS)Fr.©Ú.0r;r.r.r/Ú <listcomp>Isz.Server.select_slow_clients.<locals>.<listcomp>cSsg|]}|‘qSr.r.r@r.r.r/rBJsT)r8rÚnpÚrandomÚchoicer)r-Z slow_rateZ slow_clientsÚidxÚidx_r;r.r.r/Úselect_slow_clientsHs  zServer.select_slow_clientscCs | |j¡|_| |j¡|_dS©N)rHr+rr,r )r-r.r.r/Úset_slow_clientsQs ÿÿzServer.set_slow_clientscCsttjj|j|jdd�ƒ}|S)NF)Úreplace)ÚlistrCrDrErr)r-rr.r.r/Úselect_clientsWszServer.select_clientscCs.t|jƒdkst‚|jD]}| |j¡qdS©Nr)r9rÚAssertionErrorÚset_parametersr)r-r>r.r.r/Ú send_models\s zServer.send_modelscCs�t|jƒdkst‚g|_d}g|_g|_|jD]8}|j |j¡||j7}|j |j¡|j |j ¡q.t |jƒD]\}}|||j|<qrdSrN) r9rrOr!r"r#r:r3r2rÚ enumerate)r-Z tot_samplesr>r;Úwr.r.r/Úreceive_modelsds  
zServer.receive_modelscCsft|jƒdkst‚t |jd¡|_|j ¡D]}|j ¡q.t |j |jƒD]\}}|  ||¡qLdSrN) r9r#rOr r rÚ parametersÚdataÚzero_r7r!Úadd_parameters)r-ÚparamrSÚ client_modelr.r.r/Úaggregate_parametersss  zServer.aggregate_parameterscCs:t|j ¡| ¡ƒD] \}}|j|j ¡|7_qdSrI)r7rrUrVÚclone)r-rSrZZ server_paramZ client_paramr.r.r/rX}szServer.add_parameterscCsØtj d|j|jjd|jjd|jjd|jjd|jj d|jj ¡}tj  |¡sht  |¡tj ||j ddt|jƒdt|jƒdt|jƒdt|jƒdt|jƒd¡}t |j|¡dS©NÚmodelsÚ*Ú_serverÚ_ú.pt)ÚosÚpathÚjoinrrÚarv1Úarv2Úarv3Úarv4Úarv5Úarv6ÚexistsÚmakedirsrÚstrrrrrrÚtorchÚsaver©r-Z model_pathr.r.r/Úsave_global_model�s R  ^zServer.save_global_modelcCsĞtj d|j|jjd|jjd|jjd|jjd|jj d|jj ¡}tj ||j ddt |j ƒdt |jƒdt |jƒdt |jƒdt |jƒd¡}tj |¡sÀt‚t |¡|_dSr])rcrdrerrrfrgrhrirjrkrrnrrrrrrlrOroÚloadrrqr.r.r/Ú load_modelˆsR^zServer.load_modelcCs¸tj d|j|jjd|jjd|jjd|jjd|jj d|jj ¡}tj ||j dt |j ƒdt |jƒdt |jƒdt |jƒdt |jƒd¡}tj |¡S)Nr^r_rarb)rcrdrerrrfrgrhrirjrkrrnrrrrrrlrqr.r.r/Ú model_exists�sRZzServer.model_existsc CsV|jd|j}d|jjd|jjd|jjd|jjd|jjd|jjd}t j   |¡srt   |¡t |jƒ�rR|d|jdt|jƒdt|jƒdt|jƒdt|jƒdt|jƒdt|jƒ}|d |¡}td|ƒt |d¡�F}|jd|jd �|jd |jd �|jd |jd �|jd |jd �W5QRXdS) Nraz ../results/r_ú/z{}.h5z File path: rSr$)rVr%r&r')rrrrfrgrhrirjrkrcrdrlrmr9r$rrnr(rrrrrÚformatÚprintÚh5pyZFileZcreate_datasetr%r&r')r-ÚalgoÚ result_pathÚ file_pathZhfr.r.r/Ú save_results“sL   b zServer.save_resultsc Cs„tj |j¡st |j¡t |tj |jd|dt|j ƒdt|j ƒdt|j ƒdt|j ƒdt|j ƒd¡¡dS©NZserver_rarb)rcrdrlrrmrorprernrrrrr)r-ÚitemÚ item_namer.r.r/Ú save_item¤s zServer.save_itemcCsdt tj |jd|dt|jƒdt|jƒdt|j ƒdt|j ƒdt|j ƒd¡¡Sr~) rorsrcrdrerrnrrrrr)r-r€r.r.r/Ú load_item©szServer.load_itemc Cshg}g}g}|jD]8}| ¡\}}}| |d¡| ||¡| |¡qdd„|jDƒ}||||fS)Nçğ?cSsg|] }|j‘qSr.©r2©rAÚcr.r.r/rB¹sz'Server.test_metrics.<locals>.<listcomp>)rÚ test_metricsr:) r-Ú num_samplesZ tot_correctZtot_aucr†ÚctÚnsÚaucÚidsr.r.r/r‡¬s  zServer.test_metricscCsRg}g}|jD](}| ¡\}}| |¡| |d¡qdd„|jDƒ}|||fS)NrƒcSsg|] }|j‘qSr.r„r…r.r.r/rBÅsz(Server.train_metrics.<locals>.<listcomp>)rÚ 
train_metricsr:)r-rˆÚlossesr†ÚclrŠrŒr.r.r/r�½s   zServer.train_metricsNc CsP| ¡}| ¡}t|dƒdt|dƒ}t|dƒdt|dƒ}t|dƒdt|dƒ}dd„t|d|dƒDƒ}dd„t|d|dƒDƒ} |dkr²|j |¡n | |¡|j |¡|j t  |¡¡|dkrğ|j  |¡n | |¡t d  |¡ƒt d   |¡ƒt d   |¡ƒt d   t  |¡¡ƒt d   t  | ¡¡ƒdS) NérƒéécSsg|]\}}||‘qSr.r.©rAÚaÚnr.r.r/rBÒsz#Server.evaluate.<locals>.<listcomp>cSsg|]\}}||‘qSr.r.r“r.r.r/rBÓszAveraged Train Loss: {:.4f}zAveraged Test Accurancy: {:.4f}zAveraged Test AUC: {:.4f}zStd Test Accurancy: {:.4f}zStd Test AUC: {:.4f}) r‡r�Úsumr7r$r:r&r%rCÚstdr'rxrw) r-ÚaccÚlossÚstatsZ stats_trainÚtest_accÚtest_aucÚ train_lossZaccsZaucsr.r.r/ÚevaluateÊs(   zServer.evaluatecCs.td |¡ƒtd |¡ƒtd |¡ƒdS)NzAverage Test Accurancy: {:.4f}zAverage Test AUC: {:.4f}zAverage Train Loss: {:.4f})rxrw)r-r›rœr�r.r.r/Úprint_çsz Server.print_cCsğ|D]æ}|dkrr|dkrrt|ƒt t |¡d¡jd|k}t|ƒdko^t || d…¡|k}|rj|rjqêdSq|dkr¬t|ƒt t |¡d¡jd|k}|r¤qêdSq|dkræt|ƒdkoÖt || d…¡|k}|rŞqêdSqt‚qdS)Nr‘rFT)r9roÚtopkÚtensorÚindicesrCr—ÚNotImplementedError)r-Zacc_lssrÚ div_valueZacc_lsZfind_topZfind_divr.r.r/Ú check_doneìs&$$$$zServer.check_done)NN)NN)Ú__name__Ú __module__Ú __qualname__r0r?rHrJrMrQrTr[rXrrrtrur}r�r‚r‡r�r�rŸr¥r.r.r.r/r s(+   r) rorcÚnumpyrCryr ÚtimerDÚutils.data_utilsrÚobjectrr.r.r.r/Ú<module>s 
9,576
Python
.pyt
50
190.36
711
0.388055
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,031
serverbase.cpython-37.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverbase.cpython-37.pyc
B vĞecû'ã@sXddlZddlZddlZddlZddlZddlZddlZddlm Z Gdd„de ƒZ dS)éN)Úread_client_datac@s°eZdZdd„Zdd„Zdd„Zdd„Zd d „Zd d „Zd d„Z dd„Z dd„Z dd„Z dd„Z dd„Zdd„Zdd„Zdd„Zdd „Zd!d"„Zd*d$d%„Zd&d'„Zd+d(d)„Zd#S),ÚServercCs |j|_|j|_|j|_|j|_|j|_|j|_t |j ¡|_ |j |_ |j |_ t |j |j ƒ|_|j|_|j|_|j|_|j|_|j|_|j|_|j|_d|_g|_g|_g|_g|_g|_g|_g|_g|_g|_g|_ g|_!||_"|j#|_#|j$|_$|j%|_%|j&|_&dS)Néd)'ÚdeviceÚdatasetÚ global_roundsÚ local_stepsÚ batch_sizeÚlocal_learning_rateÚ learning_rateÚcopyÚdeepcopyÚmodelÚ global_modelÚ num_clientsÚ join_ratioÚintÚ join_clientsÚ algorithmÚ time_selectÚgoalÚtime_thretholdÚsave_folder_nameÚ num_promptÚ plocal_stepsÚtop_cntÚclientsÚselected_clientsÚtrain_slow_clientsÚsend_slow_clientsÚuploaded_weightsÚ uploaded_idsÚuploaded_modelsÚ rs_test_accÚrs_test_acc_stdÚ rs_test_aucÚ rs_train_lossÚtimesÚeval_gapÚclient_drop_rateÚtrain_slow_rateÚsend_slow_rate)ÚselfÚargsr'©r.úG/root/autodl-tmp/PFL-Non-IID-master/system/flcore/servers/serverbase.pyÚ__init__ sDzServer.__init__c Csrxltt|jƒ|j|jƒD]R\}}}t|j|dd�}t|j|dd�}|||t|ƒt|ƒ||d�}|j  |¡qWdS)NT)Úis_trainF)ÚidÚ train_samplesÚ test_samplesÚ train_slowÚ send_slow) ÚzipÚrangerrrrrÚlenrÚappend) r,r-Z clientObjÚir5r6Ú train_dataÚ test_dataÚclientr.r.r/Ú set_clients7s"zServer.set_clientscCsZdd„t|jƒDƒ}dd„t|jƒDƒ}tj |t||jƒ¡}x|D] }d||<qFW|S)NcSsg|]}d‘qS)Fr.)Ú.0r;r.r.r/ú <listcomp>Hsz.Server.select_slow_clients.<locals>.<listcomp>cSsg|]}|‘qSr.r.)r@r;r.r.r/rAIsT)r8rÚnpÚrandomÚchoicer)r,Z slow_rateZ slow_clientsÚidxÚidx_r;r.r.r/Úselect_slow_clientsGs   zServer.select_slow_clientscCs | |j¡|_| |j¡|_dS)N)rGr*rr+r)r,r.r.r/Úset_slow_clientsPs zServer.set_slow_clientscCsttjj|j|jdd�ƒ}|S)NF)Úreplace)ÚlistrBrCrDrr)r,rr.r.r/Úselect_clientsVszServer.select_clientscCs2t|jƒdkst‚x|jD]}| |j¡qWdS)Nr)r9rÚAssertionErrorÚset_parametersr)r,r>r.r.r/Ú send_models[s zServer.send_modelscCs–t|jƒdkst‚g|_d}g|_g|_xB|jD]8}|j |j¡||j7}|j |j¡|j |j ¡q0Wx$t |jƒD]\}}|||j|<qxWdS)Nr) r9rrLr r!r"r:r3r2rÚ enumerate)r,Z tot_samplesr>r;Úwr.r.r/Úreceive_modelscs  
zServer.receive_modelscCsnt|jƒdkst‚t |jd¡|_x|j ¡D]}|j ¡q0Wx&t |j |jƒD]\}}|  ||¡qRWdS)Nr) r9r"rLr r rÚ parametersÚdataÚzero_r7r Úadd_parameters)r,ÚparamrPÚ client_modelr.r.r/Úaggregate_parametersrs zServer.aggregate_parameterscCs>x8t|j ¡| ¡ƒD] \}}|j|j ¡|7_qWdS)N)r7rrRrSÚclone)r,rPrWZ server_paramZ client_paramr.r.r/rU|szServer.add_parameterscCs–tj d|j¡}tj |¡s&t |¡tj ||jddt|jƒdt|j ƒdt|j ƒdt|j ƒdt|j ƒd¡}t  |j|¡dS)NÚmodelsÚ_serverÚ_z.pt)ÚosÚpathÚjoinrÚexistsÚmakedirsrÚstrrrrrrÚtorchÚsaver)r,Ú model_pathr.r.r/Úsave_global_model€s   ^zServer.save_global_modelcCs�tj d|j¡}tj ||jddt|jƒdt|jƒdt|jƒdt|j ƒdt|j ƒd¡}tj  |¡s~t ‚t  |¡|_dS)NrZr[r\z.pt)r]r^r_rrrbrrrrrr`rLrcÚloadr)r,rer.r.r/Ú load_model‡s^zServer.load_modelcCsvtj d|j¡}tj ||jdt|jƒdt|jƒdt|jƒdt|j ƒdt|j ƒd¡}tj  |¡S)NrZr\z.pt) r]r^r_rrrbrrrrrr`)r,rer.r.r/Ú model_exists�sZzServer.model_existsc Cs|jd|j}d}tj |¡s*t |¡t|jƒ�r |d|jdt |j ƒdt |j ƒdt |j ƒdt |j ƒdt |jƒdt |jƒ}|d |¡}td|ƒt |d¡�F}|jd|jd�|jd|jd�|jd |jd�|jd |jd�WdQRXdS) Nr\z ../results/z{}.h5z File path: rPr#)rSr$r%r&)rrr]r^r`rar9r#rrbr'rrrrrÚformatÚprintÚh5pyÚFileZcreate_datasetr$r%r&)r,ÚalgoÚ result_pathÚ file_pathZhfr.r.r/Ú save_results’s   b zServer.save_resultsc Cs„tj |j¡st |j¡t |tj |jd|dt|j ƒdt|j ƒdt|j ƒdt|j ƒdt|j ƒd¡¡dS)NÚserver_r\z.pt)r]r^r`rrarcrdr_rbrrrrr)r,ÚitemÚ item_namer.r.r/Ú save_item£s zServer.save_itemcCsdt tj |jd|dt|jƒdt|jƒdt|j ƒdt|j ƒdt|j ƒd¡¡S)Nrrr\z.pt) rcrgr]r^r_rrbrrrrr)r,rtr.r.r/Ú load_item¨szServer.load_itemc Cslg}g}g}xB|jD]8}| ¡\}}}| |d¡| ||¡| |¡qWdd„|jDƒ}||||fS)Ngğ?cSsg|] }|j‘qSr.)r2)r@Úcr.r.r/rA¸sz'Server.test_metrics.<locals>.<listcomp>)rÚ test_metricsr:) r,Ú num_samplesZ tot_correctZtot_aucrwÚctÚnsÚaucÚidsr.r.r/rx«s zServer.test_metricscCsVg}g}x2|jD](}| ¡\}}| |¡| |d¡qWdd„|jDƒ}|||fS)Ngğ?cSsg|] }|j‘qSr.)r2)r@rwr.r.r/rAÄsz(Server.train_metrics.<locals>.<listcomp>)rÚ train_metricsr:)r,ryÚlossesrwÚclr{r}r.r.r/r~¼s   zServer.train_metricsNc CsP| ¡}| ¡}t|dƒdt|dƒ}t|dƒdt|dƒ}t|dƒdt|dƒ}dd„t|d|dƒDƒ}dd„t|d|dƒDƒ} |dkr²|j |¡n | |¡|j |¡|j t  |¡¡|dkrğ|j  |¡n | 
|¡t d  |¡ƒt d   |¡ƒt d   |¡ƒt d   t  |¡¡ƒt d   t  | ¡¡ƒdS) Négğ?éécSsg|]\}}||‘qSr.r.)r@ÚaÚnr.r.r/rAÑsz#Server.evaluate.<locals>.<listcomp>cSsg|]\}}||‘qSr.r.)r@r„r…r.r.r/rAÒszAveraged Train Loss: {:.4f}zAveraged Test Accurancy: {:.4f}zAveraged Test AUC: {:.4f}zStd Test Accurancy: {:.4f}zStd Test AUC: {:.4f}) rxr~Úsumr7r#r:r%r$rBÚstdr&rkrj) r,ÚaccÚlossÚstatsZ stats_trainÚtest_accÚtest_aucÚ train_lossZaccsZaucsr.r.r/ÚevaluateÉs(   zServer.evaluatecCs.td |¡ƒtd |¡ƒtd |¡ƒdS)NzAverage Test Accurancy: {:.4f}zAverage Test AUC: {:.4f}zAverage Train Loss: {:.4f})rkrj)r,r‹rŒr�r.r.r/Úprint_æsz Server.print_cCsîxè|D]à}|dkrr|dkrrt|ƒt t |¡d¡jd|k}t|ƒdko`t || d…¡|k}|rl|rlqædSq|dkrªt|ƒt t |¡d¡jd|k}|r¤qædSq|dkrât|ƒdkoÔt || d…¡|k}|rÜqædSqt‚qWdS)Nr‚rFT)r9rcÚtopkÚtensorÚindicesrBr‡ÚNotImplementedError)r,Zacc_lssrÚ div_valueZacc_lsZfind_topZfind_divr.r.r/Ú check_doneës& $$$$zServer.check_done)NN)NN)Ú__name__Ú __module__Ú __qualname__r0r?rGrHrKrNrQrXrUrfrhrirqrurvrxr~r�r�r•r.r.r.r/r s(*   r) rcr]ÚnumpyrBrlr ÚtimerCÚutils.data_utilsrÚobjectrr.r.r.r/Ú<module>s 
9,090
Python
.pyt
50
180.68
695
0.394757
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,032
serverfomo.cpython-38.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverfomo.cpython-38.pyc
U •icWã@s`ddlZddlZddlZddlZddlZddlmZddlm Z ddl m Z Gdd„de ƒZ dS)éN)Ú clientFomo)ÚServer)ÚThreadcs4eZdZ‡fdd„Zdd„Zdd„Zdd„Z‡ZS) ÚFedFomocs€tƒ ||¡| ¡| |t¡t tj|j|j d�¡|_ |j g|_ g|_ t|j|jƒ|_td|j›d|j›�ƒtdƒdS)N)Údevicez Join ratio / total clients: z / z%Finished creating server and clients.)ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚtorchÚdiagÚonesÚ num_clientsrÚPÚ global_modelÚuploaded_modelsÚ uploaded_idsÚminÚMÚ join_clientsÚprintÚ join_ratio)ÚselfÚargsÚtimes©Ú __class__©õED:\京东\promot\cifar\cifar\tiny\system\flcore\servers\serverfomo.pyr s  zFedFomo.__init__cCsšt|jdƒD]`}| ¡|_| ¡||jdkrRtd|›d�ƒtdƒ| ¡|jD] }| ¡qX|  ¡qtdƒtt |j ƒƒ|  ¡|  ¡dS)Nérz -------------Round number: z -------------z Evaluate global modelz Best global accuracy.)ÚrangeÚ global_roundsÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gaprÚevaluateÚtrainÚreceive_modelsÚmaxÚ rs_test_accÚ save_resultsÚsave_global_model)rÚiÚclientrrrr's    z FedFomo.traincCsøt|jƒdkst‚|jD]Ú}t ¡}|jrDt dt tj  ¡¡¡t|j ƒdkrÂt |j t|j ƒƒ}t |j|j|j |¡j ¡}g}g}|D]$}| |j |¡| |j |¡q�| ||¡|jdd7<|jddt ¡|7<qdS)Nrgš™™™™™¹?Ú num_roundsrÚ total_costé)ÚlenÚclientsÚAssertionErrorÚtimeÚ send_slowÚsleepÚnpÚabsÚrandomÚrandrrrrr ÚtopkrÚidÚindicesÚtolistÚappendr(Úsend_time_cost)rr.Ú start_timeZM_r>rrr-rrrr$9s    zFedFomo.send_modelscCsøt|jƒdkst‚t |jtd|j|jƒ¡}g|_g|_ d}g|_ |D]†}|j d|j d|j d|j d}||j krJ|j |j¡|j  |j¡||j7}|j  t |j¡¡|j|j|j7<qJt|j ƒD]\}}|||j |<qÜdS)Nrrr0r/)r2r#r4r:ÚsampleÚintÚclient_drop_raterrÚuploaded_weightsrÚtrain_time_costrAÚtime_thretholdr@r=Ú train_samplesÚcopyÚdeepcopyÚmodelrZ weight_vectorÚ enumerate)rZactive_clientsÚ tot_samplesr.Zclient_time_costr-Úwrrrr(Ps*ÿÿ  zFedFomo.receive_models)Ú__name__Ú __module__Ú __qualname__rr'r$r(Ú __classcell__rrrrr s r) r r5rJr:Únumpyr8Zflcore.clients.clientfomorÚflcore.servers.serverbaserÚ threadingrrrrrrÚ<module>s   
2,998
Python
.pyt
28
105.928571
428
0.453383
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,033
serverproto.cpython-39.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverproto.cpython-39.pyc
a f¾`cÜã@shddlmZddlmZddlmZddlmZddlZddl Z ddl m Z Gdd„deƒZ d d „ZdS) é)Ú clientProto)ÚServer)Úread_client_data)ÚThreadN)Ú defaultdictcs>eZdZ‡fdd„Zdd„Zdd„Zdd„Zd d d „Z‡ZS) ÚFedProtocsjtƒ ||¡| ¡| |t¡td|j›d|j›�ƒtdƒg|_|j |_ dd„t |j ƒDƒ|_ dS)Nz Join ratio / total clients: z / z%Finished creating server and clients.cSsg|]}d‘qS)N©)Ú.0Ú_rrút/media/sim812/391e55df-b6f2-4fe9-a920-53434a8506fa/lgh/pdept/PFL-Non-IID-master/system/flcore/servers/serverproto.pyÚ <listcomp>óz%FedProto.__init__.<locals>.<listcomp>) ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚprintÚ join_ratioÚ num_clientsÚBudgetÚ num_classesÚrangeÚ global_protos)ÚselfÚargsÚtimes©Ú __class__rr r s zFedProto.__init__cCsd|_d}|jsÔt ¡}| ¡|_||jdkrX|dkrXtd|›d�ƒtdƒ| ¡|jD] }| ¡q^| ¡t |j ƒ|_ |  ¡|j  t ¡|¡td|j dƒ|dkrÊ|j|jg|jd�|_|d 7}q td ƒtt|jƒƒtt|j d d…ƒt|j d d…ƒƒ| ¡dS) NFrz -------------Round number: z -------------z Evaluate global modelz2--------------------------------------------------éÿÿÿÿ)Úacc_lssÚtop_cntéz Best global accuracy.)ÚdoneÚtimeÚselect_clientsÚselected_clientsÚeval_gaprÚevaluateÚtrainÚreceive_protosÚproto_aggregationÚuploaded_protosrÚ send_protosrÚappendÚ check_doneÚ rs_test_accr ÚmaxÚsumÚlenÚ save_results)rÚiÚs_tÚclientrrr r(s.     
(zFedProto.traincCs.t|jƒdksJ‚|jD]}| |j¡qdS©Nr)r2r%Z set_protosr©rr6rrr r,Cs zFedProto.send_protoscCsJt|jƒdksJ‚g|_g|_|jD] }|j |j¡|j |j¡q$dSr7)r2r%Ú uploaded_idsr+r-ÚidÚprotosr8rrr r)Is  zFedProto.receive_protosNcCsØ| ¡}| ¡}t|dƒdt|dƒ}t|dƒdt|dƒ}dd„t|d|dƒDƒ}|dkrz|j |¡n | |¡|dkrš|j |¡n | |¡td |¡ƒtd |¡ƒtd t   |¡¡ƒdS) Négğ?r!cSsg|]\}}||‘qSrr)r ÚaÚnrrr r Xr z%FedProto.evaluate.<locals>.<listcomp>zAveraged Train Loss: {:.4f}zAveraged Test Accurancy: {:.4f}zStd Test Accurancy: {:.4f}) Ú test_metricsÚ train_metricsr1Úzipr/r-Ú rs_train_lossrÚformatÚnpÚstd)rÚaccÚlossÚstatsÚ stats_trainÚtest_accÚ train_lossÚaccsrrr r'Rs  zFedProto.evaluate)NN) Ú__name__Ú __module__Ú __qualname__rr(r,r)r'Ú __classcell__rrrr r s  ( rcCs–ttƒ}|D]$}| ¡D]}|| ||¡qq | ¡D]V\}}t|ƒdkr‚d|dj}|D]}||j7}q`|t|ƒ||<q:|dj||<q:|S)Nr!r)rÚlistÚkeysr-Úitemsr2Údata)Zlocal_protos_listZagg_protos_labelZ local_protosÚlabelZ proto_listÚprotor4rrr r*ks   r*)Zflcore.clients.clientprotorÚflcore.servers.serverbaserÚutils.data_utilsrÚ threadingrr#ÚnumpyrDÚ collectionsrrr*rrrr Ú<module>s     a
3,711
Python
.pyt
33
111.30303
408
0.437075
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,034
data_utils.cpython-38.pyc
hkgdifyu_pFedPT/system/utils/__pycache__/data_utils.cpython-38.pyc
U ?şcbã@sZddlZddlZddlZddlZdd„Zdd„Zdd„Zdd d „Zdd d „Z dd d„Z dS)éNccs‚|d}|d}tj ¡}tj |¡tj |¡tj |¡tdt|ƒ|ƒD].}||||…}||||…}||fVqNdS)z— data is a dict := {'x': [numpy array], 'y': [numpy array]} (on one client) returns x, y, which are both numpy array of length: batch_size ÚxÚyrN)ÚnpÚrandomÚ get_stateÚshuffleÚ set_stateÚrangeÚlen)ÚdataÚ batch_sizeÚdata_xÚdata_yÚ ran_stateÚiÚ batched_xÚ batched_y©rõYD:\京东\promot\第二次投稿\å®�验\æœ�务器\native - pro\system\utils\data_utils.pyÚ batch_datas    rcCs’t|ƒ|d}t|ƒ|kr†tj tt|dƒƒ¡}||}||t|ƒkrd||d…||d…fS||||…||||…fSn||fSdS)Né)r rrÚchoiceÚlistr )r rr Z num_partsÚ batch_idxÚ sample_indexrrrÚget_random_batch_sample#s "rcCs^|d}|d}tj ¡}tj |¡tj |¡tj |¡|d|…}|d|…}||fS)Nrrr)rrrrr)r r r rrrrrrrÚget_batch_sample0s      rTc Cs|rŠtj d||jd|jd|jd|jd|jd|jd¡}|t |ƒd}t |dƒ�}t j |dd�d  ¡}W5QRX|Stj d||jd|jd|jd|jd|jd|jd ¡}|t |ƒd} t | dƒ�}t j |dd�d  ¡} W5QRX| SdS) Nz ../datasetÚ*ztrain/z.npzÚrbT)Ú allow_pickler ztest/)ÚosÚpathÚjoinZarv1Zarv2Zarv3Zarv4Zarv5Zarv6ÚstrÚopenrÚloadÚtolist) ÚdatasetÚidxÚargsÚis_trainZtrain_data_dirZ train_fileÚfÚ train_dataZ test_data_dirÚ test_fileÚ test_datarrrÚ read_data?sF  F  r/c CsÖ|dd…dks |dd…dkr*t||ƒS|r€t||||ƒ}t |d¡ tj¡}t |d¡ tj¡}dd„t||ƒDƒ}|St||||ƒ}t |d¡ tj¡}t |d¡ tj¡} dd„t|| ƒDƒ}|SdS) NéÚagZSSrrcSsg|]\}}||f‘qSrr©Ú.0rrrrrÚ <listcomp>\sz$read_client_data.<locals>.<listcomp>cSsg|]\}}||f‘qSrrr2rrrr4bs)Úread_client_data_textr/ÚtorchÚTensorÚtypeÚfloat32Úint64Úzip) r'r(r)r*r,ÚX_trainÚy_trainr.ÚX_testÚy_testrrrÚread_client_dataSs  r@c Cs|r€t|||ƒ}tt|d�ƒ\}}|d}t |¡ tj¡}t |¡ tj¡}t |d¡ tj¡}dd„t|||ƒDƒ}|St|||ƒ}tt|d�ƒ\}} |d} t |¡ tj¡}t | ¡ tj¡} t |d¡ tj¡} dd„t|| | ƒDƒ}|SdS)NrrcSsg|]\}}}||f|f‘qSrr©r3rÚlensrrrrr4psz)read_client_data_text.<locals>.<listcomp>cSsg|]\}}}||f|f‘qSrrrArrrr4{s)r/rr;r6r7r8r:) r'r(r*r,r<Z X_train_lensr=r.r>Z X_test_lensr?rrrr5fs"  r5)T)T)T) ZujsonÚnumpyrr r6rrrr/r@r5rrrrÚ<module>s    
3,614
Python
.pyt
30
119.033333
601
0.35258
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,035
data_utils.cpython-37.pyc
hkgdifyu_pFedPT/system/utils/__pycache__/data_utils.cpython-37.pyc
B Ê:cc®ã@sZddlZddlZddlZddlZdd„Zdd„Zdd„Zdd d „Zdd d „Z dd d„Z dS)éNccs†|d}|d}tj ¡}tj |¡tj |¡tj |¡xBtdt|ƒ|ƒD].}||||…}||||…}||fVqPWdS)z— data is a dict := {'x': [numpy array], 'y': [numpy array]} (on one client) returns x, y, which are both numpy array of length: batch_size ÚxÚyrN)ÚnpÚrandomÚ get_stateÚshuffleÚ set_stateÚrangeÚlen)ÚdataÚ batch_sizeÚdata_xÚdata_yÚ ran_stateÚiÚ batched_xÚ batched_y©rú>/root/autodl-tmp/PFL-Non-IID-master/system/utils/data_utils.pyÚ batch_datas    rcCs’t|ƒ|d}t|ƒ|kr†tj tt|dƒƒ¡}||}||t|ƒkrd||d…||d…fS||||…||||…fSn||fSdS)Né)r rrÚchoiceÚlistr )r rr Z num_partsÚ batch_idxÚ sample_indexrrrÚget_random_batch_sample#s "rcCs^|d}|d}tj ¡}tj |¡tj |¡tj |¡|d|…}|d|…}||fS)Nrrr)rrrrr)r r r rrrrrrrÚget_batch_sample0s      rTc Cs¨|rTtj d|d¡}|t|ƒd}t|dƒ�}tj|dd�d ¡}WdQRX|Stj d|d¡}|t|ƒd}t|dƒ�}tj|dd�d ¡} WdQRX| SdS) Nz ../datasetztrain/z.npzÚrbT)Ú allow_pickler ztest/)ÚosÚpathÚjoinÚstrÚopenrÚloadÚtolist) ÚdatasetÚidxÚis_trainZtrain_data_dirZ train_fileÚfÚ train_dataZ test_data_dirÚ test_fileÚ test_datarrrÚ read_data?s    r-c CsÒ|dd…dks |dd…dkr*t||ƒS|r~t|||ƒ}t |d¡ tj¡}t |d¡ tj¡}dd„t||ƒDƒ}|St|||ƒ}t |d¡ tj¡}t |d¡ tj¡}dd„t||ƒDƒ}|SdS) NéÚagZSSrrcSsg|]\}}||f‘qSrr)Ú.0rrrrrú <listcomp>\sz$read_client_data.<locals>.<listcomp>cSsg|]\}}||f‘qSrr)r0rrrrrr1bs)Úread_client_data_textr-ÚtorchÚTensorÚtypeÚfloat32Úint64Úzip) r&r'r(r*ÚX_trainÚy_trainr,ÚX_testÚy_testrrrÚread_client_dataSs    r=c Cs|r€t|||ƒ}tt|d�ƒ\}}|d}t |¡ tj¡}t |¡ tj¡}t |d¡ tj¡}dd„t|||ƒDƒ}|St|||ƒ}tt|d�ƒ\}} |d} t |¡ tj¡}t | ¡ tj¡} t |d¡ tj¡} dd„t|| | ƒDƒ}|SdS)NrrcSsg|]\}}}||f|f‘qSrr)r0rÚlensrrrrr1psz)read_client_data_text.<locals>.<listcomp>cSsg|]\}}}||f|f‘qSrr)r0rr>rrrrr1{s)r-rr8r3r4r5r7) r&r'r(r*r9Z X_train_lensr:r,r;Z X_test_lensr<rrrr2fs"  r2)T)T)T) ZujsonÚnumpyrrr3rrrr-r=r2rrrrÚ<module>s    
3,403
Python
.pyt
25
134.6
606
0.356318
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,036
mem_utils.cpython-37.pyc
hkgdifyu_pFedPT/system/utils/__pycache__/mem_utils.cpython-37.pyc
B Ë:ccEã@s~ddlZddlZddlmZddlmZmZmZddlZddlm Z ddl m Z e e dœdd„Zd Zd Zd ZGd d „d ƒZdS)éN)Ú defaultdict)ÚOptionalÚTupleÚList)Úisnan)Úsize)Ú num_bytesÚreturncCst|ƒr dSd t|ƒ¡S)NÚz{:.2f})rÚformatÚcalmsize)r©r ú=/root/autodl-tmp/PFL-Non-IID-master/system/utils/mem_utils.pyÚ readable_size sréOiic@s~eZdZdZdeejjdœdd„Zej e dœdd„Z d d „Z d d „Z deeejddœdd„Zdeeejddœdd„ZdS)Ú MemReporterz§A memory reporter that collects tensors and memory usages Parameters: - model: an extra nn.Module can be passed to infer the name of Tensors N)ÚmodelcCs�i|_ttƒ|_i|_d|_ttƒ}|dk rbt|tjj ƒs>t ‚x"|  ¡D]\}}||  |¡qHWx(|  ¡D]\}}d |¡|jt|ƒ<qlWdS)Nrú+)Ú tensor_namerÚlistÚdevice_mappingÚdevice_tensor_statÚname_idxÚ isinstanceÚtorchÚnnÚModuleÚAssertionErrorÚnamed_parametersÚappendÚitemsÚjoinÚid)ÚselfrÚ tensor_namesÚnameÚparamr r rÚ__init__s zMemReporter.__init__)Útensorr cCsNt|ƒ}||jkr|j|}n,t|ƒjt|jƒ}||j|<|jd7_|S)Né)r"rÚtypeÚ__name__Ústrr)r#r(Z tensor_idr%r r rÚ_get_tensor_name0s   zMemReporter._get_tensor_namecCs:t ¡}dd„|Dƒ}x|D]}|j|j |¡qWdS)a*Collect all tensor objects tracked by python NOTICE: - the buffers for backward which is implemented in C++ are not tracked by python's reference counting. - the gradients(.grad) of Parameters is not collected, and I don't know why. 
cSsg|]}t|tjƒr|‘qSr )rrÚTensor)Ú.0Úobjr r rú <listcomp>Fsz.MemReporter.collect_tensor.<locals>.<listcomp>N)ÚgcÚ get_objectsrÚdevicer)r#ÚobjectsÚtensorsÚtr r rÚcollect_tensor;s  zMemReporter.collect_tensorcsÞi‰ˆj ¡tjttttttfdœ‡‡‡fdd„ ‰x–ˆj  ¡D]ˆ\}}g}xp|D]h}|  ¡dkrhqVˆ|ƒ}||7}t |tj j ƒrV|jdk rVd ˆ |¡¡ˆjt|jƒ<ˆ|jƒ}||7}qVW|ˆj|<qDWˆj ¡dS)zûGet the memory stat of tensors and then release them As a memory profiler, we cannot hold the reference to any tensors, which causes possibly inaccurate memory usage stats, so we delete the tensors after getting required stats)r(r c sÊt|tjƒst‚ˆ |¡}|jr@ˆ| ¡ƒ}ˆ| ¡ƒ}||S| ¡}|  ¡}|  ¡  ¡}||}t   |t¡t}|  ¡ ¡} | ˆkr d |ˆ| ¡}d}n|ˆ| <t|  ¡ƒ} | s¼d} || ||fgS)z±Get the stat of a single tensor Returns: - stat: a tuple containing (tensor_name, tensor_size, tensor_numel, tensor_memory) z{}(->{})r)r))rrr.rr-Ú is_sparseÚ_indicesÚ_valuesÚnumelÚ element_sizeÚstoragerÚmathÚceilÚPYTORCH_MIN_ALLOCATEÚdata_ptrr Útuple) r(r%Z indices_statZ values_statr<r=Z fact_numelZfact_memory_sizeZ memory_sizerBr)Úget_tensor_statr#Ú visited_datar rrDSs.        z.MemReporter.get_stats.<locals>.get_tensor_statrNz{}.grad)rÚclearrr.rrr,Úintrr r<rrÚ ParameterÚgradr r-rr")r#r4r6Ú tensor_statsr(Ústatr )rDr#rErÚ get_statsJs$ *,     zMemReporter.get_statsF)ÚverboseÚ target_devicer c Csîxè|j ¡D]Ú\}}|dk r&||kr&q td |¡ƒd}d}x(|D] }|\}} } } || 7}|| 7}qBWtdtƒtd |t|ƒ¡ƒ|t d¡krÚtj |¡�tj  ¡} WdQRXtd |t| ƒ¡ƒ| |krÚtdƒtdtƒq WdS)Nz Storage on {}rú-z"Total Tensors: {} Used Memory: {}ÚcpuzThe allocated memory on {}: {}zOMemory differs due to the matrix alignment or invisible gradient buffer tensors) rr Úprintr ÚLENrrr4ÚcudaÚmemory_allocated) r#rMrNr4rJZ total_memÚ total_numelrKr%rr<ÚmemrTr r rÚ print_stats”s*    zMemReporter.print_stats)rMr4r cCs"| ¡| ¡|j||d�dS)a Interface for end-users to directly print the memory usage args: - verbose: flag to show tensor.storage reuse information - device: `torch.device` object, specify the target device to report detailed memory usage. 
It will print memory usage on all devices if not specified. Usually we only want to print the memory usage on CUDA devices. )rNN)r8rLrW)r#rMr4r r rÚreport¾s zMemReporter.report)N)FN)FN)r+Ú __module__Ú __qualname__Ú__doc__rrrrr'r.r,r-r8rLÚboolr4rWrXr r r rrs J*r)r?r2Ú collectionsrÚtypingrrrrrr rrGr,rrRrAZPYTORCH_MIN_CACHErr r r rÚ<module>s   
5,571
Python
.pyt
60
87.666667
390
0.482107
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,037
data_utils.cpython-39.pyc
hkgdifyu_pFedPT/system/utils/__pycache__/data_utils.cpython-39.pyc
a f¾`c®ã@sZddlZddlZddlZddlZdd„Zdd„Zdd„Zdd d „Zdd d „Z dd d„Z dS)éNccs‚|d}|d}tj ¡}tj |¡tj |¡tj |¡tdt|ƒ|ƒD].}||||…}||||…}||fVqNdS)z— data is a dict := {'x': [numpy array], 'y': [numpy array]} (on one client) returns x, y, which are both numpy array of length: batch_size ÚxÚyrN)ÚnpÚrandomÚ get_stateÚshuffleÚ set_stateÚrangeÚlen)ÚdataÚ batch_sizeÚdata_xÚdata_yÚ ran_stateÚiÚ batched_xÚ batched_y©rúj/media/sim812/391e55df-b6f2-4fe9-a920-53434a8506fa/lgh/pdept/PFL-Non-IID-master/system/utils/data_utils.pyÚ batch_datas    rcCs’t|ƒ|d}t|ƒ|kr†tj tt|dƒƒ¡}||}||t|ƒkrd||d…||d…fS||||…||||…fSn||fSdS)Né)r rrÚchoiceÚlistr )r rr Z num_partsÚ batch_idxÚ sample_indexrrrÚget_random_batch_sample#s "rcCs^|d}|d}tj ¡}tj |¡tj |¡tj |¡|d|…}|d|…}||fS)Nrrr)rrrrr)r r r rrrrrrrÚget_batch_sample0s      rTc CsĞ|rhtj d|d¡}|t|ƒd}t|dƒ�&}tj|dd�d ¡}Wdƒn1sZ0Y|Stj d|d¡}|t|ƒd}t|dƒ�&}tj|dd�d ¡} Wdƒn1s¾0Y| SdS) Nz ../datasetztrain/z.npzÚrbT)Ú allow_pickler ztest/)ÚosÚpathÚjoinÚstrÚopenrÚloadÚtolist) ÚdatasetÚidxÚis_trainZtrain_data_dirZ train_fileÚfÚ train_dataZ test_data_dirÚ test_fileÚ test_datarrrÚ read_data?s 4 4r-c CsÒ|dd…dks |dd…dkr*t||ƒS|r~t|||ƒ}t |d¡ tj¡}t |d¡ tj¡}dd„t||ƒDƒ}|St|||ƒ}t |d¡ tj¡}t |d¡ tj¡}dd„t||ƒDƒ}|SdS) NéÚagZSSrrcSsg|]\}}||f‘qSrr©Ú.0rrrrrÚ <listcomp>\óz$read_client_data.<locals>.<listcomp>cSsg|]\}}||f‘qSrrr0rrrr2br3)Úread_client_data_textr-ÚtorchÚTensorÚtypeÚfloat32Úint64Úzip) r&r'r(r*ÚX_trainÚy_trainr,ÚX_testÚy_testrrrÚread_client_dataSs    r?c Cs|r€t|||ƒ}tt|d�ƒ\}}|d}t |¡ tj¡}t |¡ tj¡}t |d¡ tj¡}dd„t|||ƒDƒ}|St|||ƒ}tt|d�ƒ\}} |d} t |¡ tj¡}t | ¡ tj¡} t |d¡ tj¡} dd„t|| | ƒDƒ}|SdS)NrrcSsg|]\}}}||f|f‘qSrr©r1rÚlensrrrrr2pr3z)read_client_data_text.<locals>.<listcomp>cSsg|]\}}}||f|f‘qSrrr@rrrr2{r3)r-rr:r5r6r7r9) r&r'r(r*r;Z X_train_lensr<r,r=Z X_test_lensr>rrrr4fs"  r4)T)T)T) ZujsonÚnumpyrrr5rrrr-r?r4rrrrÚ<module>s    
3,490
Python
.pyt
25
138.08
593
0.359492
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,038
result_utils.cpython-37.pyc
hkgdifyu_pFedPT/system/utils/__pycache__/result_utils.cpython-37.pyc
B Ë:ccÇã@s:ddlZddlZddlZd dd„Zd dd„Zdd d „ZdS)éNÚé é c Csnt||||t|ƒƒ}tj|dd�}g}x"t|ƒD]}| || ¡¡q0Wtdt |¡ƒtdt  |¡ƒdS)Nr)Úaxiszstd for best accurancy:zmean for best accurancy:) Úget_all_results_for_one_algoÚintÚnpÚaverageÚrangeÚappendÚmaxÚprintÚstdÚmean) Ú algorithmÚdatasetÚgoalÚtimesÚlengthÚtest_accZ test_acc_dataZ max_accurancyÚi©rú@/root/autodl-tmp/PFL-Non-IID-master/system/utils/result_utils.pyÚ average_datasrc Csxt ||f¡}|g|}xZt|ƒD]N}|d||d|dt|ƒ}t t|dd�¡d|…||dd…f<q"W|S)NÚ_F)Údelete)rÚzerosr ÚstrÚarrayÚread_data_then_delete) rrrrrrZalgorithms_listrÚ file_namerrrrs $&rFc CsTd|d}t |d¡�}t | d¡¡}WdQRX|rBt |¡tdt|ƒƒ|S)Nz ../results/z.h5ÚrÚ rs_test_acczLength: ) Úh5pyÚFilerrÚgetÚosÚremover Úlen)r rÚ file_pathÚhfr"rrrr s  r)rrrrr)rrrrr)F)r#Únumpyrr&rrrrrrrÚ<module>s  
1,378
Python
.pyt
10
136.5
467
0.42575
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,039
result_utils.cpython-38.pyc
hkgdifyu_pFedPT/system/utils/__pycache__/result_utils.cpython-38.pyc
U çYgcÈã@s:ddlZddlZddlZd dd„Zd dd„Zdd d „ZdS)éNÚé é c Csjt||||t|ƒƒ}tj|dd�}g}t|ƒD]}| || ¡¡q.tdt |¡ƒtdt  |¡ƒdS)Nr)Úaxiszstd for best accurancy:zmean for best accurancy:) Úget_all_results_for_one_algoÚintÚnpÚaverageÚrangeÚappendÚmaxÚprintÚstdÚmean) Ú algorithmÚdatasetÚgoalÚtimesÚlengthÚtest_accZ test_acc_dataZ max_accurancyÚi©rõQD:\京东\promot\第二次投稿\å®�验\native - pro\system\utils\result_utils.pyÚ average_datasÿ rc Cstt ||f¡}|g|}t|ƒD]N}|d||d|dt|ƒ}t t|dd�¡d|…||dd…f<q |S)NÚ_F)Údelete)rÚzerosr ÚstrÚarrayÚread_data_then_delete) rrrrrrZalgorithms_listrÚ file_namerrrrs(  ÿÿÿÿÿ ÿÿrFc CsTd|d}t |d¡�}t | d¡¡}W5QRX|rBt |¡tdt|ƒƒ|S)Nz ../results/z.h5ÚrÚ rs_test_acczLength: ) Úh5pyÚFilerrÚgetÚosÚremover Úlen)r rÚ file_pathÚhfr"rrrr!s  r)rrrrr)rrrrr)F)r#Únumpyrr&rrrrrrrÚ<module>s  
1,442
Python
.pyt
11
129.818182
497
0.412587
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,040
privacy.cpython-38.pyc
hkgdifyu_pFedPT/system/utils/__pycache__/privacy.cpython-38.pyc
U e¾`cã@sPddlmZddlmZdZdZdZdZdZdd „Z d d „Z d d „Z dd„Z dS)é)Ú PrivacyEngine)ÚDPModelInspectorgğ?gI@gH¯¼šò×z>édécCs$t||tt|td�}| |¡dS)N)Ú sample_rateZ target_deltaZnoise_multiplierZ max_grad_norm)rÚN_ACCUMULATION_STEPSÚDELTAÚ MAX_GRAD_NORMÚattach)ÚmodelÚ optimizerrÚdp_sigmaÚprivacy_engine©rõND:\京东\promot\PFL-Non-IID-master\PFL-Non-IID-master\system\utils\privacy.pyÚ initialize_dp sù rcCs|j t¡tfS©N)rZget_privacy_spentr)r rrrÚ get_dp_paramssrcCstƒ}| |¡dSr)rÚvalidate)r Z inspectorrrrÚcheck_dpsrcCs2|dtdks|d|kr&| ¡n| ¡dS)Nér)rÚstepZ virtual_step)r ÚiZlen_train_loaderrrrÚdp_step s rN) ZopacusrZopacus.dp_model_inspectorrr ZEPSILONrZEPOCHSrrrrrrrrrÚ<module>s   
1,200
Python
.pyt
6
198.833333
480
0.445188
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,041
mem_utils.cpython-38.pyc
hkgdifyu_pFedPT/system/utils/__pycache__/mem_utils.cpython-38.pyc
U –jfcEã@s~ddlZddlZddlmZddlmZmZmZddlZddlm Z ddl m Z e e dœdd„Zd Zd Zd ZGd d „d ƒZdS)éN)Ú defaultdict)ÚOptionalÚTupleÚList)Úisnan)Úsize)Ú num_bytesÚreturncCst|ƒr dSd t|ƒ¡S)NÚz{:.2f})rÚformatÚcalmsize)r©r õBD:\京东\promot\cifar\cifar\Cifar10_iid\system\utils\mem_utils.pyÚ readable_size sréOiic@s~eZdZdZdeejjdœdd„Zej e dœdd„Z d d „Z d d „Z deeejddœdd„Zdeeejddœdd„ZdS)Ú MemReporterz§A memory reporter that collects tensors and memory usages Parameters: - model: an extra nn.Module can be passed to infer the name of Tensors N)ÚmodelcCsˆi|_ttƒ|_i|_d|_ttƒ}|dk r^t|tjj ƒs>t ‚|  ¡D]\}}||  |¡qF|  ¡D]\}}d |¡|jt|ƒ<qfdS)Nrú+)Ú tensor_namerÚlistÚdevice_mappingÚdevice_tensor_statÚname_idxÚ isinstanceÚtorchÚnnÚModuleÚAssertionErrorÚnamed_parametersÚappendÚitemsÚjoinÚid)ÚselfrÚ tensor_namesÚnameÚparamr r rÚ__init__s zMemReporter.__init__©Útensorr cCsNt|ƒ}||jkr|j|}n,t|ƒjt|jƒ}||j|<|jd7_|S)Né)r"rÚtypeÚ__name__Ústrr)r#r)Z tensor_idr%r r rÚ_get_tensor_name0s   zMemReporter._get_tensor_namecCs6t ¡}dd„|Dƒ}|D]}|j|j |¡qdS)a*Collect all tensor objects tracked by python NOTICE: - the buffers for backward which is implemented in C++ are not tracked by python's reference counting. - the gradients(.grad) of Parameters is not collected, and I don't know why. 
cSsg|]}t|tjƒr|‘qSr )rrÚTensor)Ú.0Úobjr r rÚ <listcomp>Fs z.MemReporter.collect_tensor.<locals>.<listcomp>N)ÚgcÚ get_objectsrÚdevicer)r#ÚobjectsÚtensorsÚtr r rÚcollect_tensor;s zMemReporter.collect_tensorcsÖi‰ˆj ¡tjttttttfdœ‡‡‡fdd„ ‰ˆj  ¡D]„\}}g}|D]h}|  ¡dkrdqRˆ|ƒ}||7}t |tj j ƒrR|jdk rRd ˆ |¡¡ˆjt|jƒ<ˆ|jƒ}||7}qR|ˆj|<qBˆj ¡dS)zûGet the memory stat of tensors and then release them As a memory profiler, we cannot hold the reference to any tensors, which causes possibly inaccurate memory usage stats, so we delete the tensors after getting required statsr(c sÊt|tjƒst‚ˆ |¡}|jr@ˆ| ¡ƒ}ˆ| ¡ƒ}||S| ¡}|  ¡}|  ¡  ¡}||}t   |t¡t}|  ¡ ¡} | ˆkr d |ˆ| ¡}d}n|ˆ| <t|  ¡ƒ} | s¼d} || ||fgS)z±Get the stat of a single tensor Returns: - stat: a tuple containing (tensor_name, tensor_size, tensor_numel, tensor_memory) z{}(->{})r)r*)rrr/rr.Ú is_sparseÚ_indicesÚ_valuesÚnumelÚ element_sizeÚstoragerÚmathÚceilÚPYTORCH_MIN_ALLOCATEÚdata_ptrr Útuple) r)r%Z indices_statZ values_statr=r>Z fact_numelZfact_memory_sizeZ memory_sizerCr©Úget_tensor_statr#Z visited_datar rrFSs2     ÿ ş z.MemReporter.get_stats.<locals>.get_tensor_statrNz{}.grad)rÚclearrr/rrr-Úintrr r=rrÚ ParameterÚgradr r.rr")r#r5r7Ú tensor_statsr)Ústatr rErÚ get_statsJs& *,  ÿ   zMemReporter.get_statsF)ÚverboseÚ target_devicer c Csæ|j ¡D]Ö\}}|dk r$||kr$q td |¡ƒd}d}|D] }|\}} } } || 7}|| 7}q>tdtƒtd |t|ƒ¡ƒ|t d¡krÔtj |¡�tj  ¡} W5QRXtd |t| ƒ¡ƒ| |krÔtdƒtdtƒq dS)Nz Storage on {}rú-z"Total Tensors: {} Used Memory: {}ÚcpuzThe allocated memory on {}: {}zOMemory differs due to the matrix alignment or invisible gradient buffer tensors) rr Úprintr ÚLENrrr5ÚcudaÚmemory_allocated) r#rNrOr5rKZ total_memÚ total_numelrLr%rr=ÚmemrUr r rÚ print_stats”s2   ÿÿzMemReporter.print_stats)rNr5r cCs"| ¡| ¡|j||d�dS)a Interface for end-users to directly print the memory usage args: - verbose: flag to show tensor.storage reuse information - device: `torch.device` object, specify the target device to report detailed memory usage. 
It will print memory usage on all devices if not specified. Usually we only want to print the memory usage on CUDA devices. )rON)r9rMrX)r#rNr5r r rÚreport¾s zMemReporter.report)N)FN)FN)r,Ú __module__Ú __qualname__Ú__doc__rrrrr'r/r-r.r9rMÚboolr5rXrYr r r rrs J*r)r@r3Ú collectionsrÚtypingrrrrrr rrHr-rrSrBZPYTORCH_MIN_CACHErr r r rÚ<module>s   
5,594
Python
.pyt
60
88.05
406
0.478292
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,042
dataset_utils.cpython-38.pyc
hkgdifyu_pFedPT/dataset/utils/__pycache__/dataset_utils.cpython-38.pyc
U )Qgcâã@sjddlZddlZddlZddlZddlmZdZdZedeZ dd d „Z dd d „Z d d„Z ddd„Z dS)éN)Útrain_test_splitégè?éFTçš™™™™™¹?c CsÔtj |¡rŒt|dƒ�} t | ¡} W5QRX| d|krŒ| d|krŒ| d|krŒ| d|krŒ| d|krŒ| d|krŒ| dtkrŒtd ƒd Stj |¡} tj | ¡s®t  | ¡tj |¡} tj | ¡sĞt  | ¡d S) NÚrÚ num_clientsÚ num_classesÚnon_iidÚbalanceÚ partitionÚalphaÚ batch_sizez Dataset already generated. TF) ÚosÚpathÚexistsÚopenÚujsonÚloadr ÚprintÚdirnameÚmakedirs) Ú config_pathÚ train_pathÚ test_pathrrÚniidr r r ÚfÚconfigÚdir_path©rõMD:\京东\promot\第二次投稿\å®�验\native\dataset\utils\dataset_utils.pyÚcheck s2    ÿ ş ı ü û ú      r c sÚdd„tˆƒDƒ}dd„tˆƒDƒ} dd„tˆƒDƒ} |\} } i} |sNd}|‰|dk�rğt tt| ƒƒ¡}g}t|ƒD]}| || |k¡qv‡fdd„tˆƒDƒ}t|ƒD�]<}g}tˆƒD]2}||dkrØ| |¡|dtˆ|ˆƒ…}q¾t||ƒ}t|ƒ}||‰|�r0‡fdd„t|d ƒDƒ}n&tj tˆd t |ƒˆ|d ¡  ¡}| |t |ƒ¡d}t ||ƒD]r\}}||   ¡k�r¦|||||…| |<n(tj| ||||||…dd �| |<||7}||d 8<�qvq¬�n|d k�rd}|}t| ƒ‰|t k�ræd d„tˆƒDƒ}t|ƒD]²}t | |k¡d}tj |¡tj t |ˆ¡¡}t ‡‡fdd„t ||ƒDƒ¡}||  ¡}t |¡t|ƒ t¡dd…}dd„t |t ||¡ƒDƒ}tdd„|Dƒƒ}�q.�q tˆƒD]}||| |<�qînt‚tˆƒD]b}| |}| |||<| || |<t | |¡D],}| | t|ƒtt | ||kƒƒf¡�qB�q~tˆƒD]P}td|›dt||ƒ›d�t | |¡ƒtddd„| |Dƒƒtdƒ�q~|| | fS)NcSsg|]}g‘qSrr©Ú.0Ú_rrrÚ <listcomp>&sz!separate_data.<locals>.<listcomp>cSsg|]}g‘qSrrr!rrrr$'scSsg|]}g‘qSrrr!rrrr$(sÚpatcsg|]}ˆ‘qSrrr!)Úclass_per_clientrrr$8srcsg|] }tˆƒ‘qSr)Úintr!)Únum_perrrr$Dsré )ÚaxisÚdircSsg|]}g‘qSrrr!rrrr$Yscs$g|]\}}|t|ƒˆˆk‘qSr©Úlen)r"ÚpÚidx_j)ÚNrrrr$^séÿÿÿÿcSsg|]\}}|| ¡‘qSr)Útolist)r"r/Úidxrrrr$ascSsg|] }t|ƒ‘qSrr,)r"r/rrrr$bszClient z Size of data: z Labels: z Samples of labels: cSsg|]}|‘qSrr)r"Úirrrr$xsz2--------------------------------------------------)ÚrangeÚnpÚarrayr-Úappendr'ÚrandomÚrandintÚmaxÚ least_samplesr2ÚsumÚzipÚkeysÚwhereÚshuffleÚ dirichletÚrepeatÚcumsumÚastypeÚsplitÚminÚNotImplementedErrorÚuniquer) Údatarrrr r r&r ÚXÚyÚ statisticZdataset_contentÚ dataset_labelZ dataidx_mapÚidxsZidx_for_each_classr4Zclass_num_per_clientZselected_clientsÚclientZnum_all_samplesZnum_selected_clientsÚ num_samplesr3Z num_sampleÚmin_sizeÚKZ idx_batchÚkZidx_kZ 
proportionsÚjr)r0r&rr(rÚ separate_data%s€      &(           . * rVc Csgg}}ggdœ}tt|ƒƒD]¬}tj||dd�\}}t|ƒdkrht||||tdd�\}} } } n t||||tdd�\}} } } | || dœ¡|d t| ƒ¡| | | dœ¡|d t| ƒ¡q td t |d|dƒƒtd |dƒtd |dƒtƒ~~||fS) N)ÚtrainÚtestT)Ú return_countsr)Ú train_sizerA)ÚxrLrWrXzTotal number of samples:zThe number of train samples:zThe number of test samples:) r5r-r6rIrGrrZr8rr=) rKrLÚ train_dataÚ test_datarQr4rIÚcountZX_trainZX_testZy_trainZy_testrrrÚ split_data~s6   ÿÿr_c  CsĞ|||| | || tdœ} tdƒt|ƒD]8\} }t|t| ƒddƒ�}tj||d�W5QRXq&t|ƒD]8\} }t|t| ƒddƒ�}tj||d�W5QRXqht|dƒ�}t | |¡W5QRXtdƒdS)N)rrr r r z%Size of samples for labels in clientsr r zSaving to disk. z.npzÚwb)rJÚwzFinish generating dataset. ) r rÚ enumeraterÚstrr6Úsavez_compressedrÚdump)rrrr\r]rrrMrr r r rr3Z train_dictrZ test_dictrrrÚ save_filešs&ø  rf)FTNr)FFNrr)FTNr)rrÚnumpyr6ÚgcZsklearn.model_selectionrr rZr<r rVr_rfrrrrÚ<module>s&  ÿ  Yÿ
5,383
Python
.pyt
63
84.365079
504
0.367224
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,043
dataset_utils.cpython-37.pyc
hkgdifyu_pFedPT/dataset/utils/__pycache__/dataset_utils.cpython-37.pyc
B 4ØccÊã@snddlZddlZddlZddlZddlmZdZdZedeZ dZ dd d „Z dd d „Z d d„Z ddd„ZdS)éN)Útrain_test_splitégè?égš™™™™™¹?FTc CsÔtj |¡rŒt|dƒ�}t |¡} WdQRX| d|krŒ| d|krŒ| d|krŒ| d|krŒ| d|krŒ| dtkrŒ| dtkrŒtd ƒd Stj  |¡} tj | ¡s®t  | ¡tj  |¡} tj | ¡sĞt  | ¡d S) NÚrÚ num_clientsÚ num_classesÚnon_iidÚbalanceÚ partitionÚalphaÚ batch_sizez Dataset already generated. TF) ÚosÚpathÚexistsÚopenÚujsonÚloadr r ÚprintÚdirnameÚmakedirs) Ú config_pathÚ train_pathÚ test_pathrrÚniidr r ÚfÚconfigÚdir_path©rúB/root/autodl-tmp/PFL-Non-IID-master/dataset/utils/dataset_utils.pyÚcheck s&               rc sdd„tˆƒDƒ}dd„tˆƒDƒ}dd„tˆƒDƒ} |\} } i} |sNd}|‰|dk�rt tt| ƒƒ¡} g}x"t|ƒD]}| | | |k¡qxW‡fdd„tˆƒDƒ}�xzt|ƒD�]D}g}x>tˆƒD]2}||dkrâ| |¡|dtˆ|ˆƒ…}qÈWt||ƒ}t|ƒ}||‰|�r<‡fdd„t|d ƒDƒ}n&tj tˆd t |ƒˆ|d ¡  ¡}| |t |ƒ¡d}x€t ||ƒD]r\}}||   ¡k�r´|||||…| |<n(tj| ||||||…dd �| |<||7}||d 8<�q„Wq´W�n$|d k�r"d}|}t| ƒ‰xâ|t k�rşd d„tˆƒDƒ}x¾t|ƒD]²}t | |k¡d}tj |¡tj t tˆ¡¡}t ‡‡fdd„t ||ƒDƒ¡}||  ¡}t |¡t|ƒ t¡dd…}dd„t |t ||¡ƒDƒ}tdd„|Dƒƒ}�qDW�qWx$tˆƒD]}||| |<�q Wnt‚xrtˆƒD]f}| |} | | ||<| | ||<x>t ||¡D],}| | t|ƒtt |||kƒƒf¡�qdW�q0W~x\tˆƒD]P}td|›dt||ƒ›d�t ||¡ƒtddd„| |Dƒƒtdƒ�q¦W||| fS)NcSsg|]}g‘qSrr)Ú.0Ú_rrrú <listcomp>&sz!separate_data.<locals>.<listcomp>cSsg|]}g‘qSrr)r r!rrrr"'scSsg|]}g‘qSrr)r r!rrrr"(sÚpatcsg|]}ˆ‘qSrr)r r!)Úclass_per_clientrrr"8srcsg|] }tˆƒ‘qSr)Úint)r r!)Únum_perrrr"Dsré )ÚaxisÚdircSsg|]}g‘qSrr)r r!rrrr"Yscs$g|]\}}|t|ƒˆˆk‘qSr)Úlen)r ÚpÚidx_j)ÚNrrrr"^séÿÿÿÿcSsg|]\}}|| ¡‘qSr)Útolist)r r,Úidxrrrr"ascSsg|] }t|ƒ‘qSr)r*)r r,rrrr"bszClient z Size of data: z Labels: z Samples of labels: cSsg|]}|‘qSrr)r Úirrrr"xsz2--------------------------------------------------)ÚrangeÚnpÚarrayr*Úappendr%ÚrandomÚrandintÚmaxÚ least_samplesr/ÚsumÚzipÚkeysÚwhereÚshuffleÚ dirichletÚrepeatr ÚcumsumÚastypeÚsplitÚminÚNotImplementedErrorÚuniquer)Údatarrrr r r$ÚXÚyÚ statisticZdataset_contentÚ dataset_labelZ dataidx_mapÚidxsZidx_for_each_classr1Zclass_num_per_clientZselected_clientsÚclientZnum_all_samplesZnum_selected_clientsÚ num_samplesr0Z 
num_sampleÚmin_sizeÚKZ idx_batchÚkZidx_kZ proportionsÚjr)r-r$rr&rÚ separate_data%s€    &(        2*rSc Csgg}}ggdœ}x¼tt|ƒƒD]¬}tj||dd�\}}t|ƒdkrjt||||tdd�\}} } } n t||||tdd�\}} } } | || dœ¡|d t| ƒ¡| | | dœ¡|d t| ƒ¡q"Wtd t |d|dƒƒtd |dƒtd |dƒtƒ~~||fS) N)ÚtrainÚtestT)Ú return_countsr)Ú train_sizer>)ÚxrIrTrUzTotal number of samples:zThe number of train samples:zThe number of test samples:) r2r*r3rFrDrrWr5rr:) rHrIÚ train_dataÚ test_datarNr1rFÚcountZX_trainZX_testZy_trainZy_testrrrÚ split_data~s&    r\c  CsØ|||| | |ttdœ} tdƒxDt|ƒD]8\} } t|t| ƒddƒ�}tj|| d�WdQRXq(WxDt|ƒD]8\} }t|t| ƒddƒ�}tj||d�WdQRXqnWt|dƒ�}t  | |¡WdQRXtdƒdS)N)rrrr r z%Size of samples for labels in clientsr r zSaving to disk. z.npzÚwb)rGÚwzFinish generating dataset. ) r r rÚ enumeraterÚstrr3Úsavez_compressedrÚdump)rrrrYrZrrrJrr r rr0Z train_dictrZ test_dictrrrÚ save_filešs$ rc)FTN)FFNr)FTN)r rÚnumpyr3ÚgcZsklearn.model_selectionrr rWr9r rrSr\rcrrrrÚ<module>s    Y
5,332
Python
.pyt
54
97.592593
489
0.37526
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,044
backend_main.py
OSSQA-PUM_OSSQA/src/backend_main.py
"""Backend entry point for the application.

Builds the Flask app via ``backend.server.create_app`` and serves it on
port 5090, accepting connections from any network interface.

Usage:
    python backend_main.py
"""
from backend.server import create_app

if __name__ == "__main__":
    create_app().run(port=5090, host='0.0.0.0')
419
Python
.py
12
32.583333
73
0.727047
OSSQA-PUM/OSSQA
8
1
2
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,045
web_main.py
OSSQA-PUM_OSSQA/src/web_main.py
"""Entry point for running the web application.

Used by the docker-compose file to start the web application; when
executed as a script it delegates to ``main.frontend.web_api.run``.

Usage:
    python web_main.py
"""
import main.frontend.web_api as web_api

if __name__ == "__main__":
    web_api.run()
378
Python
.py
11
32.181818
76
0.748619
OSSQA-PUM/OSSQA
8
1
2
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,046
test_backend_main.py
OSSQA-PUM_OSSQA/src/test_backend_main.py
"""Start a test server for the backend application.

Usage:
    python test_backend_main.py

The server listens on port 5091 by default.
"""
import backend.server as server

if __name__ == "__main__":
    test_app = server.create_test_app()
    test_app.run(port=5091)
271
Python
.py
10
24.5
61
0.715953
OSSQA-PUM/OSSQA
8
1
2
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,047
cli_main.py
OSSQA-PUM_OSSQA/src/cli_main.py
""" The entry point of the CLI. Example usage: python cli_main.py -a -p ../sboms/example-SBOM.json -g [YOUR_GIT_TOKEN] """ from main.frontend.cli import ossqa_cli if __name__ == "__main__": ossqa_cli()
213
Python
.py
8
24.25
75
0.663366
OSSQA-PUM/OSSQA
8
1
2
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,048
util.py
OSSQA-PUM_OSSQA/src/main/util.py
""" This module contains utility functions for the main module. Functions: - get_git_sha1: Gets the SHA1 hash for a version of a dependency. - is_valid_sha1: Check the validity of a sha1 string. """ import re import os import datetime from math import inf from time import time import requests class TokenLimitExceededError(Exception): """ Exception raised when the GitHub API rate limit is exceeded. """ reset_time: int message: str @property def reset_datetime(self) -> str: """ Returns: str: The date and time at which the rate limit will be reset. """ reset_datetime = datetime.datetime.fromtimestamp(int(self.reset_time)) reset_datetime = reset_datetime.strftime("%Y-%m-%d %H:%M:%S") return reset_datetime @property def time_to_wait(self) -> float: """ Returns: float: The time in seconds remaining until the rate limit is reset. """ return self.reset_time - time() def __init__(self, reset_time: str): """ Initializes the TokenLimitExceededError. Args: reset_time (str): The time at which the rate limit will be reset. Raises: ValueError: If the reset time is not a valid integer. """ self.reset_time = int(reset_time) self.message = (f"GitHub API rate limit exceeded. Try again later. " f"Rate limit resets at {reset_time}.") super().__init__(reset_time) def __str__(self) -> str: return self.message class Sha1NotFoundError(Exception): """ Exception raised when the SHA1 hash for a version of a dependency is not found. """ message: str def __init__(self, message: str = "SHA1 hash not found."): super().__init__(message) def get_token_data() -> dict: """ Returns: dict: A dictionary containing the user's GitHub API token data Raises: requests.ConnectionError: If the request to the GitHub API is unsuccessful. 
""" token = get_github_token() url = 'https://api.github.com/rate_limit' # Make a GET request to the GitHub API with your token for authentication headers = {'Authorization': f'token {token}'} response = requests.get(url, headers=headers, timeout=5) # Check if the request was successful if response.status_code == 200: user_data = response.headers return { "limit": int(user_data['X-RateLimit-Limit']), "used": int(user_data['x-ratelimit-used']), "remaining": int(user_data['X-RateLimit-Remaining']), "reset_time": int(user_data["X-RateLimit-Reset"]) } raise requests.ConnectionError( f"Failed to authenticate token. Status code: {response.status_code}" ) def get_git_sha1(git_url: str, version: str) -> str: """ Get the SHA1 hash for a version of a dependency. Args: git_url (str): The URL of the GitHub repository. version (str): The version of the dependency. Returns: str: The SHA1 hash of the dependency version. Raises: ValueError: If the GitHub authentication token is not found in the environment. ConnectionRefusedError: If the request to the GitHub API is unsuccessful. TokenLimitExceededError: If the GitHub API rate limit is exceeded. AssertionError: If the found SHA1 hash is not valid. Sha1NotFoundError: If the SHA1 hash for the dependency version is not found. """ # Get the GitHub authentication token token = get_github_token() headers = {'Authorization': f'token {token}'} if token else {} # Check that the release version exists url = f"https://api.github.com/repos/{git_url}/git/matching-refs/tags" response = requests.get(url, headers=headers, timeout=10) # Check if token is depleted if response.status_code == 403: token_data = get_token_data() raise TokenLimitExceededError(token_data["reset_time"]) # Check if the request was successful if response.status_code != 200: raise ConnectionRefusedError( f"Failed to get tags. 
Status code: {response.status_code}" ) # Filter out unwanted characters from the version original_version = version version.strip("-").strip("v").strip("@40") # Get the SHA1 hash for the version response_content: list[dict] = response.json() if not response_content: raise Sha1NotFoundError( f"SHA1 hash not found for version {original_version}. " "No tags found for repo.") # Sort the tags by version number for tag in response_content: tag_name: str = tag["ref"] tag_name: str = (tag_name.strip("refs/tags/").strip("-") .strip("v").strip("@40")) tag["ref"] = tag_name tag_digits = "".join([c for c in tag_name if c.isdigit()]) if not tag_digits: tag_digits = inf else: tag_digits = int(tag_digits) tag["tag_digits"] = tag_digits response_content = sorted(response_content, key=lambda x: x["tag_digits"], reverse=True) # Get the SHA1 hash for the version version_digits = "".join([c for c in version if c.isdigit()]) if version_digits: version_digits = int(version_digits) result_sha1 = "" for tag in response_content: tag_name: str = tag["ref"] tag_sha: str = tag["object"]["sha"] # Check if the tag name contains the version if version in tag_name: result_sha1 = tag_sha break if not version_digits: continue tag_digits = tag["tag_digits"] # Check if the tag version is less than or equal to the required # version if tag_digits <= version_digits: result_sha1 = tag_sha break if result_sha1: assert is_valid_sha1(result_sha1) return result_sha1 raise Sha1NotFoundError( f"SHA1 hash not found for version {original_version}.") def is_valid_sha1(sha1_str: str) -> bool: """ Check the validity of a sha1 string. Args: sha1_str (str): The SHA1 string to validate. Returns: bool: True if the SHA1 string is valid, False otherwise. """ if not re.match('^[0-9A-Fa-f]{40}$', sha1_str): return False return True def get_github_token() -> str: """ Gets the GitHub authentication token from the environment. Returns: str: The GitHub authentication token. 
Raises: ValueError: If the GitHub authentication token is not found in the environment. """ token = os.environ.get('GITHUB_AUTH_TOKEN') if not token: raise ValueError( "GitHub authentication token not found in environment" ) return token
7,005
Python
.py
188
29.601064
79
0.629345
OSSQA-PUM/OSSQA
8
1
2
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,049
constants.py
OSSQA-PUM_OSSQA/src/main/constants.py
""" This module contains constants used in the application. Attributes: HOST (str): The host URL for the application. """ HOST = "http://host.docker.internal:5090"
170
Python
.py
6
26.333333
55
0.746914
OSSQA-PUM/OSSQA
8
1
2
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,050
sbom_processor.py
OSSQA-PUM_OSSQA/src/main/sbom_processor.py
""" This module contains the SBOM processor class, which is responsible for processing SBOMs. Classes: - SbomProcessor: Represents an SBOM processor that defines the process of analyzing SBOMs. - SbomProcessorStatus: Represents the status of the SBOM processor. """ from enum import StrEnum from dataclasses import dataclass from main.data_types.sbom_types.sbom import Sbom from main.data_types.sbom_types.dependency import Dependency from main.data_types.event import Event from main.data_types.dependency_scorer import StepResponse from main.data_types.dependency_scorer import (SSFAPIFetcher, DependencyScorer, ScorecardAnalyzer) from main.backend_communication import BackendCommunication class SbomProcessorStates(StrEnum): """ Represents the status of a job. """ INITIALIZING = "Initializing" INACTIVE = "Inactive" VALIDATING = "Validating" PARSING = "Parsing" FETCH_DATABASE = "Fetching from Database" SSF_LOOKUP = "SSF Lookup" ANALYZING_SCORE = "Analyzing Score" FINAL_SCORE = "Final Score" COMPLETED = "Completed" FAILED = "Failed" CANCELLED = "Cancelled" UPLOADING_SCORES = "Uploading Scores" @dataclass class SbomProcessorStatus: """ Represents the status of an SBOM processor. """ current_state: str step_response: StepResponse = None class SbomProcessor: """ Represents an SBOM processor that defines the process of analyzing SBOMs. """ on_status_update: Event[SbomProcessorStatus] sbom_processor_status: SbomProcessorStatus def __init__(self, backend_host: str): """ Initializes an SBOM processor. Args: backend_host (str): The host of the backend. """ self.on_status_update = Event[SbomProcessorStatus]() self.sbom_processor_status = SbomProcessorStatus( SbomProcessorStates.INITIALIZING ) self.backend_communication = BackendCommunication( self._event_callback, backend_host ) def _event_callback(self, step_response: StepResponse) -> None: """ Callback function for the event. Args: step_response (StepResponse): The response from the step. 
""" if step_response == self.sbom_processor_status.step_response: return self.sbom_processor_status.step_response = step_response self.on_status_update.invoke(self.sbom_processor_status) def _set_event_start_state(self, state: SbomProcessorStates, step_response: StepResponse = None) -> None: """ Sets the event start state. Args: state (SbomProcessorStates): The state to set. step_response (StepResponse): The response from the step. """ if (state == self.sbom_processor_status.current_state and step_response == self.sbom_processor_status.step_response): return if step_response is None: step_response = StepResponse(0, 0, 0, 0, state.value) self.sbom_processor_status.step_response = step_response self.sbom_processor_status.current_state = state self.on_status_update.invoke(self.sbom_processor_status) def _run_dependency_scorer( self, sbom: Sbom, dependency_scorer: DependencyScorer, state: SbomProcessorStates ) -> None: """ Runs a dependency scorer. Args: sbom (Sbom): The SBOM to analyze. dependency_scorer (DependencyScorer): The dependency scorer to run. state (SbomProcessorStates): The state to set. """ unscored: list[Dependency] = sbom.get_dependencies_by_filter( lambda dependency: not dependency.scorecard ) if not unscored: return self._set_event_start_state( state, StepResponse( len(unscored), 0, 0, 0, state.value) ) new_dependencies = dependency_scorer.score( unscored ) sbom.dependency_manager.update(new_dependencies) step_response = self.sbom_processor_status.step_response if step_response.batch_size != step_response.completed_items: step_response.completed_items = step_response.batch_size self._set_event_start_state(state, step_response) def analyze_sbom(self, sbom: Sbom) -> Sbom: """ Analyzes the given SBOM (Software Bill of Materials) by running various dependency scorers and updating the scores in the database. Parameters: sbom (Sbom): The SBOM to be analyzed. Returns: Sbom: The analyzed SBOM with updated scores. """ # 1. 
Get score from BackendScorer self._run_dependency_scorer( sbom, self.backend_communication.backend_fetcher, SbomProcessorStates.FETCH_DATABASE ) # 2. Get score from SSFAPIScorer self._run_dependency_scorer( sbom, SSFAPIFetcher(self._event_callback), SbomProcessorStates.SSF_LOOKUP ) # 3. Get score from ScorecardAnalyzer self._run_dependency_scorer( sbom, ScorecardAnalyzer(self._event_callback), SbomProcessorStates.ANALYZING_SCORE ) # 4. Update database with new scores self._set_event_start_state( SbomProcessorStates.UPLOADING_SCORES ) self.backend_communication.add_sbom(sbom) # 5. Return the analyzed SBOM batch_size = len(sbom.get_dependencies_by_filter(lambda x: True)) completed_items = batch_size success_count = len(sbom.get_scored_dependencies()) failed_count = batch_size - success_count self._set_event_start_state( SbomProcessorStates.COMPLETED, StepResponse( batch_size, completed_items, success_count, failed_count, SbomProcessorStates.COMPLETED.value ) ) return sbom def lookup_stored_sboms(self) -> list[str]: """ Looks up stored SBOMs in database Returns: list[str]: The list of the SBOM names """ # Look up stored SBOMs self._set_event_start_state(SbomProcessorStates.FETCH_DATABASE) sbom_names = self.backend_communication.get_sbom_names() self._set_event_start_state(SbomProcessorStates.COMPLETED) return sbom_names def lookup_previous_sboms(self, name: str) -> list[Sbom]: """ Looks up previously analyzed SBOMs. Args: name (str): The name of the SBOM to look up. Returns: list[Sbom]: The list of the SBOMs with the same name. """ self._set_event_start_state(SbomProcessorStates.FETCH_DATABASE) sboms = self.backend_communication.get_sboms_by_name(name) self._set_event_start_state(SbomProcessorStates.COMPLETED) return sboms
7,265
Python
.py
185
29.648649
79
0.632482
OSSQA-PUM/OSSQA
8
1
2
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,051
backend_communication.py
OSSQA-PUM_OSSQA/src/main/backend_communication.py
""" This module contains functions for communicating with the database. Functions: - add_sbom: Adds an SBOM, its dependencies, and their scores to the database. - get_sboms_by_name: Gets all versions of a SBOM:s with a specific name. - get_sbom_names: Returns the names of all the SBOMs in the database. - get_existing_dependencies: Gets saved dependencies from the database. """ from typing import Any, Callable import requests from main.data_types.sbom_types.dependency import Dependency from main.data_types.sbom_types.sbom import Sbom from main.data_types.dependency_scorer import DependencyScorer, StepResponse from main.data_types.event import Event class BackendCommunication: """ Represents backend communication. Can add SBOMs to the database and get SBOMs from the database. Will also inform user about the status of the operation. Attributes: on_status_changed (Event[StepResponse]): The event for status changes. backend_fetcher (DependencyScorer): The backend fetcher. Args: callback (Callable[[StepResponse], Any]): The callback function. host (str): The host of the backend. """ on_status_changed: Event[StepResponse] backend_fetcher: DependencyScorer def __init__(self, callback: Callable[[StepResponse], Any], host: str) \ -> None: self.on_status_changed = Event[StepResponse]() self.on_status_changed.subscribe(callback) self.backend_fetcher = BackendFetcher(callback, host) self.host = host.rstrip("/") def add_sbom(self, sbom: Sbom) -> None: """ Adds an SBOM, its dependencies, and their scores to the database. Args: sbom (Sbom): The SBOM to add to the database. 
""" try: resp = requests.post( self.host + "/sbom", json=sbom.to_dict(), timeout=5 ) if resp.status_code == 500: self.on_status_changed.invoke( StepResponse(0, 0, 0, 0, "The sbom could not be uploaded") ) except (requests.exceptions.Timeout, requests.exceptions.ConnectionError): # Tell the user that the request timed out self.on_status_changed.invoke( StepResponse(0, 0, 0, 0, "The request timed out") ) def get_sboms_by_name(self, name: str) -> list[Sbom]: """ Gets all versions of a SBOM:s with a specific name. Args: name (str): The name of the SBOM:s. Returns: list[Sbom]: A list containing the SBOM:s. """ try: response = requests.get(self.host + f"/sbom/{name}", timeout=5) except requests.exceptions.Timeout: # Tell the user that the request timed out self.on_status_changed.invoke( StepResponse(0, 0, 0, 0, "The request timed out") ) result: list[Sbom] = [] if response.status_code != 200: self.on_status_changed.invoke( StepResponse(0, 0, 0, 0, "An error occurred in the database") ) return result for sbom in response.json(): print("before") sbom_obj = Sbom(sbom=sbom) print("after") result.append(sbom_obj) return result def get_sbom_names(self) -> list[str]: """ Returns the names of all the SBOMs in the database. Returns: list[str]: A list containing the names of all SBOM:s """ try: response = requests.get(self.host + "/sbom", timeout=5) except requests.exceptions.Timeout: # Tell the user that the request timed out self.on_status_changed.invoke( StepResponse(0, 0, 0, 0, "The request timed out") ) if response.status_code != 200: self.on_status_changed.invoke( StepResponse(0, 0, 0, 0, "An error occurred in the database") ) return [] return response.json() class BackendFetcher(DependencyScorer): """ Represents a backend fetcher """ def __init__(self, callback: Callable[[StepResponse], Any], host: str) \ -> None: """ Initializes the backend fetcher. Args: callback (Callable[[StepResponse], Any]): The callback function. host (str): The host of the backend. 
""" super().__init__(callback) self.host = host.rstrip("/") def score(self, dependencies: list[Dependency]) -> list[Dependency]: """ Scores a list of dependencies by fetching the scores from the backend. Args: dependencies (list[Dependency]): The dependencies to score. Returns: list[Dependency]: The scored dependencies. """ new_dependencies = self._get_existing_dependencies(dependencies) return new_dependencies def _get_existing_dependencies(self, dependencies: list[Dependency]) \ -> list[Dependency]: """ Gets saved dependencies from the database Args: dependencies (list[Dependency]): The dependencies to check Returns: list[Dependency]: The existing dependencies in the database """ batch_size = len(dependencies) failed_count = 0 success_count = 0 dep_dicts = [] current_step = StepResponse( batch_size, 0, 0, 0, "Fetching existing dependencies") self.on_step_complete.invoke(current_step) for dependency in dependencies: try: dep_dict = dependency.to_dict() except (KeyError, ValueError): failed_count += 1 current_step = StepResponse( batch_size, len(dep_dicts) + failed_count, success_count, failed_count, "Failed to convert dependency to dictionary") self.on_step_complete.invoke(current_step) continue dep_dicts.append(dep_dict) failure_message: str = "" try: response = requests.get(self.host + "/dependency/existing", json=dep_dicts, timeout=5 ) except requests.exceptions.Timeout: # Tell the user that the request timed out failure_message = "The request timed out" except TypeError: # Tell the user that the response was not JSON failure_message = "The response was not JSON" except requests.exceptions.ConnectionError: # Tell the user that the connection was refused failure_message = "The connection was refused" new_dependencies = [] if failure_message: failed_count = batch_size current_step = StepResponse( batch_size, batch_size, success_count, failed_count, failure_message) self.on_step_complete.invoke(current_step) return new_dependencies if not response or 
response.status_code != 200: failed_count = batch_size current_step = StepResponse( batch_size, batch_size, success_count, failed_count, "Failed to fetch existing dependencies") self.on_step_complete.invoke(current_step) return new_dependencies if not response.json(): failed_count = batch_size current_step = StepResponse( batch_size, batch_size, success_count, failed_count, "No existing dependencies found") self.on_step_complete.invoke(current_step) return new_dependencies failed_count = batch_size - len(response.json()) for dependency_component in response.json(): dep_obj = Dependency(dependency_component) new_dependencies.append(dep_obj) success_count += 1 current_step = StepResponse( batch_size, success_count + failed_count, success_count, failed_count, "Found existing dependency") self.on_step_complete.invoke(current_step) return new_dependencies
8,467
Python
.py
203
30.600985
78
0.592854
OSSQA-PUM/OSSQA
8
1
2
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,052
front_end_api.py
OSSQA-PUM_OSSQA/src/main/frontend/front_end_api.py
""" Front-end API for interacting with the SBOM processor and performing various operations such as analyzing SBOMs, looking up stored SBOMs, and retrieving previous SBOMs by name. """ import copy from typing import Callable, Any from main.data_types.sbom_types.sbom import Sbom from main.data_types.user_requirements import UserRequirements from main.data_types.event import Event from main.sbom_processor import SbomProcessor, SbomProcessorStatus from main.frontend.dependency_grader import grade_dependencies class FrontEndAPI: """ Represents a front-end API for interacting with the SBOM processor and performing various operations such as analyzing SBOMs, looking up stored SBOMs, and retrieving previous SBOMs by name. """ sbom_processor: SbomProcessor on_sbom_processor_status_update: Event[SbomProcessorStatus] def __init__(self, backend_host: str): """ Initializes a front-end API. """ self.sbom_processor = SbomProcessor(backend_host) self.on_sbom_processor_status_update = Event[SbomProcessorStatus]() self.sbom_processor.on_status_update.subscribe( self._update_sbom_processor_status) def _update_sbom_processor_status( self, sbom_processor_status: SbomProcessorStatus ) -> None: """ Invokes the SBOM processor status update event when the - SBOM processor status is updated. Args: sbom_processor_status (SbomProcessorStatus): The SBOM processor status. """ self.on_sbom_processor_status_update.invoke(sbom_processor_status) def subscribe_to_state_change( self, callback: Callable[[SbomProcessorStatus], Any] ) -> None: """ Subscribes to state change events. Args: callback: The callback function to be called on state change. """ self.on_sbom_processor_status_update.subscribe(callback) def analyze_sbom(self, sbom: Sbom, user_requirements: UserRequirements) -> Sbom: """ Analyzes an SBOM. Args: sbom (Sbom): The SBOM to analyze. user_requirements (UserRequirements): The user requirements. 
Returns: Sbom: The analyzed SBOM """ assert isinstance(sbom, Sbom), "sbom must be of type Sbom" assert isinstance(user_requirements, UserRequirements), \ "user_requirements must be of type UserRequirements" sbom_copy: Sbom = copy.deepcopy(sbom) sbom_copy = self.sbom_processor.analyze_sbom(sbom_copy) scored_sbom: Sbom = grade_dependencies(sbom_copy, user_requirements) return scored_sbom def lookup_stored_sboms(self) -> list[str]: """ Looks up the names of stored SBOMs. Returns: list[str]: The list of names of stored SBOMs. """ return self.sbom_processor.lookup_stored_sboms() def lookup_previous_sboms(self, name: str) -> list[Sbom]: """ Looks up previous SBOMs with the specified name. Args: name (str): The name of the SBOM. Returns: list[Sbom]: The list of SBOMs with the specified name. """ return self.sbom_processor.lookup_previous_sboms(name)
3,399
Python
.py
83
32.240964
76
0.656155
OSSQA-PUM/OSSQA
8
1
2
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,053
cli.py
OSSQA-PUM_OSSQA/src/main/frontend/cli.py
""" This script defines a command-line argument parser for the Open Source Security and Quality Assessment (OSSQA) program. The script uses the argparse module to define and parse command-line arguments for two commands: analyze and lookup. The analyze command takes several arguments including the file path to the SBOM JSON file, user requirements for the software, git token, output type, and verbosity. The lookup command takes the ID of the SBOM as an argument. The script also includes helper functions to parse and validate the arguments. """ import json import os from pathlib import Path from time import sleep import click import requests import validators import tqdm from tabulate import tabulate from main import constants from main.data_types.sbom_types.dependency import Dependency from main.data_types.sbom_types.sbom import Sbom from main.data_types.user_requirements import (RequirementsType, UserRequirements) from main.frontend.front_end_api import FrontEndAPI from main.sbom_processor import SbomProcessorStatus, SbomProcessorStates from main.data_types.dependency_scorer import StepResponse _progress_bar: tqdm.tqdm = None # Disable tqdm progress bar def calculate_mean_score(dependency: Dependency, decimals: int = 1) -> float: """ Calculate the mean score of a dependency. Args: dependency (Dependency): The dependency to calculate the mean score for. decimals (int): The number of decimals to round the mean score to. Returns: float: The mean score of the dependency. """ mean_score = 0 for dep_score in dependency.scorecard.checks: mean_score += dep_score.score mean_score /= len(dependency.scorecard.checks) mean_score = round(mean_score, decimals) return mean_score def calculate_mean_scores(dependencies: list[Dependency]) -> \ list[list[Dependency, float]]: """ Calculate the mean scores of the dependencies. Args: dependencies (list[Dependency]): The dependencies to calculate the mean scores for. 
Returns: list[list[Dependency, float]]: A list of lists containing the dependency and the mean score. """ mean_scores: list = [] for dependency in dependencies: mean_score = calculate_mean_score(dependency) dep_result = [dependency.component_name, mean_score, dependency.reach_requirement] mean_scores.append(dep_result) return mean_scores def color_score(name: str, score: float, requirement: str) -> \ list[str, str, str]: """ Color the score based on the value. Args: name (str): The name of the dependency. score (float): The score of the dependency. Returns: list[str, str]: A list containing the dependency name and the colored score. """ if requirement == "No" or requirement == "Test result not found": return [f"\033[91m{name}\033[0m", f"\033[91m{score}\033[0m", f"\033[91m{requirement}\033[0m"] if score >= 7: return [f"\033[92m{name}\033[0m", f"\033[92m{score}\033[0m", f"\033[92m{requirement}\033[0m"] if score >= 3: return [f"\033[93m{name}\033[0m", f"\033[93m{score}\033[0m", f"\033[93m{requirement}\033[0m"] return [f"\033[91m{name}\033[0m", f"\033[91m{score}\033[0m", f"\033[91m{requirement}\033[0m"] def color_scores(scores: list[list[Dependency, float, str]]) -> \ list[list[str, str, str]]: """ Color the scores based on the values. Args: scores (list[list[Dependency, float]]): The dependency scores. Returns: list[list[str, str]]: A list of lists containing the colored dependency name and score. """ colored_scores: list = [] for score in scores: colored_score = color_score(score[0], score[1], score[2]) colored_scores.append(colored_score) return colored_scores def parse_requirements(**kwargs) -> UserRequirements: """ Parses the requirements from the arguments. Args: **kwargs (Any): The arguments. Returns: UserRequirements: The parsed user requirements. 
""" vulnerabilities: int = kwargs.get("vulnerabilities") dependency_update_tool: int = kwargs.get("dependency_update_tool") maintained: int = kwargs.get("maintained") security_policy: int = kwargs.get("security_policy") license: int = kwargs.get("license") cii_best_practices: int = kwargs.get("cii_best_practices") ci_tests: int = kwargs.get("ci_tests") fuzzing: int = kwargs.get("fuzzing") sast: int = kwargs.get("sast") binary_artifacts: int = kwargs.get("binary_artifacts") branch_protection: int = kwargs.get("branch_protection") dangerous_workflow: int = kwargs.get("dangerous_workflow") code_review: int = kwargs.get("code_review") contributors: int = kwargs.get("contributors") pinned_dependencies: int = kwargs.get("pinned_dependencies") token_permissions: int = kwargs.get("token_permissions") packaging: int = kwargs.get("packaging") signed_releases: int = kwargs.get("signed_releases") return UserRequirements({ RequirementsType.VULNERABILITIES: vulnerabilities, RequirementsType.DEPENDENCY_UPDATE_TOOL: dependency_update_tool, RequirementsType.MAINTAINED: maintained, RequirementsType.SECURITY_POLICY: security_policy, RequirementsType.LICENSE: license, RequirementsType.CII_BEST_PRACTICES: cii_best_practices, RequirementsType.CI_TESTS: ci_tests, RequirementsType.FUZZING: fuzzing, RequirementsType.SAST: sast, RequirementsType.BINARY_ARTIFACTS: binary_artifacts, RequirementsType.BRANCH_PROTECTION: branch_protection, RequirementsType.DANGEROUS_WORKFLOW: dangerous_workflow, RequirementsType.CODE_REVIEW: code_review, RequirementsType.CONTRIBUTORS: contributors, RequirementsType.PINNED_DEPENDENCIES: pinned_dependencies, RequirementsType.TOKEN_PERMISSIONS: token_permissions, RequirementsType.PACKAGING: packaging, RequirementsType.SIGNED_RELEASES: signed_releases }) def table_output(scored_sbom: Sbom): """ Print the output in a table format. Args: scored_sbom (Sbom): The scored SBOM. 
""" scored_deps = scored_sbom.dependency_manager.get_scored_dependencies() failed_deps = scored_sbom.dependency_manager.get_failed_dependencies() mean_scores = calculate_mean_scores(scored_deps) mean_scores = sorted(mean_scores, key=lambda x: x[1]) mean_scores = color_scores(mean_scores) failed_deps = [ [dep.component_name, dep.failure_reason] for dep in failed_deps ] print( tabulate( mean_scores, headers=[ "Successful Dependencies", "Average Score", "Meet requirements?" ] ) ) print( tabulate( failed_deps, headers=["Failed Dependencies", "Failure Reason"] ) ) def json_output(scored_sbom: Sbom): """ Print the output in a JSON format. Args: scored_sbom (Sbom): The scored SBOM. """ deps_dict = scored_sbom.dependency_manager.to_dict() print(json.dumps(deps_dict)) def simplified_output(scored_sbom: Sbom): """ Print the output in a simplified format. Args: scored_sbom (Sbom): The scored SBOM. """ scored_deps = scored_sbom.dependency_manager.get_scored_dependencies() failed_deps = scored_sbom.dependency_manager.get_failed_dependencies() mean_scores = calculate_mean_scores(scored_deps) mean_scores = sorted(mean_scores, key=lambda x: x[1]) failed_deps = [[dep.component_name, dep.failure_reason] for dep in failed_deps] print("Successful dependencies:") for dep in mean_scores: print(f"{dep[0]},{dep[1]},{dep[2]}") print("\nFailed dependencies:") for dep in failed_deps: print(f"{dep[0]},{dep[1]}") def validate_backend(_ctx, _param, value: str): """ Validates that the backend URL is a valid URL. """ value = value.replace("localhost", "127.0.0.1") validation_res = validators.url(value) if isinstance(validation_res, validators.ValidationError): print(validation_res) raise click.BadParameter("Invalid backend URL.") else: return value def validate_git_token(_ctx, _param, value: str): """ Validates that a GitHub Personal Access Token is valid. 
""" if not value: raise click.BadParameter("Please set environment variable " "'GITHUB_AUTH_TOKEN' or provide a " "non-empty string.") url = "https://api.github.com/user" headers = {"Authorization": f"Bearer {value}"} response = requests.get(url, headers=headers, timeout=5) match response.status_code: case 401: raise click.BadParameter("Your GitHub token could not be " "authenticated (HTTP 401).") case _: return value def print_analysis_summary(step_response: StepResponse): """ Print the analysis summary. """ print("Analysis summary:") print(f"Completed items: {step_response.completed_items}") print(f"Success count: {step_response.successful_items}") print(f"Failed count: {step_response.failed_items}") def on_analysis_status_change(current_status: SbomProcessorStatus): """ Callback function for the status change event. """ global _progress_bar if current_status.current_state == SbomProcessorStates.COMPLETED: if _progress_bar: _progress_bar.refresh() _progress_bar.close() _progress_bar = None return step_response: StepResponse = current_status.step_response if (step_response and step_response.message and "Token limit reached" in step_response.message): _progress_bar.close() _progress_bar = None print(step_response.message) if (_progress_bar and (_progress_bar.desc != current_status.current_state or step_response is None)): _progress_bar.close() _progress_bar = None if (step_response is None or step_response.batch_size == 0): print(f"{current_status.current_state}...") return if _progress_bar is None: _progress_bar = tqdm.tqdm(total=step_response.batch_size, desc=current_status.current_state) _progress_bar.n = step_response.completed_items _progress_bar.refresh() if step_response.completed_items == step_response.batch_size: _progress_bar.close() _progress_bar = None @click.group(context_settings={"max_content_width": 120, "show_default": True}) def ossqa_cli(): """ The entry point of the program. 
""" pass @ossqa_cli.command(help="Analyze the given SBOM.") @click.argument("path", required=True, type=click.Path(exists=True, dir_okay=False, path_type=Path)) @click.option("-g", "--git-token", type=str, envvar="GITHUB_AUTH_TOKEN", callback=validate_git_token, help=("GitHub Personal Access Token." " [default: GITHUB_AUTH_TOKEN env variable]")) @click.option("-vu", "--vulnerabilities", type=click.IntRange(-1, 10), required=False, default=-1, help="Requirement for vulnerabilities.") @click.option("-dut", "--dependency-update-tool", type=click.IntRange(-1, 10), required=False, default=-1, help="Requirement for dependency update tool.") @click.option("-m", "--maintained", type=click.IntRange(-1, 10), required=False, default=-1, help="Requirement for maintained.") @click.option("-sp", "--security-policy", type=click.IntRange(-1, 10), required=False, default=-1, help="Requirement for security policy.") @click.option("-l", "--license", type=click.IntRange(-1, 10), required=False, default=-1, help="Requirements for license.") @click.option("-cbp", "--cii-best-practices", type=click.IntRange(-1, 10), required=False, default=-1, help="Requirement for CII best practices.") @click.option("-ct", "--ci-tests", type=click.IntRange(-1, 10), required=False, default=-1, help="Requirement for CI tests.") @click.option("-f", "--fuzzing", type=click.IntRange(-1, 10), required=False, default=-1, help="Requirement for fuzzing.") @click.option("-s", "--sast", type=click.IntRange(-1, 10), required=False, default=-1, help="Requirement for SAST.") @click.option("-ba", "--binary-artifacts", type=click.IntRange(-1, 10), required=False, default=-1, help="Requirement for binary artifacts.") @click.option("-bp", "--branch-protection", type=click.IntRange(-1, 10), required=False, default=-1, help="Requirement for branch protection.") @click.option("-dw", "--dangerous-workflow", type=click.IntRange(-1, 10), required=False, default=-1, help="Requirement for dangerous workflow.") 
@click.option("-cr", "--code-review", type=click.IntRange(-1, 10), required=False, default=-1, help="Requirement for code review.") @click.option("-c", "--contributors", type=click.IntRange(-1, 10), required=False, default=-1, help="Requirement for contributors.") @click.option("-pd", "--pinned-dependencies", type=click.IntRange(-1, 10), required=False, default=-1, help="Requirement for pinned dependencies.") @click.option("-tp", "--token-permissions", type=click.IntRange(-1, 10), required=False, default=-1, help="Requirement for contributors.") @click.option("-p", "--packaging", type=click.IntRange(-1, 10), required=False, default=-1, help="Requirement for packaging.") @click.option("-sr", "--signed-releases", type=click.IntRange(-1, 10), required=False, default=-1, help="Requirement for signed releases.") @click.option("-b", "--backend", type=str, callback=validate_backend, default=constants.HOST, help="URL of the database server.") @click.option("-o", "--output", type=click.Choice(["table", "simplified", "json"]), required=False, default="table", help="Output format.") @click.option("-v", "--verbose", is_flag=True, default=False, required=False, help="Verbose output.") def analyze(path: Path, git_token: str, backend: str, output: str, verbose, **kwargs): """ Executes the command that analyzes an SBOM. 
""" global _progress_bar requirements = parse_requirements(**kwargs) with open(path, "r", encoding="utf-8") as file: unscored_sbom = Sbom(json.load(file)) # TODO: git_token should be sent to the frontend API, instead of setting # os.environ, to be more traceable os.environ["GITHUB_AUTH_TOKEN"] = git_token front_end_api = FrontEndAPI(backend) if verbose: on_analysis_status_change(front_end_api.sbom_processor.sbom_processor_status) front_end_api.on_sbom_processor_status_update.subscribe(on_analysis_status_change) scored_sbom = front_end_api.analyze_sbom(unscored_sbom, requirements) if verbose: print_analysis_summary( front_end_api.sbom_processor.sbom_processor_status.step_response ) match output: case "table": table_output(scored_sbom) case "json": json_output(scored_sbom) case "simplified": simplified_output(scored_sbom) case _: print("Unrecognized output format.") @ossqa_cli.command(help="Lookup names of SBOMs in the database.") @click.option("-b", "--backend", type=str, callback=validate_backend, default=constants.HOST, help="URL of the database server.") @click.option("-o", "--output", type=click.Choice(["table", "json"]), required=False, default="table", help="Output format.") @click.option("-v", "--verbose", is_flag=True, default=False, required=False, help="Verbose output.") def sboms(backend: str, output: str, verbose: str): """ Executes the command that fetches all SBOM names from the backend. 
""" front_end_api = FrontEndAPI(backend) front_end_api.on_sbom_processor_status_update.subscribe(on_analysis_status_change) sbom_names = front_end_api.lookup_stored_sboms() if verbose: print(f"Found {len(sbom_names)} SBOMs in the database.") if output == "table": table = tabulate([sbom_names], headers=["Repository Names"]) print(table) elif output == "json": print(json.dumps(sbom_names)) else: print("This code should be unreachable.") @ossqa_cli.command(help="Lookup SBOMs with a given name.") @click.argument("name", required=True, type=str) @click.option("-b", "--backend", type=str, callback=validate_backend, default=constants.HOST, help="URL of the database server.") @click.option("-o", "--output", type=click.Choice(["table", "json"]), required=False, default="table", help="Output format.") @click.option("-v", "--verbose", is_flag=True, default=False, required=False, help="Verbose output.") def lookup(name: str, backend: str, output: str, verbose: str): """ Executes the command that fetches all SBOMs of a specific name from the backend. """ global _progress_bar front_end_api = FrontEndAPI(backend) if verbose: on_analysis_status_change(front_end_api.sbom_processor.sbom_processor_status) front_end_api.on_sbom_processor_status_update.subscribe(on_analysis_status_change) found_sboms = front_end_api.lookup_previous_sboms(name) if verbose: print(f"Found {len(found_sboms)} SBOMs with the name '{name}'.") if output == "table": table_data = [] for sbom in found_sboms: dependencies = sbom.dependency_manager.get_dependencies_by_filter( lambda _: True ) sbom_data = [ sbom.serial_number, sbom.version, sbom.repo_name, sbom.repo_version, len(dependencies), ] table_data.append(sbom_data) table_headers = [ "Serial number", "Version", "Repo name", "Repo version", "Dependency count" ] table = tabulate(table_data, headers=table_headers) print(table) elif output == "json": sbom_dicts = [sbom.to_dict() for sbom in found_sboms] print(json.dumps(sbom_dicts))
19,382
Python
.py
448
34.957589
90
0.64011
OSSQA-PUM/OSSQA
8
1
2
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,054
web_api.py
OSSQA-PUM_OSSQA/src/main/frontend/web_api.py
import json from dataclasses import asdict from flask import Flask, request from main.frontend.front_end_api import FrontEndAPI from main.data_types.sbom_types.sbom import Sbom from main.data_types.user_requirements import (UserRequirements, RequirementsType) from main.sbom_processor import SbomProcessorStatus import main.constants as constants app = Flask(__name__) frontend_api = FrontEndAPI(constants.HOST) status: SbomProcessorStatus = SbomProcessorStatus("Initializing") @app.errorhandler(415) def page_not_found(error): """ Handles the error when the request is not found. Args: error: The error that occurred. """ print("Error:", error) return "Not found", 415 @app.route("/analyze", methods=['POST']) def analyze(): """ Analyzes the SBOM and returns the result. Returns: str: The result of the analysis. """ print("Request received") data = request.get_json() try: user_reqs_param = data['user_reqs'] except (KeyError, TypeError): user_reqs_param = "[10, 10, 10, 10, 10, 10,\ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10]" user_reqs_param = json.loads(user_reqs_param) user_reqs_dict = { RequirementsType.VULNERABILITIES: user_reqs_param["Vulnerabilities"], RequirementsType.DEPENDENCY_UPDATE_TOOL: user_reqs_param["Dependency Update Tool"], RequirementsType.MAINTAINED: user_reqs_param["Maintained"], RequirementsType.SECURITY_POLICY: user_reqs_param["Security Policy"], RequirementsType.LICENSE: user_reqs_param["License"], RequirementsType.CII_BEST_PRACTICES: user_reqs_param["CII Best Practices"], RequirementsType.CI_TESTS: user_reqs_param["CI Tests"], RequirementsType.FUZZING: user_reqs_param["Fuzzing"], RequirementsType.SAST: user_reqs_param["SAST"], RequirementsType.BINARY_ARTIFACTS: user_reqs_param["Binary Artifacts"], RequirementsType.BRANCH_PROTECTION: user_reqs_param["Branch Protection"], RequirementsType.DANGEROUS_WORKFLOW: user_reqs_param["Dangerous Workflow"], RequirementsType.CODE_REVIEW: user_reqs_param["Code Review"], RequirementsType.CONTRIBUTORS: 
user_reqs_param["Contributors"], RequirementsType.PINNED_DEPENDENCIES: user_reqs_param["Pinned Dependencies"], RequirementsType.TOKEN_PERMISSIONS: user_reqs_param["Token Permissions"], RequirementsType.PACKAGING: user_reqs_param["Packaging"], RequirementsType.SIGNED_RELEASES: user_reqs_param["Signed Releases"] } try: user_reqs: UserRequirements = UserRequirements(user_reqs_dict) except (AssertionError, ValueError): return "Invalid user requirements", 415 try: sbom = Sbom(json.loads(data['sbom'])) except (KeyError, TypeError): return "Invalid SBOM", 415 frontend_api.subscribe_to_state_change(update_current_status) result_sbom: Sbom = frontend_api.analyze_sbom(sbom, user_reqs) result_json = result_sbom.to_dict_web() return json.dumps(result_json) def update_current_status(update: SbomProcessorStatus): """ Updates the current status of the request. Args: update (SbomProcessorStatus): The updated status. """ global status status = update @app.route("/get_current_status", methods=['GET']) def get_current_status(): """ Gets the current status of the request. Returns: str: The current status of the request. """ global status print(f"Status updated: {asdict(status)}") return json.dumps(asdict(status)) @app.route("/get_previous_sbom/<path:repo_name>", methods=['GET']) def get_previous_sboms(repo_name: str): print(f"Looking up previous SBOMs for {repo_name}") sboms = frontend_api.lookup_previous_sboms(repo_name) sbom_dicts = [sbom.to_dict_web() for sbom in sboms] return json.dumps(sbom_dicts) def run(): """ Runs the web API. This function starts the web API- and listens for incoming requests on port 98. The API runs in debug mode. """ app.run(port=98, debug=True, host='0.0.0.0')
4,409
Python
.py
118
30.228814
70
0.664246
OSSQA-PUM/OSSQA
8
1
2
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,055
dependency_grader.py
OSSQA-PUM_OSSQA/src/main/frontend/dependency_grader.py
""" This module contains the function to calculate the final scores for the dependencies in the SBOM. """ import copy from main.data_types.sbom_types.sbom import Sbom from main.data_types.user_requirements import UserRequirements from main.data_types.sbom_types.dependency import Dependency def grade_dependencies(sbom: Sbom, user_requirements: UserRequirements) \ -> Sbom: """ Grades the dependencies in the SBOM based on the user requirements. Args: sbom (Sbom): The SBOM to grade. user_requirements (UserRequirements): The user requirements. Returns: Sbom: The graded SBOM. """ sbom_copy = copy.deepcopy(sbom) for dependency in sbom_copy.dependency_manager.get_scored_dependencies(): dependency.reach_requirement = _grade_dependency(dependency, user_requirements) return sbom_copy def _grade_dependency(dependency: Dependency, user_requirements: UserRequirements) -> str: """ Grades the dependency based on the user requirements. Args: dependency (Dependency): The dependency to grade. user_requirements (UserRequirements): The user requirements. Returns: str: The grade of the dependency. """ dependency_scores: dict = dependency.scorecard.to_dict() checks: dict = {} for check in dependency_scores["checks"]: checks[check["name"]] = check["score"] found_all_checks: bool = True # Check if dependency failed for req_name, req_score in user_requirements.get_listed_requirements(): if req_score == -1: continue check_score = checks.get(req_name, None) if check_score is None: found_all_checks = False continue if check_score < req_score: return "No" # Check if result not found if not found_all_checks: return "Test result not found" return "Yes"
1,988
Python
.py
52
30.711538
77
0.665281
OSSQA-PUM/OSSQA
8
1
2
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,056
user_requirements.py
OSSQA-PUM_OSSQA/src/main/data_types/user_requirements.py
""" This module contains every datatype related to user-specified requirements. """ from enum import StrEnum class RequirementsType(StrEnum): """ Mappings between strings and OpenSSF Scorecard tests. """ VULNERABILITIES = "vulnerabilities" DEPENDENCY_UPDATE_TOOL = "dependency_update_tool" MAINTAINED = "maintained" SECURITY_POLICY = "security_policy" LICENSE = "license" CII_BEST_PRACTICES = "cii_best_practices" CI_TESTS = "ci_tests" FUZZING = "fuzzing" SAST = "sast" BINARY_ARTIFACTS = "binary_artifacts" BRANCH_PROTECTION = "branch_protection" DANGEROUS_WORKFLOW = "dangerous_workflow" CODE_REVIEW = "code_review" CONTRIBUTORS = "contributors" PINNED_DEPENDENCIES = "pinned_dependencies" TOKEN_PERMISSIONS = "token_permissions" PACKAGING = "packaging" SIGNED_RELEASES = "signed_releases" class UserRequirements: """ Represents the weights/priorities of the OpenSSF Scorecard tests. Attributes: source_risk_assessment (int): The risk assessment of the source. includes: vulnerabilities (int): The vulnerabilities of the project. maintenance (int): The maintenance of the project. includes: dependency_update_tool (int): The dependency update tool of the project. maintained (int): The maintenance of the project. security_policy (int): The security policy of the project. license (int): The license of the project. cii_best_practices (int): The CII best practices of the project. build_risk_assessment (int): The risk assessment of the build. includes: ci_tests (int): The CI tests of the project. fuzzing (int): The fuzzing of the project. sast (int): The SAST of the project. continuous_testing (int): The continuous testing of the project. includes: binary_artifacts (int): The binary artifacts of the project. branch_protection (int): The branch protection of the project. dangerous_workflow (int): The dangerous workflow of the project. code_review (int): The code review of the project. contributors (int): The contributors of the project. 
code_vulnerabilities (int): The code vulnerabilities of the project. includes: pinned_dependencies (int): The pinned dependencies of the project. token_permissions (int): The token permissions of the project. packaging (int): The packaging of the project. signed_releases (int): The signed releases of the project. """ vulnerabilities: int = -1 dependency_update_tool: int = -1 maintained: int = -1 security_policy: int = -1 license: int = -1 cii_best_practices: int = -1 ci_tests: int = -1 fuzzing: int = -1 sast: int = -1 binary_artifacts: int = -1 branch_protection: int = -1 dangerous_workflow: int = -1 code_review: int = -1 contributors: int = -1 pinned_dependencies: int = -1 token_permissions: int = -1 packaging: int = -1 signed_releases: int = -1 def __init__(self, requirements: dict[str, int]): """ Initializes the user requirements. Args: requirements (dict): The user requirements. """ self.code_vulnerabilities = requirements.get( RequirementsType.VULNERABILITIES, -1) self.dependency_update_tool = requirements.get( RequirementsType.DEPENDENCY_UPDATE_TOOL, -1) self.maintained = requirements.get( RequirementsType.MAINTAINED, -1) self.security_policy = requirements.get( RequirementsType.SECURITY_POLICY, -1) self.license = requirements.get( RequirementsType.LICENSE, -1) self.cii_best_practices = requirements.get( RequirementsType.CII_BEST_PRACTICES, -1) self.ci_tests = requirements.get( RequirementsType.CI_TESTS, -1) self.fuzzing = requirements.get( RequirementsType.FUZZING, -1) self.sast = requirements.get( RequirementsType.SAST, -1) self.binary_artifacts = requirements.get( RequirementsType.BINARY_ARTIFACTS, -1) self.branch_protection = requirements.get( RequirementsType.BRANCH_PROTECTION, -1) self.dangerous_workflow = requirements.get( RequirementsType.DANGEROUS_WORKFLOW, -1) self.code_review = requirements.get( RequirementsType.CODE_REVIEW, -1) self.contributors = requirements.get( RequirementsType.CONTRIBUTORS, -1) self.pinned_dependencies = requirements.get( 
RequirementsType.PINNED_DEPENDENCIES, -1) self.token_permissions = requirements.get( RequirementsType.TOKEN_PERMISSIONS, -1) self.packaging = requirements.get( RequirementsType.PACKAGING, -1) self.signed_releases = requirements.get( RequirementsType.SIGNED_RELEASES, -1) self.validate() def validate(self): """ Validate the user requirements. Raises: ValueError: If the user requirements are invalid. """ def is_int(value) -> bool: return not isinstance(value, bool) and isinstance(value, int) for attr in self.__dict__: if not is_int(getattr(self, attr)): print(f"Invalid value for {attr}") if not (is_int(self.vulnerabilities) and is_int(self.dependency_update_tool) and is_int(self.maintained) and is_int(self.security_policy) and is_int(self.license) and is_int(self.cii_best_practices) and is_int(self.ci_tests) and is_int(self.fuzzing) and is_int(self.sast) and is_int(self.binary_artifacts) and is_int(self.branch_protection) and is_int(self.dangerous_workflow) and is_int(self.code_review) and is_int(self.contributors) and is_int(self.pinned_dependencies) and is_int(self.token_permissions) and is_int(self.packaging) and is_int(self.signed_releases)): raise TypeError("input arguments are not integers") if not (-1 <= self.vulnerabilities <= 10 and -1 <= self.dependency_update_tool <= 10 and -1 <= self.maintained <= 10 and -1 <= self.security_policy <= 10 and -1 <= self.license <= 10 and -1 <= self.cii_best_practices <= 10 and -1 <= self.ci_tests <= 10 and -1 <= self.fuzzing <= 10 and -1 <= self.sast <= 10 and -1 <= self.binary_artifacts <= 10 and -1 <= self.branch_protection <= 10 and -1 <= self.dangerous_workflow <= 10 and -1 <= self.code_review <= 10 and -1 <= self.contributors <= 10 and -1 <= self.pinned_dependencies <= 10 and -1 <= self.token_permissions <= 10 and -1 <= self.packaging <= 10 and -1 <= self.signed_releases <= 10): raise ValueError( "input arguments fall out of bounds,\ check if input variables are within the bounds 0 to 10") def 
get_listed_requirements(self) -> list[str]: """ Get a list of the requirements. Returns: list[str]: The list of requirements. """ return [["Vulnerabilities", self.vulnerabilities], ["Dependency-Update-Tool", self.dependency_update_tool], ["Maintained", self.maintained], ["Security-Policy", self.security_policy], ["License", self.license], ["CII-Best-Practices", self.cii_best_practices], ["CI-Tests", self.ci_tests], ["Fuzzing", self.fuzzing], ["SAST", self.sast], ["Binary-Artifacts", self.binary_artifacts], ["Branch-Protection", self.branch_protection], ["Dangerous-Workflow", self.dangerous_workflow], ["Code-Review", self.code_review], ["Contributors", self.contributors], ["Pinned-Dependencies", self.pinned_dependencies], ["Token-Permissions", self.token_permissions], ["Packaging", self.packaging], ["Signed-Releases", self.signed_releases]] def to_dict(self) -> dict: """ Get a dictionary of the requirements. Returns: dict: The dictionary of requirements. """ return { "Vulnerabilities": self.vulnerabilities, "Dependency-Update-Tool": self.dependency_update_tool, "Maintained": self.maintained, "Security-Policy": self.security_policy, "License": self.license, "CII-Best-Practices": self.cii_best_practices, "CI-Tests": self.ci_tests, "Fuzzing": self.fuzzing, "SAST": self.sast, "Binary-Artifacts": self.binary_artifacts, "Branch-Protection": self.branch_protection, "Dangerous-Workflow": self.dangerous_workflow, "Code-Review": self.code_review, "Contributors": self.contributors, "Pinned-Dependencies": self.pinned_dependencies, "Token-Permissions": self.token_permissions, "Packaging": self.packaging, "Signed-Releases": self.signed_releases }
9,848
Python
.py
224
32.830357
78
0.596962
OSSQA-PUM/OSSQA
8
1
2
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,057
event.py
OSSQA-PUM_OSSQA/src/main/data_types/event.py
""" This module defines the Event class, which allows subscribing to and invoking callback functions. Example usage: event = Event[int]() event.subscribe(callback_function) event.invoke(42) """ from typing import TypeVar, Generic, Callable, Any T = TypeVar('T') class Event(Generic[T]): """ Represents an event that can be subscribed to and invoked. Attributes: _callbacks (list[Callable[[T], Any]]): A list of callback functions subscribed to the event. """ _callbacks: list[Callable[[T], Any]] def __init__(self) -> None: """ Initializes a new instance of the Event class. """ self._callbacks = [] def subscribe(self, callback: Callable[[T], Any]) -> None: """ Subscribes to the event. Args: callback (Callable[[T], Any]): The callback function to subscribe. """ self._callbacks.append(callback) def unsubscribe(self, callback: Callable[[T], Any]) -> None: """ Unsubscribes from the event. Args: callback (Callable[[T], Any]): The callback function to unsubscribe. """ try: self._callbacks.remove(callback) except ValueError as e: raise ValueError(f"Callback {callback} not in list") from e def invoke(self, event_data: T) -> None: """ Invokes the callback functions of subscribers. Args: event_data (T): The value to pass as an argument to the callback functions. """ for callback in self._callbacks: callback(event_data)
1,691
Python
.py
50
25.5
78
0.594096
OSSQA-PUM/OSSQA
8
1
2
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,058
dependency_scorer.py
OSSQA-PUM_OSSQA/src/main/data_types/dependency_scorer.py
""" This module contains classes for analyzing or fetching dependencies. The class DependencyScorer is an abstract base class based on the Strategy design pattern, with children implementing different ways of retrieving Dependencies with scored data. """ from abc import ABC, abstractmethod from typing import Any, Callable from dataclasses import dataclass import subprocess from multiprocessing import Pool from time import sleep import re import json import copy import os import requests from main.data_types.sbom_types.dependency import Dependency from main.data_types.sbom_types.scorecard import Scorecard from main.data_types.event import Event from main.util import (get_git_sha1, get_token_data, Sha1NotFoundError, TokenLimitExceededError) @dataclass class StepResponse: """ Represents a response from a step in the dependency scoring process. """ batch_size: int completed_items: int successful_items: int failed_items: int message: str = "" class DependencyScorer(ABC): """ Represents a dependency scorer. This class is an abstract base class based on the Strategy design pattern. Attributes: on_step_complete (Event[StepResponse]): An event that triggers when a step in the scoring process is completed. Methods: score: An abstract function that intends to score a list of dependencies. """ on_step_complete: Event[StepResponse] _scored_dependencies: list[Dependency] def __init__(self, callback: Callable[[StepResponse], Any]) -> None: super().__init__() self.on_step_complete = Event[StepResponse]() self.on_step_complete.subscribe(callback) self._scored_dependencies = [] @abstractmethod def score(self, dependencies: list[Dependency]) -> list[Dependency]: """ An abstract function that intends to score a list of dependencies. Args: dependencies (list[Dependency]): The dependencies to score. Returns: list[Dependency]: The scored dependencies. """ def _check_if_scored(self, dependency: Dependency) -> bool: """ Checks if a dependency has already been scored. 
Also updates the dependency with the score if it has been scored. Args: dependency (Dependency): The dependency to check. Returns: bool: True if the dependency has already been scored, False otherwise. """ for scored_dep in self._scored_dependencies: if (dependency.git_url == scored_dep.git_url and dependency.component_version == scored_dep.component_version): dependency.scorecard = scored_dep.scorecard dependency.failure_reason = scored_dep.failure_reason return True return False class SSFAPIFetcher(DependencyScorer): """ Represents a SSF API fetcher. This class fetches dependencies with scores from the OpenSSF Scorecard API. Attributes: on_step_complete (Event[StepResponse]): An event that triggers when a step in the scoring process is completed. Methods: score: Scores a list of dependencies by fetching stored scores with the OpenSSF Scorecard API. """ def score(self, dependencies: list[Dependency]) -> list[Dependency]: """ Scores a list of dependencies by fetching stored scores with the OpenSSF Scorecard API. Args: dependencies (list[Dependency]): The dependencies to score. Returns: list[Dependency]: The scored dependencies. """ batch_size = len(dependencies) failed_items = 0 successful_items = 0 new_dependencies = [] remaining_dependencies: list[Dependency] = copy.deepcopy(dependencies) try: with Pool() as pool: for index, dependency in enumerate( pool.imap(self._request_ssf_api, dependencies) ): if dependency.scorecard: successful_items += 1 else: failed_items += 1 remaining_dependencies.remove(dependency) new_dependencies.append(dependency) self.on_step_complete.invoke( StepResponse( batch_size, successful_items + failed_items, successful_items, failed_items ) ) except TokenLimitExceededError as e: time_to_wait: int = e.time_to_wait + 10 self.on_step_complete.invoke( StepResponse( batch_size, successful_items + failed_items, successful_items, failed_items, f"Token limit reached. Until {e.reset_datetime}" + "for token reset." 
) ) sleep(time_to_wait) return new_dependencies + self.score(remaining_dependencies) return new_dependencies def _request_ssf_api(self, dependency: Dependency) -> Dependency: """ Looks up the score for a dependency in the SSF API. Args: dependency (Dependency): The dependency to look up. Returns: Dependency: The dependency with an SSF score. Raises: TokenLimitExceededError: If the GitHub token limit is exceeded. """ assert isinstance(dependency, Dependency), \ f"dependency: {dependency} is not a Dependency object" new_dependency: Dependency = copy.deepcopy(dependency) # Check if the dependency has already been scored if self._check_if_scored(new_dependency): return new_dependency try: repo_path = new_dependency.repo_path component_version = new_dependency.component_version except (KeyError, ValueError) as e: error_message = f"Failed missing required field due to: {e}" new_dependency.failure_reason = type(e)(error_message) return new_dependency try: sha1 = get_git_sha1(repo_path, component_version) except ( ConnectionRefusedError, AssertionError, ValueError, KeyError, Sha1NotFoundError) as e: error_message = f"Failed to get git sha1 due to: {e}" new_dependency.failure_reason = type(e)(error_message) return new_dependency except TokenLimitExceededError as e: raise TokenLimitExceededError(e.reset_time) from e try: score = self._lookup_ssf_api( dependency.git_url.lstrip("htps:/"), sha1 ) new_dependency.scorecard = score self._scored_dependencies.append(new_dependency) new_dependency.failure_reason = None return new_dependency except requests.exceptions.RequestException as e: error_message = f"Failed to get score due to: {e}" new_dependency.failure_reason = type(e)(error_message) return new_dependency def _lookup_ssf_api(self, git_url: str, sha1: str) -> Scorecard: """ Looks up the score for a dependency in the SSF API. Args: git_url (str): The Git URL of the dependency. sha1 (str): The SHA1 hash of the dependency. Returns: Scorecard: The scorecard of the dependency. 
Raises: requests.exceptions.RequestException: If the scorecard failed to be retrieved. """ try: score = requests.get( ("https://api.securityscorecards.dev/projects/" f"{git_url}/?commit={sha1}"), timeout=10 ) if score.status_code != 200: score = requests.get( (f"https://api.securityscorecards.dev" f"/projects/{git_url}"), timeout=10 ) if score.status_code != 200: raise requests.exceptions.RequestException( f"Failed to get score for {git_url} at {sha1}" ) score = score.json() return Scorecard(score) except requests.exceptions.RequestException as e: raise e class ScorecardAnalyzer(DependencyScorer): """ Represents a Scorecard Analyzer. This class scores dependencies by analyzing the scores of the OpenSSF Scorecard. Attributes: on_step_complete (Event[StepResponse]): An event that triggers when a step in the scoring process is completed. Methods: score: Scores a list of dependencies by analyzing the scores of the OpenSSF Scorecard. """ def score(self, dependencies: list[Dependency]) -> list[Dependency]: """ Scores a list of dependencies by analyzing the scores of the OpenSSF Scorecard. Args: dependencies (list[Dependency]): The dependencies to score. Returns: list[Dependency]: The scored dependencies. Raises: AssertionError: If the dependencies is not a list. 
""" assert isinstance(dependencies, list), \ f"dependencies: {dependencies} is not a list" batch_size = len(dependencies) failed_items = 0 successful_items = 0 new_dependencies = [] remaining_dependencies: list[Dependency] = copy.deepcopy(dependencies) with Pool() as pool: try: for index, scored_dependency in enumerate( pool.imap(self._analyze_scorecard, dependencies) ): if scored_dependency.scorecard: successful_items += 1 else: failed_items += 1 new_dependencies.append(scored_dependency) remaining_dependencies.remove(scored_dependency) self.on_step_complete.invoke( StepResponse( batch_size, index + 1, successful_items, failed_items ) ) except TokenLimitExceededError as e: time_to_wait: int = e.time_to_wait + 10 self.on_step_complete.invoke( StepResponse( batch_size, successful_items + failed_items, successful_items, failed_items, "Token limit reached. Waiting until " + f"{e.reset_datetime} for token reset." ) ) sleep(time_to_wait) return new_dependencies + self.score(remaining_dependencies) return new_dependencies def _analyze_scorecard(self, dependency: Dependency, timeout: float = 120)\ -> Dependency: """ Analyzes the score for a dependency. Args: dependency (Dependency): The dependency to analyze. timeout (float): Time for requests to timeout Returns: Dependency: A deepcopy the dependency with an analyzed score. Raises: AssertionError: If the git sha1 is invalid. AssertionError: If the dependency is not a Dependency object. AssertionError: If the scorecard could not be executed. ConnectionRefusedError: If the git sha1 could not be retrieved. subprocess.CalledProcessError: If the scorecard binary could not be executed. json.JSONDecodeError: If the scorecard output could not be parsed. TimeoutError: If the scorecard execution timed out. TokenLimitExceededError: If the GitHub token limit is exceeded. 
""" assert isinstance(dependency, Dependency), \ f"dependency: {dependency} is not a Dependency object" new_dependency: Dependency = copy.deepcopy(dependency) # Check if the dependency has already been scored if self._check_if_scored(new_dependency): return new_dependency try: repo_path = new_dependency.repo_path component_version = new_dependency.component_version except (KeyError, ValueError) as e: error_message = f"Failed missing required field due to: {e}" new_dependency.failure_reason = type(e)(error_message) return new_dependency try: version_git_sha1: str = get_git_sha1( repo_path, component_version ) except ( ConnectionRefusedError, AssertionError, ValueError, KeyError, Sha1NotFoundError) as e: error_message = f"Failed to get git sha1 due to: {e}" new_dependency.failure_reason = type(e)(error_message) return new_dependency except TokenLimitExceededError as e: raise TokenLimitExceededError(e.reset_time) from e remaining_tries: int = 3 retry_interval: int = 3 success: bool = False while remaining_tries > 0 and not success: try: remaining_tries -= 1 scorecard: Scorecard = self._execute_scorecard( new_dependency.git_url, version_git_sha1, timeout ) new_dependency.scorecard = scorecard self._scored_dependencies.append(new_dependency) success = True except (AssertionError, subprocess.CalledProcessError, TimeoutError, json.JSONDecodeError, ValueError, requests.ConnectionError) as e: error_message = f"Failed to execute scorecard due to: {e}" new_dependency.failure_reason = type(e)(error_message) sleep(retry_interval) # Wait before retrying continue except TokenLimitExceededError as e: raise TokenLimitExceededError(e.reset_time) from e # Successful execution of scorecard if new_dependency.scorecard: new_dependency.failure_reason = None return new_dependency def _execute_scorecard(self, git_url: str, commit_sha1: str, timeout: float = 120) -> Scorecard: """ Executes the OpenSSF Scorecard binary on a specific version of a dependency. 
Args: git_url (str): The Git URL of the dependency. version (str): The version of the dependency. timeout (float): The timeout in seconds for the scorecard binary. Returns: Scorecard: The scorecard of the dependency. Raises: AssertionError: If the git url is not a string. AssertionError: If the commit sha1 is not a string. ValueError: If the scorecard output could not be parsed. json.JSONDecodeError: If the scorecard output could not be parsed. subprocess.CalledProcessError: If the scorecard binary could not be executed. TimeoutError: If the scorecard execution timed out. """ # Validate input assert isinstance(git_url, str), f"git_url: {git_url} is not a string" assert isinstance(commit_sha1, str), \ f"commit_sha1: {commit_sha1} is not a string." # Set flags for scorecard binary flags: str = (f"--repo {git_url} --show-details " f"--format json --commit {commit_sha1}") # Execute scorecard binary and get the raw output as a string try: # Execute scorecard binary if OS is Windows or Unix if os.name == "nt": output: str = subprocess.check_output( f'scorecard-windows.exe {flags}', shell=False, timeout=timeout ).decode("utf-8") else: output: str = subprocess.check_output( f'scorecard {flags}', shell=True, timeout=timeout ).decode("utf-8") except (subprocess.CalledProcessError) as e: output = e.output.decode("utf-8") except subprocess.TimeoutExpired as e: token_data: dict = get_token_data() if token_data.get("remaining") == 0: raise TokenLimitExceededError( token_data.get("reset_time")) raise TimeoutError(e.timeout, "Scorecard execution timed out") from e # Remove unnecessary data # Find start of JSON used for creating a Scorecard by finding the first # '{"date":' try: start_of_json: int = output.find('{"date":') assert start_of_json != -1, "JSON start not found" # Find end of JSON by finding the first '}' after the metadata pos_of_metadata: int = output.find('"metadata":') assert pos_of_metadata != -1, "Metadata not found" end_of_json: int = pos_of_metadata + \ 
output[pos_of_metadata:].find('}') assert end_of_json != -1, "JSON end not found" # Remove newlines and save the JSON output: str = re.sub( r"\n", "", output[start_of_json:end_of_json + 1] ) assert output != "", "JSON is empty" except AssertionError as e: raise ValueError( f"Failed to parse scorecard executable output because {e}" ) from e # Parse output to dict try: scorecard_dict: dict = json.loads(output) assert scorecard_dict["checks"], "Scorecard checks are empty" except json.JSONDecodeError as e: raise e # Create Scorecard object try: scorecard: Scorecard = Scorecard(scorecard_dict) except AssertionError as e: raise AssertionError( "Failed to create Scorecard object from dict: " + f"{scorecard_dict} because {e}" ) from e return scorecard
19,185
Python
.py
450
29.577778
79
0.572791
OSSQA-PUM/OSSQA
8
1
2
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,059
dependency_manager.py
OSSQA-PUM_OSSQA/src/main/data_types/sbom_types/dependency_manager.py
"""
This module contains the DependencyManager class, which manages the
dependencies (components) of an SBOM.

Classes:
- DependencyManager: Represents a dependency manager that manages
                     dependencies for a project.
"""
from typing import Callable
from main.data_types.sbom_types.dependency import Dependency


class DependencyManager:
    """
    Represents a dependency manager that manages dependencies for a project.

    Dependencies are held in a single internal list; the accessors below
    partition that list by scoring outcome (scored / unscored / failed).
    """
    _dependencies: list[Dependency]

    def __init__(self, sbom_components: list[dict]):
        """
        Initialize the dependency manager.

        Args:
            sbom_components (list[dict]): The components of the SBOM.
        """
        self._dependencies = [
            Dependency(component) for component in sbom_components
        ]

    def update(self, dependencies: list[Dependency]):
        """
        Update existing dependencies or add new ones if they do not exist.
        Dependencies are unique and identified by their platform, repo_path,
        and version.

        Args:
            dependencies (list[Dependency]): The dependencies to update.
        """
        for incoming in dependencies:
            try:
                position = self._dependencies.index(incoming)
            except ValueError:
                # Not present yet: simply add it.
                self._dependencies.append(incoming)
                continue
            # Only replace an existing entry with a scored or failed one;
            # an unscored duplicate carries no new information.
            if incoming.scorecard or incoming.failure_reason:
                self._dependencies[position] = incoming

    def get_scored_dependencies(self) -> list[Dependency]:
        """
        Get the scored dependencies.

        Returns:
            list[Dependency]: The scored dependencies.
        """
        return [
            dep for dep in self._dependencies
            if dep.scorecard and not dep.failure_reason
        ]

    def get_unscored_dependencies(self) -> list[Dependency]:
        """
        Get the unscored dependencies.

        Returns:
            list[Dependency]: The unscored dependencies.
        """
        return [
            dep for dep in self._dependencies
            if not dep.scorecard and not dep.failure_reason
        ]

    def get_failed_dependencies(self) -> list[Dependency]:
        """
        Get the failed dependencies.

        Returns:
            list[Dependency]: The failed dependencies.
        """
        return [dep for dep in self._dependencies if dep.failure_reason]

    def get_dependencies_by_filter(self, dependency_filter: Callable) \
            -> list[Dependency]:
        """
        Get the dependencies with a filter.

        Args:
            dependency_filter (Callable): The filter to apply.

        Returns:
            list[Dependency]: The filtered dependencies.
        """
        return [dep for dep in self._dependencies if dependency_filter(dep)]

    def to_dict(self) -> dict:
        """
        Create a dictionary representing the dependency manager.

        Returns:
            dict: The dependency manager as a dictionary.
        """
        return {
            'scored_dependencies':
                [dep.to_dict() for dep in self.get_scored_dependencies()],
            'unscored_dependencies':
                [dep.to_dict() for dep in self.get_unscored_dependencies()],
            'failed_dependencies':
                [dep.to_dict() for dep in self.get_failed_dependencies()],
        }

    def to_dict_web(self) -> dict:
        """
        Create a dictionary representing the dependency manager to use in
        the web interface.

        Returns:
            dict: The dependency manager as a dictionary.
        """
        return {
            'scored_dependencies':
                [dep.to_dict_web()
                 for dep in self.get_scored_dependencies()],
            'unscored_dependencies':
                [dep.to_dict_web()
                 for dep in self.get_unscored_dependencies()],
            'failed_dependencies':
                [dep.to_dict_web()
                 for dep in self.get_failed_dependencies()],
        }
4,498
Python
.py
121
25.826446
78
0.578609
OSSQA-PUM/OSSQA
8
1
2
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,060
sbom.py
OSSQA-PUM_OSSQA/src/main/data_types/sbom_types/sbom.py
"""
This module contains the Sbom class, which represents a Software Bill of
Materials (SBOM) object.

Classes:
- Sbom: Represents a Software Bill of Materials (SBOM) object.
"""
from re import match
import requests
from main.data_types.sbom_types.dependency_manager import DependencyManager
from main.data_types.sbom_types.dependency import Dependency
from main.util import get_github_token


class Sbom:
    """
    Represents a Software Bill of Materials (SBOM) object.

    Attributes:
        dependency_manager (DependencyManager): The dependency manager for
                                                the SBOM.
        serial_number (str): The serial number of the SBOM.
        version (str): The version of the SBOM.
        repo_name (str): The name of the repository.
        repo_version (str): The version of the repository.
        spec_version (str): The specification version of the SBOM.
    """
    dependency_manager: DependencyManager
    serial_number: str
    version: int
    repo_name: str
    repo_version: str
    spec_version: str

    def __init__(self, sbom: dict):
        """
        Initializes the SBOM.

        Args:
            sbom (dict): The SBOM contents.

        Raises:
            SyntaxError, IndexError, ValueError: propagated from the format
                check if the SBOM contents are invalid (see
                _check_format_of_sbom).
        """
        # Validate before touching any field so a malformed SBOM never
        # produces a half-initialized object.
        self._check_format_of_sbom(sbom)
        self.dependency_manager: DependencyManager \
            = DependencyManager(sbom["components"])
        self.serial_number: str = sbom["serialNumber"]
        self.version: int = sbom["version"]
        self.repo_name: str = sbom["metadata"]["component"]["name"]
        self.repo_version: str = sbom["metadata"]["component"]["version"]
        self.spec_version: str = sbom["specVersion"]

    def _header_dict(self) -> dict:
        """
        Build the header fields shared by to_dict and to_dict_web.

        Returns:
            dict: serialNumber, version, repo_name and repo_version.
        """
        return {
            "serialNumber": self.serial_number,
            "version": self.version,
            "repo_name": self.repo_name,
            "repo_version": self.repo_version,
        }

    def to_dict(self) -> dict:
        """
        Creates a dictionary representing the SBOM.

        Returns:
            dict: The SBOM as a dictionary.
        """
        res = self._header_dict()
        # Add dependencies to the dictionary
        res.update(self.dependency_manager.to_dict())
        return res

    def to_dict_web(self) -> dict:
        """
        Creates a dictionary representing the SBOM to use in the web
        interface.

        Returns:
            dict: The SBOM as a dictionary.
        """
        res = self._header_dict()
        # Add dependencies to the dictionary
        res.update(self.dependency_manager.to_dict_web())
        return res

    def get_scored_dependencies(self) -> list[Dependency]:
        """
        Gets the scored dependencies of the SBOM.

        Returns:
            list[Dependency]: The scored dependencies of the SBOM.
        """
        return self.dependency_manager.get_scored_dependencies()

    def get_unscored_dependencies(self) -> list[Dependency]:
        """
        Gets the unscored dependencies of the SBOM.

        Returns:
            list[Dependency]: The unscored dependencies of the SBOM.
        """
        return self.dependency_manager.get_unscored_dependencies()

    def get_failed_dependencies(self) -> list[Dependency]:
        """
        Gets the failed dependencies of the SBOM.

        Returns:
            list[Dependency]: The failed dependencies of the SBOM.
        """
        return self.dependency_manager.get_failed_dependencies()

    def get_dependencies_by_filter(self, dependency_filter: callable) \
            -> list[Dependency]:
        """
        Gets the dependencies of the SBOM with a filter.

        Args:
            dependency_filter (Callable): The filter to apply.

        Returns:
            list[Dependency]: The filtered dependencies of the SBOM.
        """
        return self.dependency_manager.get_dependencies_by_filter(
            dependency_filter
            )

    def update_dependencies(self, dependencies: list[Dependency]) -> None:
        """
        Updates the dependencies of the SBOM.

        Args:
            dependencies (list[Dependency]): The dependencies to update.
        """
        self.dependency_manager.update(dependencies)

    def _check_format_of_sbom(self, sbom_contents: dict) -> None:
        """
        Checks the format of the SBOM contents.

        Args:
            sbom_contents (dict): The SBOM contents to be checked.

        Raises:
            SyntaxError: If the 'bomFormat' is missing or not 'CycloneDX'.
            IndexError: If the 'specVersion' is missing, out of date, or
                        incorrect.
            SyntaxError: If the 'serialNumber' does not match the RFC-4122
                         format.
            IndexError: If the 'version' of SBOM is lower than 1 or not a
                        proper integer.
            ValueError: If the name could not be found, indicating a
                        non-valid SBOM.
        """
        # Use .get() throughout so a missing key raises the documented
        # error type instead of an undocumented KeyError.
        if sbom_contents.get("bomFormat") != "CycloneDX":
            raise SyntaxError("bomFormat missing or not CycloneDX")

        if sbom_contents.get("specVersion") not in \
                ["1.2", "1.3", "1.4", "1.5"]:
            raise IndexError(
                "CycloneDX version missing, out of date or incorrect"
                )

        if not match(
                "^urn:uuid:[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-" +
                "[0-9a-f]{4}-[0-9a-f]{12}$",
                sbom_contents.get("serialNumber", "")):
            raise SyntaxError(
                "SBOM Serial number does not match the RFC-4122 format")

        version = sbom_contents.get("version")
        # Check the type before comparing: a non-integer version must raise
        # the documented IndexError, not a TypeError from '>='.
        if not isinstance(version, int):
            raise IndexError("Version of SBOM is not proper integer")

        if not version >= 1:
            raise IndexError("Version of SBOM is lower than 1")

        # Checks if name of SBOM exists
        try:
            name = sbom_contents["metadata"]["component"]["name"]
        except (IndexError, KeyError):
            name = ""

        if name == "":
            raise ValueError("Name could not be found, non valid SBOM")

    def _try_git_api_connection(self, url: str) -> None:
        """
        Tries to connect to the GitHub API.

        Args:
            url (str): The URL to connect to.

        Raises:
            ConnectionError: If the connection could not be established.
        """
        token = get_github_token()
        # Unauthenticated requests are allowed when no token is configured.
        headers = {'Authorization': f'Bearer {token}'} if token else {}
        url = url.removeprefix("github.com")
        url = f"https://api.github.com/repos{url}"
        try:
            response = requests.get(url, headers=headers, timeout=5)
            if response.status_code != 200:
                print(f"Could not connect to {url} {response.text}")
                raise ConnectionError(
                    f"Could not connect to GitHub API for {url}")
        except requests.exceptions.ConnectionError as e:
            raise e
7,160
Python
.py
175
30.954286
78
0.600058
OSSQA-PUM/OSSQA
8
1
2
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,061
dependency.py
OSSQA-PUM_OSSQA/src/main/data_types/sbom_types/dependency.py
"""
This module contains the Dependency class which represents a dependency for
a project.
"""
from urllib.parse import urlparse
from re import search
from dataclasses import dataclass
from main.data_types.sbom_types.scorecard import Scorecard


@dataclass
class Dependency:
    """
    Represents a dependency for a project.

    Attributes:
        scorecard (Scorecard): The scorecard related to the dependency.
        failure_reason (Exception): The reason for any failure related to
                                    the dependency.
        reach_requirement (str): The grade of the dependency.
    """
    scorecard: Scorecard = None
    failure_reason: Exception = None
    reach_requirement: str = None

    def __init__(self, dependency: dict):
        """
        Initializes the dependency.

        Every key of the SBOM component dict is copied onto the instance
        as a dynamic attribute; "scorecard" is wrapped in a Scorecard.

        Args:
            dependency (dict): The component counterpart of the dependency
                               in the SBOM.
        """
        # NOTE: this hand-written __init__ takes precedence over the
        # dataclass-generated one (dataclasses do not overwrite methods
        # already defined in the class body).
        self.scorecard = None
        self.failure_reason = None
        self.reach_requirement = None
        for dependency_attr in dependency.keys():
            if dependency_attr == "scorecard":
                self.scorecard = Scorecard(dependency[dependency_attr])
            else:
                setattr(self, dependency_attr, dependency[dependency_attr])

    def __eq__(self, other):
        """
        Check if two dependencies are equal.

        Only the "name" and "version" attributes participate in the
        comparison; every other attribute is ignored.

        Args:
            other (Dependency): The dependency to compare with.

        Returns:
            bool: True if the dependencies are equal, False otherwise.
        """
        # Loop over all attributes of the dependency
        for attr in self.__dict__:
            if attr not in ("name", "version"):
                continue
            # If the attribute is not the same in both dependencies
            try:
                other_attr = getattr(other, attr)
            except AttributeError:
                return False
            if getattr(self, attr) != other_attr:
                return False
        return True

    @property
    def component(self) -> dict:
        """
        Get all the attributes from the SBOM of the dependency.

        Returns:
            dict: The attributes from the SBOM of the dependency.
        """
        # Everything except the three bookkeeping fields came from the SBOM.
        res = {}
        for attr in self.__dict__:
            if attr not in (
                    "scorecard", "failure_reason", "reach_requirement"):
                res.update({attr: getattr(self, attr)})
        return res

    @property
    def component_name(self) -> str:
        """
        Get the name of the component.

        Returns:
            str: The name of the component.

        Raises:
            KeyError: If the component name is not found in the component.
        """
        # dir() is used because "name" is a dynamic attribute set in
        # __init__ only when present in the SBOM component.
        if "name" not in dir(self):
            raise KeyError("name not found in component")
        return getattr(self, "name")

    @property
    def component_version(self) -> str:
        """
        Get the version of the component.

        Returns:
            str: The version of the component.

        Raises:
            KeyError: If the version is not found in the component.
        """
        if "version" not in dir(self):
            raise KeyError("version not found in component")
        return getattr(self, "version")

    @property
    def platform(self) -> str:
        """
        Get the platform of the dependency (e.g. "github.com").

        Returns:
            str: The platform of the dependency.

        Raises:
            KeyError: If the URL is not found in the component external
                      references.
            ValueError: If the URL is not a valid URL.
        """
        return self._get_git_url().split("/", maxsplit=1)[0]

    @property
    def repo_path(self) -> str:
        """
        Get the repo path of the dependency (e.g. "owner/repo").

        Returns:
            str: The repo path of the dependency.

        Raises:
            KeyError: If the URL is not found in the component external
                      references.
            ValueError: If the URL is not a valid URL.
        """
        return self._get_git_url().split("/", maxsplit=1)[1]

    @property
    def git_url(self) -> str:
        """
        Get the git URL of the dependency.

        Returns:
            str: The git URL of the dependency.

        Raises:
            ValueError: If no git URL could be found in externalReferences
                        of the component.
            KeyError: If the component has no externalReferences field.
        """
        git_url = self._get_git_url()
        return f"https://{git_url}"

    def _get_git_url(self) -> str:
        """
        Get the git URL of the dependency.

        Scans externalReferences for the first URL that parses as a
        github.com repository and returns it as "github.com/owner/repo".

        Returns:
            str: The git URL of the dependency.

        Raises:
            ValueError: If no git URL could be found in externalReferences
                        of the component.
            KeyError: If the component has no externalReferences field.
        """
        if "externalReferences" not in dir(self):
            raise KeyError("externalReferences not found in component")
        external_ref = getattr(self, "externalReferences")
        urls = []
        for ref in external_ref:
            if "url" not in ref:
                continue
            try:
                github_url = self._parse_github_url(ref["url"])
            except ValueError:
                # Non-GitHub or unparsable reference; keep looking.
                continue
            urls.append(github_url)
        if not urls:
            raise ValueError("No valid URLs found in externalReferences")
        return urls[0]

    def _parse_github_url(self, url: str) -> str:
        """
        Parse the GitHub URL from a URL.

        Args:
            url (str): The URL to parse.

        Returns:
            str: The GitHub URL.

        Raises:
            ValueError: If the URL is not a github.com URL or if the URL
                        could not be parsed.
        """
        url_split = urlparse(url)
        platform = url_split.netloc

        if platform != "github.com":
            raise ValueError("Platform not supported")

        git_repo_path = url_split.path.removesuffix(".git")

        pattern = r"\/([^\/]+)\/([^\/]+)"  # Match /owner/repo
        _match = search(pattern, git_repo_path)

        if not _match:
            raise ValueError("Could not parse git URL")

        # group(0) keeps the leading slash, so the result is
        # "github.com/owner/repo".
        git_repo_path = _match.group(0)
        github_url = platform + git_repo_path
        return github_url

    def to_dict(self) -> dict:
        """
        Create a dictionary representing the dependency.

        Returns:
            dict: The dependency as a dictionary.
        """
        dependency_dict = {}
        for attr in self.__dict__:
            if attr not in ("scorecard", "failure_reason"):
                dependency_dict.update({attr: getattr(self, attr)})
            if attr == "scorecard":
                dependency_dict.update(
                    {"scorecard": self.scorecard.to_dict()
                     if self.scorecard else None
                     }
                )
            if attr == "failure_reason":
                dependency_dict.update(
                    {"failure_reason": str(self.failure_reason)
                     if self.failure_reason else None
                     }
                )

        # Only allow path if it is a valid GitHub URL
        try:
            platform = self.platform
            repo_path = self.repo_path
        except (KeyError, ValueError):
            return dependency_dict
        # NOTE(review): no separator between platform and repo_path, so
        # this yields e.g. "github.comowner/repo". It is used consistently
        # as a lookup key — confirm before changing the format.
        dependency_dict.update({"platform_path": platform + repo_path})
        return dependency_dict

    def to_dict_web(self) -> dict:
        """
        Creates a dictionary representing the dependency to use in the web
        interface.

        Returns:
            dict: The dependency as a dictionary.
        """
        res = {}
        res["dependency_score"] = self.scorecard.to_dict() \
            if self.scorecard else None
        res["failure_reason"] = str(self.failure_reason) \
            if self.failure_reason else ""
        res["reach_requirement"] = self.reach_requirement
        try:
            platform = self.platform
            repo_path = self.repo_path
            res["name"] = f"{platform}/{repo_path}"
        except (KeyError, ValueError):
            res["name"] = ""
        res["version"] = self.component_version
        return res
8,436
Python
.py
231
25.549784
78
0.555977
OSSQA-PUM/OSSQA
8
1
2
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,062
scorecard.py
OSSQA-PUM_OSSQA/src/main/data_types/sbom_types/scorecard.py
"""
This module contains classes that represent data from OpenSSF Scorecard
results.

Classes:
- Check: Represents a scorecard check.
- Scorecard: Represents the results of OpenSSF Scorecard.
- ScorecardChecks: Represents the checks that can be performed by OpenSSF
                   Scorecard on a dependency.
"""
from dataclasses import dataclass, asdict
from enum import StrEnum


class ScorecardChecks(StrEnum):
    """
    Represents the checks that can be performed by OpenSSF Scorecard on a
    dependency.

    https://securityscorecards.dev/#the-checks
    """
    # StrEnum members compare equal to their plain-string values, which is
    # what lets Scorecard._validate test raw check names against this enum.
    BINARY_ARTIFACTS = "Binary-Artifacts"
    BRANCH_PROTECTION = "Branch-Protection"
    CI_TESTS = "CI-Tests"
    CII_BEST_PRACTICES = "CII-Best-Practices"
    CODE_REVIEW = "Code-Review"
    CONTRIBUTORS = "Contributors"
    DANGEROUS_WORKFLOW = "Dangerous-Workflow"
    DEPENDENCY_UPDATE_TOOL = "Dependency-Update-Tool"
    FUZZING = "Fuzzing"
    LICENSE = "License"
    MAINTAINED = "Maintained"
    PACKAGING = "Packaging"
    PINNED_DEPENDENCIES = "Pinned-Dependencies"
    SAST = "SAST"
    SECURITY_POLICY = "Security-Policy"
    SIGNED_RELEASES = "Signed-Releases"
    TOKEN_PERMISSIONS = "Token-Permissions"
    VULNERABILITIES = "Vulnerabilities"
    WEBHOOKS = "Webhooks"

    @classmethod
    def all(cls):
        """
        Get all checks.

        Returns:
            list[ScorecardChecks]: The list of all checks.
        """
        return list(ScorecardChecks)

    @classmethod
    def title_hyphen_to_snake(cls, title: str) -> str:
        """
        Convert a title with hyphens to snake case.

        Args:
            title (str): The title to convert.

        Returns:
            str: The title in snake case.
        """
        return title.lower().replace("-", "_")


@dataclass(frozen=True, eq=True)
class Check:
    """
    Represents a scorecard check.

    Attributes:
        name (str): The name of the check.
        score (int): The score of the check.
        reason (str): The reason for the score.
        details (list[str]): The details of the check.
    """
    # NOTE(review): frozen+eq makes instances nominally hashable, but the
    # list field would raise TypeError on hash() — no caller here hashes
    # Check, so this is latent only.
    name: str
    score: int
    reason: str
    details: list[str]


@dataclass
class Scorecard:
    """
    Represents the results of OpenSSF Scorecard.

    https://securityscorecards.dev/#the-checks
    """
    date: str
    score: float
    checks: list[Check]

    """
    Old attributes for reference:
    binary_artifacts: Check
    branch_protection: Check
    ci_tests: Check
    cii_best_practices: Check
    code_review: Check
    contributors: Check
    dangerous_workflow: Check
    dependency_update_tool: Check
    fuzzing: Check
    license: Check
    maintained: Check
    packaging: Check
    pinned_dependencies: Check
    sast: Check
    security_policy: Check
    signed_releases: Check
    token_permissions: Check
    vulnerabilities: Check
    """

    def __init__(self, ssf_scorecard: dict):
        """
        Initializes the scorecard.

        Args:
            ssf_scorecard (dict): The OpenSSF Scorecard results.

        Raises:
            AssertionError: If the scorecard dict is invalid (see
                            _validate).
        """
        # Hand-written __init__/__eq__ take precedence over the
        # dataclass-generated ones.
        self._validate(ssf_scorecard)
        self.date = ssf_scorecard["date"]
        self.score = ssf_scorecard["score"]
        self.checks = []
        for check in ssf_scorecard["checks"]:
            name = check["name"]
            score = check["score"]
            reason = check["reason"]
            details = check["details"]
            self.checks.append(Check(name, score, reason, details))

    def __eq__(self, other) -> bool:
        # Equality ignores the date on purpose: two runs with identical
        # scores and checks compare equal.
        return isinstance(other, Scorecard) and \
            self.score == other.score and self.checks == other.checks

    def _validate(self, ssf_scorecard: dict) -> None:
        """
        Validates the scorecard.

        Args:
            ssf_scorecard (dict): The OpenSSF Scorecard results.

        Returns:
            bool: True if the scorecard is valid, False otherwise.

        Raises:
            AssertionError: If the scorecard is invalid.
        """
        assert isinstance(ssf_scorecard, dict), ("Scorecard must be a "
                                                 "dictionary.")

        assert "date" in ssf_scorecard, "Scorecard must contain a date."

        assert "score" in ssf_scorecard, "Scorecard must contain a score."
        score = ssf_scorecard.get("score")
        assert isinstance(score, (int, float)), "Score must be a number."
        # -1 is Scorecard's sentinel for a check that could not run.
        assert -1 <= score <= 10, f"Score: {score} must be between -1 and 10."

        assert "checks" in ssf_scorecard, "Scorecard must contain checks."
        checks = ssf_scorecard.get("checks")
        assert isinstance(checks, list), "Checks must be a list."
        for check in checks:
            assert isinstance(check, dict), "Check must be a dictionary."
            assert "name" in check, "Check must contain a name."
            assert check.get("name") in ScorecardChecks.all(), \
                f"Check name '{check.get('name')}' not a valid check."
            assert "score" in check, "Check must contain a score."
            check_score = check.get("score")
            assert isinstance(check_score, (int, float)), \
                "Check score must be a number."
            assert -1 <= check_score <= 10, \
                "Check score must be between -1 and 10."
            assert "reason" in check, "Check must contain a reason."
            assert "details" in check, "Check must contain details."

    def to_dict(self) -> dict:
        """
        Creates a dictionary representing the scorecard.

        Returns:
            dict: The scorecard as a dictionary.
        """
        return asdict(self)
5,654
Python
.py
157
27.88535
78
0.619352
OSSQA-PUM/OSSQA
8
1
2
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,063
models.py
OSSQA-PUM_OSSQA/src/backend/models.py
"""
This module handles the configuration of each database model and their
relations.
"""
import json

from flask_sqlalchemy import SQLAlchemy

db = SQLAlchemy()

# Association table for the many-to-many relation between dependencies
# and SBOMs (a dependency can appear in several SBOMs).
dependency_sbom = db.Table("dependency_sbom",
                           db.Column("dependency_id", db.Integer,
                                     db.ForeignKey("dependency.id"),
                                     primary_key=True),
                           db.Column("sbom_id", db.Integer,
                                     db.ForeignKey("sbom.id"),
                                     primary_key=True))


class Check(db.Model):
    """
    A model representing a OpenSSF Scorecard Check.
    """
    id = db.Column(db.Integer, autoincrement=True, primary_key=True)
    name = db.Column(db.String(255), unique=False)
    score = db.Column(db.Integer, unique=False)
    reason = db.Column(db.String(255), unique=False)
    details = db.Column(db.Text, unique=False)
    # Each check belongs to exactly one scorecard.
    scorecard_id = db.Column(db.Integer, db.ForeignKey("scorecard.id"),
                             nullable=False)

    def to_dict(self) -> dict:
        """
        Creates a dictionary representing the check, that can be used
        in responses.

        Returns
            dict: A dictionary representation of the check.
        """
        return {
            "name": self.name,
            "score": self.score,
            "reason": self.reason,
            "details": self.details
        }


class Scorecard(db.Model):
    """
    A model representing a OpenSSF Scorecard.
    """
    id = db.Column(db.Integer, autoincrement=True, primary_key=True)
    date = db.Column(db.String(10), unique=False)
    aggregate_score = db.Column(db.Double, unique=False)
    checks = db.relationship("Check", backref="scorecard", lazy=True)
    # Each scorecard belongs to exactly one dependency.
    dependency_id = db.Column(db.Integer, db.ForeignKey("dependency.id"),
                              nullable=False)

    def to_dict(self) -> dict:
        """
        Creates a dictionary representing the scorecard, that can be used
        in responses.

        Returns
            dict: A dictionary representation of the scorecard.
        """
        return {
            "date": self.date,
            "score": self.aggregate_score,
            "checks": [check.to_dict() for check in self.checks],
        }


class Dependency(db.Model):
    """
    A dependency with a score from OpenSSF Scorecard.
    """
    id = db.Column(db.Integer, autoincrement=True, primary_key=True)
    name = db.Column(db.String(255), unique=False)
    version = db.Column(db.String(255), unique=False)
    platform_path = db.Column(db.String(255), unique=False)
    # The raw SBOM component attributes, stored as a JSON string.
    component = db.Column(db.Text, unique=False)
    # uselist=False: one-to-one — a dependency has at most one scorecard.
    scorecard = db.relationship("Scorecard", backref="dependency",
                                lazy=True, uselist=False)
    sboms = db.relationship("SBOM", secondary=dependency_sbom,
                            lazy="subquery",
                            back_populates="dependencies")

    def to_dict(self) -> dict:
        """
        Creates a dictionary representing the dependency, that can be used
        in responses.

        Returns
            dict: A dictionary representation of the dependency.
        """
        # NOTE(review): raises if self.scorecard is None — callers appear
        # to only serialize scored dependencies; confirm.
        res = {}
        for key, value in json.loads(self.component).items():
            res[key] = value
        res["scorecard"] = self.scorecard.to_dict()
        return res


class SBOM(db.Model):
    """
    A software bill of materials (SBOM).
    """
    id = db.Column(db.Integer, autoincrement=True, primary_key=True)
    serial_number = db.Column(db.String(60), unique=False)
    version = db.Column(db.Integer, unique=False)
    repo_name = db.Column(db.String(255), unique=False)
    repo_version = db.Column(db.String(255), unique=False)
    dependencies = db.relationship("Dependency", secondary=dependency_sbom,
                                   lazy="subquery",
                                   back_populates="sboms")

    def to_dict(self) -> dict:
        """
        Creates a dictionary representing the SBOM, that can be used
        in responses.

        The result follows the CycloneDX 1.2 JSON layout.

        Returns
            dict: A dictionary representation of the SBOM.
        """
        return {
            "bomFormat": "CycloneDX",
            "specVersion": "1.2",
            "serialNumber": self.serial_number,
            "version": self.version,
            "metadata": {
                "component": {
                    "name": self.repo_name,
                    "version": self.repo_version,
                },
            },
            "components": [c.to_dict() for c in self.dependencies],
        }
4,551
Python
.py
116
28.939655
79
0.581237
OSSQA-PUM/OSSQA
8
1
2
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,064
routes.py
OSSQA-PUM_OSSQA/src/backend/routes.py
"""
This module handles the endpoints that the backend communication interface
interfaces with.

It also handles functionality for creating and updating various objects
in the database.
"""
import json
from flask import Flask, jsonify, request
from flask_sqlalchemy import SQLAlchemy
from backend.models import SBOM, Dependency, Scorecard, Check


def register_endpoints(app: Flask, db: SQLAlchemy):
    """
    Registers all endpoints with a flask app and its database.

    Args:
        app (Flask): The flask app.
        db (SQLAlchemy): The database.
    """

    @app.route("/sbom", methods=["POST"])
    def add_sbom():
        """
        Adds an SBOM to the database.

        Creates the SBOM row if it does not exist, then upserts every
        scored dependency: an existing dependency gets its old scorecard
        and checks deleted and replaced; a new one is created and linked
        to the SBOM.

        Args:
            json (object): A JSON object containing the SBOM data.

        Status codes:
            400: If the request is not of type JSON.
            201: If the SBOM could be added to the database.
        """
        if not request.is_json:
            return "", 400

        sbom_json = request.json
        # An SBOM is identified by (version, repo_name, repo_version),
        # not by its serial number.
        sbom: SBOM = SBOM.query.filter_by(
            version=sbom_json["version"],
            repo_name=sbom_json["repo_name"],
            repo_version=sbom_json["repo_version"],
        ).first()

        if not sbom:
            sbom = SBOM(
                serial_number=sbom_json["serialNumber"],
                version=sbom_json["version"],
                repo_name=sbom_json["repo_name"],
                repo_version=sbom_json["repo_version"],
            )
            db.session.add(sbom)

        for dep_json in sbom_json["scored_dependencies"]:
            dep: Dependency = Dependency.query.filter_by(
                platform_path=dep_json["platform_path"],
                name=dep_json["name"],
                version=dep_json["version"],
            ).first()

            # Get the attributes that are not part of the dependency
            # scorecard; they are persisted as one JSON blob.
            dep_component = {}
            for k, v in dep_json.items():
                if k not in (
                        "platform_path", "scorecard", "failure_reason",
                        "reach_requirement"):
                    dep_component[k] = v

            scorecard_json = dep_json["scorecard"]
            if dep:
                # Replace the old scorecard: delete it and its checks
                # before attaching the fresh one below.
                scorecard: Scorecard = dep.scorecard
                for check in scorecard.checks:
                    db.session.delete(check)
                db.session.delete(scorecard)
            else:
                dep = Dependency(
                    platform_path=dep_json["platform_path"],
                    name=dep_json["name"],
                    version=dep_json["version"],
                    component=json.dumps(dep_component),
                )
                sbom.dependencies.append(dep)
                db.session.add(dep)

            scorecard = Scorecard(
                date=scorecard_json["date"],
                aggregate_score=scorecard_json["score"],
            )
            dep.scorecard = scorecard
            db.session.add(scorecard)

            for check_json in scorecard_json["checks"]:
                # NOTE(review): Check.details is never populated here even
                # though the model has a details column — confirm whether
                # check details should be persisted.
                check = Check(
                    name=check_json["name"],
                    score=check_json["score"],
                    reason=check_json["reason"],
                )
                scorecard.checks.append(check)
                db.session.add(check)

        # Single commit so the SBOM and all its dependencies are stored
        # atomically.
        db.session.commit()
        return "", 201

    @app.route("/sbom", methods=["GET"])
    def get_sbom_names():
        """
        Fetches the name of every SBOM in the database.

        Returns:
            json (array): The list of SBOM names (deduplicated).

        Status codes:
            200: If the list of SBOM names could be created.
        """
        names = set()
        for sbom in SBOM.query.all():
            names.add(sbom.repo_name)
        return jsonify(list(names)), 200

    @app.route("/sbom/<path:repo_name>", methods=["GET"])
    def get_sboms_by_name(repo_name: str):
        """
        Fetches a list of SBOMs with a specific name.

        Args:
            repo_name (str): The name to query the database with.

        Returns:
            json (array): The list of SBOMs.

        Status codes:
            200: If the list of SBOMs could be created.
        """
        sboms = SBOM.query.filter_by(repo_name=repo_name).all()
        sbom_dicts = [sbom.to_dict() for sbom in sboms]
        return jsonify(sbom_dicts), 200

    @app.route("/dependency/existing", methods=["GET"])
    def get_existing_dependencies():
        """
        Fetches all dependencies that have an existing match in the
        database.

        Args:
            json (array): The list of dependency dictionaries, containing
                          the name, version, and platform_path.

        Returns:
            json (array): The list of dependecies.

        Status codes:
            400: If the request is not of type JSON.
            200: If the list of dependencies could be created.
        """
        if not request.is_json:
            return jsonify([]), 400

        dependencies = []
        for dep in request.json:
            version = dep["version"]
            name = dep["name"]
            try:
                path = dep["platform_path"]
            except KeyError:
                # Entries without a platform_path cannot be matched.
                continue
            dependency = Dependency.query.filter_by(
                platform_path=path, version=version, name=name).first()
            if dependency:
                dependencies.append(dependency.to_dict())
        return jsonify(dependencies), 200


def register_test_endpoints(app: Flask, db: SQLAlchemy):
    """
    Registers all test endpoints with a flask app and its database.

    Args:
        app (Flask): A flask app.
        db (SQLAlchemy): A database.
    """

    @app.route("/test/reset", methods=["POST"])
    def reset_database():
        """
        Drops and recreates the database.

        Status codes:
            200: If the database could be dropped and recreated.
        """
        db.drop_all()
        db.create_all()
        return "", 200
6,066
Python
.py
161
26.093168
78
0.547591
OSSQA-PUM/OSSQA
8
1
2
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,065
server.py
OSSQA-PUM_OSSQA/src/backend/server.py
"""
This module handles the creation and configuration of the flask app,
which is our backend and database.
"""
from flask import Flask

from backend.models import db
from backend.routes import register_endpoints, register_test_endpoints


def _build_app(database_uri: str, testing: bool) -> Flask:
    """
    Build and configure a flask app.

    Shared by create_app and create_test_app so the setup steps (database
    binding, endpoint registration, context push, table creation) stay in
    one place.

    Args:
        database_uri (str): The SQLAlchemy database URI to bind.
        testing (bool): Whether to enable TESTING mode and register the
                        test-only endpoints.

    Returns:
        Flask: The configured app.
    """
    app = Flask(__name__)
    app.config["SQLALCHEMY_DATABASE_URI"] = database_uri
    if testing:
        app.config["TESTING"] = True
    db.init_app(app)
    register_endpoints(app, db)
    if testing:
        register_test_endpoints(app, db)
    # Push an app context so db.create_all() (and later module-level DB
    # access) has an active application bound.
    app.app_context().push()
    db.create_all()
    return app


def create_test_app() -> Flask:
    """
    Creates a flask app that can be used for testing.

    Returns:
        Flask: The test app.
    """
    return _build_app("sqlite:///./test.db", testing=True)


def create_app() -> Flask:
    """
    Creates a flask app that can be used in production.

    Returns:
        Flask: The app.
    """
    return _build_app("sqlite:///./our.db", testing=False)
1,018
Python
.py
35
24.485714
70
0.664603
OSSQA-PUM/OSSQA
8
1
2
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,066
test_dependency_grader.py
OSSQA-PUM_OSSQA/src/tests/main/unit/test_dependency_grader.py
from main.data_types.sbom_types.sbom import Sbom from main.data_types.sbom_types.dependency import Dependency from main.data_types.sbom_types.scorecard import Scorecard, ScorecardChecks from main.frontend.dependency_grader import grade_dependencies from main.data_types.user_requirements import UserRequirements minimal_sbom_dict = { "bomFormat": "CycloneDX", "specVersion": "1.2", "serialNumber": "urn:uuid:d7a0ac67-e0f8-4342-86c6-801a02437636", "version": 1, "metadata": { "timestamp": "2021-05-16T17:10:53+02:00", "tools": [ ], "component": { "bom-ref": "pkg:golang/github.com/ProtonMail/[email protected]", "type": "application", "name": "github.com/ProtonMail/proton-bridge", "version": "v1.8.0", "purl": "pkg:golang/github.com/ProtonMail/[email protected]", "externalReferences": [ { "url": "https://github.com/ProtonMail/proton-bridge", "type": "vcs" } ] } }, "components": [ ] } scorecard_1: Scorecard = Scorecard({ "date": "2021-05-16T17:10:53+02:00", "score": 10, "checks": [ { "name": ScorecardChecks.DEPENDENCY_UPDATE_TOOL.value, "score": 8, "reason": "Check", "details": "Check" }, { "name": ScorecardChecks.FUZZING.value, "score": 10, "reason": "Check", "details": "Check" }, { "name": ScorecardChecks.LICENSE.value, "score": 5, "reason": "Check", "details": "Check" } ] }) dep_one = Dependency( { "name": "component1", } ) dep_one.scorecard = scorecard_1 scorecard_2: Scorecard = Scorecard({ "date": "2021-05-16T17:10:53+02:00", "score": 10, "checks": [ { "name": ScorecardChecks.DEPENDENCY_UPDATE_TOOL.value, "score": 8, "reason": "Check", "details": "Check" }, { "name": ScorecardChecks.FUZZING.value, "score": 8, "reason": "Check", "details": "Check" }, { "name": ScorecardChecks.LICENSE.value, "score": 9, "reason": "Check", "details": "Check" } ] }) dep_two = Dependency( { "name": "component2", } ) dep_two.scorecard = scorecard_2 scorecard_3: Scorecard = Scorecard({ "date": "2021-05-16T17:10:53+02:00", "score": 10, "checks": [ { "name": 
ScorecardChecks.DEPENDENCY_UPDATE_TOOL.value, "score": 10, "reason": "Check", "details": "Check" }, { "name": ScorecardChecks.FUZZING.value, "score": 10, "reason": "Check", "details": "Check" } ] }) dep_three = Dependency( { "name": "component3", } ) dep_three.scorecard = scorecard_3 scorecard_4: Scorecard = Scorecard({ "date": "2021-05-16T17:10:53+02:00", "score": 10, "checks": [ { "name": ScorecardChecks.DEPENDENCY_UPDATE_TOOL.value, "score": 8, "reason": "Check", "details": "Check" }, { "name": ScorecardChecks.FUZZING.value, "score": 6, "reason": "Check", "details": "Check" }, { "name": ScorecardChecks.LICENSE.value, "score": 9, "reason": "Check", "details": "Check" }, { "name": ScorecardChecks.BINARY_ARTIFACTS.value, "score": 1, "reason": "Check", "details": "Check" }, { "name": ScorecardChecks.CII_BEST_PRACTICES.value, "score": 5, "reason": "Check", "details": "Check" }, { "name": ScorecardChecks.CI_TESTS.value, "score": 6, "reason": "Check", "details": "Check" } ] }) dep_four = Dependency( { "name": "component4", } ) dep_four.scorecard = scorecard_4 def test_single_pass(): """ Test that the reach_requirement field is set to "Yes" when the dependency meets the user requirements """ sbom: Sbom = Sbom(minimal_sbom_dict) sbom.dependency_manager.update([dep_one]) user_requirements_pass: UserRequirements = UserRequirements( { "dependency_update_tool": 8, "fuzzing": 10, "license": 3 } ) graded_sbom = grade_dependencies(sbom, user_requirements_pass) scored_dependencies = graded_sbom.get_scored_dependencies() assert scored_dependencies[0].reach_requirement == "Yes" def test_single_fail(): """ Test that the reach_requirement field is set to "No" when the dependency does not meet the user requirements """ sbom: Sbom = Sbom(minimal_sbom_dict) sbom.dependency_manager.update([dep_one]) user_requirements_fail: UserRequirements = UserRequirements( { "dependency_update_tool": 8, "fuzzing": 10, "license": 6 } ) graded_sbom = grade_dependencies(sbom, user_requirements_fail) 
scored_dependencies = graded_sbom.get_scored_dependencies() assert scored_dependencies[0].reach_requirement == "No" def test_single_not_found(): """ Test that the reach_requirement field is set to "Test result not found" when the dependency does not have a scorecard """ sbom: Sbom = Sbom(minimal_sbom_dict) sbom.dependency_manager.update([dep_one]) user_requirements: UserRequirements = UserRequirements( { "dependency_update_tool": 4, "fuzzing": 10, "license": 5, "binary_artifacts": 2 } ) graded_sbom = grade_dependencies(sbom, user_requirements) scored_dependencies = graded_sbom.get_scored_dependencies() assert scored_dependencies[0].reach_requirement == "Test result not found" def test_single_edge_case(): """ Test that the reach_requirement field is set correctly when along the edge """ sbom: Sbom = Sbom(minimal_sbom_dict) sbom.dependency_manager.update([dep_four]) user_requirements: UserRequirements = UserRequirements( { "dependency_update_tool": 4, "fuzzing": 10, "license": 7, "binary_artifact": 4, "cii_best_practices": 5, "ci_tests": 0 } ) graded_sbom = grade_dependencies(sbom, user_requirements) scored_dependencies = graded_sbom.get_scored_dependencies() assert scored_dependencies[0].reach_requirement == "No" user_requirements: UserRequirements = UserRequirements( { "dependency_update_tool": 8, "fuzzing": 4, "license": 9, "binary_artifact": 1, "cii_best_practices": 5, "ci_tests": 6 } ) graded_sbom = grade_dependencies(sbom, user_requirements) scored_dependencies = graded_sbom.get_scored_dependencies() assert scored_dependencies[0].reach_requirement == "Yes" user_requirements: UserRequirements = UserRequirements( { "dependency_update_tool": 8, "fuzzing": 4, "license": 9, "packaging": 1, "binary_artifact": 1, "cii_best_practices": 5, "ci_tests": 6 } ) graded_sbom = grade_dependencies(sbom, user_requirements) scored_dependencies = graded_sbom.get_scored_dependencies() assert scored_dependencies[0].reach_requirement == "Test result not found" def test_multiple(): 
""" Test that the reach_requirement field is set to the correct value when multiple dependencies are graded """ sbom: Sbom = Sbom(minimal_sbom_dict) sbom.dependency_manager.update([dep_one, dep_two, dep_three, dep_four]) user_requirements: UserRequirements = UserRequirements( { "dependency_update_tool": 8, "fuzzing": 6, "license": 7 } ) graded_sbom = grade_dependencies(sbom, user_requirements) scored_dependencies = graded_sbom.get_scored_dependencies() print(len(scored_dependencies)) assert scored_dependencies[0].reach_requirement == "No" assert scored_dependencies[1].reach_requirement == "Yes" assert scored_dependencies[2].reach_requirement == "Test result not found" assert scored_dependencies[3].reach_requirement == "Yes"
8,917
Python
.py
282
22.741135
79
0.559907
OSSQA-PUM/OSSQA
8
1
2
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,067
sboms.py
OSSQA-PUM_OSSQA/src/tests/main/unit/sboms/sboms.py
from pathlib import Path DIRECTORY = Path(__file__).parent PATHS = [ DIRECTORY / "example-SBOM.json", ] SBOM_COMPONENT_URLS = [ "github.com/ProtonMail/proton-bridge", "github.com/allan-simon/go-singleinstance", "github.com/andybalholm/cascadia", "gitsnub.com/ProtonMail/proton-bridge", "", "github.com/ProtonMail/", "a" ] DUMMY_DEPENDENCIES = [ { "type": "library-test", "bom-ref": "ahash 0.8.7 (registry+https://github.com/rust-lang/crates.io-index)", "name": "ahash", "version": "0.8.7", "description": "A non-cryptographic hash function using AES-NI for high performance", "scope": "required", "licenses": [ { "license": { "id": "MIT" } } ], "purl": "pkg:cargo/[email protected]", "externalReferences": [ { "type": "documentation", "url": "https://docs.rs/ahash" }, { "type": "vcs", "url": "https://github.com/tkaitchuck/ahash" } ] }, { "type": "library-test", "bom-ref": "bit-set 0.5.33 (registry+https://github.com/rust-lang/crates.io-index)", "name": "bit-set", "version": "v0.5.33", "description": "A set of bits", "scope": "required", "licenses": [ { "license": { "id": "MIT" } } ], "purl": "pkg:cargo/[email protected]", "externalReferences": [ { "type": "documentation", "url": "https://contain-rs.github.io/bit-set/bit_set" }, { "type": "website", "url": "https://github.com/contain-rs/bit-set" }, { "type": "vcs", "url": "https://github.com/contain-rs/bit-set" } ] }, { "type": "library", "bom-ref": "async-stream 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "name": "async-stream", "version": "0.3.5", "description": "Asynchronous streams using async & await notation", "scope": "required", "licenses": [ { "license": { "id": "MIT" } } ], "purl": "pkg:cargo/[email protected]", "externalReferences": [ { "type": "vcs", "url": "https://github.com/tokio-rs/async-stream" } ] }, { "bom-ref": "pkg:golang/github.com/allan-simon/[email protected]", "type": "library", "name": "github.com/allan-simon/go-singleinstance", "version": "1.0.0", "scope": "required", "hashes": [ 
{ "alg": "SHA-256", "content": "99971ad3f1d9fd75973fdb7191f765d861fa994cc1a80972273deee4b83c7ee0" } ], "licenses": [ { "license": { "id": "MIT", "url": "https://spdx.org/licenses/MIT.html" } } ], "purl": "pkg:golang/github.com/allan-simon/[email protected]", "externalReferences": [ { "url": "https://github.com/allan-simon/go-singleinstance", "type": "vcs" } ] }, { "bom-ref": "pkg:golang/github.com/andybalholm/[email protected]", "type": "library", "name": "github.com/andybalholm/cascadia", "version": "v1.1.0", "scope": "required", "hashes": [ { "alg": "SHA-256", "content": "06eb8eeac49f40d151bb52e9a606c3db91ebdaf2d85b6e49bf11ece73cec2d3a" } ], "licenses": [ { "license": { "id": "BSD-2-Clause", "url": "https://spdx.org/licenses/BSD-2-Clause.html" } } ], "purl": "pkg:golang/github.com/andybalholm/[email protected]", "externalReferences": [ { "url": "https://github.com/andybalholm/cascadia", "type": "vcs" } ] }, { "type": "library", "bom-ref": "bytes 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "name": "bytes", "version": "1.5.0", "description": "Types and traits for working with bytes", "scope": "required", "licenses": [ { "license": { "id": "MIT" } } ], "purl": "pkg:cargo/[email protected]", "externalReferences": [ { "type": "vcs", "url": "https://github.com/tokio-rs/bytes" } ] }, { "type": "library", "bom-ref": "config-reader 0.1.0 (path+file:///janus/lib/config-reader)", "name": "config-reader", "version": "0.1.0", "scope": "required", "purl": "pkg:cargo/[email protected]?download_url=file%3A%2F%2F..%252F..%252F..%252Flib%252Fconfig-reader" }, { "type": "library", "bom-ref": "fancy-regex 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", "name": "fancy-regex", "version": "0.11.0", "description": "An implementation of regexes, supporting a relatively rich set of features, including backreferences and look-around.", "scope": "required", "licenses": [ { "license": { "id": "MIT" } } ], "purl": "pkg:cargo/[email protected]", 
"externalReferences": [ { "type": "documentation", "url": "https://docs.rs/fancy-regex" }, { "type": "vcs", "url": "https://github.com/fancy-regex/fancy-regex" } ] }, { "type": "library", "bom-ref": "fastrand 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)", "name": "fastrand", "version": "2.0.1", "description": "A simple and fast random number generator", "scope": "required", "licenses": [ { "license": { "id": "MIT" } } ], "purl": "pkg:cargo/[email protected]", "externalReferences": [ { "type": "vcs", "url": "https://github.com/smol-rs/fastrand" } ] }, { "type": "library", "bom-ref": "fnv 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)", "name": "fnv", "version": "1.0.7", "description": "Fowler\u2013Noll\u2013Vo hash function", "scope": "required", "licenses": [ { "license": { "id": "MIT" } } ], "purl": "pkg:cargo/[email protected]", "externalReferences": [ { "type": "documentation", "url": "https://doc.servo.org/fnv/" }, { "type": "vcs", "url": "https://github.com/servo/rust-fnv" } ] }, { "type": "library", "bom-ref": "form_urlencoded 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "name": "form_urlencoded", "version": "1.2.1", "description": "Parser and serializer for the application/x-www-form-urlencoded syntax, as used by HTML forms.", "scope": "required", "licenses": [ { "license": { "id": "MIT" } } ], "purl": "pkg:cargo/[email protected]", "externalReferences": [ { "type": "vcs", "url": "https://github.com/servo/rust-url" } ] }, { "type": "library", "bom-ref": "fraction 0.13.1 (registry+https://github.com/rust-lang/crates.io-index)", "name": "fraction", "version": "0.13.1", "description": "Lossless fractions and decimals; drop-in float replacement", "scope": "required", "licenses": [ { "license": { "id": "MIT" } } ], "purl": "pkg:cargo/[email protected]", "externalReferences": [ { "type": "documentation", "url": "https://docs.rs/fraction/" }, { "type": "website", "url": "https://github.com/dnsl48/fraction.git" }, { 
"type": "vcs", "url": "https://github.com/dnsl48/fraction.git" } ] }, { "bom-ref": "pkg:golang/github.com/chzyer/[email protected]", "type": "library", "name": "github.com/chzyer/test", "version": "v0.0.0-20180213035817-a1ea475d72b1", "scope": "required", "hashes": [ { "alg": "SHA-256", "content": "abbeb7a9ff61b8dd7590341abd6b2865724d5b7c44138249c876b9436e7fb1df" } ], "licenses": [ { "license": { "id": "MIT", "url": "https://spdx.org/licenses/MIT.html" } } ], "purl": "pkg:golang/github.com/chzyer/[email protected]", "externalReferences": [ { "url": "https://github.com/chzyer/test", "type": "vcs" } ] }, { "bom-ref": "pkg:golang/github.com/aymerick/[email protected]", "type": "library", "name": "github.com/aymerick/raymond", "version": "v2.0.3-0.20180322193309-b565731e1464", "scope": "required", "hashes": [ { "alg": "SHA-256", "content": "901a7783930a0426984323b3db44d35643a9cadacc01a92f2fb43fcf530b35cf" } ], "licenses": [ { "license": { "id": "MIT", "url": "https://spdx.org/licenses/MIT.html" } } ], "purl": "pkg:golang/github.com/aymerick/[email protected]", "externalReferences": [ { "url": "https://github.com/aymerick/raymond", "type": "vcs" } ] } ] DUMMY_SBOM = { "bomFormat": "CycloneDX", "specVersion": "1.2", "serialNumber": "urn:uuid:d7a0ac67-e0f8-4342-86c6-801a02437636", "version": 1, "metadata": { "timestamp": "2021-05-16T17:10:53+02:00", "tools": [ { "vendor": "CycloneDX", "name": "cyclonedx-gomod", "version": "v0.6.1", "hashes": [ { "alg": "MD5", "content": "a92d9f6145a94c2c7ad8489d84301eb9" }, { "alg": "SHA-1", "content": "a5af6c5ef3f21bf5425c680b64acf57cc6a90c69" }, { "alg": "SHA-256", "content": "dc215a651772356eca763d6fe77169379c1cc25c2bb89c7d6df2e2170c3972ab" }, { "alg": "SHA-512", "content": "387953ab509c31bf352693de9df617650c87494e607119bc284b91ba9a0a2d284a2e96946c272dc284c4370875412eea855bc30351faedd099dbdbed209e4636" } ] } ], "component": { "bom-ref": "pkg:golang/github.com/ProtonMail/[email protected]", "type": "application", "name": 
"github.com/ProtonMail/proton-bridge", "version": "v1.8.0", "purl": "pkg:golang/github.com/ProtonMail/[email protected]", "externalReferences": [ { "url": "https://github.com/ProtonMail/proton-bridge", "type": "vcs" } ] } }, "components": [ { "bom-ref": "pkg:golang/github.com/allan-simon/[email protected]", "type": "library", "name": "github.com/allan-simon/go-singleinstance", "version": "v0.0.0-20160830203053-79edcfdc2dfc", "scope": "required", "hashes": [ { "alg": "SHA-256", "content": "99971ad3f1d9fd75973fdb7191f765d861fa994cc1a80972273deee4b83c7ee0" } ], "licenses": [ { "license": { "id": "MIT", "url": "https://spdx.org/licenses/MIT.html" } } ], "purl": "pkg:golang/github.com/allan-simon/[email protected]", "externalReferences": [ { "url": "https://github.com/allan-simon/go-singleinstance", "type": "vcs" } ] } ] } BAD_SBOMS = [ { "bomFormat": "CycloneDX", "specVersion": "1.2", "serialNumber": "urn:uuid:d7a0ac67-e0f8-4342-86c6-801a02437636", "metadata": { "timestamp": "2021-05-16T17:10:53+02:00", "tools": [ { "vendor": "CycloneDX", "name": "cyclonedx-gomod", "version": "v0.6.1", "hashes": [ { "alg": "MD5", "content": "a92d9f6145a94c2c7ad8489d84301eb9" }, { "alg": "SHA-1", "content": "a5af6c5ef3f21bf5425c680b64acf57cc6a90c69" }, { "alg": "SHA-256", "content": "dc215a651772356eca763d6fe77169379c1cc25c2bb89c7d6df2e2170c3972ab" }, { "alg": "SHA-512", "content": "387953ab509c31bf352693de9df617650c87494e607119bc284b91ba9a0a2d284a2e96946c272dc284c4370875412eea855bc30351faedd099dbdbed209e4636" } ] } ], "component": { "bom-ref": "pkg:golang/github.com/ProtonMail/[email protected]", "type": "application", "name": "github.com/ProtonMail/proton-bridge", "version": "v1.8.0", "purl": "pkg:golang/github.com/ProtonMail/[email protected]", "externalReferences": [ { "url": "https://github.com/ProtonMail/proton-bridge", "type": "vcs" } ] } }, "components": [ { "bom-ref": "pkg:golang/github.com/allan-simon/[email protected]", "type": "library", "name": 
"github.com/allan-simon/go-singleinstance", "version": "v0.0.0-20160830203053-79edcfdc2dfc", "scope": "required", "hashes": [ { "alg": "SHA-256", "content": "99971ad3f1d9fd75973fdb7191f765d861fa994cc1a80972273deee4b83c7ee0" } ], "licenses": [ { "license": { "id": "MIT", "url": "https://spdx.org/licenses/MIT.html" } } ], "purl": "pkg:golang/github.com/allan-simon/[email protected]", "externalReferences": [ { "url": "https://github.com/allan-simon/go-singleinstance", "type": "vcs" } ] }, { "bom-ref": "pkg:golang/github.com/andybalholm/[email protected]", "type": "library", "name": "github.com/andybalholm/cascadia", "version": "v1.1.0", "scope": "required", "hashes": [ { "alg": "SHA-256", "content": "06eb8eeac49f40d151bb52e9a606c3db91ebdaf2d85b6e49bf11ece73cec2d3a" } ], "licenses": [ { "license": { "id": "BSD-2-Clause", "url": "https://spdx.org/licenses/BSD-2-Clause.html" } } ], "purl": "pkg:golang/github.com/andybalholm/[email protected]", "externalReferences": [ { "url": "https://github.com/andybalholm/cascadia", "type": "vcs" } ] }, ] }, { "bomFormat": "CycloneDX", "specVersion": "1.2", "serialNumber": "urn:uuidd7a0ac67-e0f8-4342-86c6-801a02437636", "version": 1, "metadata": { "timestamp": "2021-05-16T17:10:53+02:00", "tools": [ { "vendor": "CycloneDX", "name": "cyclonedx-gomod", "version": "v0.6.1", "hashes": [ { "alg": "MD5", "content": "a92d9f6145a94c2c7ad8489d84301eb9" }, { "alg": "SHA-1", "content": "a5af6c5ef3f21bf5425c680b64acf57cc6a90c69" }, { "alg": "SHA-256", "content": "dc215a651772356eca763d6fe77169379c1cc25c2bb89c7d6df2e2170c3972ab" }, { "alg": "SHA-512", "content": "387953ab509c31bf352693de9df617650c87494e607119bc284b91ba9a0a2d284a2e96946c272dc284c4370875412eea855bc30351faedd099dbdbed209e4636" } ] } ], "component": { "bom-ref": "pkg:golang/github.com/ProtonMail/[email protected]", "type": "application", "name": "github.com/ProtonMail/proton-bridge", "version": "v1.8.0", "purl": "pkg:golang/github.com/ProtonMail/[email protected]", 
"externalReferences": [ { "url": "https://github.com/ProtonMail/proton-bridge", "type": "vcs" } ] } }, "components": [ { "bom-ref": "pkg:golang/github.com/allan-simon/[email protected]", "type": "library", "name": "github.com/allan-simon/go-singleinstance", "version": "v0.0.0-20160830203053-79edcfdc2dfc", "scope": "required", "hashes": [ { "alg": "SHA-256", "content": "99971ad3f1d9fd75973fdb7191f765d861fa994cc1a80972273deee4b83c7ee0" } ], "licenses": [ { "license": { "id": "MIT", "url": "https://spdx.org/licenses/MIT.html" } } ], "purl": "pkg:golang/github.com/allan-simon/[email protected]", "externalReferences": [ { "url": "https://github.com/allan-simon/go-singleinstance", "type": "vcs" } ] }, { "bom-ref": "pkg:golang/github.com/andybalholm/[email protected]", "type": "library", "name": "github.com/andybalholm/cascadia", "version": "v1.1.0", "scope": "required", "hashes": [ { "alg": "SHA-256", "content": "06eb8eeac49f40d151bb52e9a606c3db91ebdaf2d85b6e49bf11ece73cec2d3a" } ], "licenses": [ { "license": { "id": "BSD-2-Clause", "url": "https://spdx.org/licenses/BSD-2-Clause.html" } } ], "purl": "pkg:golang/github.com/andybalholm/[email protected]", "externalReferences": [ { "url": "https://github.com/andybalholm/cascadia", "type": "vcs" } ] }, ] } ]
19,516
Python
.py
650
19.852308
169
0.483934
OSSQA-PUM/OSSQA
8
1
2
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,068
scorecards.py
OSSQA-PUM_OSSQA/src/tests/main/unit/scorecards/scorecards.py
from pathlib import Path from main.data_types.sbom_types.scorecard import ScorecardChecks DIRECTORY = Path(__file__).parent PATHS = [ DIRECTORY / "docker.json", DIRECTORY / "pad-left.json", DIRECTORY / "pytorch.json", DIRECTORY / "react.json", DIRECTORY / "OSSQA-PUM-scorecard.json" ] UNPARSABLE_SCORECARDS = [ { "date": "2021-10-10", "checks": [ { "name": ScorecardChecks.BINARY_ARTIFACTS, "score": 0, "reason": "reason1", "details": [] }, { "name": ScorecardChecks.BRANCH_PROTECTION, "score": 0, "reason": "reason2", "details": [] }, { "name": ScorecardChecks.CI_TESTS, "score": 0, "reason": "reason3", "details": [] } ] }, { "score": 0, "checks": [ { "name": ScorecardChecks.CONTRIBUTORS, "score": 0, "reason": "reason1", "details": [] }, { "name": ScorecardChecks.DANGEROUS_WORKFLOW, "score": 0, "reason": "reason2", "details": [] }, { "name": ScorecardChecks.DEPENDENCY_UPDATE_TOOL, "score": 0, "reason": "reason3", "details": [] } ] }, { "date": "2021-10-10", "score": 0, "checks": [ { "score": 0, "reason": "reason1", "details": [] }, { "name": ScorecardChecks.VULNERABILITIES, "score": 0, "reason": "reason2", "details": [] }, { "name": ScorecardChecks.SAST, "score": 0, "reason": "reason3", "details": [] } ] }, { "date": "2021-10-10", "score": 0, "checks": [ { "name": ScorecardChecks.SECURITY_POLICY, "reason": "reason1", "details": [] }, { "name": ScorecardChecks.SIGNED_RELEASES, "score": 0, "reason": "reason2", "details": [] }, { "name": ScorecardChecks.TOKEN_PERMISSIONS, "score": 0, "reason": "reason3", "details": [] } ] }, { "date": "2021-10-10", "score": 0, "checks": [ { "name": ScorecardChecks.VULNERABILITIES, "score": 0, "details": [] }, { "name": ScorecardChecks.BINARY_ARTIFACTS, "score": 0, "reason": "reason2", "details": [] }, { "name": ScorecardChecks.CODE_REVIEW, "score": 0, "reason": "reason3", "details": [] } ] }, { "date": "2021-10-10", "score": 0, "checks": [ { "name": ScorecardChecks.CII_BEST_PRACTICES, "score": 0, "reason": "reason1", }, { "name": 
ScorecardChecks.CII_BEST_PRACTICES, "score": 0, "reason": "reason2", "details": [] }, { "name": ScorecardChecks.CII_BEST_PRACTICES, "score": 0, "reason": "reason3", "details": [] } ] }, { "date": "2021-10-10", "score": 0, } ] OUT_OF_BOUNDS_SCORECARDS = [ { "date": "2021-10-10", "score": -2, "checks": [ { "name": ScorecardChecks.CII_BEST_PRACTICES, "score": 0, "reason": "reason1", "details": [] }, { "name": ScorecardChecks.CII_BEST_PRACTICES, "score": 0, "reason": "reason2", "details": [] }, { "name": ScorecardChecks.CII_BEST_PRACTICES, "score": 0, "reason": "reason3", "details": [] } ] }, { "date": "2021-10-10", "score": 11, "checks": [ { "name": ScorecardChecks.CII_BEST_PRACTICES, "score": 0, "reason": "reason1", "details": [] }, { "name": ScorecardChecks.CII_BEST_PRACTICES, "score": 0, "reason": "reason2", "details": [] }, { "name": ScorecardChecks.CII_BEST_PRACTICES, "score": 0, "reason": "reason3", "details": [] } ] }, { "date": "2021-10-10", "score": 0, "checks": [ { "name": ScorecardChecks.CII_BEST_PRACTICES, "score": -2, "reason": "reason1", "details": [] } ] }, { "date": "2021-10-10", "score": 0, "checks": [ { "name": ScorecardChecks.CII_BEST_PRACTICES, "score": 11, "reason": "reason1", "details": [] } ] } ]
5,882
Python
.py
228
13.144737
64
0.351567
OSSQA-PUM/OSSQA
8
1
2
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,069
test_event.py
OSSQA-PUM_OSSQA/src/tests/main/unit/data_types/test_event.py
import pytest from main.data_types.event import Event def test_event_initialization(): """ Test the initialization of the Event class. """ event = Event() assert isinstance(event, Event) assert isinstance(event._callbacks, list) def test_event_subscribe_int(): """ Test the subscribe method of the Event class with an integer data type. """ event = Event() test_value = 42 def callback_function(value: int): assert value == test_value event.subscribe(callback_function) assert callback_function in event._callbacks assert event.invoke(test_value) is None def test_event_subscribe_str(): """ Test the subscribe method of the Event class with a string data type. """ event = Event() test_value = "Hello, World!" def callback_function(value: str) -> bool: assert value == test_value event.subscribe(callback_function) assert callback_function in event._callbacks assert event.invoke(test_value) is None def test_event_unsubscribe(): """ Test the unsubscribe method of the Event class. """ event = Event() test_value = 42 def callback_function(value: int): # Should not enter this block. assert False event.subscribe(callback_function) assert callback_function in event._callbacks event.unsubscribe(callback_function) assert callback_function not in event._callbacks assert len(event._callbacks) == 0 assert event.invoke(test_value) is None def test_event_invoke_multiple_subscribers(): """ Test the invoke method of the Event class with multiple subscribers. 
""" event = Event() test_value1 = 42 test_value2 = 43 test_value3 = 44 def callback_function1(value: int): assert value == test_value1 def callback_function2(value: int): assert value != test_value2 def callback_function3(value: int): assert value != test_value3 event.subscribe(callback_function1) event.subscribe(callback_function2) event.subscribe(callback_function3) assert callback_function1 in event._callbacks assert callback_function2 in event._callbacks assert callback_function3 in event._callbacks assert event.invoke(test_value1) is None def test_event_multiple_unsubscribe(): """ Test the unsubscribe method of the Event class with multiple subscribers. """ event = Event() test_value = 42 def callback_function(value: int): # Should not enter this block. assert False event.subscribe(callback_function) assert callback_function in event._callbacks event.unsubscribe(callback_function) assert callback_function not in event._callbacks assert len(event._callbacks) == 0 with pytest.raises(ValueError): event.unsubscribe(callback_function) assert len(event._callbacks) == 0 assert event.invoke(test_value) is None def test_event_invoke_with_data_type(): """ Test the invoke method of the Event class with a custom data type. """ class TestDataType: """Dummy TestDataType class.""" def __init__(self, value: int) -> None: self.value = value event = Event() test_value = TestDataType(42) def callback_function(value: TestDataType): assert value.value == test_value.value event.subscribe(callback_function) assert callback_function in event._callbacks assert event.invoke(test_value) is None
3,513
Python
.py
100
29.56
77
0.701183
OSSQA-PUM/OSSQA
8
1
2
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,070
test_user_requirements.py
OSSQA-PUM_OSSQA/src/tests/main/unit/data_types/test_user_requirements.py
import pytest from main.data_types.user_requirements import (UserRequirements, RequirementsType) REQ_TYPES = [req_type.value for req_type in RequirementsType] @pytest.fixture(name="valid_empty_reqs") def valid_empty_reqs_fixture() -> dict: return {} @pytest.fixture(name="valid_filled_reqs") def valid_filled_reqs_fixture() -> dict: return { RequirementsType.VULNERABILITIES: 4, RequirementsType.DEPENDENCY_UPDATE_TOOL: 7, RequirementsType.MAINTAINED: 5, RequirementsType.SECURITY_POLICY: 10, RequirementsType.LICENSE: -1, RequirementsType.CI_TESTS: 7, RequirementsType.FUZZING: 0, RequirementsType.SAST: 2, RequirementsType.BINARY_ARTIFACTS: 3, RequirementsType.BRANCH_PROTECTION: 9, RequirementsType.DANGEROUS_WORKFLOW: 5, RequirementsType.CODE_REVIEW: 6, RequirementsType.CONTRIBUTORS: 3, RequirementsType.PINNED_DEPENDENCIES: 1, RequirementsType.TOKEN_PERMISSIONS: 1, RequirementsType.PACKAGING: 5, RequirementsType.SIGNED_RELEASES: 8 } @pytest.fixture(name="invalid_type_reqs") def invalid_type_reqs_fixture(valid_filled_reqs: dict) -> list[dict]: result = [] for req_type in REQ_TYPES: reqs = valid_filled_reqs.copy() reqs[req_type] = True result.append(reqs) return result @pytest.fixture(name="invalid_value_reqs") def invalid_valid_reqs_fixture(valid_filled_reqs: dict) -> list[dict]: result = [] for req_type in REQ_TYPES: reqs = valid_filled_reqs.copy() reqs[req_type] = -1 result.append(reqs) for req_type in REQ_TYPES: reqs = valid_filled_reqs.copy() reqs[req_type] = 11 result.append(reqs) return result def test_valid_requirements(valid_filled_reqs: dict, valid_empty_reqs: dict): try: UserRequirements(valid_filled_reqs) except TypeError: pytest.fail("Invalid requirement types!") except ValueError: pytest.fail("Invalid requirement values!") try: UserRequirements(valid_empty_reqs) except TypeError: pytest.fail("Invalid requirement types!") except ValueError: pytest.fail("Invalid requirement values!")
2,330
Python
.py
62
29.983871
70
0.670364
OSSQA-PUM/OSSQA
8
1
2
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,071
test_dependency_scorer.py
OSSQA-PUM_OSSQA/src/tests/main/unit/data_types/test_dependency_scorer.py
import json import pytest from main.data_types.dependency_scorer import ScorecardAnalyzer from main.data_types.sbom_types.scorecard import Scorecard from tests.main.unit.scorecards.scorecards import PATHS # Dummy dependencies for testing DUMMY_DEPS = [ { "name": "github.com/", "version": "1.0" }, { "name": "dep2", "version": "2.0" } ] @pytest.fixture(params=PATHS) def git_hub_sha1(request): with open(request.param, "r", encoding="utf-8") as file: scorecard = json.load(file) git_url = scorecard["repo"]["name"] commit = scorecard["repo"]["commit"] checks = scorecard["checks"] return (git_url, commit, checks) @pytest.mark.skip("ScorecardAnalyzer tests are not implemented") def test_scorecard_analyzer(git_hub_sha1): analyzer = ScorecardAnalyzer(lambda x: None) git_url, commit, checks = git_hub_sha1 result: Scorecard = analyzer._execute_scorecard( git_url, commit, timeout=15 ) assert Scorecard(checks) == result, \ "Scorecard does not match expected result"
1,084
Python
.py
33
28.272727
64
0.689952
OSSQA-PUM/OSSQA
8
1
2
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,072
test_dependency_manager.py
OSSQA-PUM_OSSQA/src/tests/main/unit/data_types/sbom_types/test_dependency_manager.py
""" This module contains unit tests for the `DependencyManager` class in the `dependency_manager` module of the `sbom_types` package. The `DependencyManager` class is responsible for managing dependencies in a software bill of materials (SBOM). It provides methods for initializing the manager, updating the dependencies, and converting the manager to a dictionary representation. The unit tests in this module cover the initialization of the `DependencyManager` class, the conversion of the manager to a dictionary, and the updating of dependencies. The tests also cover the retrieval of scored, unscored, and failed dependencies from the manager. """ import json import pytest import copy from main.data_types.sbom_types.dependency_manager import DependencyManager from main.data_types.sbom_types.dependency import Dependency from main.data_types.sbom_types.scorecard import Scorecard from tests.main.unit.scorecards.scorecards import PATHS from tests.main.unit.data_types.sbom_types.expected_jsons.expected_results \ import PATHS as expected_paths from tests.main.unit.sboms.sboms import DUMMY_DEPENDENCIES @pytest.fixture def dependency_manager_5_dependencies(): """ Fixture to create a DependencyManager with 5 dependencies. """ dependency_manager = DependencyManager(DUMMY_DEPENDENCIES[0:5]) return dependency_manager @pytest.fixture def dependency_manager_with_score(): """ Fixture to create a DependencyManager with a scored dependency. """ dependency_manager = DependencyManager(DUMMY_DEPENDENCIES[0:2]) with (open(PATHS[0], "r", encoding="utf-8")) as file: scorecard = Scorecard(json.load(file)) deps = dependency_manager.get_unscored_dependencies() new_dep1 = copy.deepcopy(deps[0]) new_dep2 = copy.deepcopy(deps[1]) new_dep1.scorecard = scorecard new_dep2.scorecard = scorecard dependency_manager.update([new_dep1, new_dep2]) return dependency_manager @pytest.fixture def dependency_manager_failed_dependency(): """ Fixture to create a DependencyManager with a failed dependency. 
""" dependency_manager = DependencyManager([DUMMY_DEPENDENCIES[1]]) dep1 = dependency_manager.get_unscored_dependencies()[0] dep1.failure_reason = Exception("Failed to fetch dependency") return dependency_manager @pytest.fixture def dep_mangr_with_distict_deps(): """ Fixture to create a DependencyManager with scored, unscored, and failed dependencies.""" dependency_manager = DependencyManager(DUMMY_DEPENDENCIES[0:3]) deps = dependency_manager.get_unscored_dependencies() dep1 = deps[0] dep2 = deps[1] dep3 = deps[2] with (open(PATHS[1], "r", encoding="utf-8")) as file: scorecard = Scorecard(json.load(file)) dep1.scorecard = scorecard dep2.failure_reason = Exception("Failed to fetch dependency") dependency_manager.update([dep1, dep2, dep3]) return dependency_manager def test_dependency_manager_initialization(): """ Test the initialization of the DependencyManager class. """ assert DependencyManager(DUMMY_DEPENDENCIES) def test_dependency_empty_manager_to_dict(): """ Test the to_dict method of the DependencyManager class when the manager is empty.""" dependency_manager = DependencyManager([]) assert dependency_manager.to_dict() == {"scored_dependencies": [], "unscored_dependencies": [], "failed_dependencies": []} def test_dependency_manager_update(): """ Test the update method of the DependencyManager class when the manager is updated with a new dependency.""" dependency_manager = DependencyManager([]) dep1 = Dependency(DUMMY_DEPENDENCIES[0]) dependency_manager.update([dep1]) assert len(dependency_manager.get_unscored_dependencies()) == 1 def test_dependency_manager_get_scored_dependencies( dependency_manager_with_score): """ Test the get_scored_dependencies method of the DependencyManager class when the manager has scored dependencies.""" assert len(dependency_manager_with_score.get_scored_dependencies()) == 2 def test_dependency_manager_get_unscored_dependencies( dependency_manager_5_dependencies): """ Test the get_unscored_dependencies method of the 
DependencyManager class when the manager has unscored dependencies.""" dependency_manager = dependency_manager_5_dependencies assert len(dependency_manager.get_unscored_dependencies()) == 5 def test_dependency_manager_get_failed_dependencies( dependency_manager_failed_dependency): """ Test the get_failed_dependencies method of the DependencyManager class when the manager has a failed dependency.""" dependency_manager = dependency_manager_failed_dependency assert len(dependency_manager.get_failed_dependencies()) == 1 def test_dependency_manager_get_unscored_scored_and_failed_dependencies( dep_mangr_with_distict_deps): """ Test the get_unscored_dependencies, get_scored_dependencies, and get_failed_dependencies methods of the DependencyManager class when the manager has scored, unscored, and failed dependencies.""" dependency_manager = dep_mangr_with_distict_deps assert len(dependency_manager.get_unscored_dependencies()) == 1 assert len(dependency_manager.get_scored_dependencies()) == 1 assert len(dependency_manager.get_failed_dependencies()) == 1 def test_dependency_manager_update_same_dependency( dependency_manager_5_dependencies): """ Test the update method of the DependencyManager class when the same dependency is updated multiple times.""" dependency_manager = dependency_manager_5_dependencies new_dep = Dependency(DUMMY_DEPENDENCIES[0]) dependency_manager.update([new_dep]) dependency_manager.update([new_dep]) dependency_manager.update([new_dep]) dependency_manager.update([new_dep]) dependency_manager.update([new_dep]) assert len(dependency_manager.get_unscored_dependencies()) == 5 def test_dependency_manager_replace_scored_with_unscored( dependency_manager_with_score): """ Test the update method of the DependencyManager class when a scored dependency is replaced with an unscored dependency. 
""" dependency_manager = dependency_manager_with_score new_dep = Dependency(DUMMY_DEPENDENCIES[0]) dependency_manager.update([new_dep]) assert len(dependency_manager.get_unscored_dependencies()) == 0 assert len(dependency_manager.get_scored_dependencies()) == 2 def test_dependency_manager_to_dict_filled( dep_mangr_with_distict_deps ): """ Test the to_dict method of the DependencyManager class when the manager has scored, unscored, and failed dependencies. """ dependency_manager = dep_mangr_with_distict_deps with (open(expected_paths[0], "r", encoding="utf-8")) as file: expected = json.load(file) dep_dict = dependency_manager.to_dict() print(dep_dict) assert dep_dict["scored_dependencies"][0]["scorecard"] == expected for dep in dep_dict["unscored_dependencies"]: for attr in dep: if attr not in ( "scorecard", "failure_reason", "reach_requirement", "platform_path"): assert dep[attr] == DUMMY_DEPENDENCIES[2][attr] else: assert dep[attr] in [None, False] or "github" in dep[attr] for dep in dep_dict["failed_dependencies"]: for attr in dep: if attr not in ( "scorecard", "failure_reason", "reach_requirement", "platform_path"): assert dep[attr] == DUMMY_DEPENDENCIES[1][attr] elif attr == "failure_reason": assert isinstance(dep[attr], str) else: assert dep[attr] in [None, False] or "github" in dep[attr] def test_dependency_manager_get_by_filter(dep_mangr_with_distict_deps): """ Test the get_dependencies_by_filter method of the DependencyManager class. """ dependency_manager = dep_mangr_with_distict_deps scored_deps = dependency_manager.get_dependencies_by_filter( lambda dep: dep.scorecard ) unscored_deps = dependency_manager.get_dependencies_by_filter( lambda dep: not dep.scorecard ) failed_deps = dependency_manager.get_dependencies_by_filter( lambda dep: dep.failure_reason ) all_deps = dependency_manager.get_dependencies_by_filter( lambda dep: True ) assert len(scored_deps) == 1 assert len(unscored_deps) == 2 assert len(failed_deps) == 1 assert len(all_deps) == 3
8,831
Python
.py
200
37.94
79
0.711959
OSSQA-PUM/OSSQA
8
1
2
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,073
test_scorecard.py
OSSQA-PUM_OSSQA/src/tests/main/unit/data_types/sbom_types/test_scorecard.py
""" Test the Scorecard class and its methods. The Scorecard class represents a scorecard retrieved from OpenSSF Scorecard. It contains information about the scorecard, such as the date, score, and checks. The class provides methods for initializing a scorecard, converting the scorecard to a dictionary representation, and validating the scorecard. """ import json import pytest from main.data_types.sbom_types.scorecard import Scorecard, ScorecardChecks from tests.main.unit.scorecards.scorecards import (PATHS, UNPARSABLE_SCORECARDS, OUT_OF_BOUNDS_SCORECARDS) @pytest.fixture(params=UNPARSABLE_SCORECARDS) def scorecard_json_unparsable(request): """ Fixture to load an unparsable scorecard JSON file. """ return request.param @pytest.fixture(params=OUT_OF_BOUNDS_SCORECARDS) def scorecard_json_out_of_bounds(request): """ Fixture to load a scorecard JSON file with a score that is out of bounds. """ return request.param @pytest.fixture(params=PATHS) def scorecard_json(request): """ Fixture to load a scorecard JSON file. """ with open(request.param, "r", encoding="utf-8") as file: return json.load(file) def test_scorecard_initialization(scorecard_json): """ Test that a scorecard can be initialized. """ assert Scorecard(scorecard_json) def test_scorecard_to_dict(scorecard_json): """ Test that the to_dict method returns a dictionary representation of the scorecard. 
""" scorecard = Scorecard(scorecard_json) scorecard_dict = scorecard.to_dict() assert "date" in scorecard_dict assert "score" in scorecard_dict assert "checks" in scorecard_dict assert isinstance(scorecard_dict["date"], str) assert isinstance(scorecard_dict["score"], float) assert isinstance(scorecard_dict["checks"], list) assert all(isinstance(check, dict) for check in scorecard_dict["checks"]) assert all("name" in check for check in scorecard_dict["checks"]) assert all("score" in check for check in scorecard_dict["checks"]) assert all("reason" in check for check in scorecard_dict["checks"]) assert all("details" in check for check in scorecard_dict["checks"]) assert all( isinstance(check["name"], str) for check in scorecard_dict["checks"] ) assert all( isinstance(check["score"], int) for check in scorecard_dict["checks"] ) assert all( isinstance(check["reason"], str) for check in scorecard_dict["checks"] ) assert all( isinstance(check["details"], list | None) for check in scorecard_dict["checks"] ) def test_unparsable_scorecard(scorecard_json_unparsable): with pytest.raises(AssertionError): Scorecard(scorecard_json_unparsable) def test_out_of_bounds_scorecard(scorecard_json_out_of_bounds): """ Test that a ValueError is raised when a scorecard has a score that is out of bounds. """ with pytest.raises(AssertionError): Scorecard(scorecard_json_out_of_bounds) def test_scorecard_equals(): """ Test that two scorecards are equal when they have the same values. This test uses the same scorecard JSON file for both scorecards. """ with open(PATHS[0], "r") as file: json_dict = json.load(file) scorecard1 = Scorecard(json_dict) scorecard2 = Scorecard(json_dict) assert scorecard1 == scorecard2 def test_scorecard_not_equals(): """ Test that two scorecards are not equal when they have different values. This test uses two different scorecard JSON files for the scorecards. 
""" with open(PATHS[0], "r") as file: scorecard1 = json.load(file) with open(PATHS[1], "r") as file: scorecard2 = json.load(file) scorecard1 = Scorecard(scorecard1) scorecard2 = Scorecard(scorecard2) assert scorecard1 != scorecard2 def test_check_title_hyphen_to_snake(): """ Test that the title_hyphen_to_snake method replaces hyphens with underscores and converts the title to lowercase. """ for check in ScorecardChecks.all(): snake = ScorecardChecks.title_hyphen_to_snake(check) assert "-" not in snake assert snake.islower()
4,323
Python
.py
109
33.798165
79
0.700239
OSSQA-PUM/OSSQA
8
1
2
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,074
test_sbom.py
OSSQA-PUM_OSSQA/src/tests/main/unit/data_types/sbom_types/test_sbom.py
""" This module contains unit tests for the `Sbom` class in the `sbom` module of the `sbom_types` package. """ import json import pytest from tests.main.unit.sboms.sboms import (PATHS as SBOM_PATHS, BAD_SBOMS, SBOM_COMPONENT_URLS, DUMMY_DEPENDENCIES) from main.data_types.sbom_types.sbom import Sbom from main.data_types.sbom_types.dependency import Dependency @pytest.fixture(params=SBOM_PATHS) def sbom_from_json(request): """ Fixture to load an SBOM JSON file. """ with open(request.param, "r", encoding="utf-8") as file: return json.load(file) @pytest.fixture(params=BAD_SBOMS) def sbom_bad(request): """ Fixture to load a bad SBOM JSON file. """ return request.param @pytest.fixture(params=SBOM_COMPONENT_URLS) def sbom_component_url(request): """ Fixture to load a SBOM component URL. """ return request.param @pytest.fixture(params=[0, 1, 2, 3]) def sbom_component(request): """ Fixture to load a SBOM component. """ with open(SBOM_PATHS[0], "r", encoding="utf-8") as file: sbom_dict = json.load(file) return sbom_dict["components"][request.param] def test_sbom_initialize(sbom_from_json): """ Test that an SBOM can be initialized. """ sbom = Sbom(sbom_from_json) assert sbom is not None assert isinstance(sbom.serial_number, str) assert isinstance(sbom.version, int) assert isinstance(sbom.repo_name, str) assert isinstance(sbom.repo_version, str) assert sbom.dependency_manager is not None def test_sbom_to_dict(sbom_from_json): """ Test that the to_dict method returns a dictionary representation of the SBOM. """ sbom = Sbom(sbom_from_json) sbom_dict = sbom.to_dict() assert "serialNumber" in sbom_dict.keys() assert "version" in sbom_dict.keys() assert "repo_name" in sbom_dict.keys() assert "repo_version" in sbom_dict.keys() assert "scored_dependencies" in sbom_dict.keys() assert "unscored_dependencies" in sbom_dict.keys() assert "failed_dependencies" in sbom_dict.keys() def test_sbom_validation(sbom_bad): """ Test that an exception is raised when an SBOM is invalid. 
""" with pytest.raises(Exception): Sbom(sbom_bad) def test_sbom_dependency_manager(sbom_from_json): """ Test that the dependency manager is correctly initialized. """ sbom = Sbom(sbom_from_json) assert sbom.dependency_manager is not None assert len(sbom.get_failed_dependencies()) == 0 assert len(sbom.get_scored_dependencies()) == 0 assert len(sbom.get_unscored_dependencies()) == 13 def test_sbom_dependency_manager_update(sbom_from_json): """ Test that the dependency manager is correctly updated. """ sbom = Sbom(sbom_from_json) assert sbom.dependency_manager is not None new_dep1 = Dependency(DUMMY_DEPENDENCIES[0]) new_dep2 = Dependency(DUMMY_DEPENDENCIES[1]) sbom.update_dependencies([new_dep1, new_dep2]) assert len(sbom.get_failed_dependencies()) == 0 assert len(sbom.get_scored_dependencies()) == 0 assert len(sbom.get_unscored_dependencies()) == 15
3,263
Python
.py
91
30.274725
75
0.679455
OSSQA-PUM/OSSQA
8
1
2
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,075
test_dependency.py
OSSQA-PUM_OSSQA/src/tests/main/unit/data_types/sbom_types/test_dependency.py
""" This file contains test cases for the Dependency class. The Dependency class represents a single dependency in a software bill of materials (SBOM). It contains information about the dependency, such as its name, version, and scorecard. The class provides methods for initializing a dependency, comparing dependencies, and converting dependencies to a dictionary representation. """ import json import re import pytest from main.data_types.sbom_types.dependency import Dependency from main.data_types.sbom_types.scorecard import Scorecard from tests.main.unit.scorecards.scorecards import PATHS from tests.main.unit.sboms.sboms import DUMMY_DEPENDENCIES DEPENDENCY_NAME = "github.com/repo/path" COMPONENT_NAME = "path" @pytest.fixture def dependency_basic(): """ Fixture to create a basic Dependency object. """ return Dependency( DUMMY_DEPENDENCIES[0] ) @pytest.fixture(params=PATHS) def dependency_scorecard(request): """ Fixture to create a Dependency object with a scorecard. """ with open(request.param, "r", encoding="utf-8") as file: scorecard = json.load(file) dep = Dependency(DUMMY_DEPENDENCIES[0]) dep.scorecard = Scorecard(scorecard) return dep def test_dependency_initialization(): """ Test that a Dependency object can be initialized. 
""" assert Dependency(DUMMY_DEPENDENCIES[0]) def test_dependency_eq(): """ Test that two Dependency objects are equal if they have the same name and version.""" dep1 = Dependency(DUMMY_DEPENDENCIES[0]) dep2 = Dependency(DUMMY_DEPENDENCIES[0]) assert dep1 == dep2 def test_dependency_not_eq(): """ Test that two Dependency objects are not equal if they have different names or versions.""" dep1 = Dependency(DUMMY_DEPENDENCIES[0]) dep2 = Dependency(DUMMY_DEPENDENCIES[1]) dep3 = Dependency(DUMMY_DEPENDENCIES[2]) dep4 = Dependency(DUMMY_DEPENDENCIES[3]) dep5 = Dependency(DUMMY_DEPENDENCIES[4]) assert dep1 != dep2 assert dep1 != dep3 assert dep1 != dep4 assert dep4 != dep5 def test_dependency_platform(dependency_basic): """ Test that the platform property of a Dependency object is correct. """ assert dependency_basic.platform == "github.com" def test_dependency_repo_path(dependency_basic): """ Test that the repo_path property of a Dependency object is correct. """ assert isinstance(dependency_basic.repo_path, str) assert len(dependency_basic.repo_path) > 0 assert "/" in dependency_basic.repo_path assert re.search(r"^[a-zA-Z0-9_-]+/[a-zA-Z0-9_-]+$", dependency_basic.repo_path) def test_dependency_url(dependency_basic): """ Test that the url property of a Dependency object is correct. """ assert isinstance(dependency_basic.git_url, str) assert len(dependency_basic.git_url) > 0 assert "/" in dependency_basic.git_url assert re.search(r"https://github.com/[a-zA-Z0-9_-]+/[a-zA-Z0-9_-]+$", dependency_basic.git_url) def test_dependency_basic_to_dict(dependency_basic): """ Test that the to_dict method of a Dependency object returns the correct dictionary representation of the object. 
""" dep_dict = dependency_basic.to_dict() assert "scorecard" in dep_dict assert "failure_reason" in dep_dict for key in DUMMY_DEPENDENCIES[0]: assert key in dep_dict assert dep_dict[key] == DUMMY_DEPENDENCIES[0][key] def test_dependency_scorecard_to_dict(dependency_scorecard): """ Test that the to_dict method of a Dependency object with a scorecard returns the correct dictionary representation of the object. """ dep_dict = dependency_scorecard.to_dict() assert "scorecard" in dep_dict assert "failure_reason" in dep_dict for key in DUMMY_DEPENDENCIES[0]: assert key in dep_dict assert dep_dict[key] == DUMMY_DEPENDENCIES[0][key] # Check correct format of scorecard assert isinstance(dep_dict["scorecard"], dict) assert "date" in dep_dict["scorecard"] assert "score" in dep_dict["scorecard"] assert "checks" in dep_dict["scorecard"] assert isinstance(dep_dict["scorecard"]["date"], str) assert isinstance(dep_dict["scorecard"]["score"], float) assert isinstance(dep_dict["scorecard"]["checks"], list) assert all( isinstance(check, dict) for check in dep_dict["scorecard"]["checks"] ) assert all( "name" in check for check in dep_dict["scorecard"]["checks"] ) assert all( "score" in check for check in dep_dict["scorecard"]["checks"] ) assert all( "reason" in check for check in dep_dict["scorecard"]["checks"] ) assert all( "details" in check for check in dep_dict["scorecard"]["checks"] ) assert all( isinstance(check["name"], str) for check in dep_dict["scorecard"]["checks"] ) assert all( isinstance(check["score"], int) for check in dep_dict["scorecard"]["checks"] ) assert all( isinstance(check["reason"], str) for check in dep_dict["scorecard"]["checks"] ) assert all( isinstance(check["details"], list | None) for check in dep_dict["scorecard"]["checks"] )
5,357
Python
.py
145
31.517241
75
0.687934
OSSQA-PUM/OSSQA
8
1
2
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,076
expected_results.py
OSSQA-PUM_OSSQA/src/tests/main/unit/data_types/sbom_types/expected_jsons/expected_results.py
import json from pathlib import Path from tests.main.unit.scorecards.scorecards import PATHS from main.data_types.sbom_types.scorecard import Scorecard DIRECTORY = Path(__file__).parent PATHS = [ DIRECTORY / "dependency_pad_left.json", ] def test(): with open(PATHS[1], "r") as file: scorecard = json.load(file) s = Scorecard(scorecard) with open("dependency.json", "w") as file: json.dump(s.to_dict(), file, indent=4)
455
Python
.py
14
28.928571
58
0.709382
OSSQA-PUM/OSSQA
8
1
2
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,077
test_get_named_sboms.py
OSSQA-PUM_OSSQA/src/tests/main/integration/test_get_named_sboms.py
""" This module tests the functionality of gettings SBOMs with specific names from the database. """ import sys from unittest.mock import patch import pytest import requests from main.backend_communication import BackendCommunication from main.data_types.dependency_scorer import StepResponse from main.data_types.sbom_types.sbom import Sbom from main.frontend.cli import ossqa_cli from main.frontend.front_end_api import FrontEndAPI from main.sbom_processor import SbomProcessor, SbomProcessorStatus from tests.main.integration.constants import HOST from tests.main.integration.sboms import create_scored_sbom @pytest.fixture(name="fake_scored_sbom", scope="module") def fake_scored_sbom_fixture() -> Sbom: """ This fixture reads and SBOM file and creates an Sbom object with fake scorecard data. """ return create_scored_sbom() @pytest.mark.order(-1) # Ensures the tests run after all unit tests class TestGetNamedSboms: """ These functions test the action of getting SBOMs with specific names from the database in a bottom-up fashion. 
""" def test_populate_database(self, fake_scored_sbom: Sbom): resp = requests.post(HOST + "/test/reset", timeout=10) assert resp.status_code == 200 sbom_dict = fake_scored_sbom.to_dict() resp = requests.post(HOST + "/sbom", json=sbom_dict, timeout=10) assert resp.status_code == 201 def test_backend(self, fake_scored_sbom: Sbom): name = fake_scored_sbom.repo_name resp = requests.get(HOST + f"/sbom/{name}", timeout=5) assert resp.status_code == 200 sbom_dicts = resp.json() assert len(sbom_dicts) != 0 sbom = Sbom(sbom_dicts[0]) assert sbom.repo_name == name def test_backend_comm(self, fake_scored_sbom: Sbom): name = fake_scored_sbom.repo_name def callback(response: StepResponse): assert response.message != "The request timed out" assert response.message != "An error occurred in the database" backend_comm = BackendCommunication(callback, HOST) sboms = backend_comm.get_sboms_by_name(name) assert isinstance(sboms, list) assert len(sboms) != 0 assert sboms[0].repo_name == name def test_sbom_processor(self, fake_scored_sbom: Sbom): name = fake_scored_sbom.repo_name def callback(status: SbomProcessorStatus): if response := status.step_response: assert response.message != "The request timed out" assert response.message != "An error occurred in the database" sbom_proc = SbomProcessor(HOST) sbom_proc.on_status_update.subscribe(callback) sboms = sbom_proc.lookup_previous_sboms(name) assert isinstance(sboms, list) assert len(sboms) != 0 assert sboms[0].repo_name == name def test_front_end_api(self, fake_scored_sbom: Sbom): name = fake_scored_sbom.repo_name def callback(status: SbomProcessorStatus): if response := status.step_response: assert response.message != "The request timed out" assert response.message != "An error occurred in the database" front_end_api = FrontEndAPI(HOST) front_end_api.on_sbom_processor_status_update.subscribe(callback) sboms = front_end_api.lookup_previous_sboms(name) assert isinstance(sboms, list) assert len(sboms) != 0 assert sboms[0].repo_name == 
name def test_cli(self, fake_scored_sbom: Sbom): name = fake_scored_sbom.repo_name # Temporarily overwrite sys.argv, then run the CLI mock_args = ["prog", "lookup", "--backend", HOST, name] with patch("sys.argv", mock_args): assert sys.argv == mock_args try: ossqa_cli() # TODO: test this more extensively. maybe catch stdout? except SystemExit as e: # Click explicitly calls sys.exit, so this needs to be caught assert e.code == 0
4,066
Python
.py
89
37.955056
78
0.67163
OSSQA-PUM/OSSQA
8
1
2
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,078
constants.py
OSSQA-PUM_OSSQA/src/tests/main/integration/constants.py
""" This module contains constants that are useful for integration testing. """ HOST = "http://localhost:5091"
112
Python
.py
4
26.75
71
0.766355
OSSQA-PUM/OSSQA
8
1
2
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,079
test_get_sbom_names.py
OSSQA-PUM_OSSQA/src/tests/main/integration/test_get_sbom_names.py
""" This module tests the functionality of getting SBOM names from the database. """ import json import sys from pathlib import Path from unittest.mock import patch import pytest import requests from main.backend_communication import BackendCommunication from main.data_types.dependency_scorer import StepResponse from main.data_types.sbom_types.sbom import Sbom from main.data_types.sbom_types.scorecard import Scorecard from main.frontend.cli import ossqa_cli from main.frontend.front_end_api import FrontEndAPI from main.sbom_processor import SbomProcessor, SbomProcessorStatus from tests.main.integration.constants import HOST from tests.main.integration.sboms import create_scored_sbom @pytest.fixture(name="fake_scored_sbom", scope="module") def fake_scored_sbom_fixture() -> Sbom: """ This fixture reads and SBOM file and creates an Sbom object with fake scorecard data. """ return create_scored_sbom() @pytest.mark.order(-2) # Ensures the tests run after all unit tests class TestGetSbomNames: """ These functions test the action of getting SBOM names from the database in a bottom-up fashion. 
""" def test_populate_database(self, fake_scored_sbom: Sbom): resp = requests.post(HOST + "/test/reset", timeout=10) assert resp.status_code == 200 sbom_dict = fake_scored_sbom.to_dict() resp = requests.post(HOST + "/sbom", json=sbom_dict, timeout=10) assert resp.status_code == 201 def test_backend(self): resp = requests.get(HOST + "/sbom", timeout=5) assert resp.status_code == 200 assert len(resp.json()) != 0 def test_backend_comm(self): def callback(response: StepResponse): assert response.message != "The request timed out" assert response.message != "An error occurred in the database" backend_comm = BackendCommunication(callback, HOST) names = backend_comm.get_sbom_names() assert isinstance(names, list) assert len(names) != 0 def test_sbom_processor(self): def callback(status: SbomProcessorStatus): if response := status.step_response: assert response.message != "The request timed out" assert response.message != "An error occurred in the database" sbom_proc = SbomProcessor(HOST) sbom_proc.on_status_update.subscribe(callback) names = sbom_proc.lookup_stored_sboms() assert isinstance(names, list) assert len(names) != 0 def test_front_end_api(self): def callback(status: SbomProcessorStatus): if response := status.step_response: assert response.message != "The request timed out" assert response.message != "An error occurred in the database" front_end_api = FrontEndAPI(HOST) front_end_api.on_sbom_processor_status_update.subscribe(callback) names = front_end_api.lookup_stored_sboms() assert isinstance(names, list) assert len(names) != 0 def test_cli(self): # Temporarily overwrite sys.argv, then run the CLI mock_args = ["prog", "sboms", "--backend", HOST] with patch("sys.argv", mock_args): assert sys.argv == mock_args try: ossqa_cli() # TODO: test this more extensively. maybe catch stdout? except SystemExit as e: # Click explicitly calls sys.exit, so this needs to be caught assert e.code == 0
3,531
Python
.py
80
36.7875
78
0.679267
OSSQA-PUM/OSSQA
8
1
2
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,080
test_analyze_sbom.py
OSSQA-PUM_OSSQA/src/tests/main/integration/test_analyze_sbom.py
""" This module tests the functionality of analyzing an SBOM. """ import os import sys from pathlib import Path from unittest.mock import patch import pytest import requests from main.backend_communication import BackendCommunication from main.frontend.cli import ossqa_cli from main.frontend.front_end_api import FrontEndAPI from main.sbom_processor import SbomProcessor from main.data_types.dependency_scorer import StepResponse from main.data_types.sbom_types.sbom import Sbom from main.data_types.user_requirements import (RequirementsType, UserRequirements) from tests.main.integration.constants import HOST from tests.main.integration.sboms import SBOM_PATH, create_scored_sbom, \ create_unscored_sbom @pytest.fixture(name="git_token", scope="module") def git_token_fixture() -> str: return os.environ.get("GITHUB_AUTH_TOKEN", "invalid_token") @pytest.fixture(name="user_reqs", scope="module") def user_reqs_fixture() -> UserRequirements: req_types = [t.value for t in RequirementsType] reqs_dict = {req_type: 10 for req_type in req_types} return UserRequirements(reqs_dict) @pytest.fixture(name="sbom_path", scope="module") def sbom_path_fixture() -> Path: return SBOM_PATH @pytest.fixture(name="sbom", scope="function") def sbom_fixture(sbom_path: Path) -> Sbom: """ This fixture reads an SBOM file and creates an Sbom object. """ return create_unscored_sbom() @pytest.fixture(name="fake_scored_sbom", scope="function") def fake_scored_sbom_fixture(sbom_path: Path) -> Sbom: """ This fixture reads and SBOM file and creates an Sbom object with fake scorecard data. """ return create_scored_sbom() def before_test(): """ Resets the backend database and checks that the reset is successful. """ resp = requests.post(HOST + "/test/reset", timeout=10) assert resp.status_code == 200 resp = requests.get(HOST + "/sbom", timeout=10) assert resp.status_code == 200 assert len(resp.json()) == 0 def after_test(): """ Checks that an SBOM has been successfully added to the backend database. 
""" resp = requests.get(HOST + "/sbom", timeout=10) assert resp.status_code == 200 assert len(resp.json()) != 0 @pytest.mark.order(-3) # Ensures the tests run after all unit tests class TestAnalyzeSBOM: """ These functions test the action of analyzing an SBOM in a bottom-up fashion. """ def test_backend(self, fake_scored_sbom: Sbom): before_test() sbom_dict = fake_scored_sbom.to_dict() resp = requests.post(HOST + "/sbom", json=sbom_dict, timeout=10) assert resp.status_code == 201 after_test() def test_backend_comm(self, fake_scored_sbom: Sbom): before_test() def callback(response: StepResponse): assert response.message != "The request timed out" backend_comm = BackendCommunication(callback, HOST) backend_comm.add_sbom(fake_scored_sbom) after_test() def test_sbom_processor(self, sbom: Sbom): before_test() sbom_proc = SbomProcessor(HOST) res_sbom = sbom_proc.analyze_sbom(sbom) scored_deps = res_sbom.get_scored_dependencies() assert len(scored_deps) != 0 after_test() def test_front_end_api(self, sbom: Sbom, user_reqs: UserRequirements): before_test() front_end_api = FrontEndAPI(HOST) res_sbom = front_end_api.analyze_sbom(sbom, user_reqs) scored_deps = res_sbom.get_scored_dependencies() assert len(scored_deps) != 0 after_test() def test_cli(self, sbom_path: Path, git_token: str): before_test() # Temporarily overwrite sys.argv, then run the CLI # TODO: add user requirements to mock_args mock_args = [ "prog", "analyze", "-g", git_token, "--backend", HOST, str(sbom_path), ] with patch("sys.argv", mock_args): assert sys.argv == mock_args try: ossqa_cli() except SystemExit as e: # Click explicitly calls sys.exit, so this needs to be caught assert e.code == 0 after_test()
4,298
Python
.py
112
31.875
77
0.664903
OSSQA-PUM/OSSQA
8
1
2
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,081
__init__.py
OSSQA-PUM_OSSQA/src/tests/main/integration/sboms/__init__.py
""" This module contains downscaled SBOMs tailored to be used in the integration tests. """ import json from pathlib import Path from main.data_types.sbom_types.sbom import Sbom from main.data_types.sbom_types.scorecard import Scorecard DIR_PATH = Path(__file__).parent SBOM_PATH = DIR_PATH / "downscaled-ddmanager-controller.cdx.json" SCORECARD = Scorecard({ "date": "2024-04-15", "repo": { "name": "github.com/jonschlinkert/pad-left", "commit": "e521c7ba0d5d290b2cef7485af11b98dc96f2930" }, "scorecard": { "version": "v4.13.1-273-g0b9dfb65", "commit": "0b9dfb656f1796c7c693ad74f5193657b6a81e0b" }, "score": 3.2, "checks": [ { "name": "Code-Review", "score": 1, "reason": "Found 3/28 approved changesets -- score normalized to 1", "details": None, "documentation": { "short": "Determines if the project requires human code review before pull requests (aka merge requests) are merged.", "url": "https://github.com/ossf/scorecard/blob/0b9dfb656f1796c7c693ad74f5193657b6a81e0b/docs/checks.md#code-review" } }, { "name": "Maintained", "score": 0, "reason": "0 commit(s) and 0 issue activity found in the last 90 days -- score normalized to 0", "details": None, "documentation": { "short": "Determines if the project is \"actively maintained\".", "url": "https://github.com/ossf/scorecard/blob/0b9dfb656f1796c7c693ad74f5193657b6a81e0b/docs/checks.md#maintained" } }, { "name": "CII-Best-Practices", "score": 0, "reason": "no effort to earn an OpenSSF best practices badge detected", "details": None, "documentation": { "short": "Determines if the project has an OpenSSF (formerly CII) Best Practices Badge.", "url": "https://github.com/ossf/scorecard/blob/0b9dfb656f1796c7c693ad74f5193657b6a81e0b/docs/checks.md#cii-best-practices" } }, { "name": "License", "score": 10, "reason": "license file detected", "details": [ "Info: project has a license file: LICENSE:0", "Info: FSF or OSI recognized license: MIT License: LICENSE:0" ], "documentation": { "short": "Determines if the project has defined a 
license.", "url": "https://github.com/ossf/scorecard/blob/0b9dfb656f1796c7c693ad74f5193657b6a81e0b/docs/checks.md#license" } }, { "name": "Signed-Releases", "score": -1, "reason": "no releases found", "details": None, "documentation": { "short": "Determines if the project cryptographically signs release artifacts.", "url": "https://github.com/ossf/scorecard/blob/0b9dfb656f1796c7c693ad74f5193657b6a81e0b/docs/checks.md#signed-releases" } }, { "name": "Token-Permissions", "score": -1, "reason": "No tokens found", "details": None, "documentation": { "short": "Determines if the project's workflows follow the principle of least privilege.", "url": "https://github.com/ossf/scorecard/blob/0b9dfb656f1796c7c693ad74f5193657b6a81e0b/docs/checks.md#token-permissions" } }, { "name": "Dangerous-Workflow", "score": -1, "reason": "no workflows found", "details": None, "documentation": { "short": "Determines if the project's GitHub Action workflows avoid dangerous patterns.", "url": "https://github.com/ossf/scorecard/blob/0b9dfb656f1796c7c693ad74f5193657b6a81e0b/docs/checks.md#dangerous-workflow" } }, { "name": "Packaging", "score": -1, "reason": "packaging workflow not detected", "details": [ "Warn: no GitHub/GitLab publishing workflow detected." 
], "documentation": { "short": "Determines if the project is published as a package that others can easily download, install, easily update, and uninstall.", "url": "https://github.com/ossf/scorecard/blob/0b9dfb656f1796c7c693ad74f5193657b6a81e0b/docs/checks.md#packaging" } }, { "name": "Binary-Artifacts", "score": 10, "reason": "no binaries found in the repo", "details": None, "documentation": { "short": "Determines if the project has generated executable (binary) artifacts in the source repository.", "url": "https://github.com/ossf/scorecard/blob/0b9dfb656f1796c7c693ad74f5193657b6a81e0b/docs/checks.md#binary-artifacts" } }, { "name": "Pinned-Dependencies", "score": -1, "reason": "no dependencies found", "details": None, "documentation": { "short": "Determines if the project has declared and pinned the dependencies of its build process.", "url": "https://github.com/ossf/scorecard/blob/0b9dfb656f1796c7c693ad74f5193657b6a81e0b/docs/checks.md#pinned-dependencies" } }, { "name": "Branch-Protection", "score": 0, "reason": "branch protection not enabled on development/release branches", "details": [ "Warn: branch protection not enabled for branch 'master'" ], "documentation": { "short": "Determines if the default and release branches are protected with GitHub's branch protection settings.", "url": "https://github.com/ossf/scorecard/blob/0b9dfb656f1796c7c693ad74f5193657b6a81e0b/docs/checks.md#branch-protection" } }, { "name": "Fuzzing", "score": 0, "reason": "project is not fuzzed", "details": [ "Warn: no fuzzer integrations found" ], "documentation": { "short": "Determines if the project uses fuzzing.", "url": "https://github.com/ossf/scorecard/blob/0b9dfb656f1796c7c693ad74f5193657b6a81e0b/docs/checks.md#fuzzing" } }, { "name": "Vulnerabilities", "score": 10, "reason": "0 existing vulnerabilities detected", "details": None, "documentation": { "short": "Determines if the project has open, known unfixed vulnerabilities.", "url": 
"https://github.com/ossf/scorecard/blob/0b9dfb656f1796c7c693ad74f5193657b6a81e0b/docs/checks.md#vulnerabilities" } }, { "name": "Security-Policy", "score": 0, "reason": "security policy file not detected", "details": [ "Warn: no security policy file detected", "Warn: no security file to analyze", "Warn: no security file to analyze", "Warn: no security file to analyze" ], "documentation": { "short": "Determines if the project has published a security policy.", "url": "https://github.com/ossf/scorecard/blob/0b9dfb656f1796c7c693ad74f5193657b6a81e0b/docs/checks.md#security-policy" } }, { "name": "SAST", "score": 0, "reason": "SAST tool is not run on all commits -- score normalized to 0", "details": [ "Warn: 0 commits out of 5 are checked with a SAST tool" ], "documentation": { "short": "Determines if the project uses static code analysis.", "url": "https://github.com/ossf/scorecard/blob/0b9dfb656f1796c7c693ad74f5193657b6a81e0b/docs/checks.md#sast" } } ] }) def create_unscored_sbom() -> Sbom: with open(SBOM_PATH, "r", encoding="utf-8") as file: return Sbom(json.load(file)) def create_scored_sbom() -> Sbom: with open(SBOM_PATH, "r", encoding="utf-8") as file: sbom = Sbom(json.load(file)) dependencies = [] for dep in sbom.get_unscored_dependencies(): dep_dict = dep.to_dict() if "platform_path" in dep_dict.keys(): dep.scorecard = SCORECARD sbom.update_dependencies(dependencies) return sbom
8,777
Python
.py
203
31.541872
151
0.568927
OSSQA-PUM/OSSQA
8
1
2
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,082
test_routes.py
OSSQA-PUM_OSSQA/src/tests/backend/test_routes.py
""" This module contains tests for each route in the flask app. """ import json from typing import Generator from flask.testing import FlaskClient from pytest import fixture, FixtureRequest from backend.server import create_test_app from backend.models import db from tests.backend.sboms import PATHS @fixture(name="client", scope="module") def client_fixture() -> Generator[FlaskClient, None, None]: """ Creates a test client that handles requests to the test app. """ app = create_test_app() yield app.test_client() db.session.remove() db.drop_all() @fixture(name="sbom", params=PATHS, scope="module") def sbom_fixture(request: FixtureRequest) -> dict: """ Opens and reads the content of SBOMs. """ with open(request.param, "r", encoding="utf-8") as sbom_file: return json.load(sbom_file) def test_add_sbom(client: FlaskClient, sbom: dict): resp = client.post("/sbom", json=sbom) assert resp.status_code == 201 # TODO: make sure the right objects exist in the database def test_update_sbom(client: FlaskClient, sbom: dict): resp = client.post("/sbom", json=sbom) assert resp.status_code == 201 # TODO: make sure the right objects exist in the database def test_sbom_names(client: FlaskClient, sbom: dict): resp = client.get("/sbom") assert resp.status_code == 200 found_name = False for name in resp.json: if name == sbom["repo_name"]: found_name = True break assert found_name def test_get_sboms_by_name(client: FlaskClient, sbom: dict): resp = client.get(f"/sbom/{sbom["repo_name"]}") assert resp.status_code == 200 for resp_sbom in resp.json: assert resp_sbom["serialNumber"] == sbom["serialNumber"] assert resp_sbom["version"] == sbom["version"] assert resp_sbom["metadata"]["component"]["name"] == sbom["repo_name"] assert resp_sbom["metadata"]["component"]["version"] == \ sbom["repo_version"] for component in resp_sbom["components"]: found_component = False for dependency in sbom["scored_dependencies"]: found_component = component["name"] == dependency["name"] and \ component["version"] 
== dependency["version"] if found_component: break assert found_component def test_get_existing_dependencies(client: FlaskClient, sbom: dict): deps = [] for dep in sbom["scored_dependencies"]: deps.append(dep) resp = client.get("/dependency/existing", json=deps) assert resp.status_code == 200 for component in resp.json: found_component = False for dependency in sbom["scored_dependencies"]: found_component = component["name"] == dependency["name"] and \ component["version"] == dependency["version"] if found_component: break assert found_component
2,980
Python
.py
74
33.486486
79
0.654193
OSSQA-PUM/OSSQA
8
1
2
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,083
__init__.py
OSSQA-PUM_OSSQA/src/tests/backend/sboms/__init__.py
from pathlib import Path BASE_DIR = Path(__file__).parent PATHS = [ BASE_DIR / "fake_sbom.json", ]
104
Python
.py
5
18.8
32
0.673469
OSSQA-PUM/OSSQA
8
1
2
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,084
manage.py
codegenius2_Editoral/manage.py
#!/usr/bin/env python """Django's command-line utility for administrative tasks.""" import os import sys def main(): os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'PyEditorial.settings') try: from django.core.management import execute_from_command_line except ImportError as exc: raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" ) from exc execute_from_command_line(sys.argv) if __name__ == '__main__': main()
631
Python
.py
17
31.176471
75
0.681967
codegenius2/Editoral
8
0
0
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,085
settings.py
codegenius2_Editoral/PyEditorial/settings.py
""" Django settings for PyEditorial project. Generated by 'django-admin startproject' using Django 3.0.8. For more information on this file, see https://docs.djangoproject.com/en/3.0/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/3.0/ref/settings/ """ import os from django.utils.translation import ugettext_lazy as _ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '^rz9wrbb#ig!tl)4c$!o_^01ef8(rtxe(i()$ph61$8+mh^v_8' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'constance', 'constance.backends.database', 'ckeditor', 'ckeditor_uploader', 'content.apps.ContentConfig', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'PyEditorial.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [os.path.join(BASE_DIR, 'templates')], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', 
'content.context_processors.show_system_content', ], }, }, ] WSGI_APPLICATION = 'PyEditorial.wsgi.application' # Database # https://docs.djangoproject.com/en/3.0/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # If you need to use Postgresql, you can use this section # DATABASES = { # 'default': { # 'ENGINE': 'django.db.backends.postgresql', # 'NAME': 'PyEditorial', # 'USER': 'postgres', # 'PASSWORD': 'great123', # 'HOST': 'localhost', # 'PORT': '', # } # } # Password validation # https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/3.0/topics/i18n/ LANGUAGES = [ ('en', _('English')), ] LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/3.0/howto/static-files/ STATIC_URL = '/static/' STATIC_ROOT = os.path.join(BASE_DIR, 'static') STATICFILES_DIRS = [ os.path.join(BASE_DIR, "templates/static"), ] # Media files MEDIA_ROOT = os.path.join(BASE_DIR, 'media') MEDIA_URL = '/media/' # CKEditor CKEDITOR_UPLOAD_PATH = "uploads/" CKEDITOR_CONFIGS = { 'default': { 'toolbar': 'full', }, } # CONSTANCE Settings CONSTANCE_BACKEND = 'constance.backends.database.DatabaseBackend' CONSTANCE_IGNORE_ADMIN_VERSION_CHECK = True CACHES = { 'default': { 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache', 'LOCATION': '127.0.0.1:11211', } } CONSTANCE_DATABASE_CACHE_BACKEND = 'default' CONSTANCE_ADDITIONAL_FIELDS = { 'yes_no_select': 
['django.forms.fields.ChoiceField', { 'widget': 'django.forms.Select', 'choices': ( ("yes", _('Yes')), ("no", _('No')) ) }], 'image_field': ['django.forms.ImageField', {}] } CONSTANCE_CONFIG = { 'SITE_TITLE': ('My Blog', _('Title of this site!'), str), 'SITE_DESCRIPTION': ('Blog Description', _('Description of this site!'), str), 'SITE_FAVICON': ('default_favicon.png', _('Favicon of this site!'), 'image_field'), 'GET_IN_TOUCH_ACTIVE': ('yes', _('"Get in touch" section is active?'), 'yes_no_select'), 'GET_IN_TOUCH_INFO': ('Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.', _('"Get in touch" information text'), str), 'GET_IN_TOUCH_EMAIL_ADDRESS': ('[email protected]', _('"Get in touch" email address'), str), 'GET_IN_TOUCH_PHONE': ('(000) 000-0000', _('"Get in touch" phone number'), str), 'GET_IN_TOUCH_ADDRESS': ('1234 Somewhere Road #8254<br />Nashville, TN 00000-0000', _('"Get in touch" address'), str), 'SOCIAL_NETWORKS_FACEBOOK_URL': ('#', _('Social Networks - Facebook'), str), 'SOCIAL_NETWORKS_TWITTER_URL': ('#', _('Social Networks - Twitter'), str), 'SOCIAL_NETWORKS_SNAPCHAT_URL': ('#', _('Social Networks - Snapchat'), str), 'SOCIAL_NETWORKS_INSTAGRAM_URL': ('#', _('Social Networks - Instagram'), str), 'SOCIAL_NETWORKS_MEDIUM_URL': ('#', _('Social Networks - Medium'), str), 'SOCIAL_NETWORKS_TELEGRAM_URL': ('#', _('Social Networks - Telegram'), str), 'SOCIAL_NETWORKS_GITHUB_URL': ('#', _('Social Networks - Github'), str), 'SOCIAL_NETWORKS_GITLAB_URL': ('#', _('Social Networks - Gitlab'), str), } CONSTANCE_CONFIG_FIELDSETS = { 'General Options': ( 'SITE_TITLE', 'SITE_DESCRIPTION', 'SITE_FAVICON', ), '"Get in touch" Options': ( 'GET_IN_TOUCH_ACTIVE', 'GET_IN_TOUCH_INFO', 'GET_IN_TOUCH_EMAIL_ADDRESS', 'GET_IN_TOUCH_PHONE', 'GET_IN_TOUCH_ADDRESS', ), '"Social Networks" Options': ( 'SOCIAL_NETWORKS_FACEBOOK_URL', 'SOCIAL_NETWORKS_TWITTER_URL', 'SOCIAL_NETWORKS_SNAPCHAT_URL', 
'SOCIAL_NETWORKS_INSTAGRAM_URL', 'SOCIAL_NETWORKS_MEDIUM_URL', 'SOCIAL_NETWORKS_TELEGRAM_URL', 'SOCIAL_NETWORKS_GITHUB_URL', 'SOCIAL_NETWORKS_GITLAB_URL', ), } # CONSTANCE Settings # Auth Settings LOGIN_REDIRECT_URL = 'content:index' LOGIN_URL = 'login' LOGOUT_REDIRECT_URL = 'content:index'
6,996
Python
.py
189
32.407407
196
0.663955
codegenius2/Editoral
8
0
0
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,086
asgi.py
codegenius2_Editoral/PyEditorial/asgi.py
""" ASGI config for PyEditorial project. It exposes the ASGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/ """ import os from django.core.asgi import get_asgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'PyEditorial.settings') application = get_asgi_application()
399
Python
.py
10
38.3
78
0.806789
codegenius2/Editoral
8
0
0
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,087
urls.py
codegenius2_Editoral/PyEditorial/urls.py
"""PyEditorial URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/3.0/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('content/', include('content.urls')) """ from django.conf.urls.static import static from django.contrib import admin from django.urls import path, include from django.conf import settings urlpatterns = [ path('admin/', admin.site.urls), path('ckeditor/', include('ckeditor_uploader.urls')), path('', include(('content.urls', 'content'), namespace='content')), path('accounts/', include("django.contrib.auth.urls")) ] if settings.DEBUG: urlpatterns = urlpatterns + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) urlpatterns = urlpatterns + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
1,244
Python
.py
27
43.037037
95
0.738056
codegenius2/Editoral
8
0
0
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,088
wsgi.py
codegenius2_Editoral/PyEditorial/wsgi.py
""" WSGI config for PyEditorial project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'PyEditorial.settings') application = get_wsgi_application()
399
Python
.py
10
38.3
78
0.806789
codegenius2/Editoral
8
0
0
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,089
models.py
codegenius2_Editoral/content/models.py
from django.db import models from django.utils.translation import ugettext_lazy as _ from ckeditor_uploader.fields import RichTextUploadingField class BlogCategory(models.Model): title = models.CharField( max_length=256, verbose_name=_('Title :'), unique=True, blank=False, null=False ) class Meta: verbose_name = _('Blog Category') verbose_name_plural = _('Blog Categories') def __str__(self): return self.title class Blog(models.Model): title = models.CharField( max_length=256, verbose_name=_('Title :'), unique=True, null=False, blank=False ) slug = models.SlugField( max_length=256, verbose_name=_('Slug :'), unique=True, null=False, blank=False ) thumbnail = models.ImageField( upload_to='content/blog/thumbnail/', verbose_name=_('Thumbnail :') ) publish = models.BooleanField( verbose_name=_('Publish :'), default=True, help_text=_('Will this post be published?') ) category = models.ManyToManyField( BlogCategory ) content = RichTextUploadingField() class Meta: verbose_name = _('Blog') verbose_name_plural = _('Blogs') def display_category(self): return ', '.join([cat.title for cat in self.category.all()]) def __str__(self): return self.title class VideocastCategory(models.Model): title = models.CharField( max_length=256, verbose_name=_('Title :'), unique=True, blank=False, null=False ) class Meta: verbose_name = _('Video Cast Category') verbose_name_plural = _('Video Cast Categories') def __str__(self): return self.title class Videocast(models.Model): title = models.CharField( max_length=256, verbose_name=_('Title :'), unique=True, null=False, blank=False ) slug = models.SlugField( max_length=256, verbose_name=_('Slug :'), unique=True, null=False, blank=False ) thumbnail = models.ImageField( upload_to='content/video/thumbnail/', verbose_name=_('Thumbnail :') ) publish = models.BooleanField( verbose_name=_('Publish :'), default=True, help_text=_('Will this video be published?') ) video = models.CharField( max_length=256, verbose_name=_('Video link :') ) category = 
models.ManyToManyField( VideocastCategory ) content = RichTextUploadingField() class Meta: verbose_name = _('Video Cast') verbose_name_plural = _('Video Casts') def display_category(self): return ', '.join([cat.title for cat in self.category.all()]) def __str__(self): return self.title class PodcastCategory(models.Model): title = models.CharField( max_length=256, verbose_name=_('Title :'), unique=True, blank=False, null=False ) class Meta: verbose_name = _('Podcast Category') verbose_name_plural = _('Podcast Categories') def __str__(self): return self.title class Podcast(models.Model): title = models.CharField( max_length=256, verbose_name=_('Title :'), unique=True, null=False, blank=False ) slug = models.SlugField( max_length=256, verbose_name=_('Slug :'), unique=True, null=False, blank=False ) thumbnail = models.ImageField( upload_to='content/podcast/thumbnail/', verbose_name=_('Thumbnail :') ) publish = models.BooleanField( verbose_name=_('Publish :'), default=True, help_text=_('Will this audio be published?') ) audio = models.FileField( upload_to='content/podcast/audio/', verbose_name=_('Audio :') ) category = models.ManyToManyField( PodcastCategory ) content = RichTextUploadingField() class Meta: verbose_name = _('Podcast') verbose_name_plural = _('Podcasts') def display_category(self): return ', '.join([cat.title for cat in self.category.all()]) def __str__(self): return self.title class Skill(models.Model): title = models.CharField( max_length=256, verbose_name=_('Title :'), unique=True, null=False, blank=False ) description = models.TextField( verbose_name=_('Description :'), null=False, blank=False ) icon = models.CharField( max_length=30, verbose_name=_('Icon name :'), null=False, blank=False ) class Meta: verbose_name = _('Skill') verbose_name_plural = _('Skills') def __str__(self): return self.title
4,956
Python
.py
179
20.424581
68
0.58769
codegenius2/Editoral
8
0
0
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,090
urls.py
codegenius2_Editoral/content/urls.py
from django.urls import path from . import views urlpatterns = [ path('', views.Index.as_view(), name='index'), path('search/', views.Search.as_view(), name='search'), path('blog/', views.Blog.as_view(), name='blog'), path('create/blog/', views.BlogCreateView.as_view(), name='blog_create'), path('create/blog_category/', views.BlogCategoryCreateView.as_view(), name='blog_category_create'), path('blog/<int:pk>/', views.BlogArchiveByCategoryPK.as_view(), name='blog_archive_by_category_pk'), path('blog/<str:slug>/', views.BlogSingle.as_view(), name='blog_single'), path('create/videocast_category/', views.VideocastCategoryCreateView.as_view(), name='videocast_category_create'), path('videocast/', views.Videocast.as_view(), name='videocast'), path('create/videocast/', views.VideocastCreateView.as_view(), name='videocast_create'), path('videocast/<int:pk>/', views.VideocastArchiveByCategoryPK.as_view(), name='videocast_archive_by_category_pk'), path('videocast/<str:slug>/', views.VideocastSingle.as_view(), name='videocast_single'), path('create/podcast_category/', views.PodcastCategoryCreateView.as_view(), name='podcast_category_create'), path('podcast/', views.Podcast.as_view(), name='podcast'), path('create/podcast/', views.PodcastCreateView.as_view(), name='podcast_create'), path('podcast/<int:pk>/', views.PodArchiveByCategoryPK.as_view(), name='podcast_archive_by_category_pk'), path('podcast/<str:slug>/', views.PodSingle.as_view(), name='podcast_single'), path('create/skill/', views.SkillCreateView.as_view(), name='skill_create'), ]
1,630
Python
.py
22
69.727273
119
0.709215
codegenius2/Editoral
8
0
0
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,091
context_processors.py
codegenius2_Editoral/content/context_processors.py
from . import models from constance import config def show_system_content(request): return { 'blog_categories': models.BlogCategory.objects.all(), 'videocast_categories': models.VideocastCategory.objects.all(), 'podcast_categories': models.PodcastCategory.objects.all(), 'podcasts': models.Podcast.objects.order_by('-pk').filter(publish=True)[:2], 'config': config }
417
Python
.py
10
35.7
84
0.698765
codegenius2/Editoral
8
0
0
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,092
admin.py
codegenius2_Editoral/content/admin.py
from django.contrib import admin from django.utils.translation import ugettext_lazy as _ from . import models class BlogAdmin(admin.ModelAdmin): list_display = ['title', 'display_category', 'publish'] list_editable = ('publish',) list_filter = ('publish', 'category',) search_fields = ('title',) models.Blog.display_category.short_description = _('Categories') prepopulated_fields = {'slug': ('title',)} class VideoAdmin(admin.ModelAdmin): list_display = ['title', 'display_category', 'publish'] list_editable = ('publish',) list_filter = ('publish', 'category',) search_fields = ('title',) models.Videocast.display_category.short_description = _('Categories') prepopulated_fields = {'slug': ('title',)} class PodcastAdmin(admin.ModelAdmin): list_display = ['title', 'display_category', 'publish'] list_editable = ('publish',) list_filter = ('publish', 'category',) search_fields = ('title',) models.Podcast.display_category.short_description = _('Categories') prepopulated_fields = {'slug': ('title',)} admin.site.register(models.BlogCategory) admin.site.register(models.Blog, BlogAdmin) admin.site.register(models.VideocastCategory) admin.site.register(models.Videocast, VideoAdmin) admin.site.register(models.Podcast, PodcastAdmin) admin.site.register(models.PodcastCategory) admin.site.register(models.Skill)
1,394
Python
.py
31
41.354839
73
0.72452
codegenius2/Editoral
8
0
0
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,093
views.py
codegenius2_Editoral/content/views.py
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.messages.views import SuccessMessageMixin
from django.shortcuts import render, redirect
from django.urls import reverse
from django.views import generic, View
from django.db.models import Q

from . import models
from .forms import SearchForm


class Index(View):
    """Landing page: latest blog post, skills, recent blogs and videocasts."""

    template_name = 'index.html'

    def get(self, request, *args, **kwargs):
        # Published blogs, newest first; sliced lazily below.
        published_blogs = models.Blog.objects.order_by('-pk').filter(publish=True)
        context = {
            'last_blog': published_blogs[:1],
            'skills': models.Skill.objects.all(),
            'blogs': published_blogs[1:5],
            'videocasts': models.Videocast.objects.order_by('-pk').filter(publish=True)[:4]
        }
        return render(request, self.template_name, context)


class Search(View):
    """Full-text search over blogs, videocasts and podcasts."""

    template_name = 'search.html'

    def get(self, request, *args, **kwargs):
        form = SearchForm(self.request.GET)
        # Guard clause: invalid/missing query goes back to the index page.
        if not form.is_valid():
            return redirect('content:index')
        query = form.cleaned_data['query']
        # One lookup reused for all three content types instead of repeating
        # the identical Q expression per queryset.
        lookup = Q(title__icontains=query) | Q(content__icontains=query)
        context = {
            'blogs': models.Blog.objects.order_by('-pk').filter(lookup),
            'videocasts': models.Videocast.objects.order_by('-pk').filter(lookup),
            'podcasts': models.Podcast.objects.order_by('-pk').filter(lookup)
        }
        return render(request, self.template_name, context)


class BlogCategoryCreateView(LoginRequiredMixin, SuccessMessageMixin, generic.CreateView):
    """Create a blog category (login required)."""

    model = models.BlogCategory
    fields = '__all__'
    success_message = 'Blog category was created successfully'

    def get_success_url(self):
        return reverse('content:blog_category_create')


class Blog(generic.ListView):
    """Archive of all blog posts."""

    model = models.Blog
    template_name = 'blog_archive.html'


class BlogCreateView(LoginRequiredMixin, SuccessMessageMixin, generic.CreateView):
    """Create a blog post (login required)."""

    model = models.Blog
    fields = '__all__'
    success_message = 'Blog was created successfully'

    def get_success_url(self):
        return reverse('content:blog_create')


class BlogArchiveByCategoryPK(generic.ListView):
    """Blog archive filtered to one category (``pk`` from the URL)."""

    model = models.Blog
    template_name = 'blog_archive.html'

    def get_queryset(self):
        return self.model.objects.filter(category=self.kwargs['pk'])


class BlogSingle(generic.DetailView):
    """Single blog post looked up by slug."""

    model = models.Blog
    template_name = 'single.html'

    def get_queryset(self):
        return self.model.objects.filter(slug=self.kwargs['slug'])


class VideocastCategoryCreateView(LoginRequiredMixin, SuccessMessageMixin, generic.CreateView):
    """Create a videocast category (login required)."""

    model = models.VideocastCategory
    fields = '__all__'
    success_message = 'Video cast category was created successfully'

    def get_success_url(self):
        return reverse('content:videocast_category_create')


class Videocast(generic.ListView):
    """Archive of all videocasts."""

    model = models.Videocast
    template_name = 'videocast_archive.html'


class VideocastCreateView(LoginRequiredMixin, SuccessMessageMixin, generic.CreateView):
    """Create a videocast (login required)."""

    model = models.Videocast
    fields = '__all__'
    success_message = 'Video cast was created successfully'

    def get_success_url(self):
        return reverse('content:videocast_create')


class VideocastArchiveByCategoryPK(generic.ListView):
    """Videocast archive filtered to one category (``pk`` from the URL)."""

    model = models.Videocast
    template_name = 'videocast_archive.html'

    def get_queryset(self):
        return self.model.objects.filter(category=self.kwargs['pk'])


class VideocastSingle(generic.DetailView):
    """Single videocast looked up by slug."""

    model = models.Videocast
    template_name = 'single.html'

    def get_queryset(self):
        return self.model.objects.filter(slug=self.kwargs['slug'])


class PodcastCategoryCreateView(LoginRequiredMixin, SuccessMessageMixin, generic.CreateView):
    """Create a podcast category (login required)."""

    model = models.PodcastCategory
    fields = '__all__'
    success_message = 'Podcast category was created successfully'

    def get_success_url(self):
        return reverse('content:podcast_category_create')


class Podcast(generic.ListView):
    """Archive of all podcasts."""

    model = models.Podcast
    template_name = 'podcast_archive.html'


class PodcastCreateView(LoginRequiredMixin, SuccessMessageMixin, generic.CreateView):
    """Create a podcast (login required)."""

    model = models.Podcast
    fields = '__all__'
    success_message = 'Podcast was created successfully'

    def get_success_url(self):
        return reverse('content:podcast_create')


class PodArchiveByCategoryPK(generic.ListView):
    """Podcast archive filtered to one category (``pk`` from the URL)."""

    model = models.Podcast
    template_name = 'podcast_archive.html'

    def get_queryset(self):
        return self.model.objects.filter(category=self.kwargs['pk'])


class PodSingle(generic.DetailView):
    """Single podcast looked up by slug."""

    model = models.Podcast
    template_name = 'single.html'

    def get_queryset(self):
        return self.model.objects.filter(slug=self.kwargs['slug'])


class SkillCreateView(LoginRequiredMixin, SuccessMessageMixin, generic.CreateView):
    """Create a skill entry (login required)."""

    model = models.Skill
    fields = '__all__'
    success_message = 'Skill was created successfully'

    def get_success_url(self):
        return reverse('content:skill_create')
5,293
Python
.py
119
37.697479
95
0.704803
codegenius2/Editoral
8
0
0
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,094
setup.py
mabrains_path_length_analysis/setup.py
# =========================================================================================
# Copyright (c) 2024, Mabrains LLC
# Licensed under the GNU Lesser General Public License, Version 3.0 (the "License");
# you may not use this file except in compliance with the License.

# GNU Lesser General Public License
# Version 3, 29 June 2007

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.

# SPDX-License-Identifier: LGPL-3.0
# =========================================================================================

"""Packaging script for the path_analysis package."""

from setuptools import setup, find_packages

# Read the dependency list and long description with context managers so the
# file handles are closed deterministically (the originals were leaked), and
# with an explicit encoding so the build does not depend on the locale.
with open("requirements.txt", encoding="utf-8") as f:
    requirements = f.read().strip().split("\n")

with open("README.md", encoding="utf-8") as f:
    long_description = f.read()

setup(
    name="path_analysis",
    packages=find_packages(),
    version="0.4.0",
    description="Measurement Of Path Length For Photonic and Electrical systems",
    long_description=long_description,
    long_description_content_type="text/markdown",
    author="Mabrains LLC",
    author_email="[email protected]",
    install_requires=requirements,
    classifiers=[
        "Programming Language :: Python :: 3",
        "Intended Audience :: Developers",
        "Operating System :: POSIX :: Linux",
    ],
    python_requires=">3.9",
)
1,841
Python
.py
39
44.205128
91
0.648664
mabrains/path_length_analysis
8
4
0
LGPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,095
path_length.py
mabrains_path_length_analysis/path_length.py
# =========================================================================================
# Copyright (c) 2024, Mabrains LLC
# Licensed under the GNU Lesser General Public License, Version 3.0 (the "License");
# you may not use this file except in compliance with the License.

# GNU Lesser General Public License
# Version 3, 29 June 2007

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.

# SPDX-License-Identifier: LGPL-3.0
# =========================================================================================

"""
Run Path Length Measurements.

Usage:
    path_length.py --config=<config_file_path> [--run_dir=<run_dir_path>]

Options:
    --help -h                  Print this help message.
    --config=<param>           Yaml file contains the path length parameters.
    --run_dir=<run_dir_path>   directory to save all the results [default: pwd]
"""

import logging
import os
import time
from datetime import datetime
from typing import Any

import pandas as pd
import yaml
from docopt import docopt

from path_analysis.path_analysis import path_length


def read_yaml(yaml_file: str) -> dict[str, Any]:
    """
    Read a yaml file and return its contents as a dictionary.

    Args:
        yaml_file (str): yaml file path

    Returns:
        dict: all the yaml file data

    Raises:
        yaml.YAMLError: if the file is not valid yaml (logged before re-raise).
    """
    with open(yaml_file, "r") as stream:
        try:
            return yaml.safe_load(stream)
        except yaml.YAMLError as exc:
            # Bug fix: the original logged the error and then fell through to
            # `return yaml_dic` with the name unbound, raising NameError.
            # Log and re-raise the real parse error instead.
            logging.error(exc)
            raise


if __name__ == "__main__":
    # Command-line arguments
    arguments = docopt(__doc__, version="RUN Path Length: 1.0")

    # Timestamped name reused for the run directory and the log file
    now_str = datetime.utcnow().strftime("length_run_%Y_%m_%d_%H_%M_%S")

    # Check that the configuration file exists before doing anything else
    config_in = arguments["--config"]
    if not os.path.exists(config_in):
        logging.error(f"The configuration file {config_in} doesn't exist, please check")
        exit(1)

    # Resolve the run directory: "pwd", empty or missing means a fresh
    # timestamped directory under the current working directory.
    if arguments["--run_dir"] in ("pwd", "", None):
        run_dir = os.path.join(os.path.abspath(os.getcwd()), now_str)
    else:
        run_dir = os.path.abspath(arguments["--run_dir"])

    # Create the run directory.  Both branches of the original if/else ran
    # the identical makedirs(..., exist_ok=True) call, so one call suffices.
    os.makedirs(run_dir, exist_ok=True)

    # Logging setup: a file inside the run directory plus the console
    logging.basicConfig(
        level=logging.DEBUG,
        handlers=[
            logging.FileHandler(os.path.join(run_dir, "{}.log".format(now_str))),
            logging.StreamHandler(),
        ],
        format="%(asctime)s | %(levelname)-7s | %(message)s",
        datefmt="%d-%b-%Y %H:%M:%S",
    )

    # Show full dataframes when they are logged
    pd.set_option("display.max_rows", None)

    # Read the measurement parameters
    config_data = read_yaml(config_in)

    # Run the measurement and time it
    time_start = time.time()
    path_length_df = path_length(**config_data)
    exc_time = time.time() - time_start

    # Save clean report with desired lengths
    path_length_df.to_csv(os.path.join(run_dir, "final_report_length.csv"), index=False)
    logging.info(f"path_length_report: \n {path_length_df}")

    # Reporting execution time
    logging.info(f"Path length execution time: {exc_time} sec")
3,995
Python
.py
100
35.09
91
0.632911
mabrains/path_length_analysis
8
4
0
LGPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,096
__init__.py
mabrains_path_length_analysis/path_analysis/__init__.py
# ========================================================================================= # Copyright (c) 2024, Mabrains LLC # Licensed under the GNU Lesser General Public License, Version 3.0 (the "License"); # you may not use this file except in compliance with the License. # GNU Lesser General Public License # Version 3, 29 June 2007 # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. # SPDX-License-Identifier: LGPL-3.0 # ========================================================================================= from .path_analysis import path_length
1,208
Python
.py
21
56.380952
91
0.635135
mabrains/path_length_analysis
8
4
0
LGPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,097
path_analysis.py
mabrains_path_length_analysis/path_analysis/path_analysis.py
# ========================================================================================= # Copyright (c) 2024, Mabrains LLC # Licensed under the GNU Lesser General Public License, Version 3.0 (the "License"); # you may not use this file except in compliance with the License. # GNU Lesser General Public License # Version 3, 29 June 2007 # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. # SPDX-License-Identifier: LGPL-3.0 # ========================================================================================= import gdstk import logging import os from math import sqrt import pandas as pd import networkx as nx from functools import partial def get_length(poly: gdstk.Polygon) -> float: """ Calculate the length of a polygon using its area and perimeter. Args: poly (gdstk.Polygon): The polygon for which to calculate the length. Returns: float: The calculated length of the polygon. Raises: ValueError: If the discriminant (value inside the square root) is negative, indicating no real solution for the length with the given area and perimeter. Note: The area and perimeter are calculated using the gdstk library methods `area()` and `perimeter()`. The discriminant is rounded to 12 decimal places to avoid precision issues. If a negative value in the square root is detected, an error is logged, and a ValueError is raised. 
Example: >>> poly = gdstk.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)]) >>> length = get_length(poly) >>> print(f"The length is: {length}") """ area = poly.area() perimeter = poly.perimeter() discriminant = round(perimeter * perimeter / 16 - area, 12) if discriminant < 0: logging.error( f"area = {area}, perimeter = {perimeter}, negative value {discriminant} in sqrt" ) raise ValueError( "Invalid input: No real solution for the length with the given area and perimeter" ) return perimeter / 4 + sqrt(discriminant) def get_polygons( gdstk_lib: gdstk.Library, path_layer: tuple[int, int], cutting_layer: tuple[int, int], cell_name: str | None = None, ) -> tuple[list[gdstk.Polygon], list[list[gdstk.Polygon]], list[list[gdstk.Label]]]: """ Retrieve polygons representing paths and cutting regions based on input parameters. Parameters: - gdstk_lib (gdstk.Library): The gdstk.Library containing the desired cell. - path_layer (tuple[int, int]): Layer number and dtype for paths polygons. - cutting_layer (tuple[int, int]): Layer number and dtype for cutting polygons. - cell_name (str, optional): Name of the cell. Defaults to None. Returns: tuple: A tuple containing three elements. - List of path polygons (gdstk.Polygon). - List of list of cutting polygons, where each list represents cutting polygons corresponding to a path polygon (list[list[gdstk.Polygon]]) - List of list of cutting labels, where each list represents cutting labels corresponding to a path polygon (list[list[gdstk.Polygon]]) The function performs the following steps: 1. Calls _get_polygons to obtain path polygons, cutting polygons, and labels. 2. Filters cutting polygons to include only valid polygons. 3. Filters labels based on whether their origin is inside cutting polygons. 
``` """ path_polygons, cutting_polygons, labels = _get_polygons( gdstk_lib, path_layer, cutting_layer, cell_name ) cutting_polygons, labels = filter_polygons(path_polygons, cutting_polygons, labels) return path_polygons, cutting_polygons, labels def _get_polygons( gdstk_lib: gdstk.Library, path_layer: tuple[int, int], cutting_layer: tuple[int, int], cell_name: str | None = None, ) -> tuple[gdstk.Polygon, gdstk.Polygon, list[gdstk.Label]]: """ Retrieve polygons representing paths and cutting regions from a gdstk.Library. Parameters: - gdstk_lib (gdstk.Library): The gdstk.library containing the desired cell. - path_layer (tuple[int, int]): Layer number and dtype for paths polygons. - cutting_layer (tuple[int, int]): Layer number and dtype for cutting polygons. - cell_name (str, optional): Name of the cell. Defaults to None. Returns: tuple: A tuple containing three elements. - Polygons representing path (gdstk.Polygon). - Polygons representing cutting regions (gdstk.Polygon). - List of labels associated with cutting regions (list[gdstk.Label]). The function performs the following steps: 1. Retrieves the top-level cells from the gdstk.Library. 2. If no cells are available, logs an error and exits. 3. If multiple top-level cells exist and no specific cell is specified, logs an error and exits. 4. If a specific cell name is provided, searches for the cell with that name. 5. Retrieves path polygons from the selected cell based on layer and datatype. 6. Merges path polygons using gdstk.boolean with "or" operation. 7. Retrieves cutting polygons and labels from the selected cell based on layer and datatype. 8. Returns path polygons, cutting polygons, and labels. 
""" cells = gdstk_lib.top_level() if len(cells) < 1: logging.error("no cells available") exit(1) if len(cells) > 1 and cell_name is None: logging.error("Please specify a cell name when multiple top-level cells exist.") exit(1) cell: gdstk.Cell | None = None if cell_name is None: cell = cells[0] else: for c in cells: if c.name == cell_name: cell = c break if cell is None: logging.error("Invalid cell name") exit(1) path_polygons = cell.get_polygons( depth=None, layer=path_layer[0], datatype=path_layer[1] ) path_polygons = gdstk.boolean(path_polygons, path_polygons, "or") cutting_polygons = cell.get_polygons( depth=None, layer=cutting_layer[0], datatype=cutting_layer[1] ) labels = cell.get_labels( depth=None, layer=cutting_layer[0], texttype=cutting_layer[1] ) return path_polygons, cutting_polygons, labels def filter_polygons( path_polygons: list[gdstk.Polygon], cutting_polygons: list[gdstk.Polygon], labels: list[gdstk.Label], ) -> tuple[list[list[gdstk.Polygon]], list[list[gdstk.Label]]]: """ Filters cutting polygons and labels based on their relationship with path polygons. Parameters: - path_polygons (list[gdstk.Polygon]): List of path polygons. - cutting_polygons (list[gdstk.Polygon]): List of cutting polygons. - labels (list[gdstk.Label]): List of labels associated with cutting polygons. Returns: Tuple containing two filtered lists: 1. List of filtered cutting polygons. 2. List of filtered labels. This function filters cutting polygons based on whether they intersect with any of the path polygons. It also filters labels associated with the filtered cutting polygons. Duplicate labels are checked, and an error is raised if duplicates are found. Returns the rearranged data as a tuple of lists: - List of lists of cutting polygons per path polygon. - List of lists of labels per path polygon. 
""" # filter cutting polygons cutting_polygons = [ polygon for polygon in cutting_polygons if check_if_polygon_cuts_path(polygon, path_polygons) ] # filter labels labels = [ label for label, condition1 in zip( labels, gdstk.inside([label.origin for label in labels], cutting_polygons), ) if condition1 ] labels_text = [(label.text) for label in labels] duplicate_labels = get_duplicates(labels_text) if duplicate_labels: logging.error( ( f"found duplicate labels {duplicate_labels}," "please make sure to name your cutting polygons with a unique name" ) ) exit(1) return _rearrange_data(path_polygons, cutting_polygons, labels) def _rearrange_data( path_polygons: list[gdstk.Polygon], cutting_polygons: list[gdstk.Polygon], labels: list[gdstk.Label], ) -> tuple[list[list[gdstk.Polygon]], list[list[gdstk.Label]]]: """ Rearranges data by associating cutting polygons and labels with path polygons. Parameters: - path_polygons (list[gdstk.Polygon]): List of path polygons. - cutting_polygons (list[gdstk.Polygon]): List of cutting polygons. - labels (list[gdstk.Label]): List of labels associated with cutting polygons. Returns: Tuple containing two lists: 1. List of lists of cutting polygons per path polygon. 2. List of lists of labels per path polygon. 
""" labels_points = [label.origin for label in labels] sorted_labels: list[gdstk.Label] = [] for polygon in cutting_polygons: for label, condition in zip(labels, gdstk.inside(labels_points, polygon)): if condition: sorted_labels.append(label) break cutting_polygons_per_path: list[list[gdstk.Polygon]] = [] cutting_labels_per_path: list[list[gdstk.Label]] = [] for path_polygon in path_polygons: valid_cutting_polygons = [] valid_cutting_labels = [] for polygon, label in zip(cutting_polygons, sorted_labels): if check_if_polygon_cuts_path(polygon, path_polygon): if polygon.contain(label.origin): valid_cutting_polygons.append(polygon) valid_cutting_labels.append(label) cutting_polygons_per_path.append(valid_cutting_polygons) cutting_labels_per_path.append(valid_cutting_labels) return cutting_polygons_per_path, cutting_labels_per_path def get_duplicates(lst: list) -> list: """ Finds and returns duplicate elements in a list. Parameters: - lst (list): The input list to check for duplicates. Returns: list: A list containing the duplicate elements found in the input list. Example: duplicates = get_duplicates([1, 2, 2, 3, 4, 4, 5]) # Result: [2, 4] """ seen = set() duplicates = [] for item in lst: if item in seen: duplicates.append(item) seen.add(item) return duplicates def check_if_polygon_cuts_path( polygon: gdstk.Polygon, path_polygons: list[gdstk.Polygon] ) -> bool: """ Check if a polygon cuts through a set of path polygons. Parameters: - polygon (gdstk.Polygon): The polygon to be checked for cutting through paths. - path_polygons (list[gdstk.Polygon]): List of path polygons to check against. Returns: bool: True if the given polygon cuts through any polygon of the set of path polygons, False otherwise. The function performs the following steps: 1. Calculates the boolean operation 'not' between the given polygon and the set of path polygons. 2. Checks if the length of the resulting polygons is greater than 1, indicating cutting through paths. 3. 
Handles cases where the boolean operations 'and' yields empty list.(not on path). 4. Handles cases where the cutting polygon is on the path end. (if any vertix is inside the path it will be considered invalid cut). """ splitted_polygons = gdstk.boolean(path_polygons, polygon, "not") if len(splitted_polygons) > 1: return True if len(splitted_polygons) == 1 and gdstk.boolean(path_polygons, polygon, "and"): return not gdstk.any_inside(polygon.points, path_polygons) return False def split_polygon( poly: gdstk.Polygon, cutting_polygons: list[gdstk.Polygon] ) -> list[gdstk.Polygon]: """ Split a polygon using a set of cutting polygons. Parameters: - poly (gdstk.Polygon): The polygon to be split. - cutting_polygons (list[gdstk.Polygon]): List of cutting polygons for the split operation. Returns: list[gdstk.Polygon]: List of polygons resulting from the split operation. The function performs the following steps: 1. Calculates the boolean operation 'not' between the given polygon and the cutting polygons. 2. Returns a list of polygons resulting from the split operation. Example: ``` split_result = split_polygon( poly=my_polygon, cutting_polygons=[cutting1, cutting2, cutting3] ) ``` """ return gdstk.boolean(poly, cutting_polygons, "not") def construct_graph_data_frame( path_polygons: list[gdstk.Polygon], cutting_polygons: list[list[gdstk.Polygon]], labels: list[list[gdstk.Label]], ) -> pd.DataFrame: """ Construct a DataFrame representing graph data from path_polygons and cutting polygons. Parameters: - path_polygons (list[gdstk.Polygon]): List of path polygons. - cutting_polygons (list[list[gdstk.Polygon]]): List of lists, where each list represents cutting polygons corresponding to a path polygon. - labels (list[list[gdstk.Label]]): List of lists of labels associated with cutting polygons. Returns: pd.DataFrame: DataFrame with columns 'port1', 'port2', and 'length' representing the graph data. The function performs the following steps: 1. 
Iterates over path_polygons, cutting_polygons to process each path and its cutting polygons. 2. splits path_polygon with cutting_polygons. 3. For each sub_polygon, extracts port1, port2, and length. 4. Appends records (port1, port2, length) to a list. 5. Constructs a DataFrame from the list of records. 6. Returns the constructed DataFrame. The resulting DataFrame might look like: ``` port1 port2 length 0 label1_tail_0 node_1 10.0 1 node_1 node_2 15.0 2 node_2 polygon_0_tail_1 12.0 3 polygon_1_tail_0 node_4 8.0 4 node_4 polygon_1_tail_1 20.0 ``` """ records = [] path_labels = move_labels_on_path(path_polygons, cutting_polygons, labels) for i, (poly, cutting_polys) in enumerate(zip(path_polygons, cutting_polygons)): tail_counter = 0 if cutting_polys: splitted_polygons = split_polygon(poly, cutting_polys) for sub_poly in splitted_polygons: node_names = get_node_names(sub_poly, path_labels) if len(node_names) == 1: continue port1 = node_names[0] port2 = f"polygon_{i}_tail_{tail_counter}" tail_counter += 1 elif len(node_names) == 2: port1, port2 = node_names else: continue length = get_length(sub_poly) records.append([port1, port2, length]) df = pd.DataFrame(records, columns=["port1", "port2", "length"]) return df def get_node_names(poly, labels: list[gdstk.Label]) -> list[str]: """ Get unique node names associated with a polygon based on label positions. Parameters: - poly (gdstk.Polygon): The polygon for which node names are determined. - labels (list[gdstk.Label]): List of labels associated with the polygon. Returns: list[str]: List of unique node names associated with the polygon. The function performs the following steps: 1. Extracts points and corresponding names from the given list of labels. 2. checks whether points are inside the polygon. 3. Appends names to the list 'node_names' if the corresponding point are inside the polygon. 4. Returns a list containing unique node names. Example: ``` polygon = gdstk.Polygon(...) 
labels = [label1, label2, label3] node_names = get_node_names(polygon, labels) # Result: ['port1', 'port2'] ``` """ points = [label.origin for label in labels] names = [label.text for label in labels] node_names: list[str] = [] for name, condition in zip(names, gdstk.inside(points, poly)): if condition: node_names.append(name) return list(set(node_names)) def _get_path_labels( path: gdstk.Polygon, cutting_poly: gdstk.Polygon, text: str ) -> list[gdstk.Label]: """ Create labels with a specified text at the vertices of the intersection between a path polygon and a cutting polygon. Parameters: - path (gdstk.Polygon): The path polygon. - cutting_poly (gdstk.Polygon): The cutting polygon. - text (str): The text to be assigned to the labels. Returns: list[gdstk.Label]: List of labels with the specified text at the vertices of the intersection between the path and cutting polygons. """ intersection = gdstk.boolean(cutting_poly, path, "and") if intersection: points = intersection[0].points else: logging.error(f"failed to find intersection between {cutting_poly}, and {path}") exit(1) return [gdstk.Label(text, origin=point) for point in points] def move_labels_on_path( path_polygons: list[gdstk.Polygon], cutting_polygons: list[list[gdstk.Polygon]], labels: list[list[gdstk.Label]], ) -> list[gdstk.Label]: """ Create labels with a specified text at the vertices of the intersection between a path polygon and a cutting polygon. Parameters: - path_polygons (list[gdstk.Polygon]): List of path polygons. - cutting_polygons (list[list[gdstk.Polygon]]): List of lists containing cutting polygons corresponding to each path. - labels (list[list[gdstk.Label]]): List of lists containing labels associated with cutting polygons. Returns: list[gdstk.Label]: List of path_labels. The function performs the following steps: 1. Iterates over each path polygon, its cutting polygons, and associated labels. 2. 
For each cutting polygon and label, computes the labels along the path based on the vertices of the intersection. 3. Appends the calculated labels to the 'moved_labels' list. 4. Returns the list of moved labels. """ moved_labels = [] for path_poly, cutting_polys, labels in zip( path_polygons, cutting_polygons, labels ): get_path_labels = partial(_get_path_labels, path=path_poly) for poly, label in zip(cutting_polys, labels): moved_labels += get_path_labels(cutting_poly=poly, text=label.text) return moved_labels def get_nx_graph(graph_data_frame: pd.DataFrame) -> nx.Graph: """ Create a NetworkX graph from a DataFrame containing edge information. Args: graph_data_frame (pd.DataFrame): A DataFrame with columns 'port1', 'port2', and 'length' representing edges and their corresponding lengths. Returns: nx.Graph: A NetworkX graph constructed from the provided DataFrame. Note: This function uses the `nx.from_pandas_edgelist` method to create a graph. The 'port1' and 'port2' columns of the DataFrame represent nodes, and the 'length' column is used as the edge attribute. Example: >>> data = { ... 'port1': ['a', 'b'], ... 'port2': ['b', 'c'], ... 'length': [1.0, 2.0] ... } >>> graph_df = pd.DataFrame(data) >>> graph = get_nx_graph(graph_df) >>> print(list(graph.edges(data=True))) [('a', 'b', {'length': 1.0}), ('b', 'c', {'length': 2.0})] """ return nx.from_pandas_edgelist(graph_data_frame, "port1", "port2", "length") def get_paths_report(graph: nx.Graph) -> pd.DataFrame: """ Generate a report of shortest path lengths between all pairs of nodes in a graph. Parameters: - graph (nx.Graph): The input graph with weighted edges. Returns: pd.DataFrame: DataFrame containing information about shortest path lengths between all pairs of nodes. Columns include 'port1', 'port2', and 'length'. The function performs the following steps: 1. Iterates over all pairs of nodes in the graph. 2. 
Uses NetworkX's shortest_path_length to find the shortest path length between each pair, considering edge weights defined by the 'length' attribute. 3. Handles cases where no path exists between nodes using a try-except block. 4. Appends records (port1, port2, length) to a list. 5. Constructs a DataFrame from the list of records. 6. Adds a 'sorted_nodes' column for later duplicate checking. 7. Drops duplicate rows based on the 'sorted_nodes' column. 8. Returns the resulting DataFrame. """ nodes = graph.nodes records = [] for start_node in nodes: for end_node in nodes: if start_node != end_node: try: path_length = nx.shortest_path_length( graph, start_node, end_node, weight="length" ) except nx.NetworkXNoPath: path_length = -1 records.append([start_node, end_node, path_length]) if not records: logging.error(f"no_nodes detected : {records}") exit(1) report = pd.DataFrame(records) report.columns = ["port1", "port2", "length (um)"] # Sort values in each row and create a new sorted column report["sorted_nodes"] = report.apply( lambda row: "".join(sorted([row["port1"], row["port2"]])), axis=1 ) # Drop duplicates based on the sorted column report = report.drop_duplicates("sorted_nodes").drop("sorted_nodes", axis=1) return report def filter_path_report(report: pd.DataFrame, nodes: list[str]) -> pd.DataFrame: """ Filter a DataFrame based on specified nodes in the 'port1' and 'port2' columns. Parameters: - report (pd.DataFrame): The input DataFrame containing a network report. - nodes (list[str]): A list of nodes to filter the DataFrame by. Returns: - pd.DataFrame: A filtered DataFrame containing rows where either 'port1' or 'port2' matches any node in the specified list. 
""" return report[report["port1"].isin(nodes) & report["port2"].isin(nodes)] def key_exist_dict(key: str, d: dict): """ Checking if a specific key is exist in a dict Args: key (str): Dict key to be checked d (dict): Dict to check in Returns: val (str or list): contains value of the selected key """ if key in d: return d[key] else: logging.error(f"There is no {key} parameter in the config file, please recheck") exit(1) def path_length( gds_file: str, path_layer: dict[str, int], cutting_layer: dict[str, int], cell_name: str | None = None, nodes: list[str] = [], ) -> pd.DataFrame: """ Calculate the shortest path lengths between cutting polygons on paths in a gds file. Parameters: - gds_file (str): The path to the gds file. - path_layer (tuple[int, int]): Layer number and dtype for paths. - cutting_layer (tuple[int, int]): Layer number and dtype for cutting regions. - cell_name (str, optional): Name of the cell. Defaults to None. - nodes (list[str], optional): list of node names to consider for path length report. Returns: pd.DataFrame: DataFrame containing information about shortest path lengths between cutting polygons. Columns include 'port1', 'port2', and 'length'. The function performs the following steps: 1. Reads the gds file using gdstk.read_gds to obtain a gdstk.Library. 2. Calls the get_polygons function to retrieve path and cutting polygons. 3. Constructs a DataFrame with graph information using construct_graph_data_frame. 4. Converts the DataFrame to a NetworkX graph using get_nx_graph. 5. Generates a report of shortest path lengths between labels using get_paths_report. 6. Returns the DataFrame containing path lengths. 
Example: ``` gds_file_path = "path/to/your/file.gds" lengths_df = path_length( gds_file=gds_file_path, path_layer=(1,0), cutting_layer=(2,0), cell_name="example", nodes=['start','mid','stop'] ) ``` """ # Make sure that both path and cutting layer passed in proper format and make them as a list path_ly = [ key_exist_dict("layer_no", path_layer), key_exist_dict("layer_dtype", path_layer), ] cut_ly = [ key_exist_dict("layer_no", cutting_layer), key_exist_dict("layer_dtype", cutting_layer), ] # Reading input layout file if not os.path.isfile(gds_file): logging.error(f"{gds_file} file can't be found") exit(1) gdstk_lib = gdstk.read_gds(gds_file) # get path_polygons and cutting polygons path_polygons, cutting_polygons, labels = get_polygons( gdstk_lib=gdstk_lib, path_layer=path_ly, cutting_layer=cut_ly, cell_name=cell_name, ) # get networkx graph df = construct_graph_data_frame(path_polygons, cutting_polygons, labels) graph = get_nx_graph(df) # generate report for all paths report = get_paths_report(graph) report_clean_df = report[report["length (um)"] > 0] # Filter out required ports only if nodes: return filter_path_report(report_clean_df, nodes) return report_clean_df
26,111
Python
.py
591
37.62775
100
0.667637
mabrains/path_length_analysis
8
4
0
LGPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,098
pytest_path_length.py
mabrains_path_length_analysis/tests/pytest_path_length.py
# =========================================================================================
# Copyright (c) 2024, Mabrains LLC
# Licensed under the GNU Lesser General Public License, Version 3.0 (the "License");
# you may not use this file except in compliance with the License.

#                    GNU Lesser General Public License
#                       Version 3, 29 June 2007

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.

# SPDX-License-Identifier: LGPL-3.0

# =========================================================================================

import pandas as pd
from path_analysis import path_length
import numpy as np


def _assert_path_report(
    actual_output: pd.DataFrame, expected_output: pd.DataFrame
) -> None:
    """Assert two path-length reports are equal, ignoring port order per row.

    port1/port2 are interchangeable endpoints, so each row's two ports are
    sorted lexicographically before the frames are compared.  Both frames
    are modified in place (sorted ports, reset index).
    """
    # Sort ports to make sure that both data have same order [port1 & port2 are interchangeable]
    expected_output[["port1", "port2"]] = np.sort(
        expected_output[["port1", "port2"]], axis=1
    )
    actual_output[["port1", "port2"]] = np.sort(
        actual_output[["port1", "port2"]], axis=1
    )
    expected_output.reset_index(drop=True, inplace=True)
    actual_output.reset_index(drop=True, inplace=True)

    # Use pandas testing functions to check the correctness
    pd.testing.assert_frame_equal(actual_output, expected_output)


# Pytest function to test the correctness of the output DataFrame for simple route example
def test_simple_path():
    # Expected output data based on the processing logic
    expected_output = pd.DataFrame(
        {"port1": ["start"], "port2": ["end"], "length (um)": [19.831]}
    )

    # Call the function to get the actual output
    actual_output = path_length(
        gds_file="tests/route_path.gds",
        path_layer={"layer_no": 41, "layer_dtype": 0},
        cutting_layer={"layer_no": 66, "layer_dtype": 0},
    )

    _assert_path_report(actual_output, expected_output)


# Pytest function to test the correctness of the output DataFrame for intermediate route example
def test_intermediate_path():
    # Expected output data based on the processing logic
    expected_output = pd.DataFrame(
        {"port1": ["start"], "port2": ["end"], "length (um)": [162.511598]}
    )

    # Call the function to get the actual output
    actual_output = path_length(
        gds_file="tests/route_bend_path.gds",
        path_layer={"layer_no": 1, "layer_dtype": 0},
        cutting_layer={"layer_no": 66, "layer_dtype": 0},
    )

    _assert_path_report(actual_output, expected_output)


# Pytest function to test the correctness of the output DataFrame for complex route example
def test_complex_path():
    # Expected output data based on the processing logic
    expected_output = pd.DataFrame(
        {
            "port1": ["splitter_p1_start", "splitter_p3_start"],
            "port2": ["splitter_p1_end", "splitter_p3_end"],
            "length (um)": [526.139253, 501.134027],
        }
    )

    # Call the function to get the actual output
    actual_output = path_length(
        gds_file="tests/lidar_no_rad.gds",
        path_layer={"layer_no": 1, "layer_dtype": 0},
        cutting_layer={"layer_no": 1, "layer_dtype": 10},
        nodes=[
            "splitter_p1_start",
            "splitter_p1_end",
            "splitter_p3_start",
            "splitter_p3_end",
        ],
    )

    _assert_path_report(actual_output, expected_output)
4,820
Python
.py
103
41.407767
96
0.647309
mabrains/path_length_analysis
8
4
0
LGPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,287,099
aws-glueetl-costs-analysis.py
zhiweio_data-engineer-scripts/src/aws-glueetl-costs-analysis/aws-glueetl-costs-analysis.py
import argparse
import json
import os
from datetime import datetime
from typing import Generator, List

import boto3
import pandas as pd
import plotly.express as px


class GlueClient:
    """Thin wrapper around the boto3 Glue client with pagination helpers."""

    def __init__(self):
        self.client = boto3.client("glue")

    def _paginate(
        self, method, result_key: str, max_results: int, **kwargs
    ) -> Generator[dict, None, None]:
        """Yield every item under *result_key* across all pages of a Glue call.

        Follows the NextToken cursor until the API stops returning one.
        Shared by get_jobs / get_all_job_runs, which previously duplicated
        this loop.
        """
        res = method(MaxResults=max_results, **kwargs)
        yield from res.get(result_key, [])
        next_token = res.get("NextToken")
        while next_token:
            res = method(MaxResults=max_results, NextToken=next_token, **kwargs)
            yield from res.get(result_key, [])
            next_token = res.get("NextToken")

    def get_jobs(self) -> Generator[dict, None, None]:
        """Generator to yield Glue jobs."""
        yield from self._paginate(self.client.get_jobs, "Jobs", 1000)

    def get_all_job_runs(self, job_name: str) -> Generator[dict, None, None]:
        """Generator to yield all job runs for a given job name."""
        yield from self._paginate(
            self.client.get_job_runs, "JobRuns", 200, JobName=job_name
        )

    def filter_etl_jobs(self, jobs: Generator[dict, None, None]) -> List[str]:
        """Return the names of Spark ETL jobs (Command.Name == 'glueetl')."""
        return [job["Name"] for job in jobs if job["Command"]["Name"] == "glueetl"]


def download_job_runs_log(glue_client: GlueClient, costs_file: str):
    """Download all ETL job runs and save them as JSON lines to *costs_file*.

    Each run is echoed to stdout as a one-line summary and written to the
    file as one JSON object per line (datetimes serialized via str).
    """
    etl_jobs = glue_client.filter_etl_jobs(glue_client.get_jobs())
    with open(costs_file, "w", encoding="utf8") as f:
        for job in etl_jobs:
            for run in glue_client.get_all_job_runs(job):
                # Use .get() throughout: runs that are still in progress may
                # lack CompletedOn/ExecutionTime, and capacity fields vary by
                # worker type.  (Previously CompletedOn/ExecutionTime were
                # indexed directly, which could raise KeyError.)
                stat = (
                    "Id: {Id}\tStartedOn: {StartedOn}\tCompletedOn: {CompletedOn}\t"
                    "ExecutionTime: {ExecutionTime}\tMaxCapacity: {MaxCapacity}\t"
                    "WorkerType: {WorkerType}\tNumberOfWorkers: {NumberOfWorkers}"
                ).format(
                    Id=run["Id"],
                    StartedOn=run["StartedOn"],
                    CompletedOn=run.get("CompletedOn"),
                    ExecutionTime=run.get("ExecutionTime"),
                    MaxCapacity=run.get("MaxCapacity"),
                    WorkerType=run.get("WorkerType"),
                    NumberOfWorkers=run.get("NumberOfWorkers"),
                )
                print(stat)
                f.write(json.dumps(run, ensure_ascii=False, default=str) + "\n")
    print(f"Data saved to {costs_file}")


def analysis_job_costs(costs_file, start_date: str, end_date: str):
    """Analyze Glue job costs based on DPUs usage.

    Reads the JSON-lines file produced by download_job_runs_log, keeps runs
    started within [start_date, end_date] (inclusive, "%Y-%m-%d"), computes
    DPU-hours per run (ExecutionTime seconds / 3600 * MaxCapacity), and shows
    an interactive scatter plot of hourly DPU costs per job.

    Raises:
        FileNotFoundError: if *costs_file* does not exist.
    """
    if not os.path.exists(costs_file):
        raise FileNotFoundError(costs_file)

    start_date = datetime.strptime(start_date, "%Y-%m-%d").date()
    end_date = datetime.strptime(end_date, "%Y-%m-%d").date()

    df = pd.read_json(costs_file, lines=True)
    df["StartedOn"] = pd.to_datetime(df["StartedOn"], format="mixed")
    mask = (df["StartedOn"].dt.date >= start_date) & (
        df["StartedOn"].dt.date <= end_date
    )
    df = df[mask]

    df["CompletedOn"] = pd.to_datetime(df["CompletedOn"], format="mixed")
    # DPU-hours: ExecutionTime is in seconds, MaxCapacity in DPUs.
    df["DPUs"] = df["ExecutionTime"] / 60 / 60 * df["MaxCapacity"]
    # Bucket by completion hour for the x axis.
    df["EndTime"] = df["CompletedOn"].dt.strftime("%Y-%m-%d %H")

    agg_data = df.groupby(["JobName", "EndTime"])["DPUs"].sum().reset_index()
    fig = px.scatter(
        agg_data,
        x="EndTime",
        y="DPUs",
        color="JobName",
        title="Daily Glue Job DPUs Costs",
        labels={"EndTime": "End time(Y-M-D Hour)", "DPUs": "DPU Costs"},
        hover_data=["JobName"],
        category_orders={"EndTime": sorted(agg_data["EndTime"].unique())},
    )
    fig.show()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    opt = parser.add_argument
    opt("start_date", type=str, help="Start date, e.g., 2023-11-01")
    opt("end_date", type=str, help="End date, e.g., 2023-11-30")
    opt(
        "-F",
        "--costs-file",
        type=str,
        default="./glueetl_costs.jsonl",
        help="File to save costs data",
    )
    opt(
        "-K",
        "--skip-download",
        action="store_true",
        help="Skip downloading job logs and only analyze",
    )
    args = parser.parse_args()

    glue_client = GlueClient()
    if not args.skip_download:
        download_job_runs_log(glue_client, args.costs_file)
    analysis_job_costs(args.costs_file, args.start_date, args.end_date)
4,664
Python
.py
110
33.509091
84
0.589325
zhiweio/data-engineer-scripts
8
0
0
GPL-3.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)