id
int64
0
458k
file_name
stringlengths
4
119
file_path
stringlengths
14
227
content
stringlengths
24
9.96M
size
int64
24
9.96M
language
stringclasses
1 value
extension
stringclasses
14 values
total_lines
int64
1
219k
avg_line_length
float64
2.52
4.63M
max_line_length
int64
5
9.91M
alphanum_fraction
float64
0
1
repo_name
stringlengths
7
101
repo_stars
int64
100
139k
repo_forks
int64
0
26.4k
repo_open_issues
int64
0
2.27k
repo_license
stringclasses
12 values
repo_extraction_date
stringclasses
433 values
2,286,700
clientamppt.cpython-38.pyc
hkgdifyu_pFedPT/system/flcore/clients/__pycache__/clientamppt.cpython-38.pyc
U #ôıc‰ã@stddlZddlmZddlmZddlZddlZddlZddlZddl m Z ddl m Z Gdd„deƒZ dd„ZdS) éN)ÚClient)Úlabel_binarize)Úmetricscs4eZdZ‡fdd„Zdd„Zdd„Zdd„Z‡ZS) Ú clientAMPPTc s¦tƒj||||f|�|j|_|j|_t |j¡|_t  ¡|_ t j   |jj ¡|jdœg¡|_|j|_t j j |jj ¡|j|jd�|_t j jj|j|j|jd�|_dS)N)ÚparamsÚlr)rÚmomentum)Ú step_sizeÚgamma)ÚsuperÚ__init__ÚalphaKÚlamdaÚcopyÚdeepcopyÚmodelÚclient_uÚnnÚCrossEntropyLossÚlossÚtorchÚoptimÚSGDÚbaseÚ parametersÚ learning_rateÚ optimizerÚ plocal_stepsÚ generatorÚpt_learning_raterÚ poptimizerÚ lr_schedulerÚStepLRÚlearning_decayÚ scheduler)ÚselfÚargsÚidÚ train_samplesÚ test_samplesÚkwargs©Ú __class__©õYD:\京东\promot\第二次投稿\å®�验\native - pro\system\flcore\clients\clientamppt.pyr s ÿÿÿzclientAMPPT.__init__c Cs | ¡}t ¡}t |jj¡}|j |j¡|j |j¡|j  ¡|jj   ¡D] }d|_ qP|jj  ¡D] }d|_ qht |jƒD]´}t|ƒD]¦\}\}}t|ƒtgƒkr¼|d |j¡|d<n | |j¡}| |j¡}|jrôt dt tj ¡¡¡|j ¡| |¡} | | |¡} |  ¡|j ¡|j ¡qŠq~|j} |j�rTtj d| d¡} |jj   ¡D] }d|_ �q`|jj  ¡D] }d|_ �qzt | ƒD]æ}|D]Ú\}}t|ƒtgƒk�rÈ|d |j¡|d<n | |j¡}| |j¡}|j�rt dt tj ¡¡¡|j ¡| |¡} | | |¡} t |jƒ} t |jƒ} | | }| |j!|j"dt# $||¡7} |  ¡|j ¡�q˜�q�|j %¡|j %¡~t |jj¡}d}t&|  ¡|  ¡ƒD]<\}}||}t# '|dk|t# (|¡|¡}|t# )|¡}�q²|j*dd7<|j*dt ¡|7<|S) NFTrgš™™™™™¹?ééÚ num_roundsÚ total_cost)+Úload_train_dataÚtimerrrrÚtoÚdevicerÚtrainrrÚ requires_gradÚrangerÚ enumerateÚtypeÚ train_slowÚsleepÚnpÚabsÚrandomÚrandr Ú zero_gradrÚbackwardÚstepr$Ú local_stepsÚrandintrÚweight_flattenrr rÚdotÚcpuÚzipÚwhereÚ zeros_likeÚsumÚtrain_time_cost)r%Ú trainloaderÚ start_timeÚ old_promptÚparamrDÚiÚxÚyÚoutputrÚmax_local_stepsrÚparams_ÚsubÚ new_promptÚ diff_provalueÚ new_paramÚ old_paramÚdiff_pror-r-r.r7 sx                     zclientAMPPT.traincCs<t|j ¡|jj ¡ƒD]\}}|j||j ¡|_qdS)N)rJrrrÚdataÚclone)r%rÚ coef_selfr\r]r-r-r.Úset_parametersis zclientAMPPT.set_parametersc Csv| ¡}|j |j¡|j ¡d}d}d}g}g}t ¡�ò|D]æ\}}t|ƒtgƒkrp|d |j¡|d<n | |j¡}| |j¡}| |¡} |j |¡} |t  tj | dd�|k¡  ¡7}|t  tj | dd�|k¡  ¡7}||j d7}|  |  ¡ ¡ ¡¡|  t| ¡ ¡ ¡t |j¡d�¡qBW5QRX|j ¡tj|dd�}tj|dd�}tj||dd�} |||| 
fS)Nrr/)Údim)Úclasses)ÚaxisÚmicro)Úaverage)Úload_test_datarr5r6ÚevalrÚno_gradr;rrMÚargmaxÚitemÚshapeÚappendÚdetachrIÚnumpyrr>ÚarangeÚ num_classesÚ concatenaterÚ roc_auc_score) r%ÚtestloaderfullÚtest_accÚ test_acc2Útest_numÚy_probÚy_truerTrUrVÚoutput2Úaucr-r-r.Ú test_metricsms4         2 zclientAMPPT.test_metrics)Ú__name__Ú __module__Ú __qualname__r r7rbr}Ú __classcell__r-r-r+r.r s IrcCs0g}| ¡D]}| | d¡¡q t |¡}|S)Néÿÿÿÿ)rrnÚviewrÚcat)rrÚur-r-r.rG”s   rG)rÚtorch.nnrÚflcore.clients.clientbaserrpr>r4rÚsklearn.preprocessingrÚsklearnrrrGr-r-r-r.Ú<module>s    
4,168
Python
.py
48
85.666667
553
0.415291
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,701
clientrep.cpython-39.pyc
hkgdifyu_pFedPT/system/flcore/clients/__pycache__/clientrep.cpython-39.pyc
a W¢bc ã@sDddlZddlmZddlZddlZddlmZGdd„deƒZdS)éN)ÚClientcs,eZdZ‡fdd„Zdd„Zdd„Z‡ZS)Ú clientRepc shtƒj||||fi|¤�t ¡|_tjj|jj   ¡|j d�|_ tjj|jj   ¡|j d�|_|j|_dS)N)Úlr)ÚsuperÚ__init__ÚnnÚCrossEntropyLossÚlossÚtorchÚoptimÚSGDÚmodelÚbaseÚ parametersÚ learning_rateÚ optimizerÚ predictorÚ poptimizerÚ plocal_steps)ÚselfÚargsÚidÚ train_samplesÚ test_samplesÚkwargs©Ú __class__©úr/media/sim812/391e55df-b6f2-4fe9-a920-53434a8506fa/lgh/pdept/PFL-Non-IID-master/system/flcore/clients/clientrep.pyr s  zclientRep.__init__c CsX| ¡}t ¡}|j |j¡|j ¡|jj ¡D] }d|_q4|jj  ¡D] }d|_qLt |j ƒD]ª}t |ƒD]œ\}\}}t |ƒt gƒkr |d |j¡|d<n | |j¡}| |j¡}|jrØt dt tj ¡¡¡|j ¡| |¡}| ||¡} |  ¡|j ¡qnqb|j} |j�r.tj d| d¡} |jj ¡D] }d|_�q:|jj  ¡D] }d|_�qTt | ƒD]²}t |ƒD]¢\}\}}t |ƒt gƒk�rª|d |j¡|d<n | |j¡}| |j¡}|j�rät dt tj ¡¡¡|j ¡| |¡}| ||¡} |  ¡|j ¡�qv�qj|j ¡|jdd7<|jdt ¡|7<dS) NFTrgš™™™™™¹?ééÚ num_roundsÚ total_cost)Úload_train_dataÚtimer ÚtoÚdeviceÚtrainrrÚ requires_gradrÚrangerÚ enumerateÚtypeÚ train_slowÚsleepÚnpÚabsÚrandomÚrandrÚ zero_gradr ÚbackwardÚstepÚ local_stepsÚrandintrÚcpuÚtrain_time_cost) rÚ trainloaderÚ start_timeÚparamr4ÚiÚxÚyÚoutputr Úmax_local_stepsrrrr'sX               zclientRep.traincCs0t| ¡|jj ¡ƒD]\}}|j ¡|_qdS)N)Úziprr rÚdataÚclone)rrÚ new_paramÚ old_paramrrrÚset_parametersLszclientRep.set_parameters)Ú__name__Ú __module__Ú __qualname__rr'rFÚ __classcell__rrrrr s 9r) r Útorch.nnrÚnumpyr.r$Z system.flcore.clients.clientbaserrrrrrÚ<module>s   
2,347
Python
.py
32
72.1875
294
0.431779
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,702
clientamp.cpython-39.pyc
hkgdifyu_pFedPT/system/flcore/clients/__pycache__/clientamp.cpython-39.pyc
a f¾`cÍã@sTddlZddlmZddlmZddlZddlZddlZGdd„deƒZ dd„Z dS)éN)ÚClientcs,eZdZ‡fdd„Zdd„Zdd„Z‡ZS)Ú clientAMPc s`tƒj||||fi|¤�|j|_|j|_t |j¡|_t  ¡|_ t j j |j ¡|jd�|_dS)N)Úlr)ÚsuperÚ__init__ÚalphaKÚlamdaÚcopyÚdeepcopyÚmodelÚclient_uÚnnÚCrossEntropyLossÚlossÚtorchÚoptimÚSGDÚ parametersÚ learning_rateÚ optimizer)ÚselfÚargsÚidÚ train_samplesÚ test_samplesÚkwargs©Ú __class__©úr/media/sim812/391e55df-b6f2-4fe9-a920-53434a8506fa/lgh/pdept/PFL-Non-IID-master/system/flcore/clients/clientamp.pyr s  zclientAMP.__init__c CsR| ¡}t ¡}|j ¡|j}|jr8tj d|d¡}t |ƒD]Ş}|D]Ô\}}t |ƒt gƒkrv|d  |j ¡|d<n |  |j ¡}|  |j ¡}|jr®t  dt tj ¡¡¡|j ¡| |¡}| ||¡}t|jƒ} t|jƒ} | | } ||j|jdt | | ¡7}| ¡|j ¡qHq@~|jdd7<|jdt ¡|7<dS)Néérgš™™™™™¹?Ú num_roundsÚ total_cost)Úload_train_dataÚtimer ÚtrainÚ local_stepsÚ train_slowÚnpÚrandomÚrandintÚrangeÚtypeÚtoÚdeviceÚsleepÚabsÚrandrÚ zero_gradrÚweight_flattenr rrrÚdotÚbackwardÚstepÚtrain_time_cost) rÚ trainloaderÚ start_timeÚmax_local_stepsr7ÚxÚyÚoutputrÚparamsZparams_Úsubrrrr&s4           zclientAMP.traincCs8t| ¡|j ¡ƒD]\}}|j||j ¡|_qdS)N)Úziprr ÚdataÚclone)rr Ú coef_selfÚ new_paramÚ old_paramrrrÚset_parameters=szclientAMP.set_parameters)Ú__name__Ú __module__Ú __qualname__rr&rGÚ __classcell__rrrrr s )rcCs0g}| ¡D]}| | d¡¡q t |¡}|S)Néÿÿÿÿ)rÚappendÚviewrÚcat)r r?Úurrrr4Bs   r4) rÚtorch.nnr Úflcore.clients.clientbaserÚnumpyr)r%r rr4rrrrÚ<module>s  9
2,249
Python
.py
24
92.625
277
0.44699
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,703
clientpt.cpython-38.pyc
hkgdifyu_pFedPT/system/flcore/clients/__pycache__/clientpt.cpython-38.pyc
U ˜øc¶ã@sdddlZddlmZddlZddlZddlmZddlZddl m Z ddl m Z Gdd„deƒZ dS)éN)ÚClient)Úlabel_binarize)Úmetricscs4eZdZ‡fdd„Zdd„Zdd„Zdd„Z‡ZS) ÚclientPTc s„tƒj||||f|�t ¡|_tjj|jj   ¡|j d�|_ |j |_ tjj|jj  ¡|j|jd�|_tjjj|j|j |jd�|_dS)N)Úlr)rÚmomentum)Ú step_sizeÚgamma)ÚsuperÚ__init__ÚnnÚCrossEntropyLossÚlossÚtorchÚoptimÚSGDÚmodelÚbaseÚ parametersÚ learning_rateÚ optimizerÚ plocal_stepsÚ generatorZpt_learning_raterÚ poptimizerÚ lr_schedulerÚStepLRZlearning_decayÚ scheduler)ÚselfÚargsÚidÚ train_samplesÚ test_samplesÚkwargs©Ú __class__©õVD:\京东\promot\第二次投稿\å®�验\native - pro\system\flcore\clients\clientpt.pyr s ÿÿzclientPT.__init__c CsÒ| ¡}t ¡}t |jj¡}|j |j¡|j ¡|jj   ¡D] }d|_ qB|jj  ¡D] }d|_ qZt |j ƒD]´}t|ƒD]¦\}\}}t|ƒtgƒkr®|d |j¡|d<n | |j¡}| |j¡}|jræt dt tj ¡¡¡|j ¡| |¡} | | |¡} |  ¡|j ¡|j ¡q|qp|j} |j�rFtj d| d¡} |jj   ¡D] }d|_ �qR|jj  ¡D] }d|_ �qlt | ƒD]²}t|ƒD]¢\}\}}t|ƒtgƒk�rÂ|d |j¡|d<n | |j¡}| |j¡}|j�rüt dt tj ¡¡¡|j ¡| |¡} | | |¡} |  ¡|j ¡�q��q‚|j ¡t |jj¡} d} t |  ¡|   ¡ƒD]<\}}||}t! "|dk|t! #|¡|¡}| t! $|¡} �qd|j%dd7<|j%dt ¡|7<| S) NFTrgš™™™™™¹?ééÚ num_roundsÚ total_cost)&Úload_train_dataÚtimeÚcopyÚdeepcopyrrÚtoÚdeviceÚtrainrrÚ requires_gradÚrangerÚ enumerateÚtypeÚ train_slowÚsleepÚnpÚabsÚrandomÚrandrÚ zero_gradrÚbackwardÚsteprÚ local_stepsÚrandintrÚcpuÚziprÚwhereÚ zeros_likeÚsumÚtrain_time_cost)rÚ trainloaderÚ start_timeZ old_promptÚparamr>ÚiÚxÚyÚoutputrÚmax_local_stepsZ new_promptZ diff_provalueÚ new_paramÚ old_paramÚdiff_pror%r%r&r1sj                zclientPT.traincCs0t| ¡|jj ¡ƒD]\}}|j ¡|_qdS)N)rBrrrÚdataÚclone)rrrOrPr%r%r&Úset_parametersfszclientPT.set_parametersc Csv| ¡}|j |j¡|j ¡d}d}d}g}g}t ¡�ò|D]æ\}}t|ƒtgƒkrp|d |j¡|d<n | |j¡}| |j¡}| |¡} |j |¡} |t  tj | dd�|k¡  ¡7}|t  tj | dd�|k¡  ¡7}||j d7}|  |  ¡ ¡ ¡¡|  t| ¡ ¡ ¡t |j¡d�¡qBW5QRX|j ¡tj|dd�}tj|dd�}tj||dd�} |||| fS)Nrr')Údim)Úclasses)ÚaxisÚmicro)Úaverage)Úload_test_datarr/r0ÚevalrÚno_gradr5rrEÚargmaxÚitemÚshapeÚappendÚdetachrAÚnumpyrr8ÚarangeÚ num_classesÚ concatenaterÚ roc_auc_score) rÚtestloaderfullÚtest_accÚ 
test_acc2Útest_numÚy_probÚy_truerKrLrMÚoutput2Úaucr%r%r&Ú test_metricsjs4         2 zclientPT.test_metrics)Ú__name__Ú __module__Ú __qualname__r r1rTroÚ __classcell__r%r%r#r&r s Ir)rÚtorch.nnr rbr8r,Úflcore.clients.clientbaserr-Úsklearn.preprocessingrÚsklearnrrr%r%r%r&Ú<module>s    
3,693
Python
.py
46
78.956522
553
0.426261
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,704
clientpFedMe.cpython-39.pyc
hkgdifyu_pFedPT/system/flcore/clients/__pycache__/clientpFedMe.cpython-39.pyc
a f¾`cşã@sXddlZddlZddlZddlZddlmZddlmZddl m Z Gdd„de ƒZ dS)éN)ÚpFedMeOptimizer)ÚClientcs4eZdZ‡fdd„Zdd„Zdd„Zdd„Z‡ZS) Ú clientpFedMec s†tƒj||||fi|¤�|j|_|j|_|j|_t t|j   ¡ƒ¡|_ t t|j   ¡ƒ¡|_ t  ¡|_t|j   ¡|j|jd�|_dS)N)ÚlrÚlamda)ÚsuperÚ__init__rÚKZp_learning_rateZpersonalized_learning_rateÚcopyÚdeepcopyÚlistÚmodelÚ parametersÚ local_paramsÚpersonalized_paramsÚnnÚCrossEntropyLossÚlossrÚ optimizer)ÚselfÚargsÚidÚ train_samplesÚ test_samplesÚkwargs©Ú __class__©úu/media/sim812/391e55df-b6f2-4fe9-a920-53434a8506fa/lgh/pdept/PFL-Non-IID-master/system/flcore/clients/clientpFedMe.pyr s ÿzclientpFedMe.__init__c Cs†| ¡}t ¡}|j ¡|j}|jr8tj d|d¡}t |ƒD�]}|D]ø\}}t |ƒt gƒkrx|d  |j ¡|d<n |  |j ¡}|  |j ¡}|jr°t  dt tj ¡¡¡t |jƒD]@}|j ¡| |¡}| ||¡} |  ¡|j |j|j ¡|_qºt|j|jƒD]6\} } |   |j ¡} | j|j|j| j| j| _�q qJq@| |j|j¡|jdd7<|jdt ¡|7<dS)Néérgš™™™™™¹?Ú num_roundsÚ total_cost)Úload_train_dataÚtimer ÚtrainÚ local_stepsÚ train_slowÚnpÚrandomÚrandintÚrangeÚtypeÚtoÚdeviceÚsleepÚabsÚrandr rÚ zero_gradrÚbackwardÚsteprrÚzipÚdatarÚ learning_rateÚupdate_parametersÚtrain_time_cost) rÚ trainloaderÚ start_timeÚmax_local_stepsr4ÚxÚyÚiÚoutputrÚ new_paramZ localweightrrrr%s4        (zclientpFedMe.traincCs@t| ¡|j ¡|jƒD]"\}}}|j ¡|_|j ¡|_qdS)N)r5rr rr6Úclone)rr rAÚ old_paramZ local_paramrrrÚset_parametersEs" zclientpFedMe.set_parametersc CsŞ| ¡}| |j|j¡|j ¡d}d}t ¡�”|D]~\}}t|ƒtgƒkrf|d |j ¡|d<n | |j ¡}| |j ¡}| |¡}|t  tj |dd�|k¡  ¡7}||j d7}q8Wdƒn1sÌ0Y||fS)Nrr)Údim)Úload_test_datar8r rÚevalÚtorchÚno_gradr,r-r.ÚsumÚargmaxÚitemÚshape)rÚtestloaderfullÚtest_accÚtest_numr=r>r@rrrÚtest_metrics_personalizedJs       .z&clientpFedMe.test_metrics_personalized)Ú__name__Ú __module__Ú __qualname__rr%rDrQÚ __classcell__rrrrr s +r) Únumpyr(r$r rHÚtorch.nnrZflcore.optimizers.fedoptimizerrÚflcore.clients.clientbaserrrrrrÚ<module>s   
2,889
Python
.py
29
98.344828
455
0.454387
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,705
clientbnpt.cpython-38.pyc
hkgdifyu_pFedPT/system/flcore/clients/__pycache__/clientbnpt.cpython-38.pyc
U T¸kc÷ã@sLddlZddlmZddlZddlZddlmZddlZGdd„deƒZ dS)éN)ÚClientcs,eZdZ‡fdd„Zdd„Zdd„Z‡ZS)Ú clientBNPTc s�tƒj||||f|�t ¡|_tjj|j  ¡|j d�|_ tjj|jj   ¡|j d�|_ |j |_ tjj|jj  ¡|j|jd�|_tjjj|j|j |jd�|_dS)N)Úlr)rÚmomentum)Ú step_sizeÚgamma)ÚsuperÚ__init__ÚnnÚCrossEntropyLossÚlossÚtorchÚoptimÚSGDÚmodelÚ parametersÚ learning_rateÚ optimizerÚbaseÚ plocal_stepsÚ generatorÚpt_learning_raterÚ poptimizerÚ lr_schedulerÚStepLRÚlearning_decayÚ scheduler)ÚselfÚargsÚidÚ train_samplesÚ test_samplesÚkwargs©Ú __class__©õXD:\京东\promot\第二次投稿\å®�验\native - pro\system\flcore\clients\clientbnpt.pyr s  ÿzclientBNPT.__init__c Cs| ¡}t ¡}t |jj¡}|j |j¡|j ¡|jj   ¡D] }d|_ qB|jj  ¡D] }d|_ qZt |j ƒD]´}t|ƒD]¦\}\}}t|ƒtgƒkr®|d |j¡|d<n | |j¡}| |j¡}|jræt dt tj ¡¡¡|j ¡| |¡} | | |¡} |  ¡|j ¡|j ¡q|qp|j} |j�rFtj d| d¡} |jj   ¡D] }d|_ �qR|jj  ¡D] }d|_ �qlt | ƒD]Î}t|ƒD]¾\}\}}t|ƒtgƒk�rÂ|d |j¡|d<n | |j¡}| |j¡}|j�rüt dt tj ¡¡¡|j ¡| |¡} | | |¡} |  ¡|j�r@t |j|t!|ƒƒn |j ¡�q��q‚|j "¡|j#dd7<|j#dt ¡|7<t |jj¡} d} t$|  ¡|   ¡ƒD]<\}}||}t% &|dk|t% '|¡|¡}| t% (|¡} �q¬|j#dd7<|j#dt ¡|7<| S) NFTrgš™™™™™¹?ééÚ num_roundsÚ total_cost))Úload_train_dataÚtimeÚcopyÚdeepcopyrrÚtoÚdeviceÚtrainrrÚ requires_gradÚrangerÚ enumerateÚtypeÚ train_slowÚsleepÚnpÚabsÚrandomÚrandrÚ zero_gradr ÚbackwardÚsteprÚ local_stepsÚrandintrÚprivacyÚdp_stepÚlenÚcpuÚtrain_time_costÚzipr ÚwhereÚ zeros_likeÚsum)rÚ trainloaderÚ start_timeÚ old_promptÚparamr>ÚiÚxÚyÚoutputr Úmax_local_stepsÚ new_promptÚ diff_provalueÚ new_paramÚ old_paramÚdiff_pror%r%r&r1sr                zclientBNPT.traincCsBt|j ¡|jj ¡ƒD]$\\}}\}}d|kr|j ¡|_qdS)NÚbn)rFrÚnamed_parametersrÚdataÚclone)rrr r8ÚonÚopr%r%r&Úset_parametersds(zclientBNPT.set_parameters)Ú__name__Ú __module__Ú __qualname__r r1r^Ú __classcell__r%r%r#r&r s Fr) r Útorch.nnr Únumpyr8r,Úflcore.clients.clientbaserr-rr%r%r%r&Ú<module>s   
2,909
Python
.py
41
69.658537
240
0.427675
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,706
clientprox.cpython-38.pyc
hkgdifyu_pFedPT/system/flcore/clients/__pycache__/clientprox.cpython-38.pyc
U ”jfc›ã@sXddlZddlZddlZddlZddlmZddlmZddl m Z Gdd„de ƒZ dS)éN)ÚPerturbedGradientDescent)ÚClientcs,eZdZ‡fdd„Zdd„Zdd„Z‡ZS)Ú clientProxc s\tƒj||||f|�|j|_t t|j ¡ƒ¡|_t   ¡|_ t |j ¡|j |jd�|_dS)N)ÚlrÚmu)ÚsuperÚ__init__rÚcopyÚdeepcopyÚlistÚmodelÚ parametersÚ global_paramsÚnnÚCrossEntropyLossÚlossrÚ learning_rateÚ optimizer)ÚselfÚargsÚidÚ train_samplesÚ test_samplesÚkwargs©Ú __class__©õLD:\京东\promot\cifar\cifar\Cifar10_iid\system\flcore\clients\clientprox.pyr s ÿzclientProx.__init__c Cs| ¡}t ¡}|j |j¡|j ¡|j}|jrFtj   d|d¡}t |ƒD]Š}|D]€\}}t |ƒt gƒkr„|d |j¡|d<n | |j¡}| |j¡}|j  ¡| |¡}| ||¡}| ¡|j  |j|j¡qVqN|j ¡|jdd7<|jdt ¡|7<dS)NéérÚ num_roundsÚ total_cost)Úload_train_dataÚtimer ÚtoÚdeviceÚtrainÚ local_stepsÚ train_slowÚnpÚrandomÚrandintÚrangeÚtyperÚ zero_gradrÚbackwardÚsteprÚcpuÚtrain_time_cost) rÚ trainloaderÚ start_timeÚmax_local_stepsr0ÚxÚyÚoutputrrrrr&s*         zclientProx.traincCs@t| ¡|j|j ¡ƒD]"\}}}|j ¡|_|j ¡|_qdS)N)Úzipr rr ÚdataÚclone)rr Ú new_paramZ global_paramÚparamrrrÚset_parameters5s" zclientProx.set_parameters)Ú__name__Ú __module__Ú __qualname__rr&r>Ú __classcell__rrrrr s r) ÚtorchÚnumpyr)r#r Útorch.nnrÚflcore.optimizers.fedoptimizerrÚflcore.clients.clientbaserrrrrrÚ<module>s   
1,963
Python
.py
20
96.9
283
0.459877
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,707
clientprox.cpython-39.pyc
hkgdifyu_pFedPT/system/flcore/clients/__pycache__/clientprox.cpython-39.pyc
a f¾`c›ã@sXddlZddlZddlZddlZddlmZddlmZddl m Z Gdd„de ƒZ dS)éN)ÚPerturbedGradientDescent)ÚClientcs,eZdZ‡fdd„Zdd„Zdd„Z‡ZS)Ú clientProxc s`tƒj||||fi|¤�|j|_t t|j ¡ƒ¡|_t   ¡|_ t |j ¡|j |jd�|_dS)N)ÚlrÚmu)ÚsuperÚ__init__rÚcopyÚdeepcopyÚlistÚmodelÚ parametersÚ global_paramsÚnnÚCrossEntropyLossÚlossrÚ learning_rateÚ optimizer)ÚselfÚargsÚidÚ train_samplesÚ test_samplesÚkwargs©Ú __class__©ús/media/sim812/391e55df-b6f2-4fe9-a920-53434a8506fa/lgh/pdept/PFL-Non-IID-master/system/flcore/clients/clientprox.pyr s ÿzclientProx.__init__c Cs| ¡}t ¡}|j ¡|j}|jr8tj d|d¡}t |ƒD]ª}|D] \}}t |ƒt gƒkrv|d  |j ¡|d<n |  |j ¡}|  |j ¡}|jr®t  dt tj ¡¡¡|j ¡| |¡}| ||¡}| ¡|j |j|j ¡qHq@|jdd7<|jdt ¡|7<dS)Néérgš™™™™™¹?Ú num_roundsÚ total_cost)Úload_train_dataÚtimer ÚtrainÚ local_stepsÚ train_slowÚnpÚrandomÚrandintÚrangeÚtypeÚtoÚdeviceÚsleepÚabsÚrandrÚ zero_gradrÚbackwardÚsteprÚtrain_time_cost) rÚ trainloaderÚ start_timeÚmax_local_stepsr3ÚxÚyÚoutputrrrrr$s*        zclientProx.traincCs@t| ¡|j|j ¡ƒD]"\}}}|j ¡|_|j ¡|_qdS)N)Úzipr rr ÚdataÚclone)rr Ú new_paramZ global_paramÚparamrrrÚset_parameters5s" zclientProx.set_parameters)Ú__name__Ú __module__Ú __qualname__rr$r@Ú __classcell__rrrrr s r) ÚtorchÚnumpyr'r#r Útorch.nnrÚflcore.optimizers.fedoptimizerrÚflcore.clients.clientbaserrrrrrÚ<module>s   
2,029
Python
.py
20
100.3
293
0.471642
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,708
clientreppt.cpython-38.pyc
hkgdifyu_pFedPT/system/flcore/clients/__pycache__/clientreppt.cpython-38.pyc
U �P÷c�ã@sdddlZddlmZddlZddlZddlmZddlZddl m Z ddl m Z Gdd„deƒZ dS)éN)ÚClient)Úlabel_binarize)Úmetricscs4eZdZ‡fdd„Zdd„Zdd„Zdd„Z‡ZS) Ú clientREPPTc sœtƒj||||f|�t ¡|_tjj|jj   ¡|j d�|_ |j |_ tjj|jj  ¡|jdœ|jj  ¡|j dœg|jd�|_tjjj|j|j |jd�|_dS)N)Úlr)Úparamsr)Úmomentum)Ú step_sizeÚgamma)ÚsuperÚ__init__ÚnnÚCrossEntropyLossÚlossÚtorchÚoptimÚSGDÚmodelÚbaseÚ parametersÚ learning_rateÚ optimizerÚ plocal_stepsÚ generatorÚpt_learning_rateÚ predictorrÚ poptimizerÚ lr_schedulerÚStepLRÚlearning_decayÚ scheduler)ÚselfÚargsÚidÚ train_samplesÚ test_samplesÚkwargs©Ú __class__©õYD:\京东\promot\第二次投稿\å®�验\native - pro\system\flcore\clients\clientreppt.pyr s şıÿzclientREPPT.__init__c Cs0| ¡}t ¡}t |jj¡}|j |j¡|j ¡|jj   ¡D] }d|_ qB|jj  ¡D] }d|_ qZ|jj   ¡D] }d|_ qrt |jƒD]´}t|ƒD]¦\}\}}t|ƒtgƒkrÆ|d |j¡|d<n | |j¡}| |j¡}|jrşt dt tj ¡¡¡|j ¡| |¡} | | |¡} |  ¡|j ¡|j ¡q”qˆ|j} |j�r^tj d| d¡} |jj   ¡D] }d|_ �qj|jj  ¡D] }d|_ �q„|jj   ¡D] }d|_ �q�t | ƒD]²}t|ƒD]¢\}\}}t|ƒtgƒk�rô|d |j¡|d<n | |j¡}| |j¡}|j�r.t dt tj ¡¡¡|j ¡| |¡} | | |¡} |  ¡|j ¡�qÀ�q´|j  ¡|j!dd7<|j!dt ¡|7<t |jj¡} d} t"|  ¡|   ¡ƒD]<\}}||}t# $|dk|t# %|¡|¡}| t# &|¡} �qÂ|j!dd7<|j!dt ¡|7<| S) NFTrgš™™™™™¹?ééÚ num_roundsÚ total_cost)'Úload_train_dataÚtimeÚcopyÚdeepcopyrrÚtoÚdeviceÚtrainrrÚ requires_gradrÚrangerÚ enumerateÚtypeÚ train_slowÚsleepÚnpÚabsÚrandomÚrandrÚ zero_gradrÚbackwardÚstepr Ú local_stepsÚrandintrÚcpuÚtrain_time_costÚziprÚwhereÚ zeros_likeÚsum)r!Ú trainloaderÚ start_timeÚ old_promptÚparamrBÚiÚxÚyÚoutputrÚmax_local_stepsÚ new_promptÚ diff_provalueÚ new_paramÚ old_paramÚdiff_pror)r)r*r5sv                 zclientREPPT.traincCs0t| ¡|jj ¡ƒD]\}}|j ¡|_qdS)N)rGrrrÚdataÚclone)r!rrVrWr)r)r*Úset_parametersoszclientREPPT.set_parametersc Cs~| ¡}|j |j¡|j ¡d}d}d}g}g}t ¡�ú|D]î\}}t|ƒtgƒkrp|d |j¡|d<n | |j¡}| |j¡}| |¡} |j |j  |¡¡} |t  tj | dd�|k¡  ¡7}|t  tj | dd�|k¡  ¡7}||j d7}| |  ¡ ¡ ¡¡| t| ¡ ¡ ¡t |j¡d�¡qBW5QRX|j ¡tj|dd�}tj|dd�}tj||dd�} |||| 
fS)Nrr+)Údim)Úclasses)ÚaxisÚmicro)Úaverage)Úload_test_datarr3r4ÚevalrÚno_gradr9rrrJÚargmaxÚitemÚshapeÚappendÚdetachrEÚnumpyrr<ÚarangeÚ num_classesÚ concatenaterÚ roc_auc_score) r!ÚtestloaderfullÚtest_accÚ test_acc2Útest_numÚy_probÚy_truerPrQrRÚoutput2Úaucr)r)r*Ú test_metricsus4        2 zclientREPPT.test_metrics)Ú__name__Ú __module__Ú __qualname__r r5r[rvÚ __classcell__r)r)r'r*r s Pr)rÚtorch.nnr rir<r0Úflcore.clients.clientbaserr1Úsklearn.preprocessingrÚsklearnrrr)r)r)r*Ú<module>s    
3,886
Python
.py
51
74.745098
558
0.422576
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,709
clientmtl.cpython-37.pyc
hkgdifyu_pFedPT/system/flcore/clients/__pycache__/clientmtl.cpython-37.pyc
B ùùecª ã@s\ddlZddlmZddlmZddlZddlZddlZddl Z Gdd„deƒZ dd„Z dS)éN)ÚClientcs,eZdZ‡fdd„Zdd„Zdd„Z‡ZS)Ú clientMTLc s`tƒj||||f|�d|_d|_d|_|j|_d|_t ¡|_ t j j |j  ¡|jdd�|_dS)Nrg-Cëâ6?gà?)ÚlrÚmomentum)ÚsuperÚ__init__ÚomegaÚW_globÚidxZitkZlambaÚnnÚCrossEntropyLossÚlossÚtorchÚoptimÚSGDÚmodelÚ parametersÚ learning_rateÚ optimizer)ÚselfÚargsÚidÚ train_samplesÚ test_samplesÚkwargs)Ú __class__©úF/root/autodl-tmp/PFL-Non-IID-master/system/flcore/clients/clientmtl.pyr s zclientMTL.__init__c CsÎ| ¡}t ¡}|j |j¡|j ¡|j}|jrFtj   d|d¡}�x>t |ƒD�]0}�x(|D�]\}}t |ƒt gƒkr�|d |j¡|d<n | |j¡}| |j¡}|jrÈt  dt tj  ¡¡¡|j ¡| |¡}| ||¡}t|jƒ|jdd…|jf<d} | |j ¡d7} | t t |j|jd¡d¡7} tt |jjd¡dƒd} | d| 9} || 7}| ¡|j ¡q`WqRW|j  ¡d|_d|_|j!dd7<|j!dt ¡|7<dS)Néérgš™™™™™¹?é Ú num_roundsÚ total_cost)"Úload_train_dataÚtimerÚtoÚdeviceÚtrainÚ local_stepsÚ train_slowÚnpÚrandomÚrandintÚrangeÚtypeÚsleepÚabsÚrandrÚ zero_gradr Úflattenr r ÚnormrÚsumrÚintÚmathÚlog10ÚshapeÚbackwardÚstepÚcpuÚtrain_time_cost) rÚ trainloaderÚ start_timeÚmax_local_stepsr;ÚxÚyÚoutputr Zloss_regularizerÚfrrrr's@      " zclientMTL.traincCs*t |dd¡|_t |¡|_||_dS)Nr)rÚsqrtrÚcopyÚdeepcopyr r )rr rr rrrÚreceive_valuesHs zclientMTL.receive_values)Ú__name__Ú __module__Ú __qualname__rr'rHÚ __classcell__rr)rrr s 1rcs,| ¡‰ˆ ¡}‡fdd„|Dƒ}t |¡S)Ncsg|]}ˆ| ¡‘qSr)r3)Ú.0Úkey)Ú state_dictrrú <listcomp>Qszflatten.<locals>.<listcomp>)rOÚkeysrÚcat)rrQÚWr)rOrr3Nsr3) rÚtorch.nnr Úflcore.clients.clientbaserÚnumpyr*r$r7rFrr3rrrrÚ<module>s  D
2,511
Python
.py
26
95.423077
285
0.423974
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,710
clientapfl.cpython-38.pyc
hkgdifyu_pFedPT/system/flcore/clients/__pycache__/clientapfl.cpython-38.pyc
U ”jfcâ ã@sLddlZddlZddlmZddlZddlZddlmZGdd„deƒZ dS)éN)ÚClientcs,eZdZ‡fdd„Zdd„Zdd„Z‡ZS)Ú clientAPFLc sntƒj||||f|�t ¡|_tjj|j  ¡|j d�|_ |j |_ t  |j¡|_tjj|j  ¡|j d�|_dS)N)Úlr)ÚsuperÚ__init__ÚnnÚCrossEntropyLossÚlossÚtorchÚoptimÚSGDÚmodelÚ parametersÚ learning_rateÚ optimizerÚalphaÚcopyÚdeepcopyÚ model_perÚ optimizer_per)ÚselfÚargsÚidÚ train_samplesÚ test_samplesÚkwargs©Ú __class__©õLD:\京东\promot\cifar\cifar\Cifar10_iid\system\flcore\clients\clientapfl.pyr s  zclientAPFL.__init__c CsÂ| ¡}t ¡}|j |j¡|j |j¡|j ¡|j}|jrTt j   d|d¡}t |ƒD]ä}t |ƒD]Ö\}\}}t|ƒtgƒkrš|d |j¡|d<n | |j¡}| |j¡}|jrÒt dt  t j  ¡¡¡|j ¡| |¡}| ||¡} |  ¡|j ¡|j ¡| |¡} | | |¡} |  ¡|j ¡| ¡qhq\t|j ¡|j ¡ƒD]$\} } d|j| |j| | _�qX|j ¡|j ¡|jdd7<|jdt ¡|7<dS)Néérgš™™™™™¹?Ú num_roundsÚ total_cost)Úload_train_dataÚtimerÚtoÚdevicer ÚtrainÚ local_stepsÚ train_slowÚnpÚrandomÚrandintÚrangeÚ enumerateÚtypeÚsleepÚabsÚrandrÚ zero_gradr ÚbackwardÚsteprÚ alpha_updateÚziprrÚdataÚcpuÚtrain_time_cost)rÚ trainloaderÚ start_timeÚmax_local_stepsr6ÚiÚxÚyÚoutputr Z output_perZloss_perÚlpÚprrrr(sB               zclientAPFL.traincCs¦d}t|j ¡|j ¡ƒD]P\}}|j|j}|j|jjd|j|jj}|| d¡j  | d¡¡7}q|d|j7}|j|j ||_t   |j  ¡dd¡|_dS)Nrr éÿÿÿÿg{®Gáz”?ggğ?)r8r rrr9rÚgradÚviewÚTÚdotrr+ÚclipÚitem)rZ grad_alphaZl_paramsZp_paramsZdifrFrrrr7@s  zclientAPFL.alpha_update)Ú__name__Ú __module__Ú __qualname__rr(r7Ú __classcell__rrrrr s ,r) rr Útorch.nnrÚnumpyr+r%Úflcore.clients.clientbaserrrrrrÚ<module>s   
2,404
Python
.py
33
71.69697
241
0.42285
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,711
clientprox.cpython-37.pyc
hkgdifyu_pFedPT/system/flcore/clients/__pycache__/clientprox.cpython-37.pyc
B fc›ã@sXddlZddlZddlZddlZddlmZddlmZddl m Z Gdd„de ƒZ dS)éN)ÚPerturbedGradientDescent)ÚClientcs,eZdZ‡fdd„Zdd„Zdd„Z‡ZS)Ú clientProxc s\tƒj||||f|�|j|_t t|j ¡ƒ¡|_t   ¡|_ t |j ¡|j |jd�|_dS)N)ÚlrÚmu)ÚsuperÚ__init__rÚcopyÚdeepcopyÚlistÚmodelÚ parametersÚ global_paramsÚnnÚCrossEntropyLossÚlossrÚ learning_rateÚ optimizer)ÚselfÚargsÚidÚ train_samplesÚ test_samplesÚkwargs)Ú __class__©úG/root/autodl-tmp/PFL-Non-IID-master/system/flcore/clients/clientprox.pyr s  zclientProx.__init__c Cs| ¡}t ¡}|j |j¡|j ¡|j}|jrFtj   d|d¡}xšt |ƒD]�}xˆ|D]€\}}t |ƒt gƒkrˆ|d |j¡|d<n | |j¡}| |j¡}|j  ¡| |¡}| ||¡}| ¡|j  |j|j¡qZWqPW|j ¡|jdd7<|jdt ¡|7<dS)NéérÚ num_roundsÚ total_cost)Úload_train_dataÚtimer ÚtoÚdeviceÚtrainÚ local_stepsÚ train_slowÚnpÚrandomÚrandintÚrangeÚtyperÚ zero_gradrÚbackwardÚsteprÚcpuÚtrain_time_cost) rÚ trainloaderÚ start_timeÚmax_local_stepsr/ÚxÚyÚoutputrrrrr%s*       zclientProx.traincCsDx>t| ¡|j|j ¡ƒD]"\}}}|j ¡|_|j ¡|_qWdS)N)Úzipr rr ÚdataÚclone)rr Ú new_paramZ global_paramÚparamrrrÚset_parameters5s$ zclientProx.set_parameters)Ú__name__Ú __module__Ú __qualname__rr%r=Ú __classcell__rr)rrr s r) ÚtorchÚnumpyr(r"r Útorch.nnrÚflcore.optimizers.fedoptimizerrÚflcore.clients.clientbaserrrrrrÚ<module>s   
1,943
Python
.py
20
95.9
283
0.466216
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,712
clientperavg.cpython-38.pyc
hkgdifyu_pFedPT/system/flcore/clients/__pycache__/clientperavg.cpython-38.pyc
U ”jfcã@sXddlZddlZddlZddlZddlmZddlmZddl m Z Gdd„de ƒZ dS)éN)ÚPerAvgOptimizer)ÚClientcs,eZdZ‡fdd„Zdd„Zdd„Z‡ZS)Ú clientPerAvgc sBtƒj||||f|�|j|_t ¡|_t|j  ¡|jd�|_ dS)N)Úlr) ÚsuperÚ__init__Ú learning_rateÚbetaÚnnÚCrossEntropyLossÚlossrÚmodelÚ parametersÚ optimizer)ÚselfÚargsÚidÚ train_samplesÚ test_samplesÚkwargs©Ú __class__©õND:\京东\promot\cifar\cifar\Cifar10_iid\system\flcore\clients\clientperavg.pyr s zclientPerAvg.__init__c Csˆ| |jd¡}t ¡}|j |j¡|j ¡|j}|jrNt j   d|d¡}t |ƒD�]ô}|D�]è\}}t  t|j ¡ƒ¡}t|ƒtgƒkrÌddg}|dd|j… |j¡|d<|dd|j…|d<n|d|j… |j¡}|d|j… |j¡} |j�rt dt  t j  ¡¡¡|j ¡| |¡} | | | ¡} |  ¡|j ¡t|ƒtgƒk�rœddg}|d|jd… |j¡|d<|d|jd…|d<n||jd… |j¡}||jd… |j¡} |j�rêt dt  t j  ¡¡¡|j ¡| |¡} | | | ¡} |  ¡t|j ¡|ƒD]\} } | j ¡| _�q"|jj|jd�q`qV|j ¡|jdd7<|jdt ¡|7<dS)Néérgš™™™™™¹?©r Ú num_roundsÚ total_cost) Úload_train_dataÚ batch_sizeÚtimer ÚtoÚdeviceÚtrainÚ local_stepsÚ train_slowÚnpÚrandomÚrandintÚrangeÚcopyÚdeepcopyÚlistrÚtypeÚsleepÚabsÚrandrÚ zero_gradr ÚbackwardÚstepÚzipÚdataÚcloner ÚcpuÚtrain_time_cost)rÚ trainloaderÚ start_timeÚmax_local_stepsr4ÚXÚYZ temp_modelÚxÚyÚoutputr Ú old_paramÚ new_paramrrrr$sR         zclientPerAvg.traincCs8| |j¡}t|ƒ}|j |j¡|j ¡t|ƒ\}}t|ƒtgƒkr^|d |j¡|d<n | |j¡}| |j¡}|j   ¡| |¡}|  ||¡}|  ¡|j   ¡t|ƒ\}}t|ƒtgƒkrÚ|d |j¡|d<n | |j¡}| |j¡}|j   ¡| |¡}|  ||¡}|  ¡|j j |jd�|j ¡dS)Nrr)Úload_test_datar Úiterr r"r#r$Únextr.rr2r r3r4r r8)rZ testloaderZiter_testloaderr?r@rAr rrrÚtrain_one_stepNs2               zclientPerAvg.train_one_step)Ú__name__Ú __module__Ú __qualname__rr$rGÚ __classcell__rrrrr s :r) Únumpyr'Útorchr!r+Útorch.nnr Úflcore.optimizers.fedoptimizerrÚflcore.clients.clientbaserrrrrrÚ<module>s   
2,805
Python
.py
40
68.9
226
0.395155
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,713
clientreppt.cpython-37.pyc
hkgdifyu_pFedPT/system/flcore/clients/__pycache__/clientreppt.cpython-37.pyc
B K´ccZã@sLddlZddlmZddlZddlZddlmZddlZGdd„deƒZ dS)éN)ÚClientcs,eZdZ‡fdd„Zdd„Zdd„Z‡ZS)Ú clientREPPTc sdtƒj||||f|�t ¡|_tjj|jj   ¡|j d�|_ tjj|jj   ¡|j d�|_|j|_dS)N)Úlr)ÚsuperÚ__init__ÚnnÚCrossEntropyLossÚlossÚtorchÚoptimÚSGDÚmodelÚ generatorÚ parametersÚ learning_rateÚ poptimizerÚbaseÚ optimizerÚ plocal_steps)ÚselfÚargsÚidÚ train_samplesÚ test_samplesÚkwargs)Ú __class__©úH/root/autodl-tmp/PFL-Non-IID-master/system/flcore/clients/clientreppt.pyr s  zclientREPPT.__init__c CsT| ¡}t ¡}t |jj¡}|j |j¡|j ¡x|jj   ¡D] }d|_ qDWx|jj  ¡D] }d|_ q`Wx|jj   ¡D] }d|_ q|Wx¾t |jƒD]°}xªt|ƒD]�\}\}}t|ƒtgƒkrÖ|d |j¡|d<n | |j¡}| |j¡}|j�rt dt tj ¡¡¡|j ¡| |¡} | | |¡} |  ¡|j ¡q¤Wq–W|j} |j�rjtj d| d¡} x|jj   ¡D] }d|_ �qxWx|jj  ¡D] }d|_ �q–Wx|jj   ¡D] }d|_ �q´WxÂt | ƒD]¶}x®t|ƒD]¢\}\}}t|ƒtgƒk�r|d |j¡|d<n | |j¡}| |j¡}|j�rJt dt tj ¡¡¡|j ¡| |¡} | | |¡} |  ¡|j ¡�qÜW�qÎW|j ¡|j dd7<|j dt ¡|7<t |jj¡} d} xRt!|  ¡|   ¡ƒD]<\}}||}t" #|dk|t" $|¡|¡}| t" %|¡} �qäW|j dd7<|j dt ¡|7<| S) NFTrgš™™™™™¹?ééÚ num_roundsÚ total_cost)&Úload_train_dataÚtimeÚcopyÚdeepcopyr rÚtoÚdeviceÚtrainrrÚ requires_gradÚ predictorÚrangerÚ enumerateÚtypeÚ train_slowÚsleepÚnpÚabsÚrandomÚrandrÚ zero_gradr ÚbackwardÚstepÚ local_stepsÚrandintrÚcpuÚtrain_time_costÚzipr ÚwhereÚ zeros_likeÚsum)rÚ trainloaderÚ start_timeÚ old_promptÚparamr6ÚiÚxÚyÚoutputr Úmax_local_stepsÚ new_promptÚ diff_provalueÚ new_paramÚ old_paramÚdiff_prorrrr(st                  zclientREPPT.traincCs4x.t| ¡|jj ¡ƒD]\}}|j ¡|_qWdS)N)r;rr rÚdataÚclone)rrrJrKrrrÚset_parameters^s zclientREPPT.set_parameters)Ú__name__Ú __module__Ú __qualname__rr(rOÚ __classcell__rr)rrr s Hr) r Útorch.nnrÚnumpyr0r#Ú system.flcore.clients.clientbaserr$rrrrrÚ<module>s   
2,710
Python
.py
44
60.113636
288
0.421447
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,714
clientavg-checkpoint.py
hkgdifyu_pFedPT/system/flcore/clients/.ipynb_checkpoints/clientavg-checkpoint.py
import torch
import torch.nn as nn
import numpy as np
import time
from flcore.clients.clientbase import Client


class clientAVG(Client):
    """FedAvg client: plain local SGD training on the client's own data.

    Holds a cross-entropy loss and an SGD optimizer over the full model;
    `train()` runs `local_steps` local epochs and accumulates wall-clock
    training cost in `train_time_cost`.
    """

    def __init__(self, args, id, train_samples, test_samples, **kwargs):
        super().__init__(args, id, train_samples, test_samples, **kwargs)

        self.loss = nn.CrossEntropyLoss()
        self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.learning_rate)

        # # differential privacy
        # if self.privacy:
        #     check_dp(self.model)
        #     initialize_dp(self.model, self.optimizer, self.sample_rate, self.dp_sigma)

    def train(self):
        """Run local training and record the elapsed time.

        Moves the model to `self.device`, trains for `self.local_steps`
        epochs (a random shorter count when simulating stragglers via
        `self.train_slow`), then moves the model back to CPU and updates
        the `train_time_cost` bookkeeping.
        """
        trainloader = self.load_train_data()

        start_time = time.time()

        self.model.to(self.device)
        self.model.train()

        max_local_steps = self.local_steps
        if self.train_slow:
            # Simulated stragglers train for a random, reduced number of epochs.
            max_local_steps = np.random.randint(1, max_local_steps // 2)

        for step in range(max_local_steps):
            for i, (x, y) in enumerate(trainloader):
                if isinstance(x, list):
                    # Some datasets yield [input_tensor, extras...]; only the
                    # first entry is the network input and needs moving.
                    x[0] = x[0].to(self.device)
                else:
                    x = x.to(self.device)
                y = y.to(self.device)
                if self.train_slow:
                    # Inject a small random delay to emulate a slow client.
                    time.sleep(0.1 * np.abs(np.random.rand()))
                self.optimizer.zero_grad()
                output = self.model(x)
                loss = self.loss(output, y)
                loss.backward()
                if self.privacy:
                    # NOTE(review): dp_step is not imported in this file (the
                    # privacy imports above are commented out), so enabling
                    # self.privacy would raise NameError here — confirm the
                    # DP utilities before turning this on.
                    dp_step(self.optimizer, i, len(trainloader))
                else:
                    self.optimizer.step()

        self.model.cpu()
        self.train_time_cost['num_rounds'] += 1
        self.train_time_cost['total_cost'] += time.time() - start_time

        # if self.privacy:
        #     res, DELTA = get_dp_params(self.optimizer)
        #     print(f"Client {self.id}", f"(ε = {res[0]:.2f}, δ = {DELTA}) for α = {res[1]}")
1,962
Python
.py
45
31.866667
93
0.54878
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,715
clientditto-checkpoint.py
hkgdifyu_pFedPT/system/flcore/clients/.ipynb_checkpoints/clientditto-checkpoint.py
import torch import numpy as np import time import copy import torch.nn as nn from flcore.optimizers.fedoptimizer import PerturbedGradientDescent from flcore.clients.clientbase import Client import torch.nn.functional as F from sklearn.preprocessing import label_binarize from sklearn import metrics # from utils.privacy import * class clientDitto(Client): def __init__(self, args, id, train_samples, test_samples, **kwargs): super().__init__(args, id, train_samples, test_samples, **kwargs) self.mu = args.mu self.plocal_steps = args.plocal_steps self.pmodel = copy.deepcopy(self.model) self.loss = nn.CrossEntropyLoss() self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.learning_rate) self.poptimizer = PerturbedGradientDescent( self.pmodel.parameters(), lr=self.learning_rate, mu=self.mu) # # differential privacy # if self.privacy: # check_dp(self.model) # initialize_dp(self.model, self.optimizer, self.sample_rate, self.dp_sigma) def train(self): trainloader = self.load_train_data() start_time = time.time() self.model.to(self.device) self.model.train() max_local_steps = self.local_steps if self.train_slow: max_local_steps = np.random.randint(1, max_local_steps // 2) for step in range(max_local_steps): for i, (x, y) in enumerate(trainloader): if type(x) == type([]): x[0] = x[0].to(self.device) else: x = x.to(self.device) y = y.to(self.device) if self.train_slow: time.sleep(0.1 * np.abs(np.random.rand())) self.optimizer.zero_grad() output = self.model(x) loss = self.loss(output, y) loss.backward() if self.privacy: dp_step(self.optimizer, i, len(trainloader)) else: self.optimizer.step() self.model.cpu() self.train_time_cost['num_rounds'] += 1 self.train_time_cost['total_cost'] += time.time() - start_time # # if self.privacy: # res, DELTA = get_dp_params(self.optimizer) # print(f"Client {self.id}", f"(ε = {res[0]:.2f}, δ = {DELTA}) for α = {res[1]}") def ptrain(self): trainloader = self.load_train_data() start_time = time.time() self.pmodel.to(self.device) 
self.pmodel.train() max_local_steps = self.plocal_steps if self.train_slow: max_local_steps = np.random.randint(1, max_local_steps // 2) for step in range(max_local_steps): for x, y in trainloader: if type(x) == type([]): x[0] = x[0].to(self.device) else: x = x.to(self.device) y = y.to(self.device) if self.train_slow: time.sleep(0.1 * np.abs(np.random.rand())) self.poptimizer.zero_grad() output = self.pmodel(x) loss = self.loss(output, y) loss.backward() self.poptimizer.step(self.model.parameters(), self.device) self.pmodel.cpu() self.train_time_cost['total_cost'] += time.time() - start_time def test_metrics(self): testloaderfull = self.load_test_data() # self.model = self.load_model('model') self.pmodel.to(self.device) self.pmodel.eval() test_acc = 0 test_num = 0 y_prob = [] y_true = [] with torch.no_grad(): for x, y in testloaderfull: if type(x) == type([]): x[0] = x[0].to(self.device) else: x = x.to(self.device) y = y.to(self.device) output = self.pmodel(x) test_acc += (torch.sum(torch.argmax(output, dim=1) == y)).item() test_num += y.shape[0] y_prob.append(F.softmax(output).detach().cpu().numpy()) y_true.append(label_binarize(y.detach().cpu().numpy(), classes=np.arange(self.num_classes))) self.pmodel.cpu() y_prob = np.concatenate(y_prob, axis=0) y_true = np.concatenate(y_true, axis=0) auc = metrics.roc_auc_score(y_true, y_prob, average='micro') return test_acc, test_num, auc
4,593
Python
.py
107
30.897196
108
0.550567
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,716
clientpFedMe-checkpoint.py
hkgdifyu_pFedPT/system/flcore/clients/.ipynb_checkpoints/clientpFedMe-checkpoint.py
import numpy as np import time import copy import torch import torch.nn as nn from flcore.optimizers.fedoptimizer import pFedMeOptimizer from flcore.clients.clientbase import Client class clientpFedMe(Client): def __init__(self, args, id, train_samples, test_samples, **kwargs): super().__init__(args, id, train_samples, test_samples, **kwargs) self.lamda = args.lamda self.K = args.K self.personalized_learning_rate = args.p_learning_rate # these parameters are for personalized federated learing. self.local_params = copy.deepcopy(list(self.model.parameters())) self.personalized_params = copy.deepcopy(list(self.model.parameters())) self.loss = nn.CrossEntropyLoss() self.optimizer = pFedMeOptimizer( self.model.parameters(), lr=self.personalized_learning_rate, lamda=self.lamda) def train(self): trainloader = self.load_train_data() start_time = time.time() # self.model.to(self.device) self.model.train() max_local_steps = self.local_steps if self.train_slow: max_local_steps = np.random.randint(1, max_local_steps // 2) for step in range(max_local_steps): # local update for x, y in trainloader: if type(x) == type([]): x[0] = x[0].to(self.device) else: x = x.to(self.device) y = y.to(self.device) if self.train_slow: time.sleep(0.1 * np.abs(np.random.rand())) # K is number of personalized steps for i in range(self.K): self.optimizer.zero_grad() output = self.model(x) loss = self.loss(output, y) loss.backward() # finding aproximate theta self.personalized_params = self.optimizer.step(self.local_params, self.device) # update local weight after finding aproximate theta for new_param, localweight in zip(self.personalized_params, self.local_params): localweight = localweight.to(self.device) localweight.data = localweight.data - self.lamda * self.learning_rate * (localweight.data - new_param.data) # self.model.cpu() self.update_parameters(self.model, self.local_params) self.train_time_cost['num_rounds'] += 1 self.train_time_cost['total_cost'] += time.time() - start_time def 
set_parameters(self, model): for new_param, old_param, local_param in zip(model.parameters(), self.model.parameters(), self.local_params): old_param.data = new_param.data.clone() local_param.data = new_param.data.clone() def test_metrics_personalized(self): testloaderfull = self.load_test_data() self.update_parameters(self.model, self.personalized_params) # self.model.to(self.device) self.model.eval() test_acc = 0 test_num = 0 with torch.no_grad(): for x, y in testloaderfull: if type(x) == type([]): x[0] = x[0].to(self.device) else: x = x.to(self.device) y = y.to(self.device) output = self.model(x) test_acc += (torch.sum(torch.argmax(output, dim=1) == y)).item() test_num += y.shape[0] # self.model.cpu() return test_acc, test_num # def train_metrics_personalized(self): # self.update_parameters(self.model, self.personalized_params) # # self.model.to(self.device) # self.model.eval() # train_acc = 0 # train_num = 0 # loss = 0 # for x, y in trainloaderfull: # if type(x) == type([]): # x[0] = x[0].to(self.device) # else: # x = x.to(self.device) # y = y.to(self.device) # output = self.model(x) # train_acc += (torch.sum(torch.argmax(output, dim=1) == y)).item() # train_num += y.shape[0] # loss += self.loss(output, y).item() * y.shape[0] # # self.model.cpu() # return train_acc, loss, train_num
4,350
Python
.py
94
35.308511
127
0.566912
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,717
clientrep-checkpoint.py
hkgdifyu_pFedPT/system/flcore/clients/.ipynb_checkpoints/clientrep-checkpoint.py
import torch import torch.nn as nn import numpy as np import time from system.flcore.clients.clientbase import Client # from utils.privacy import * class clientRep(Client): def __init__(self, args, id, train_samples, test_samples, **kwargs): super().__init__(args, id, train_samples, test_samples, **kwargs) self.loss = nn.CrossEntropyLoss() self.optimizer = torch.optim.SGD(self.model.base.parameters(), lr=self.learning_rate) self.poptimizer = torch.optim.SGD(self.model.predictor.parameters(), lr=self.learning_rate) self.plocal_steps = args.plocal_steps def train(self): trainloader = self.load_train_data() start_time = time.time() self.model.to(self.device) self.model.train() for param in self.model.base.parameters(): param.requires_grad = False for param in self.model.predictor.parameters(): param.requires_grad = True for step in range(self.plocal_steps): for i, (x, y) in enumerate(trainloader): if type(x) == type([]): x[0] = x[0].to(self.device) else: x = x.to(self.device) y = y.to(self.device) if self.train_slow: time.sleep(0.1 * np.abs(np.random.rand())) self.poptimizer.zero_grad() output = self.model(x) loss = self.loss(output, y) loss.backward() self.poptimizer.step() max_local_steps = self.local_steps if self.train_slow: max_local_steps = np.random.randint(1, max_local_steps // 2) for param in self.model.base.parameters(): param.requires_grad = True for param in self.model.predictor.parameters(): param.requires_grad = False for step in range(max_local_steps): for i, (x, y) in enumerate(trainloader): if type(x) == type([]): x[0] = x[0].to(self.device) else: x = x.to(self.device) y = y.to(self.device) if self.train_slow: time.sleep(0.1 * np.abs(np.random.rand())) self.optimizer.zero_grad() output = self.model(x) loss = self.loss(output, y) loss.backward() self.optimizer.step() self.model.cpu() self.train_time_cost['num_rounds'] += 1 self.train_time_cost['total_cost'] += time.time() - start_time def set_parameters(self, base): for new_param, old_param in 
zip(base.parameters(), self.model.base.parameters()): old_param.data = new_param.data.clone()
2,827
Python
.py
63
32.095238
99
0.559867
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,718
clientpt-checkpoint.py
hkgdifyu_pFedPT/system/flcore/clients/.ipynb_checkpoints/clientpt-checkpoint.py
import torch import torch.nn as nn import numpy as np import time from system.flcore.clients.clientbase import Client import copy # from utils.privacy import * class clientPT(Client): def __init__(self, args, id, train_samples, test_samples, **kwargs): super().__init__(args, id, train_samples, test_samples, **kwargs) self.loss = nn.CrossEntropyLoss() self.poptimizer = torch.optim.SGD(self.model.generator.parameters(), lr=self.learning_rate) self.optimizer = torch.optim.SGD(self.model.base.parameters(), lr=self.learning_rate) self.plocal_steps = args.plocal_steps def train(self): trainloader = self.load_train_data() start_time = time.time() old_prompt = copy.deepcopy(self.model.generator) self.model.to(self.device) self.model.train() for param in self.model.base.parameters(): param.requires_grad = False for param in self.model.generator.parameters(): param.requires_grad = True for step in range(self.plocal_steps): for i, (x, y) in enumerate(trainloader): if type(x) == type([]): x[0] = x[0].to(self.device) else: x = x.to(self.device) y = y.to(self.device) if self.train_slow: time.sleep(0.1 * np.abs(np.random.rand())) self.poptimizer.zero_grad() output = self.model(x) loss = self.loss(output, y) loss.backward() self.poptimizer.step() max_local_steps = self.local_steps if self.train_slow: max_local_steps = np.random.randint(1, max_local_steps // 2) for param in self.model.base.parameters(): param.requires_grad = True for param in self.model.generator.parameters(): param.requires_grad = False for step in range(max_local_steps): for i, (x, y) in enumerate(trainloader): if type(x) == type([]): x[0] = x[0].to(self.device) else: x = x.to(self.device) y = y.to(self.device) if self.train_slow: time.sleep(0.1 * np.abs(np.random.rand())) self.optimizer.zero_grad() output = self.model(x) loss = self.loss(output, y) loss.backward() self.optimizer.step() self.model.cpu() new_prompt = copy.deepcopy(self.model.generator) diff_provalue = 0 for new_param, old_param in 
zip(old_prompt.parameters(), new_prompt.parameters()): diff_pro = new_param - old_param diff_pro = torch.where(diff_pro > 0, diff_pro, torch.zeros_like(diff_pro)-diff_pro) diff_provalue = torch.sum(diff_pro) self.train_time_cost['num_rounds'] += 1 self.train_time_cost['total_cost'] += time.time() - start_time return diff_provalue def set_parameters(self, base): for new_param, old_param in zip(base.parameters(), self.model.base.parameters()): old_param.data = new_param.data.clone()
3,248
Python
.py
72
33.291667
99
0.571745
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,719
clientfomo-checkpoint.py
hkgdifyu_pFedPT/system/flcore/clients/.ipynb_checkpoints/clientfomo-checkpoint.py
import torch import torch.nn as nn import numpy as np import time import copy from flcore.clients.clientbase import Client from torch.utils.data import DataLoader from utils.data_utils import read_client_data class clientFomo(Client): def __init__(self, args, id, train_samples, test_samples, **kwargs): super().__init__(args, id, train_samples, test_samples, **kwargs) self.num_clients = args.num_clients self.old_model = copy.deepcopy(self.model) self.received_ids = [] self.received_models = [] self.weight_vector = torch.zeros(self.num_clients, device=self.device) self.loss = nn.CrossEntropyLoss() self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.learning_rate) self.val_ratio = 0.2 self.train_samples = self.train_samples * (1-self.val_ratio) def train(self): trainloader, val_loader = self.load_train_data() start_time = time.time() self.aggregate_parameters(val_loader) self.clone_model(self.model, self.old_model) self.model.to(self.device) self.model.train() max_local_steps = self.local_steps if self.train_slow: max_local_steps = np.random.randint(1, max_local_steps // 2) for step in range(max_local_steps): for x, y in trainloader: if type(x) == type([]): x[0] = x[0].to(self.device) else: x = x.to(self.device) y = y.to(self.device) if self.train_slow: time.sleep(0.1 * np.abs(np.random.rand())) self.optimizer.zero_grad() output = self.model(x) loss = self.loss(output, y) loss.backward() self.optimizer.step() self.model.cpu() self.train_time_cost['num_rounds'] += 1 self.train_time_cost['total_cost'] += time.time() - start_time def load_train_data(self, batch_size=None): if batch_size == None: batch_size = self.batch_size train_data = read_client_data(self.dataset, self.id, is_train=True) val_idx = -int(self.val_ratio*len(train_data)) val_data = train_data[val_idx:] train_data = train_data[:val_idx] trainloader = DataLoader(train_data, self.batch_size, drop_last=True, shuffle=True) val_loader = DataLoader(val_data, self.batch_size, 
drop_last=self.has_BatchNorm, shuffle=True) return trainloader, val_loader def train_metrics(self): trainloader, val_loader = self.load_train_data() # self.model = self.load_model('model') self.model.to(self.device) self.model.eval() train_num = 0 loss = 0 for x, y in trainloader: if type(x) == type([]): x[0] = x[0].to(self.device) else: x = x.to(self.device) y = y.to(self.device) output = self.model(x) train_num += y.shape[0] loss += self.loss(output, y).item() * y.shape[0] self.model.cpu() # self.save_model(self.model, 'model') return loss, train_num def receive_models(self, ids, models): self.received_ids = ids self.received_models = models def weight_cal(self, val_loader): weight_list = [] L = self.recalculate_loss(self.old_model, val_loader) for received_model in self.received_models: params_dif = [] for param_n, param_i in zip(received_model.parameters(), self.old_model.parameters()): params_dif.append((param_n - param_i).view(-1)) params_dif = torch.cat(params_dif) weight_list.append((L - self.recalculate_loss(received_model, val_loader)) / (torch.norm(params_dif) + 1e-5)) # import torch.autograd.profiler as profiler # with profiler.profile(profile_memory=True, record_shapes=True) as prof: # self.weight_vector_update(weight_list) # print(prof.key_averages().table(sort_by="cuda_memory_usage", row_limit=10)) # from pytorch_memlab import LineProfiler # with LineProfiler(self.weight_vector_update(weight_list)) as prof: # self.weight_vector_update(weight_list) # prof.display() self.weight_vector_update(weight_list) return torch.tensor(weight_list) # from pytorch_memlab import profile # @profile def weight_vector_update(self, weight_list): # self.weight_vector = torch.zeros(self.num_clients, device=self.device) # for w, id in zip(weight_list, self.received_ids): # self.weight_vector[id] += w.clone() self.weight_vector = np.zeros(self.num_clients) for w, id in zip(weight_list, self.received_ids): self.weight_vector[id] += w.item() self.weight_vector = 
torch.tensor(self.weight_vector).to(self.device) def recalculate_loss(self, new_model, val_loader): L = 0 new_model.to(self.device) for x, y in val_loader: if type(x) == type([]): x[0] = x[0].to(self.device) else: x = x.to(self.device) y = y.to(self.device) output = new_model(x) loss = self.loss(output, y) L += loss.item() new_model.cpu() return L / len(val_loader) def add_parameters(self, w, received_model): for param, received_param in zip(self.model.parameters(), received_model.parameters()): param.data += received_param.data.clone() * w def aggregate_parameters(self, val_loader): weights = self.weight_scale(self.weight_cal(val_loader)) if len(weights) > 0: for param in self.model.parameters(): param.data.zero_() for w, received_model in zip(weights, self.received_models): self.add_parameters(w, received_model) def weight_scale(self, weights): weights = torch.maximum(weights, torch.tensor(0)) w_sum = torch.sum(weights) if w_sum > 0: weights = [w/w_sum for w in weights] return torch.tensor(weights) else: return torch.tensor([])
6,368
Python
.py
140
34.857143
121
0.596191
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,720
clientper-checkpoint.py
hkgdifyu_pFedPT/system/flcore/clients/.ipynb_checkpoints/clientper-checkpoint.py
import copy import torch import torch.nn as nn import numpy as np import time from flcore.clients.clientbase import Client class clientPer(Client): def __init__(self, args, id, train_samples, test_samples, **kwargs): super().__init__(args, id, train_samples, test_samples, **kwargs) self.loss = nn.CrossEntropyLoss() self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.learning_rate) def train(self): trainloader = self.load_train_data() start_time = time.time() # self.model.to(self.device) self.model.train() max_local_steps = self.local_steps if self.train_slow: max_local_steps = np.random.randint(1, max_local_steps // 2) for step in range(max_local_steps): for i, (x, y) in enumerate(trainloader): if type(x) == type([]): x[0] = x[0].to(self.device) else: x = x.to(self.device) y = y.to(self.device) if self.train_slow: time.sleep(0.1 * np.abs(np.random.rand())) self.optimizer.zero_grad() output = self.model(x) loss = self.loss(output, y) loss.backward() self.optimizer.step() # self.model.cpu() self.train_time_cost['num_rounds'] += 1 self.train_time_cost['total_cost'] += time.time() - start_time def set_parameters(self, model): for new_param, old_param in zip(model.parameters(), self.model.base.parameters()): old_param.data = new_param.data.clone()
1,668
Python
.py
39
31.74359
90
0.573034
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,721
clientbase-checkpoint.py
hkgdifyu_pFedPT/system/flcore/clients/.ipynb_checkpoints/clientbase-checkpoint.py
import copy import torch import torch.nn as nn import numpy as np import os import torch.nn.functional as F from torch.utils.data import DataLoader from sklearn.preprocessing import label_binarize from sklearn import metrics from utils.data_utils import read_client_data class Client(object): """ Base class for clients in federated learning. """ def __init__(self, args, id, train_samples, test_samples, **kwargs): self.model = copy.deepcopy(args.model) self.dataset = args.dataset self.device = args.device self.id = id # integer self.save_folder_name = args.save_folder_name self.num_classes = args.num_classes self.train_samples = train_samples self.test_samples = test_samples self.batch_size = args.batch_size self.learning_rate = args.local_learning_rate self.local_steps = args.local_steps # check BatchNorm self.has_BatchNorm = False for layer in self.model.children(): if isinstance(layer, nn.BatchNorm2d): self.has_BatchNorm = True break self.train_slow = kwargs['train_slow'] self.send_slow = kwargs['send_slow'] self.train_time_cost = {'num_rounds': 0, 'total_cost': 0.0} self.send_time_cost = {'num_rounds': 0, 'total_cost': 0.0} self.privacy = args.privacy self.dp_sigma = args.dp_sigma self.sample_rate = self.batch_size / self.train_samples def load_train_data(self, batch_size=None): if batch_size == None: batch_size = self.batch_size train_data = read_client_data(self.dataset, self.id, is_train=True) return DataLoader(train_data, batch_size, drop_last=True, shuffle=True) def load_test_data(self, batch_size=None): if batch_size == None: batch_size = self.batch_size test_data = read_client_data(self.dataset, self.id, is_train=False) return DataLoader(test_data, batch_size, drop_last=False, shuffle=True) def set_parameters(self, model): for new_param, old_param in zip(model.parameters(), self.model.parameters()): old_param.data = new_param.data.clone() def clone_model(self, model, target): for param, target_param in zip(model.parameters(), target.parameters()): 
target_param.data = param.data.clone() # target_param.grad = param.grad.clone() def update_parameters(self, model, new_params): for param, new_param in zip(model.parameters(), new_params): param.data = new_param.data.clone() def test_metrics(self): testloaderfull = self.load_test_data() # self.model = self.load_model('model') self.model.to(self.device) self.model.eval() test_acc = 0 test_num = 0 y_prob = [] y_true = [] with torch.no_grad(): for x, y in testloaderfull: if type(x) == type([]): x[0] = x[0].to(self.device) else: x = x.to(self.device) y = y.to(self.device) output = self.model(x) test_acc += (torch.sum(torch.argmax(output, dim=1) == y)).item() test_num += y.shape[0] y_prob.append(output.detach().cpu().numpy()) y_true.append(label_binarize(y.detach().cpu().numpy(), classes=np.arange(self.num_classes))) self.model.cpu() # self.save_model(self.model, 'model') y_prob = np.concatenate(y_prob, axis=0) y_true = np.concatenate(y_true, axis=0) auc = metrics.roc_auc_score(y_true, y_prob, average='micro') return test_acc, test_num, auc def train_metrics(self): trainloader = self.load_train_data() # self.model = self.load_model('model') self.model.to(self.device) self.model.eval() train_num = 0 loss = 0 for x, y in trainloader: if type(x) == type([]): x[0] = x[0].to(self.device) else: x = x.to(self.device) y = y.to(self.device) output = self.model(x) train_num += y.shape[0] loss += self.loss(output, y).item() * y.shape[0] self.model.cpu() # self.save_model(self.model, 'model') return loss, train_num # def get_next_train_batch(self): # try: # # Samples a new batch for persionalizing # (x, y) = next(self.iter_trainloader) # except StopIteration: # # restart the generator if the previous generator is exhausted. 
# self.iter_trainloader = iter(self.trainloader) # (x, y) = next(self.iter_trainloader) # if type(x) == type([]): # x = x[0] # x = x.to(self.device) # y = y.to(self.device) # return x, y def save_item(self, item, item_name, item_path=None): if item_path == None: item_path = self.save_folder_name if not os.path.exists(item_path): os.makedirs(item_path) torch.save(item, os.path.join(item_path, "client_" + str(self.id) + "_" + item_name + ".pt")) def load_item(self, item_name, item_path=None): if item_path == None: item_path = self.save_folder_name return torch.load(os.path.join(item_path, "client_" + str(self.id) + "_" + item_name + ".pt")) # @staticmethod # def model_exists(): # return os.path.exists(os.path.join("models", "server" + ".pt"))
5,643
Python
.py
131
33.778626
108
0.586036
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,722
clientdynpt-checkpoint.py
hkgdifyu_pFedPT/system/flcore/clients/.ipynb_checkpoints/clientdynpt-checkpoint.py
import copy import torch import torch.nn as nn import numpy as np import time from flcore.clients.clientbase import Client # from utils.privacy import * class clientDynPT(Client): def __init__(self, args, id, train_samples, test_samples, **kwargs): super().__init__(args, id, train_samples, test_samples, **kwargs) self.loss = nn.CrossEntropyLoss() self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.learning_rate) # differential privacy # if self.privacy: # check_dp(self.model) # initialize_dp(self.model, self.optimizer, self.sample_rate, self.dp_sigma) self.alpha = args.alpha self.global_model_vector = None old_grad = copy.deepcopy(self.model) old_grad = model_parameter_vector(old_grad) self.old_grad = torch.zeros_like(old_grad) self.poptimizer = torch.optim.SGD(self.model.generator.parameters(), lr=self.learning_rate) self.optimizer = torch.optim.SGD(self.model.base.parameters(), lr=self.learning_rate) self.plocal_steps = args.plocal_steps def train(self): trainloader = self.load_train_data() start_time = time.time() old_prompt = copy.deepcopy(self.model.generator) self.model.to(self.device) self.old_grad = self.old_grad.to(self.device) self.global_model_vector = self.global_model_vector.to(self.device) self.model.train() for param in self.model.base.parameters(): param.requires_grad = False for param in self.model.generator.parameters(): param.requires_grad = True for step in range(self.plocal_steps): for i, (x, y) in enumerate(trainloader): if type(x) == type([]): x[0] = x[0].to(self.device) else: x = x.to(self.device) y = y.to(self.device) if self.train_slow: time.sleep(0.1 * np.abs(np.random.rand())) self.poptimizer.zero_grad() output = self.model(x) loss = self.loss(output, y) loss.backward() self.poptimizer.step() max_local_steps = self.local_steps for param in self.model.base.parameters(): param.requires_grad = True for param in self.model.generator.parameters(): param.requires_grad = False if self.train_slow: max_local_steps = 
np.random.randint(1, max_local_steps // 2) for step in range(max_local_steps): for i, (x, y) in enumerate(trainloader): if type(x) == type([]): x[0] = x[0].to(self.device) else: x = x.to(self.device) y = y.to(self.device) if self.train_slow: time.sleep(0.1 * np.abs(np.random.rand())) self.optimizer.zero_grad() output = self.model(x) loss = self.loss(output, y) if self.global_model_vector != None: v1 = model_parameter_vector(self.model) loss += self.alpha / 2 * torch.norm(v1 - self.global_model_vector, 2) loss -= torch.dot(v1, self.old_grad) loss.backward() self.optimizer.step() if self.global_model_vector != None: v1 = model_parameter_vector(self.model).detach() self.old_grad = self.old_grad - self.alpha * (v1 - self.global_model_vector) self.model.cpu() self.old_grad = self.old_grad.cpu() self.global_model_vector = self.global_model_vector.cpu() self.train_time_cost['num_rounds'] += 1 self.train_time_cost['total_cost'] += time.time() - start_time new_prompt = copy.deepcopy(self.model.generator) diff_provalue = 0 for new_param, old_param in zip(old_prompt.parameters(), new_prompt.parameters()): diff_pro = new_param - old_param diff_pro = torch.where(diff_pro > 0, diff_pro, torch.zeros_like(diff_pro) - diff_pro) diff_provalue = torch.sum(diff_pro) self.train_time_cost['num_rounds'] += 1 self.train_time_cost['total_cost'] += time.time() - start_time return diff_provalue # # if self.privacy: # res, DELTA = get_dp_params(self.optimizer) # print(f"Client {self.id}", f"(ε = {res[0]:.2f}, δ = {DELTA}) for α = {res[1]}") def set_parameters(self, model): for new_param, old_param in zip(model.base.parameters(), self.model.base.parameters()): old_param.data = new_param.data.clone() self.global_model_vector = model_parameter_vector(model).detach().clone() def model_parameter_vector(model): param = [p.view(-1) for p in model.parameters()] return torch.cat(param, dim=0)
4,956
Python
.py
103
36.747573
99
0.583144
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,723
clientbn-checkpoint.py
hkgdifyu_pFedPT/system/flcore/clients/.ipynb_checkpoints/clientbn-checkpoint.py
import torch import torch.nn as nn import numpy as np import time from flcore.clients.clientbase import Client # from utils.privacy import * class clientBN(Client): def __init__(self, args, id, train_samples, test_samples, **kwargs): super().__init__(args, id, train_samples, test_samples, **kwargs) self.loss = nn.CrossEntropyLoss() self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.learning_rate) # # differential privacy # if self.privacy: # check_dp(self.model) # initialize_dp(self.model, self.optimizer, self.sample_rate, self.dp_sigma) def train(self): trainloader = self.load_train_data() start_time = time.time() # self.model.to(self.device) self.model.train() max_local_steps = self.local_steps if self.train_slow: max_local_steps = np.random.randint(1, max_local_steps // 2) for step in range(max_local_steps): for i, (x, y) in enumerate(trainloader): if type(x) == type([]): x[0] = x[0].to(self.device) else: x = x.to(self.device) y = y.to(self.device) if self.train_slow: time.sleep(0.1 * np.abs(np.random.rand())) self.optimizer.zero_grad() output = self.model(x) loss = self.loss(output, y) loss.backward() if self.privacy: dp_step(self.optimizer, i, len(trainloader)) else: self.optimizer.step() # self.model.cpu() self.train_time_cost['num_rounds'] += 1 self.train_time_cost['total_cost'] += time.time() - start_time if self.privacy: res, DELTA = get_dp_params(self.optimizer) print(f"Client {self.id}", f"(ε = {res[0]:.2f}, δ = {DELTA}) for α = {res[1]}") def set_parameters(self, model): for (nn, np), (on, op) in zip(model.named_parameters(), self.model.named_parameters()): if 'bn' not in nn: op.data = np.data.clone()
2,207
Python
.py
50
32.28
95
0.55298
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,724
clientphp-checkpoint.py
hkgdifyu_pFedPT/system/flcore/clients/.ipynb_checkpoints/clientphp-checkpoint.py
import copy
import torch
import torch.nn as nn
import numpy as np
import time
from flcore.clients.clientbase import Client
# from utils.privacy import *


class clientPHP(Client):
    """FedPHP-style client: blends the CE loss with an MMD term against a
    frozen snapshot model (`model_s`) and interpolates incoming weights."""

    def __init__(self, args, id, train_samples, test_samples, **kwargs):
        super().__init__(args, id, train_samples, test_samples, **kwargs)

        self.loss = nn.CrossEntropyLoss()
        self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.learning_rate)

        self.mu = args.mu / args.global_rounds
        self.lamda = args.lamda

        # Frozen copy used only as the MMD target; never trained directly.
        self.model_s = copy.deepcopy(self.model)
        for param in self.model_s.parameters():
            param.requires_grad = False
        # self.model_s.eval()

    def train(self):
        trainloader = self.load_train_data()
        start_time = time.time()

        self.model.to(self.device)
        self.model_s.to(self.device)
        self.model.train()

        max_local_steps = self.local_steps
        if self.train_slow:
            max_local_steps = np.random.randint(1, max_local_steps // 2)

        for step in range(max_local_steps):
            for i, (x, y) in enumerate(trainloader):
                if type(x) == type([]):
                    x[0] = x[0].to(self.device)
                else:
                    x = x.to(self.device)
                y = y.to(self.device)
                if self.train_slow:
                    time.sleep(0.1 * np.abs(np.random.rand()))
                self.optimizer.zero_grad()
                output = self.model(x)
                # weighted CE + MMD regularizer toward the snapshot features
                loss = self.loss(output, y) * (1 - self.lamda)
                loss += MMD(self.model.base(x), self.model_s.base(x), 'rbf', self.device) * self.lamda
                loss.backward()
                self.optimizer.step()

        self.model.cpu()
        self.model_s.cpu()

        self.train_time_cost['num_rounds'] += 1
        self.train_time_cost['total_cost'] += time.time() - start_time

    def set_parameters(self, model, R):
        """Adopt the global model, mixing with local weights by `mu * R`."""
        mu = self.mu * R

        for new_param, old_param in zip(model.parameters(), self.model_s.parameters()):
            old_param.data = new_param.data.clone()

        # Fix: interpolate on .data (the original combined Parameter objects
        # directly, attaching autograd graphs to the assigned weights).
        for new_param, old_param in zip(model.parameters(), self.model.parameters()):
            old_param.data = new_param.data * (1 - mu) + old_param.data * mu


def MMD(x, y, kernel, device='cpu'):
    """Empirical maximum mean discrepancy between samples x (P) and y (Q).

    The lower the result, the more evidence that the distributions match.

    Args:
        x: first sample, distribution P
        y: second sample, distribution Q
        kernel: kernel type, "multiscale" or "rbf"
        device: device on which the kernel matrices are allocated
    """
    xx, yy, zz = torch.mm(x, x.t()), torch.mm(y, y.t()), torch.mm(x, y.t())
    rx = (xx.diag().unsqueeze(0).expand_as(xx))
    ry = (yy.diag().unsqueeze(0).expand_as(yy))

    dxx = rx.t() + rx - 2. * xx  # Used for A in (1)
    dyy = ry.t() + ry - 2. * yy  # Used for B in (1)
    dxy = rx.t() + ry - 2. * zz  # Used for C in (1)

    XX, YY, XY = (torch.zeros(xx.shape).to(device),
                  torch.zeros(xx.shape).to(device),
                  torch.zeros(xx.shape).to(device))

    if kernel == "multiscale":
        bandwidth_range = [0.2, 0.5, 0.9, 1.3]
        for a in bandwidth_range:
            XX += a**2 * (a**2 + dxx)**-1
            YY += a**2 * (a**2 + dyy)**-1
            XY += a**2 * (a**2 + dxy)**-1

    if kernel == "rbf":
        bandwidth_range = [10, 15, 20, 50]
        for a in bandwidth_range:
            XX += torch.exp(-0.5*dxx/a)
            YY += torch.exp(-0.5*dyy/a)
            XY += torch.exp(-0.5*dxy/a)

    return torch.mean(XX + YY - 2. * XY)
3,672
Python
.py
82
34.085366
102
0.555238
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,725
clientreppt-checkpoint.py
hkgdifyu_pFedPT/system/flcore/clients/.ipynb_checkpoints/clientreppt-checkpoint.py
import torch
import torch.nn as nn
import numpy as np
import time
from system.flcore.clients.clientbase import Client
import copy
# from utils.privacy import *


class clientREPPT(Client):
    """pFedPT client variant: first trains the personal part (prompt
    generator + predictor) for `plocal_steps`, then the shared base, and
    reports how far the prompt moved during the round."""

    def __init__(self, args, id, train_samples, test_samples, **kwargs):
        super().__init__(args, id, train_samples, test_samples, **kwargs)

        self.loss = nn.CrossEntropyLoss()
        self.poptimizer = torch.optim.SGD(self.model.generator.parameters(), lr=self.learning_rate)
        self.optimizer = torch.optim.SGD(self.model.base.parameters(), lr=self.learning_rate)
        self.plocal_steps = args.plocal_steps

    def train(self):
        """One local round; returns the L1 drift of the prompt generator."""
        trainloader = self.load_train_data()
        start_time = time.time()
        old_prompt = copy.deepcopy(self.model.generator)
        self.model.to(self.device)
        self.model.train()

        # Phase 1: personal modules only (prompt generator + predictor).
        for param in self.model.base.parameters():
            param.requires_grad = False
        for param in self.model.generator.parameters():
            param.requires_grad = True
        for param in self.model.predictor.parameters():
            param.requires_grad = True

        for step in range(self.plocal_steps):
            for i, (x, y) in enumerate(trainloader):
                if type(x) == type([]):
                    x[0] = x[0].to(self.device)
                else:
                    x = x.to(self.device)
                y = y.to(self.device)
                if self.train_slow:
                    time.sleep(0.1 * np.abs(np.random.rand()))
                self.poptimizer.zero_grad()
                output = self.model(x)
                loss = self.loss(output, y)
                loss.backward()
                self.poptimizer.step()

        max_local_steps = self.local_steps
        if self.train_slow:
            max_local_steps = np.random.randint(1, max_local_steps // 2)

        # Phase 2: shared base only.
        for param in self.model.base.parameters():
            param.requires_grad = True
        for param in self.model.generator.parameters():
            param.requires_grad = False
        for param in self.model.predictor.parameters():
            param.requires_grad = False

        for step in range(max_local_steps):
            for i, (x, y) in enumerate(trainloader):
                if type(x) == type([]):
                    x[0] = x[0].to(self.device)
                else:
                    x = x.to(self.device)
                y = y.to(self.device)
                if self.train_slow:
                    time.sleep(0.1 * np.abs(np.random.rand()))
                self.optimizer.zero_grad()
                output = self.model(x)
                loss = self.loss(output, y)
                loss.backward()
                self.optimizer.step()

        self.model.cpu()

        new_prompt = copy.deepcopy(self.model.generator)
        diff_provalue = 0
        # Fix: the original zipped (old, new) but named them (new_param,
        # old_param), and OVERWROTE diff_provalue every iteration instead of
        # accumulating, so only the last parameter tensor was reported.
        for old_param, new_param in zip(old_prompt.parameters(), new_prompt.parameters()):
            diff_provalue += torch.sum(torch.abs(new_param - old_param))

        # Fix: the time accounting was duplicated (each round counted twice).
        self.train_time_cost['num_rounds'] += 1
        self.train_time_cost['total_cost'] += time.time() - start_time
        return diff_provalue

    def set_parameters(self, base):
        """Overwrite the shared base with the received global base."""
        for new_param, old_param in zip(base.parameters(), self.model.base.parameters()):
            old_param.data = new_param.data.clone()
3,562
Python
.py
78
33.974359
99
0.576168
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,726
clientdyn-checkpoint.py
hkgdifyu_pFedPT/system/flcore/clients/.ipynb_checkpoints/clientdyn-checkpoint.py
import copy
import torch
import torch.nn as nn
import numpy as np
import time
from flcore.clients.clientbase import Client
# from utils.privacy import *


class clientDyn(Client):
    """FedDyn client: adds a linear correction term (`old_grad`) and a
    proximal penalty toward the latest global model vector."""

    def __init__(self, args, id, train_samples, test_samples, **kwargs):
        super().__init__(args, id, train_samples, test_samples, **kwargs)

        self.loss = nn.CrossEntropyLoss()
        self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.learning_rate)

        # differential privacy
        # if self.privacy:
        #     check_dp(self.model)
        #     initialize_dp(self.model, self.optimizer, self.sample_rate, self.dp_sigma)

        self.alpha = args.alpha

        # Set by set_parameters() before the first train() call.
        self.global_model_vector = None
        old_grad = copy.deepcopy(self.model)
        old_grad = model_parameter_vector(old_grad)
        self.old_grad = torch.zeros_like(old_grad)

    def train(self):
        trainloader = self.load_train_data()
        start_time = time.time()

        self.model.to(self.device)
        self.old_grad = self.old_grad.to(self.device)
        self.global_model_vector = self.global_model_vector.to(self.device)
        self.model.train()

        max_local_steps = self.local_steps
        if self.train_slow:
            max_local_steps = np.random.randint(1, max_local_steps // 2)

        for step in range(max_local_steps):
            for i, (x, y) in enumerate(trainloader):
                if type(x) == type([]):
                    x[0] = x[0].to(self.device)
                else:
                    x = x.to(self.device)
                y = y.to(self.device)
                if self.train_slow:
                    time.sleep(0.1 * np.abs(np.random.rand()))
                self.optimizer.zero_grad()
                output = self.model(x)
                loss = self.loss(output, y)

                # Fix: use `is not None` — `tensor != None` relies on an
                # elementwise comparison fallback and is the wrong idiom.
                if self.global_model_vector is not None:
                    v1 = model_parameter_vector(self.model)
                    loss += self.alpha/2 * torch.norm(v1 - self.global_model_vector, 2)
                    loss -= torch.dot(v1, self.old_grad)

                loss.backward()
                self.optimizer.step()

        if self.global_model_vector is not None:
            v1 = model_parameter_vector(self.model).detach()
            self.old_grad = self.old_grad - self.alpha * (v1 - self.global_model_vector)

        self.model.cpu()
        self.old_grad = self.old_grad.cpu()
        self.global_model_vector = self.global_model_vector.cpu()

        self.train_time_cost['num_rounds'] += 1
        self.train_time_cost['total_cost'] += time.time() - start_time

        # if self.privacy:
        #     res, DELTA = get_dp_params(self.optimizer)
        #     print(f"Client {self.id}", f"(ε = {res[0]:.2f}, δ = {DELTA}) for α = {res[1]}")

    def set_parameters(self, model):
        """Adopt global weights and cache their flattened vector."""
        for new_param, old_param in zip(model.parameters(), self.model.parameters()):
            old_param.data = new_param.data.clone()

        self.global_model_vector = model_parameter_vector(model).detach().clone()


def model_parameter_vector(model):
    """Flatten all parameters of `model` into a single 1-D tensor."""
    param = [p.view(-1) for p in model.parameters()]
    return torch.cat(param, dim=0)
3,199
Python
.py
68
36.132353
93
0.587796
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,727
clientproto-checkpoint.py
hkgdifyu_pFedPT/system/flcore/clients/.ipynb_checkpoints/clientproto-checkpoint.py
from collections import defaultdict
import copy
import torch
import torch.nn as nn
import numpy as np
import time
from flcore.clients.clientbase import Client
# from utils.privacy import *


class clientProto(Client):
    """FedProto client: learns local class prototypes (mean representations)
    and regularizes them toward the received global prototypes."""

    def __init__(self, args, id, train_samples, test_samples, **kwargs):
        super().__init__(args, id, train_samples, test_samples, **kwargs)

        self.loss = nn.CrossEntropyLoss()
        self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.learning_rate)

        self.feature_dim = list(self.model.predictor.parameters())[0].shape[1]

        self.protos = None
        self.global_protos = None
        self.loss_mse = nn.MSELoss()
        self.lamda = args.lamda

    def train(self):
        trainloader = self.load_train_data()
        start_time = time.time()

        # self.model.to(self.device)
        self.model.train()

        max_local_steps = self.local_steps
        if self.train_slow:
            max_local_steps = np.random.randint(1, max_local_steps // 2)

        protos = defaultdict(list)
        for step in range(max_local_steps):
            for i, (x, y) in enumerate(trainloader):
                if type(x) == type([]):
                    x[0] = x[0].to(self.device)
                else:
                    x = x.to(self.device)
                y = y.to(self.device)
                if self.train_slow:
                    time.sleep(0.1 * np.abs(np.random.rand()))
                self.optimizer.zero_grad()
                rep = self.model.base(x)
                output = self.model.predictor(rep)
                loss = self.loss(output, y)

                # Fix: `is not None` instead of `!= None`; the inner
                # enumerations use `idx` so they no longer clobber the
                # dataloader index `i`.
                if self.global_protos is not None:
                    proto_new = torch.zeros_like(rep)
                    for idx, yy in enumerate(y):
                        y_c = yy.item()
                        proto_new[idx, :] = self.global_protos[y_c].data
                    loss += self.loss_mse(proto_new, rep) * self.lamda

                for idx, yy in enumerate(y):
                    y_c = yy.item()
                    protos[y_c].append(rep[idx, :].detach().data)

                loss.backward()
                self.optimizer.step()

        # self.model.cpu()
        # self.collect_protos()
        self.protos = agg_func(protos)

        self.train_time_cost['num_rounds'] += 1
        self.train_time_cost['total_cost'] += time.time() - start_time

    def set_protos(self, global_protos):
        self.global_protos = copy.deepcopy(global_protos)

    def collect_protos(self):
        """Recompute local prototypes without updating the model."""
        trainloader = self.load_train_data()
        self.model.eval()

        protos = defaultdict(list)
        with torch.no_grad():
            for i, (x, y) in enumerate(trainloader):
                if type(x) == type([]):
                    x[0] = x[0].to(self.device)
                else:
                    x = x.to(self.device)
                y = y.to(self.device)
                if self.train_slow:
                    time.sleep(0.1 * np.abs(np.random.rand()))
                self.optimizer.zero_grad()
                rep = self.model.base(x)

                for idx, yy in enumerate(y):
                    y_c = yy.item()
                    protos[y_c].append(rep[idx, :].detach().data)

        self.protos = agg_func(protos)

    def test_metrics(self, model=None):
        """Nearest-prototype classification accuracy on the test split."""
        testloader = self.load_test_data()
        if model is None:
            model = self.model
        model.eval()

        test_acc = 0
        test_num = 0

        with torch.no_grad():
            for x, y in testloader:
                if type(x) == type([]):
                    x[0] = x[0].to(self.device)
                else:
                    x = x.to(self.device)
                y = y.to(self.device)
                rep = self.model.base(x)

                # distance to every global prototype; unseen classes stay inf
                output = float('inf') * torch.ones(y.shape[0], self.num_classes).to(self.device)
                for i, r in enumerate(rep):
                    for j, pro in self.global_protos.items():
                        output[i, j] = self.loss_mse(r, pro)

                test_acc += (torch.sum(torch.argmin(output, dim=1) == y)).item()
                test_num += y.shape[0]

        return test_acc, test_num, 0


# https://github.com/yuetan031/fedproto/blob/main/lib/utils.py#L205
def agg_func(protos):
    """Average each class's prototype list into a single tensor."""
    for [label, proto_list] in protos.items():
        if len(proto_list) > 1:
            proto = 0 * proto_list[0].data
            for i in proto_list:
                proto += i.data
            protos[label] = proto / len(proto_list)
        else:
            protos[label] = proto_list[0]

    return protos
4,762
Python
.py
115
28.756522
96
0.519461
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,728
clientmoon-checkpoint.py
hkgdifyu_pFedPT/system/flcore/clients/.ipynb_checkpoints/clientmoon-checkpoint.py
import copy
import torch
import torch.nn as nn
import numpy as np
import time
import torch.nn.functional as F
from flcore.clients.clientbase import Client
# from utils.privacy import *


class clientMOON(Client):
    """MOON client: adds a model-contrastive loss pulling representations
    toward the global model's and away from the previous local model's."""

    def __init__(self, args, id, train_samples, test_samples, **kwargs):
        super().__init__(args, id, train_samples, test_samples, **kwargs)

        self.loss = nn.CrossEntropyLoss()
        self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.learning_rate)

        # # differential privacy
        # if self.privacy:
        #     check_dp(self.model)
        #     initialize_dp(self.model, self.optimizer, self.sample_rate, self.dp_sigma)

        self.tau = args.tau
        self.mu = args.mu
        self.global_model = None
        self.old_model = copy.deepcopy(self.model)

    def train(self):
        trainloader = self.load_train_data()
        start_time = time.time()

        self.model.to(self.device)
        self.global_model.to(self.device)
        self.old_model.to(self.device)
        self.model.train()

        max_local_steps = self.local_steps
        if self.train_slow:
            max_local_steps = np.random.randint(1, max_local_steps // 2)

        for step in range(max_local_steps):
            for i, (x, y) in enumerate(trainloader):
                if type(x) == type([]):
                    x[0] = x[0].to(self.device)
                else:
                    x = x.to(self.device)
                y = y.to(self.device)
                if self.train_slow:
                    time.sleep(0.1 * np.abs(np.random.rand()))
                self.optimizer.zero_grad()
                rep = self.model.base(x)
                output = self.model.predictor(rep)
                loss = self.loss(output, y)

                rep_old = self.old_model.base(x).detach()
                rep_global = self.global_model.base(x).detach()
                # Fix: compute each similarity term once — the original
                # evaluated exp(cos(rep, rep_global)/tau) twice per batch.
                sim_global = torch.exp(F.cosine_similarity(rep, rep_global) / self.tau)
                sim_old = torch.exp(F.cosine_similarity(rep, rep_old) / self.tau)
                loss_con = - torch.log(sim_global / (sim_global + sim_old))
                loss += self.mu * torch.mean(loss_con)

                loss.backward()
                if self.privacy:
                    # NOTE(review): dp_step comes from the disabled
                    # `utils.privacy` import above; enabling `privacy`
                    # without restoring that import raises NameError.
                    dp_step(self.optimizer, i, len(trainloader))
                else:
                    self.optimizer.step()

        self.model.cpu()
        self.global_model.cpu()
        self.old_model.cpu()
        self.old_model = copy.deepcopy(self.model)

        self.train_time_cost['num_rounds'] += 1
        self.train_time_cost['total_cost'] += time.time() - start_time

        # if self.privacy:
        #     res, DELTA = get_dp_params(self.optimizer)
        #     print(f"Client {self.id}", f"(ε = {res[0]:.2f}, δ = {DELTA}) for α = {res[1]}")

    def set_parameters(self, model):
        for new_param, old_param in zip(model.parameters(), self.model.parameters()):
            old_param.data = new_param.data.clone()
        self.global_model = model
3,023
Python
.py
66
34.757576
219
0.574403
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,729
clientperavg-checkpoint.py
hkgdifyu_pFedPT/system/flcore/clients/.ipynb_checkpoints/clientperavg-checkpoint.py
import numpy as np
import torch
import time
import copy
import torch.nn as nn
from flcore.optimizers.fedoptimizer import PerAvgOptimizer
from flcore.clients.clientbase import Client


class clientPerAvg(Client):
    """Per-FedAvg client: each outer step takes one SGD step on the first
    half of a double-size batch, then applies a meta (beta) step on the
    second half after restoring the pre-step weights."""

    def __init__(self, args, id, train_samples, test_samples, **kwargs):
        super().__init__(args, id, train_samples, test_samples, **kwargs)

        # self.beta = args.beta
        self.beta = self.learning_rate

        self.loss = nn.CrossEntropyLoss()
        self.optimizer = PerAvgOptimizer(self.model.parameters(), lr=self.learning_rate)

    def _take_half(self, X, Y, first):
        """Slice one half of a double-size batch and move it to the device."""
        sl = slice(None, self.batch_size) if first else slice(self.batch_size, None)
        if isinstance(X, list):
            x = [X[0][sl].to(self.device), X[1][sl]]
        else:
            x = X[sl].to(self.device)
        return x, Y[sl].to(self.device)

    def train(self):
        trainloader = self.load_train_data(self.batch_size * 2)
        start_time = time.time()

        # self.model.to(self.device)
        self.model.train()

        local_epochs = self.local_steps
        if self.train_slow:
            local_epochs = np.random.randint(1, local_epochs // 2)

        for _ in range(local_epochs):  # local update
            for X, Y in trainloader:
                snapshot = copy.deepcopy(list(self.model.parameters()))

                # step 1: inner SGD update on the first half-batch
                x, y = self._take_half(X, Y, first=True)
                if self.train_slow:
                    time.sleep(0.1 * np.abs(np.random.rand()))
                self.optimizer.zero_grad()
                self.loss(self.model(x), y).backward()
                self.optimizer.step()

                # step 2: meta gradient on the second half-batch
                x, y = self._take_half(X, Y, first=False)
                if self.train_slow:
                    time.sleep(0.1 * np.abs(np.random.rand()))
                self.optimizer.zero_grad()
                self.loss(self.model(x), y).backward()

                # restore the weights from before step 1, then take the
                # beta step with the fresh meta gradients
                for live, saved in zip(self.model.parameters(), snapshot):
                    live.data = saved.data.clone()
                self.optimizer.step(beta=self.beta)

        # self.model.cpu()

        self.train_time_cost['num_rounds'] += 1
        self.train_time_cost['total_cost'] += time.time() - start_time

    def train_one_step(self):
        """One inner step plus one beta step using two test batches."""
        testloader = self.load_test_data(self.batch_size)
        batches = iter(testloader)

        # self.model.to(self.device)
        self.model.train()

        for use_beta in (False, True):
            x, y = next(batches)
            if isinstance(x, list):
                x[0] = x[0].to(self.device)
            else:
                x = x.to(self.device)
            y = y.to(self.device)
            self.optimizer.zero_grad()
            self.loss(self.model(x), y).backward()
            if use_beta:
                self.optimizer.step(beta=self.beta)
            else:
                self.optimizer.step()

        # self.model.cpu()
3,719
Python
.py
91
28.758242
88
0.529787
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,730
clientmtl-checkpoint.py
hkgdifyu_pFedPT/system/flcore/clients/.ipynb_checkpoints/clientmtl-checkpoint.py
import torch
import torch.nn as nn
from flcore.clients.clientbase import Client
import numpy as np
import time
import math
import copy


class clientMTL(Client):
    """FedMTL/MOCHA client: augments the task loss with a regularizer over
    the stacked per-client weight matrix `W_glob`."""

    def __init__(self, args, id, train_samples, test_samples, **kwargs):
        super().__init__(args, id, train_samples, test_samples, **kwargs)

        self.omega = None
        self.W_glob = None
        self.idx = 0
        self.itk = args.itk
        self.lamba = 1e-4

        self.loss = nn.CrossEntropyLoss()
        self.optimizer = torch.optim.SGD(self.model.parameters(),
                                         lr=self.learning_rate, momentum=0.5)

    def train(self):
        trainloader = self.load_train_data()
        start_time = time.time()

        # self.model = self.load_model('model')
        # self.model.to(self.device)
        self.model.train()

        epochs = self.local_steps
        if self.train_slow:
            epochs = np.random.randint(1, epochs // 2)

        for _ in range(epochs):
            for x, y in trainloader:
                if isinstance(x, list):
                    x[0] = x[0].to(self.device)
                else:
                    x = x.to(self.device)
                y = y.to(self.device)
                if self.train_slow:
                    time.sleep(0.1 * np.abs(np.random.rand()))
                self.optimizer.zero_grad()
                loss = self.loss(self.model(x), y)

                # refresh this client's column before regularizing
                self.W_glob[:, self.idx] = flatten(self.model)

                reg = self.W_glob.norm() ** 2
                # for i in range(self.W_glob.shape[0] // self.itk):
                #     x = self.W_glob[i * self.itk:(i+1) * self.itk, :]
                #     reg += torch.sum(torch.sum((x*self.omega), 1)**2)
                reg = reg + torch.sum(torch.sum((self.W_glob * self.omega), 1) ** 2)

                # scale the regularizer down by the parameter-count magnitude
                f = int(math.log10(self.W_glob.shape[0]) + 1) + 1
                loss = loss + reg * 10 ** (-f)

                loss.backward()
                self.optimizer.step()

        # self.model.cpu()
        # self.save_model(self.model, 'model')

        self.omega = None
        self.W_glob = None

        self.train_time_cost['num_rounds'] += 1
        self.train_time_cost['total_cost'] += time.time() - start_time

    def receive_values(self, W_glob, omega, idx):
        """Store the server-sent weight matrix, omega factor, and column."""
        self.omega = torch.sqrt(omega[0][0])
        self.W_glob = copy.deepcopy(W_glob)
        self.idx = idx


def flatten(model):
    """Concatenate every state-dict tensor of `model` into one 1-D tensor."""
    return torch.cat([t.flatten() for t in model.state_dict().values()])
2,734
Python
.py
65
31.030769
102
0.549183
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,731
clientapfl-checkpoint.py
hkgdifyu_pFedPT/system/flcore/clients/.ipynb_checkpoints/clientapfl-checkpoint.py
import copy
import torch
import torch.nn as nn
import numpy as np
import time
from flcore.clients.clientbase import Client


class clientAPFL(Client):
    """APFL client: trains a global model and a personalized model and mixes
    them with a learned interpolation weight `alpha`."""

    def __init__(self, args, id, train_samples, test_samples, **kwargs):
        super().__init__(args, id, train_samples, test_samples, **kwargs)

        self.loss = nn.CrossEntropyLoss()
        self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.learning_rate)

        self.alpha = args.alpha
        self.model_per = copy.deepcopy(self.model)
        self.optimizer_per = torch.optim.SGD(self.model_per.parameters(), lr=self.learning_rate)

    def train(self):
        trainloader = self.load_train_data()
        start_time = time.time()

        self.model_per.to(self.device)
        self.model.to(self.device)
        self.model.train()

        max_local_steps = self.local_steps
        if self.train_slow:
            max_local_steps = np.random.randint(1, max_local_steps // 2)

        for step in range(max_local_steps):
            for i, (x, y) in enumerate(trainloader):
                if type(x) == type([]):
                    x[0] = x[0].to(self.device)
                else:
                    x = x.to(self.device)
                y = y.to(self.device)
                if self.train_slow:
                    time.sleep(0.1 * np.abs(np.random.rand()))

                # update the global-model copy
                self.optimizer.zero_grad()
                output = self.model(x)
                loss = self.loss(output, y)
                loss.backward()
                self.optimizer.step()

                # update the personalized model
                self.optimizer_per.zero_grad()
                output_per = self.model_per(x)
                loss_per = self.loss(output_per, y)
                loss_per.backward()
                self.optimizer_per.step()

                self.alpha_update()

        # Fix: mix raw tensors via .data — the original combined Parameter
        # objects directly, attaching an autograd graph to the personalized
        # weights on every round.
        for lp, p in zip(self.model_per.parameters(), self.model.parameters()):
            lp.data = (1 - self.alpha) * p.data + self.alpha * lp.data

        self.model.cpu()
        self.model_per.cpu()

        self.train_time_cost['num_rounds'] += 1
        self.train_time_cost['total_cost'] += time.time() - start_time

    # https://github.com/MLOPTPSU/FedTorch/blob/b58da7408d783fd426872b63fbe0c0352c7fa8e4/fedtorch/comms/utils/flow_utils.py#L240
    def alpha_update(self):
        """One gradient step on the mixing weight alpha, clipped to [0, 1]."""
        grad_alpha = 0
        for l_params, p_params in zip(self.model.parameters(), self.model_per.parameters()):
            dif = p_params.data - l_params.data
            grad = self.alpha * p_params.grad.data + (1 - self.alpha) * l_params.grad.data
            grad_alpha += dif.view(-1).T.dot(grad.view(-1))

        grad_alpha += 0.02 * self.alpha  # weight-decay term on alpha
        self.alpha = self.alpha - self.learning_rate * grad_alpha
        self.alpha = np.clip(self.alpha.item(), 0.0, 1.0)
2,786
Python
.py
59
35.627119
128
0.58352
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,732
clientt-checkpoint.py
hkgdifyu_pFedPT/system/flcore/clients/.ipynb_checkpoints/clientt-checkpoint.py
import torch
import torch.nn as nn
import numpy as np
import time
from system.flcore.clients.clientbase import Client
import copy
# from utils.privacy import *


class clientT(Client):
    """pFedPT client with a fixed random prompt: only the base is trained;
    the prompt pads are seeded per-client and kept frozen."""

    def __init__(self, args, id, train_samples, test_samples, **kwargs):
        super().__init__(args, id, train_samples, test_samples, **kwargs)

        self.loss = nn.CrossEntropyLoss()
        self.poptimizer = torch.optim.SGD(self.model.generator.parameters(), lr=self.learning_rate)
        self.optimizer = torch.optim.SGD(self.model.base.parameters(), lr=self.learning_rate)
        self.ser_para()
        self.plocal_steps = args.plocal_steps

    def ser_para(self):
        """Re-seed the four prompt pads with client-specific random values."""
        torch.manual_seed(self.id)
        gen = self.model.generator
        gen.pad_down.data = torch.rand_like(gen.pad_down.data)
        gen.pad_left.data = torch.rand_like(gen.pad_left.data)
        gen.pad_right.data = torch.rand_like(gen.pad_right.data)
        gen.pad_up.data = torch.rand_like(gen.pad_up.data)

    def train(self):
        """One local round on the base; returns the prompt's L1 drift."""
        trainloader = self.load_train_data()
        start_time = time.time()
        prompt_before = copy.deepcopy(self.model.generator)
        self.model.to(self.device)
        self.model.train()

        epochs = self.local_steps
        if self.train_slow:
            epochs = np.random.randint(1, epochs // 2)

        # only the base learns; the prompt generator stays frozen
        for p in self.model.base.parameters():
            p.requires_grad = True
        for p in self.model.generator.parameters():
            p.requires_grad = False

        for _ in range(epochs):
            for batch_idx, (x, y) in enumerate(trainloader):
                if isinstance(x, list):
                    x[0] = x[0].to(self.device)
                else:
                    x = x.to(self.device)
                y = y.to(self.device)
                if self.train_slow:
                    time.sleep(0.1 * np.abs(np.random.rand()))
                self.optimizer.zero_grad()
                self.loss(self.model(x), y).backward()
                self.optimizer.step()

        self.model.cpu()

        # total absolute drift of the prompt over this round
        prompt_after = copy.deepcopy(self.model.generator)
        diff_provalue = 0
        for before, after in zip(prompt_before.parameters(), prompt_after.parameters()):
            delta = before - after
            delta = torch.where(delta > 0, delta, torch.zeros_like(delta) - delta)
            diff_provalue = diff_provalue + torch.sum(delta)

        self.train_time_cost['num_rounds'] += 1
        self.train_time_cost['total_cost'] += time.time() - start_time
        return diff_provalue

    def set_parameters(self, base):
        """Overwrite the local base with the received global base."""
        for incoming, local in zip(base.parameters(), self.model.base.parameters()):
            local.data = incoming.data.clone()
2,962
Python
.py
61
38.245902
99
0.619229
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,733
clientbabu-checkpoint.py
hkgdifyu_pFedPT/system/flcore/clients/.ipynb_checkpoints/clientbabu-checkpoint.py
import copy
import torch
import torch.nn as nn
import numpy as np
import time
from flcore.clients.clientbase import Client


class clientBABU(Client):
    """FedBABU client: trains only the body (base) during federated rounds;
    the predictor (head) is frozen until the final fine-tuning phase."""

    def __init__(self, args, id, train_samples, test_samples, **kwargs):
        super().__init__(args, id, train_samples, test_samples, **kwargs)

        self.loss = nn.CrossEntropyLoss()
        self.optimizer = torch.optim.SGD(self.model.base.parameters(), lr=self.learning_rate)
        self.fine_tuning_steps = args.fine_tuning_steps

        # head stays frozen during federated training
        for param in self.model.predictor.parameters():
            param.requires_grad = False

    def train(self):
        trainloader = self.load_train_data()
        start_time = time.time()

        # self.model.to(self.device)
        self.model.train()

        max_local_steps = self.local_steps
        if self.train_slow:
            max_local_steps = np.random.randint(1, max_local_steps // 2)

        for step in range(max_local_steps):
            for i, (x, y) in enumerate(trainloader):
                if type(x) == type([]):
                    x[0] = x[0].to(self.device)
                else:
                    x = x.to(self.device)
                y = y.to(self.device)
                if self.train_slow:
                    time.sleep(0.1 * np.abs(np.random.rand()))
                self.optimizer.zero_grad()
                output = self.model(x)
                loss = self.loss(output, y)
                loss.backward()
                self.optimizer.step()

        # self.model.cpu()

        self.train_time_cost['num_rounds'] += 1
        self.train_time_cost['total_cost'] += time.time() - start_time

    def set_parameters(self, base):
        for new_param, old_param in zip(base.parameters(), self.model.base.parameters()):
            old_param.data = new_param.data.clone()

    def fine_tune(self, which_module=('base', 'predictor')):
        """Fine-tune the selected modules; default tunes both body and head.

        The default is a tuple (immutable) rather than the original mutable
        list; membership tests behave identically for callers.
        """
        trainloader = self.load_train_data()
        self.model.train()

        if 'predictor' in which_module:
            for param in self.model.predictor.parameters():
                param.requires_grad = True

        # Fix: when the base is not selected, it is the *base* that must be
        # frozen. The original froze the predictor here, contradicting the
        # unfreeze just above and leaving the base trainable.
        if 'base' not in which_module:
            for param in self.model.base.parameters():
                param.requires_grad = False

        for step in range(self.fine_tuning_steps):
            for i, (x, y) in enumerate(trainloader):
                if type(x) == type([]):
                    x[0] = x[0].to(self.device)
                else:
                    x = x.to(self.device)
                y = y.to(self.device)
                self.optimizer.zero_grad()
                output = self.model(x)
                loss = self.loss(output, y)
                loss.backward()
                self.optimizer.step()
2,762
Python
.py
63
31.285714
93
0.556186
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,734
clientrod-checkpoint.py
hkgdifyu_pFedPT/system/flcore/clients/.ipynb_checkpoints/clientrod-checkpoint.py
import copy
import torch
import torch.nn as nn
import numpy as np
import time
from flcore.clients.clientbase import Client
import torch.nn.functional as F
from sklearn.preprocessing import label_binarize
from sklearn import metrics


class clientROD(Client):
    """FedROD client: trains the generic head with a balanced-softmax loss
    and a personalized head on top of detached representations."""

    def __init__(self, args, id, train_samples, test_samples, **kwargs):
        super().__init__(args, id, train_samples, test_samples, **kwargs)

        self.loss = nn.CrossEntropyLoss()
        self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.learning_rate)

        self.pred = copy.deepcopy(self.model.predictor)
        self.opt_pred = torch.optim.SGD(self.pred.parameters(), lr=self.learning_rate)

        # per-class sample counts feeding the balanced softmax
        self.sample_per_class = torch.zeros(self.num_classes)
        trainloader = self.load_train_data()
        for x, y in trainloader:
            for yy in y:
                self.sample_per_class[yy.item()] += 1

    def train(self):
        trainloader = self.load_train_data()
        start_time = time.time()

        self.model.to(self.device)
        self.pred.to(self.device)
        self.model.train()

        max_local_steps = self.local_steps
        if self.train_slow:
            max_local_steps = np.random.randint(1, max_local_steps // 2)

        for step in range(max_local_steps):
            for i, (x, y) in enumerate(trainloader):
                if type(x) == type([]):
                    x[0] = x[0].to(self.device)
                else:
                    x = x.to(self.device)
                y = y.to(self.device)

                rep = self.model.base(x)
                out_g = self.model.predictor(rep)
                loss_bsm = balanced_softmax_loss(y, out_g, self.sample_per_class)
                self.optimizer.zero_grad()
                loss_bsm.backward()
                self.optimizer.step()

                # personalized head trains on detached features only
                out_p = self.pred(rep.detach())
                loss = self.loss(out_g.detach() + out_p, y)
                self.opt_pred.zero_grad()
                loss.backward()
                self.opt_pred.step()

        self.model.cpu()
        self.pred.cpu()

        self.train_time_cost['num_rounds'] += 1
        self.train_time_cost['total_cost'] += time.time() - start_time

    def test_metrics(self, model=None):
        """Return (correct_count, sample_count, micro AUC) on the test split."""
        testloader = self.load_test_data()
        if model is None:  # fix: identity comparison for None
            model = self.model
        self.model.to(self.device)
        self.pred.to(self.device)
        model.eval()

        test_acc = 0
        test_num = 0
        y_prob = []
        y_true = []

        with torch.no_grad():
            for x, y in testloader:
                if type(x) == type([]):
                    x[0] = x[0].to(self.device)
                else:
                    x = x.to(self.device)
                y = y.to(self.device)
                rep = self.model.base(x)
                out_g = self.model.predictor(rep)
                out_p = self.pred(rep.detach())
                output = out_g.detach() + out_p

                test_acc += (torch.sum(torch.argmax(output, dim=1) == y)).item()
                test_num += y.shape[0]

                # Fix: pass dim=1 explicitly — implicit-dim softmax is
                # deprecated and emits a warning on every batch.
                y_prob.append(F.softmax(output, dim=1).detach().cpu().numpy())
                nc = self.num_classes
                if self.num_classes == 2:
                    nc += 1
                lb = label_binarize(y.detach().cpu().numpy(), classes=np.arange(nc))
                if self.num_classes == 2:
                    lb = lb[:, :2]
                y_true.append(lb)

        self.model.cpu()
        self.pred.cpu()

        y_prob = np.concatenate(y_prob, axis=0)
        y_true = np.concatenate(y_true, axis=0)
        auc = metrics.roc_auc_score(y_true, y_prob, average='micro')

        return test_acc, test_num, auc


# https://github.com/jiawei-ren/BalancedMetaSoftmax-Classification
def balanced_softmax_loss(labels, logits, sample_per_class, reduction="mean"):
    """Compute the Balanced Softmax Loss between `logits` and the ground
    truth `labels`.

    Args:
        labels: int tensor of size [batch].
        logits: float tensor of size [batch, no_of_classes].
        sample_per_class: int tensor of size [no of classes].
        reduction: one of "none", "mean", "sum".
    Returns:
        loss: float tensor, the balanced softmax loss.
    """
    spc = sample_per_class.type_as(logits)
    spc = spc.unsqueeze(0).expand(logits.shape[0], -1)
    logits = logits + spc.log()
    loss = F.cross_entropy(input=logits, target=labels, reduction=reduction)
    return loss
4,529
Python
.py
106
31.443396
88
0.563661
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,735
clientprox-checkpoint.py
hkgdifyu_pFedPT/system/flcore/clients/.ipynb_checkpoints/clientprox-checkpoint.py
import torch
import numpy as np
import time
import copy
import torch.nn as nn
from flcore.optimizers.fedoptimizer import PerturbedGradientDescent
from flcore.clients.clientbase import Client


class clientProx(Client):
    """FedProx client.

    Runs local SGD through ``PerturbedGradientDescent``, which adds a
    proximal term ``mu * (w - w_global)`` to every step so the local model
    stays close to the last received global model.
    """

    def __init__(self, args, id, train_samples, test_samples, **kwargs):
        super().__init__(args, id, train_samples, test_samples, **kwargs)

        self.mu = args.mu  # proximal-term coefficient
        # Anchor copy of the most recent global parameters; refreshed each
        # round in set_parameters().
        self.global_params = copy.deepcopy(list(self.model.parameters()))

        self.loss = nn.CrossEntropyLoss()
        self.optimizer = PerturbedGradientDescent(
            self.model.parameters(), lr=self.learning_rate, mu=self.mu)

    def train(self):
        """Run one round of proximal local training and record its cost."""
        trainloader = self.load_train_data()
        start_time = time.time()

        self.model.to(self.device)
        self.model.train()

        max_local_steps = self.local_steps
        if self.train_slow:
            max_local_steps = np.random.randint(1, max_local_steps // 2)

        for step in range(max_local_steps):
            for x, y in trainloader:
                if isinstance(x, list):  # text batches arrive as [tokens, lengths]
                    x[0] = x[0].to(self.device)
                else:
                    x = x.to(self.device)
                y = y.to(self.device)
                self.optimizer.zero_grad()
                output = self.model(x)
                loss = self.loss(output, y)
                loss.backward()
                # The optimizer applies the proximal pull towards global_params.
                self.optimizer.step(self.global_params, self.device)

        self.model.cpu()

        self.train_time_cost['num_rounds'] += 1
        self.train_time_cost['total_cost'] += time.time() - start_time

    def set_parameters(self, model):
        """Adopt a new global model: copy it into both the proximal anchor
        and the live local model."""
        for new_param, global_param, param in zip(model.parameters(), self.global_params, self.model.parameters()):
            global_param.data = new_param.data.clone()
            param.data = new_param.data.clone()
1,947
Python
.py
44
34.613636
115
0.59651
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,736
clientamp-checkpoint.py
hkgdifyu_pFedPT/system/flcore/clients/.ipynb_checkpoints/clientamp-checkpoint.py
import torch
import torch.nn as nn
from flcore.clients.clientbase import Client
import numpy as np
import time
import copy


class clientAMP(Client):
    """FedAMP client.

    Standard local SGD plus an attraction penalty that pulls the local
    model towards its personalized cloud model ``client_u``.
    """

    def __init__(self, args, id, train_samples, test_samples, **kwargs):
        super().__init__(args, id, train_samples, test_samples, **kwargs)

        self.alphaK = args.alphaK  # server attraction step size (alpha * K)
        self.lamda = args.lamda    # regularization strength
        # Personalized "cloud" model; blended with server updates in
        # set_parameters().
        self.client_u = copy.deepcopy(self.model)

        self.loss = nn.CrossEntropyLoss()
        self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.learning_rate)

    def train(self):
        """Run one round of local training with the AMP attraction penalty."""
        trainloader = self.load_train_data()
        start_time = time.time()

        self.model.to(self.device)
        self.client_u.to(self.device)
        self.model.train()

        max_local_steps = self.local_steps
        if self.train_slow:
            max_local_steps = np.random.randint(1, max_local_steps // 2)

        for step in range(max_local_steps):
            for x, y in trainloader:
                if isinstance(x, list):  # text batches arrive as [tokens, lengths]
                    x[0] = x[0].to(self.device)
                else:
                    x = x.to(self.device)
                y = y.to(self.device)
                if self.train_slow:
                    time.sleep(0.1 * np.abs(np.random.rand()))
                self.optimizer.zero_grad()
                output = self.model(x)
                loss = self.loss(output, y)

                # lamda / (2 * alphaK) * ||w - u||^2 keeps the local model
                # close to the personalized reference model client_u.
                params = weight_flatten(self.model)
                params_ = weight_flatten(self.client_u)
                sub = params - params_
                loss += self.lamda / self.alphaK / 2 * torch.dot(sub, sub)
                loss.backward()
                self.optimizer.step()

        self.model.cpu()
        self.client_u.cpu()
        del trainloader

        self.train_time_cost['num_rounds'] += 1
        self.train_time_cost['total_cost'] += time.time() - start_time

    def set_parameters(self, model, coef_self):
        """Blend the server message into the personalized model client_u."""
        for new_param, old_param in zip(model.parameters(), self.client_u.parameters()):
            old_param.data = (new_param.data + coef_self * old_param.data).clone()


def weight_flatten(model):
    """Concatenate all parameters of ``model`` into a single 1-D tensor."""
    return torch.cat([u.view(-1) for u in model.parameters()])
2,315
Python
.py
56
30.660714
88
0.576629
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,737
models.py
hkgdifyu_pFedPT/system/flcore/trainmodel/models.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import logging
from functools import partial
from collections import OrderedDict
from transformers import ViTImageProcessor, ViTForImageClassification
import numpy as np
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_

batch_size = 16


class LocalModel(nn.Module):
    """Feature extractor + classification head composed into one module."""

    def __init__(self, base, predictor):
        super(LocalModel, self).__init__()

        self.base = base            # feature extractor
        self.predictor = predictor  # classification head

    def forward(self, x):
        out = self.base(x)
        out = self.predictor(out)
        return out


class LocalModel_pt(nn.Module):
    """Prompt generator + backbone: the generator transforms the input image
    before it is fed to the base model."""

    def __init__(self, generator, base):
        super(LocalModel_pt, self).__init__()

        self.generator = generator  # visual prompt generator
        self.base = base            # backbone classifier

    def forward(self, x):
        out = self.generator(x)
        out = self.base(out)
        return out


class FedAvgCNN(nn.Module):
    """Two-conv CNN used as the FedAvg reference model.

    ``dim`` is the flattened feature size entering fc1 and depends on the
    input resolution (1024 corresponds to 28x28 single-channel inputs).
    """

    def __init__(self, in_features=1, num_classes=10, dim=1024):
        super().__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_features,
                      32,
                      kernel_size=5,
                      padding=0,
                      stride=1,
                      bias=True),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=(2, 2))
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(32,
                      64,
                      kernel_size=5,
                      padding=0,
                      stride=1,
                      bias=True),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=(2, 2))
        )
        self.fc1 = nn.Sequential(
            nn.Linear(dim, 512),
            nn.ReLU(inplace=True)
        )
        self.fc = nn.Linear(512, num_classes)

    def forward(self, x):
        out = self.conv1(x)
        out = self.conv2(out)
        out = torch.flatten(out, 1)  # [B, C, H, W] -> [B, C*H*W]
        out = self.fc1(out)
        out = self.fc(out)
        return out

# ====================================================================================================================


def init_weights(m):
    """Weight initializer for ``module.apply``: Kaiming for convs, N(1, 0.02)
    for batch norms, Xavier for linear layers."""
    classname = m.__class__.__name__
    if classname.find('Conv2d') != -1 or classname.find('ConvTranspose2d') != -1:
        nn.init.kaiming_uniform_(m.weight)
        nn.init.zeros_(m.bias)
    elif classname.find('BatchNorm') != -1:
        nn.init.normal_(m.weight, 1.0, 0.02)
        nn.init.zeros_(m.bias)
    elif classname.find('Linear') != -1:
        nn.init.xavier_normal_(m.weight)
        nn.init.zeros_(m.bias)


class PadPrompter(nn.Module):
    """Learnable border prompt: adds trainable padding strips around the
    image frame while the interior stays untouched (zeros are added there)."""

    def __init__(self, inchanel=1, pad_size=1, image_size=28, args=None):
        super(PadPrompter, self).__init__()
        pad_size = pad_size      # NOTE: no-op rebinding kept from original
        image_size = image_size  # NOTE: no-op rebinding kept from original
        self.args = args
        self.inchanel = inchanel
        # Side length of the un-prompted interior region.
        self.base_size = image_size - pad_size*2

        # Four trainable border strips (top/bottom span the full width,
        # left/right fill the remaining interior height).
        self.pad_up = nn.Parameter(torch.randn([1, self.inchanel, pad_size, image_size]))
        self.pad_down = nn.Parameter(torch.randn([1, self.inchanel, pad_size, image_size]))
        self.pad_left = nn.Parameter(torch.randn([1, self.inchanel, image_size - pad_size*2, pad_size]))
        self.pad_right = nn.Parameter(torch.randn([1, self.inchanel, image_size - pad_size*2, pad_size]))

    def forward(self, x):
        # Assemble the full-size prompt: zero interior, learned border.
        base = torch.zeros(1, self.inchanel, self.base_size, self.base_size).to(self.args.device)
        prompt = torch.cat([self.pad_left, base, self.pad_right], dim=3)
        prompt = torch.cat([self.pad_up, prompt, self.pad_down], dim=2)
        prompt = torch.cat(x.size(0) * [prompt])  # replicate across the batch
        return x + prompt

# ====================================================================================================================


class PatchEmbed(nn.Module):  # PatchEmbed from timm
    """
    Image to Patch Embedding
    CNN_proj + Rearrange: [B, C, W, H] -> [B, D, Pn_W, Pn_H] -> [B, D, Pn] -> [B, Pn, D]
    """

    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
        self.img_size = img_size
        self.patch_size = patch_size
        self.num_patches = num_patches

        # A conv with stride == kernel == patch_size cuts the image into
        # non-overlapping patches and projects each to embed_dim.
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)

    def forward(self, x):
        B, C, H, W = x.shape
        # FIXME look at relaxing size constraints
        assert H == self.img_size[0] and W == self.img_size[1], \
            f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
        # CNN_proj + Rearrange: [B, C, W, H] -> [B, D, Pn_W, Pn_H] -> [B, D, Pn] -> [B, Pn, D]
        x = self.proj(x).flatten(2).transpose(1, 2)  # x: (B, 14*14, 768)
        return x


class FFN(nn.Module):  # Mlp from timm
    """
    FFN (from timm)

    :param in_features:
    :param hidden_features:
    :param out_features:
    :param act_layer:
    :param drop:
    """

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features

        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x


# MHSA
class Attention(nn.Module):  # qkv Transform + MSA(MHSA) (Attention from timm)
    """
    qkv Transform + MSA(MHSA) (from timm)

    # input  x.shape = batch, patch_number, patch_dim
    # output x.shape = batch, patch_number, patch_dim

    :param dim: dim=CNN feature dim, because the patch size is 1x1
    :param num_heads:
    :param qkv_bias:
    :param qk_scale: by default head_dim ** -0.5  (squre root)
    :param attn_drop: dropout rate after MHSA
    :param proj_drop:
    """

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
        self.scale = qk_scale or head_dim ** -0.5

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x):
        # input x.shape = batch, patch_number, patch_dim
        batch, patch_number, patch_dim = x.shape

        # mlp transform + head split [B, Pn, D] -> [B, Pn, 3D] -> [B, Pn, 3, H, D/H] -> [3, B, H, Pn, D/H]
        qkv = self.qkv(x).reshape(batch, patch_number, 3, self.num_heads,
                                  patch_dim // self.num_heads).permute(2, 0, 3, 1, 4)
        # 3 [B, H, Pn, D/H]
        q, k, v = qkv[0], qkv[1], qkv[2]

        # [B, H, Pn, D/H] -> [B, H, Pn, D/H]
        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)  # Dropout

        # head fusion: [B, H, Pn, D/H] -> [B, Pn, H, D/H] -> [B, Pn, D]
        x = (attn @ v).transpose(1, 2).reshape(batch, patch_number, patch_dim)

        x = self.proj(x)
        x = self.proj_drop(x)  # mlp

        # output x.shape = batch, patch_number, patch_dim
        return x


# Encoder_Block
class Block(nn.Module):  # teansformer Block from timm

    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        """
        # input  x.shape = batch, patch_number, patch_dim
        # output x.shape = batch, patch_number, patch_dim

        :param dim: dim
        :param num_heads:
        :param mlp_ratio: FFN
        :param qkv_bias:
        :param qk_scale: by default head_dim ** -0.5  (squre root)
        :param drop:
        :param attn_drop: dropout rate after Attention
        :param drop_path: dropout rate after sd
        :param act_layer: FFN act
        :param norm_layer: Pre Norm
        """
        super().__init__()
        # Pre Norm
        self.norm1 = norm_layer(dim)  # Transformer used the nn.LayerNorm
        self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
                              attn_drop=attn_drop, proj_drop=drop)
        # NOTE from timm: drop path for stochastic depth, we shall see if this is better than dropout here
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()  # stochastic depth

        # Add & Norm
        self.norm2 = norm_layer(dim)

        # FFN
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = FFN(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)

    def forward(self, x):
        # MHSA + Res_connection
        x = x + self.drop_path(self.attn(self.norm1(x)))
        # FFN + Res_connection
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x


class VisionTransformer(nn.Module):  # From timm
    """
    Vision Transformer
    A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale`
        - https://arxiv.org/abs/2010.11929
    """

    def __init__(self, img_size=32, patch_size=4, in_chans=3, num_classes=10, embed_dim=128, depth=8,
                 num_heads=8, mlp_ratio=2., qkv_bias=True, representation_size=None,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0., embed_layer=PatchEmbed,
                 norm_layer=None, act_layer=None):
        """
        Args:
            img_size (int, tuple): input image size
            patch_size (int, tuple): patch size
            in_chans (int): number of input channels
            num_classes (int): number of classes for classification head
            embed_dim (int): embedding dimension
            depth (int): depth of transformer
            num_heads (int): number of attention heads
            mlp_ratio (int): ratio of mlp hidden dim to embedding dim
            qkv_bias (bool): enable bias for qkv if True
            representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set
            drop_rate (float): dropout rate
            attn_drop_rate (float): attention dropout rate
            drop_path_rate (float): stochastic depth rate
            embed_layer (nn.Module): patch embedding layer
            norm_layer: (nn.Module): normalization layer
        """
        super().__init__()
        self.num_classes = num_classes
        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models
        self.num_tokens = 1  # a single [CLS] token

        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
        act_layer = act_layer or nn.GELU

        self.patch_embed = embed_layer(
            img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
        num_patches = self.patch_embed.num_patches

        # Embedding tokens
        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim))
        self.pos_drop = nn.Dropout(p=drop_rate)

        # Stochastic Depth Decay rule: drop-path rate rises linearly with depth.
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]

        # Encoders
        self.blocks = nn.Sequential(*[
            Block(dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate,
                  attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer)
            for i in range(depth)])

        # last norm
        self.norm = norm_layer(embed_dim)

        # Representation layer (optional bottleneck before the classifier)
        if representation_size:
            self.num_features = representation_size
            self.pre_logits = nn.Sequential(OrderedDict([
                ('fc', nn.Linear(embed_dim, representation_size)),
                ('act', nn.Tanh())
            ]))
        else:
            self.pre_logits = nn.Identity()

        # Classifier head(s)
        self.fc = nn.Linear(self.num_features, self.num_classes) if self.num_classes > 0 else nn.Identity()
        self.head_dist = None

    def forward_features(self, x):
        """Patchify, prepend [CLS], add positions, run the encoder stack."""
        x = self.patch_embed(x)
        cls_token = self.cls_token.expand(x.shape[0], -1, -1)
        x = torch.cat((cls_token, x), dim=1)
        x = self.pos_drop(x + self.pos_embed)
        x = self.blocks(x)
        x = self.norm(x)
        return x

    def forward(self, x):
        x = self.forward_features(x)
        x = self.pre_logits(x[:, 0])  # use cls token for cls head [B,1,768]
        x = self.fc(x)
        return x
13,249
Python
.py
295
35.677966
118
0.577847
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,738
bilstm.cpython-37.pyc
hkgdifyu_pFedPT/system/flcore/trainmodel/__pycache__/bilstm.cpython-37.pyc
B É:ccÁã@s*ddlZddlmZGdd„dejƒZdS)éN)Únncs.eZdZd ‡fdd„ Zdd„Zdd„Z‡ZS) ÚBiLSTM_TextClassificationFNc sútt|ƒ ¡||_||_||_||_||_||_||_ | |_ ||_ | dk rbt j  t | ¡¡|_nt   |j|j ¡|_t j|jd�|_|j r´t  |jd|jd¡|_t j|j d�|_t j|j |j|j|dd�|_t j|jd�|_t  |jd|j¡|_dS)N)ÚpééT)ÚdropoutÚ bidirectional)ÚsuperrÚ__init__Ú input_sizeÚ hidden_sizeÚ output_sizeÚ num_layersÚembedding_dropoutÚ lstm_dropoutÚattention_dropoutÚ attentionÚembedding_lengthrÚ EmbeddingÚfrom_pretrainedÚtorchÚtensorÚword_embeddingsÚDropoutÚembedding_dropout_layerÚLinearÚattention_layerÚattention_dropout_layerÚLSTMÚ lstm_layerÚlstm_dropout_layerÚ output_layer) Úselfr r r rrrrrrZembedding_weights)Ú __class__©úF/root/autodl-tmp/PFL-Non-IID-master/system/flcore/trainmodel/bilstm.pyr s* z"BiLSTM_TextClassification.__init__c Cs¬| d¡}t ||¡ d¡}g}xZt|ƒD]N\}}t ||d|…d¡} t |  d¡||d|…dd…f¡} | | ¡q*Wt t |d¡|fd¡} |  | ¡} |  | ¡} | S)Nrré) Ú unsqueezerÚbmmÚsqueezeÚ enumerateÚsoftmaxÚmatmulÚappendÚcatrr) r"Z lstm_outputÚstateÚseq_lensÚhiddenZ attn_weightsZ new_hiddensÚiÚseq_lenZsoft_attn_weightsZ new_hiddenZ concat_hiddenZ output_hiddenr$r$r%Úattention_forward"s $  z+BiLSTM_TextClassification.attention_forwardc sê|\}}t|ƒ}| |¡}| |¡}t |jd||jf¡jdd�}t |jd||jf¡jdd�}| ddd¡}|  |||f¡\‰\}}ˆ ddd¡‰tj ‡fdd„t |ƒDƒdd�} |  | ¡} |j rØ| ˆ| |¡‰n| ‰| ˆ¡} | S) NrÚcuda)Údevicer&rcs,g|]$\}}ˆ||ddd…f d¡‘qS)r&Nr)r')Ú.0r2r3)Úoutputr$r%ú <listcomp>Jsz5BiLSTM_TextClassification.forward.<locals>.<listcomp>)Údim)ÚlenrrrÚzerosrr ÚtoÚpermuterr.r*r rr4r!) r"ÚxZ input_seqr0Ú batch_sizeZh_0Zc_0Zfinal_hidden_stateZfinal_cell_stater/Úlogitsr$)r8r%Úforward8s        z!BiLSTM_TextClassification.forward)FN)Ú__name__Ú __module__Ú __qualname__r r4rBÚ __classcell__r$r$)r#r%rsr)rrÚModulerr$r$r$r%Ú<module>s 
2,663
Python
.py
30
87.7
247
0.465452
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,739
vit.cpython-38.pyc
hkgdifyu_pFedPT/system/flcore/trainmodel/__pycache__/vit.cpython-38.pyc
U ”jfc7ã@s–ddlZddlmZddlmZmZddlmZdd„ZGdd„dejƒZ Gd d „d ejƒZ Gd d „d ejƒZ Gd d„dejƒZ Gdd„dejƒZ dS)éN)Únn)Ú rearrangeÚrepeat)Ú RearrangecCst|tƒr|S||fS©N)Ú isinstanceÚtuple©Út©r õHD:\京东\promot\cifar\cifar\Cifar10_iid\system\flcore\trainmodel\vit.pyÚpair sr cs$eZdZ‡fdd„Zdd„Z‡ZS)ÚPreNormcs tƒ ¡t |¡|_||_dSr)ÚsuperÚ__init__rÚ LayerNormÚnormÚfn)ÚselfÚdimr©Ú __class__r r rs  zPreNorm.__init__cKs|j| |¡f|�Sr)rr)rÚxÚkwargsr r r ÚforwardszPreNorm.forward©Ú__name__Ú __module__Ú __qualname__rrÚ __classcell__r r rr rs rcs&eZdZd‡fdd„ Zdd„Z‡ZS)Ú FeedForwardçc sBtƒ ¡t t ||¡t ¡t |¡t ||¡t |¡¡|_dSr)rrrÚ SequentialÚLinearÚGELUÚDropoutÚnet)rrÚ hidden_dimÚdropoutrr r rs   ûzFeedForward.__init__cCs | |¡Sr)r&)rrr r r r szFeedForward.forward)r!rr r rr r s r cs&eZdZd‡fdd„ Zdd„Z‡ZS) Ú Attentionéé@çš™™™™™¹?cs„tƒ ¡||}|dko ||k }||_|d|_tjdd�|_tj||ddd�|_|rvt  t ||¡t  |¡¡nt  ¡|_ dS)Négà¿éÿÿÿÿ©réF)Úbias) rrÚheadsÚscalerÚSoftmaxÚattendr#Úto_qkvr"r%ÚIdentityÚto_out)rrr2Údim_headr(Z inner_dimZ project_outrr r r$s  ı şızAttention.__init__c spˆ |¡jddd�}t‡fdd„|ƒ\}}}t || dd¡¡ˆj}ˆ |¡}t ||¡}t|dƒ}ˆ  |¡S)Nr0r.r/cst|dˆjd�S)Nzb n (h d) -> b h n d)Úh)rr2r ©rr r Ú<lambda>6óz#Attention.forward.<locals>.<lambda>éşÿÿÿzb h n d -> b n (h d)) r6ÚchunkÚmapÚtorchÚmatmulÚ transposer3r5rr8) rrÚqkvÚqÚkÚvÚdotsÚattnÚoutr r;r r4s  zAttention.forward)r*r+r,rr r rr r)#sr)cs&eZdZd‡fdd„ Zdd„Z‡ZS)Ú Transformerr!c s`tƒ ¡t g¡|_t|ƒD]<}|j t t|t||||d�ƒt|t |||d�ƒg¡¡qdS)N)r2r9r()r() rrrÚ ModuleListÚlayersÚrangeÚappendrr)r )rrÚdepthr2r9Úmlp_dimr(Ú_rr r rIs    şzTransformer.__init__cCs8d}|jD](\}}|d7}||ƒ|}||ƒ|}q |S)Nrr-)rM)rrÚllrIÚffr r r rQs  zTransformer.forward)r!rr r rr rKHsrKcsJeZdZddddddœ‡fdd„ Zdd „Zd d „Zd d „Zdd„Z‡ZS)ÚViTÚclsr0r+r!)ÚpoolÚchannelsr9r(Ú emb_dropoutc s tƒ ¡t|ƒ\} }t|ƒ\}}| |dkr:||dksBtdƒ‚| |||}| ||}|dksntdƒ‚t td||d�t ||¡¡|_t  t   d|d|¡¡|_ t  t   dd|¡¡|_ t | ¡|_t|||| || ƒ|_||_t ¡|_t |¡|_t ||¡|_dS)Nrz5Image dimensions must be divisible by the patch size.>rVÚmeanz?pool type must be either cls (cls token) or mean (mean 
pooling)z&b c (h p1) (w p2) -> b (h w) (p1 p2 c))Úp1Úp2r-)rrr ÚAssertionErrorrr"rr#Úto_patch_embeddingÚ ParameterrAÚrandnÚ pos_embeddingÚ cls_tokenr%r(rKÚ transformerrWr7Ú to_latentrrÚfc)rÚ image_sizeÚ patch_sizeÚ num_classesrrPr2rQrWrXr9r(rYÚ image_heightÚ image_widthZ patch_heightZ patch_widthÚ num_patchesÚ patch_dimrr r rZs&       ş   z ViT.__init__cCs²| |¡}|j\}}}t|jd|d�}tj||fdd�}||jdd…d|d…f7}| |¡}| |¡}|j dkr€|j dd�n|dd…df}|  |¡}|  |¡}|  |¡}|S©Nú() n d -> b n d©Úbr-r/rZr)r^ÚshaperrbrAÚcatrar(rcrWrZrdrre©rÚimgrrpÚnrRÚ cls_tokensr r r rws    &   z ViT.forwardcCs�| |¡}|j\}}}t|jd|d�}tj||fdd�}||jdd…d|d…f7}| |¡}| |¡}|j dkr€|j dd�n|dd…df}|  |¡}|Srm) r^rqrrbrArrrar(rcrWrZrdrsr r r Úproduce_feature‡s    & zViT.produce_featurecCsn| |¡}|j\}}}t|jd|d�}tj||fdd�}||jdd…d|d…f7}| |¡}| |¡}|S©Nrnror-r/© r^rqrrbrArrrar(rcrsr r r Ú cal_feature—s    zViT.cal_featurecCsn| |¡}|j\}}}t|jd|d�}tj||fdd�}||jdd…d|d…f7}| |¡}| |¡}|Srxryrsr r r Ú Show_detail¤s    zViT.Show_detail) rrrrrrwrzr{rr r rr rUYs  rU)rArZeinopsrrZeinops.layers.torchrr ÚModulerr r)rKrUr r r r Ú<module>s   %
5,881
Python
.py
56
103.875
448
0.353072
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,740
vit.cpython-37.pyc
hkgdifyu_pFedPT/system/flcore/trainmodel/__pycache__/vit.cpython-37.pyc
B Ƭdc7ã@s–ddlZddlmZddlmZmZddlmZdd„ZGdd„dejƒZ Gd d „d ejƒZ Gd d „d ejƒZ Gd d„dejƒZ Gdd„dejƒZ dS)éN)Únn)Ú rearrangeÚrepeat)Ú RearrangecCst|tƒr|S||fS)N)Ú isinstanceÚtuple)Út©r úC/root/autodl-tmp/PFL-Non-IID-master/system/flcore/trainmodel/vit.pyÚpair sr cs$eZdZ‡fdd„Zdd„Z‡ZS)ÚPreNormcs tƒ ¡t |¡|_||_dS)N)ÚsuperÚ__init__rÚ LayerNormÚnormÚfn)ÚselfÚdimr)Ú __class__r r rs  zPreNorm.__init__cKs|j| |¡f|ŽS)N)rr)rÚxÚkwargsr r r ÚforwardszPreNorm.forward)Ú__name__Ú __module__Ú __qualname__rrÚ __classcell__r r )rr r s r cs&eZdZd‡fdd„ Zdd„Z‡ZS)Ú FeedForwardçc sBtƒ ¡t t ||¡t ¡t |¡t ||¡t |¡¡|_dS)N)r rrÚ SequentialÚLinearÚGELUÚDropoutÚnet)rrÚ hidden_dimÚdropout)rr r rs   zFeedForward.__init__cCs | |¡S)N)r")rrr r r r szFeedForward.forward)r)rrrrrrr r )rr rs rcs&eZdZd‡fdd„ Zdd„Z‡ZS) Ú Attentionéé@çš™™™™™¹?cs„tƒ ¡||}|dko ||k }||_|d|_tjdd�|_tj||ddd�|_|rvt  t ||¡t  |¡¡nt  ¡|_ dS)Négà¿éÿÿÿÿ)réF)Úbias) r rÚheadsÚscalerÚSoftmaxÚattendrÚto_qkvrr!ÚIdentityÚto_out)rrr-Údim_headr$Z inner_dimZ project_out)rr r r$s  zAttention.__init__c spˆ |¡jddd�}t‡fdd„|ƒ\}}}t || dd¡¡ˆj}ˆ |¡}t ||¡}t|dƒ}ˆ  |¡S)Nr+r*)rcst|dˆjd�S)Nzb n (h d) -> b h n d)Úh)rr-)r)rr r Ú<lambda>6óz#Attention.forward.<locals>.<lambda>éþÿÿÿzb h n d -> b n (h d)) r1ÚchunkÚmapÚtorchÚmatmulÚ transposer.r0rr3) rrÚqkvÚqÚkÚvÚdotsÚattnÚoutr )rr r4s  zAttention.forward)r&r'r()rrrrrrr r )rr r%#sr%cs&eZdZd‡fdd„ Zdd„Z‡ZS)Ú Transformerçc sdtƒ ¡t g¡|_xHt|ƒD]<}|j t t|t||||d�ƒt|t |||d�ƒg¡¡q WdS)N)r-r4r$)r$) r rrÚ ModuleListÚlayersÚrangeÚappendr r%r)rrÚdepthr-r4Úmlp_dimr$Ú_)rr r rIs    zTransformer.__init__cCs<d}x2|jD](\}}|d7}||ƒ|}||ƒ|}q W|S)Nrr))rH)rrÚllrCÚffr r r rQs  zTransformer.forward)rF)rrrrrrr r )rr rEHsrEcsJeZdZddddddœ‡fdd„ Zdd „Zd d „Zd d „Zdd„Z‡ZS)ÚViTÚclsr+r'g)ÚpoolÚchannelsr4r$Ú emb_dropoutc s tƒ ¡t|ƒ\} }t|ƒ\}}| |dkr:||dksBtdƒ‚| |||}| ||}|dksntdƒ‚t td||d�t ||¡¡|_t  t   d|d|¡¡|_ t  t   dd|¡¡|_ t | ¡|_t|||| || ƒ|_||_t ¡|_t |¡|_t ||¡|_dS)Nrz5Image dimensions must be divisible by the patch size.>ÚmeanrQz?pool type must be either cls (cls 
token) or mean (mean pooling)z&b c (h p1) (w p2) -> b (h w) (p1 p2 c))Úp1Úp2r))r rr ÚAssertionErrorrrrrÚto_patch_embeddingÚ Parameterr;ÚrandnÚ pos_embeddingÚ cls_tokenr!r$rEÚ transformerrRr2Ú to_latentrrÚfc)rÚ image_sizeÚ patch_sizeÚ num_classesrrKr-rLrRrSr4r$rTÚ image_heightÚ image_widthZ patch_heightZ patch_widthÚ num_patchesÚ patch_dim)rr r rZs$         z ViT.__init__cCs²| |¡}|j\}}}t|jd|d�}tj||fdd�}||jdd…d|d…f7}| |¡}| |¡}|j dkr€|j dd�n|dd…df}|  |¡}|  |¡}|  |¡}|S)Nz() n d -> b n d)Úbr))rrUr)rYÚshaperr]r;Úcatr\r$r^rRrUr_rr`)rÚimgrrhÚnrMÚ cls_tokensr r r rws    &   z ViT.forwardcCsž| |¡}|j\}}}t|jd|d�}tj||fdd�}||jdd…d|d…f7}| |¡}| |¡}|j dkr€|j dd�n|dd…df}|  |¡}|S)Nz() n d -> b n d)rhr))rrUr) rYrirr]r;rjr\r$r^rRrUr_)rrkrrhrlrMrmr r r Úproduce_feature‡s    & zViT.produce_featurecCsn| |¡}|j\}}}t|jd|d�}tj||fdd�}||jdd…d|d…f7}| |¡}| |¡}|S)Nz() n d -> b n d)rhr))r) rYrirr]r;rjr\r$r^)rrkrrhrlrMrmr r r Ú cal_feature—s    zViT.cal_featurecCsn| |¡}|j\}}}t|jd|d�}tj||fdd�}||jdd…d|d…f7}| |¡}| |¡}|S)Nz() n d -> b n d)rhr))r) rYrirr]r;rjr\r$r^)rrkrrhrlrMrmr r r Ú Show_detail¤s    zViT.Show_detail) rrrrrrnrorprr r )rr rPYs  rP)r;rZeinopsrrZeinops.layers.torchrr ÚModuler rr%rErPr r r r Ú<module>s   %
6,118
Python
.py
73
82.69863
448
0.349983
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,741
models.cpython-38.pyc
hkgdifyu_pFedPT/system/flcore/trainmodel/__pycache__/models.cpython-38.pyc
U >Nÿcblã@sddlZddlmZddlmmZddlZddlZddlm Z ddl m Z ddl Z ddlmZmZddlmZmZmZmZdZGdd„dejƒZGd d „d ejƒZGd d „d ejƒZGd d„dejƒZGdd„dejƒZGdd„dejƒZGdd„dejƒZGdd„dejƒZGdd„dejƒZ dd„Z!Gdd„dejƒZ"Gdd„dejƒZ#Gdd „d ejƒZ$Gd!d"„d"ejƒZ%Gd#d$„d$ejƒZ&Gd%d&„d&ejƒZ'Gd'd(„d(ejƒZ(Gd)d*„d*ejƒZ)Gd+d,„d,ejƒZ*Gd-d.„d.ejƒZ+Gd/d0„d0ejƒZ,Gd1d2„d2ejƒZ-dS)3éN)Úpartial)Ú OrderedDict)ÚIMAGENET_DEFAULT_MEANÚIMAGENET_DEFAULT_STD)Ú StdConv2dSameÚDropPathÚ to_2tupleÚ trunc_normal_écs$eZdZ‡fdd„Zdd„Z‡ZS)Ú LocalModelcstt|ƒ ¡||_||_dS©N)Úsuperr Ú__init__ÚbaseÚ predictor)Úselfrr©Ú __class__©õaD:\京东\promot\第二次投稿\å®�验\æœ�务器\native - pro\system\flcore\trainmodel\models.pyrszLocalModel.__init__cCs| |¡}| |¡}|Sr )rr©rÚxÚoutrrrÚforwards  zLocalModel.forward©Ú__name__Ú __module__Ú __qualname__rrÚ __classcell__rrrrr s r cs$eZdZ‡fdd„Zdd„Z‡ZS)Ú LocalModel_ptcstt|ƒ ¡||_||_dSr )r rrÚ generatorr)rr rrrrr#szLocalModel_pt.__init__cCs| |¡}| |¡}|Sr )r rrrrrr)s  zLocalModel_pt.forwardrrrrrr"s rcs$eZdZ‡fdd„Zdd„Z‡ZS)ÚLocalModel_repptcs$tt|ƒ ¡||_||_||_dSr )r r!rr rr)rr rrrrrr0szLocalModel_reppt.__init__cCs"| |¡}| |¡}| |¡}|Sr )r rrrrrrr7s   zLocalModel_reppt.forwardrrrrrr!/s r!cs&eZdZd‡fdd„ Zdd„Z‡ZS) Ú FedAvgCNNéé éc s�tƒ ¡t tj|dddddd�tjdd�tjdd �¡|_t tjdd ddddd�tjdd�tjdd �¡|_t t  |d ¡tjdd�¡|_ t  d |¡|_ dS) Né érr#T)Ú kernel_sizeÚpaddingÚstrideÚbias©Úinplace)ér.©r(é@i) r rÚnnÚ SequentialÚConv2dÚReLUÚ MaxPool2dÚconv1Úconv2ÚLinearÚfc1Úfc)rÚ in_featuresÚ num_classesÚdimrrrr`s8 û  ø û  ø   şzFedAvgCNN.__init__cCs8| |¡}| |¡}t |d¡}| |¡}| |¡}|S©Nr#)r6r7ÚtorchÚflattenr9r:rrrrr|s      zFedAvgCNN.forward)r#r$r%rrrrrr"_sr"cs&eZdZd‡fdd„ Zdd„Z‡ZS) Ú FedAvgMLPér$éÈcs8tƒ ¡t ||¡|_t ||¡|_tjdd�|_dS)NTr,)r rr1r8r9Úfc2r4Úact)rr;r<Ú hidden_dimrrrrˆs zFedAvgMLP.__init__cCs:|jdkr| | d¡d¡}| | |¡¡}| |¡}|S)Néréÿÿÿÿ)ÚndimÚviewÚsizerEr9rD©rrrrrr�s   zFedAvgMLP.forward)rBr$rCrrrrrrA‡srAcs$eZdZ‡fdd„Zdd„Z‡ZS)ÚNetcsjtt|ƒ ¡t dtdd¡|_t tddd¡|_t d¡|_ t d¡|_ t  dd¡|_ t  dd¡|_ dS) Nr#r.r&gĞ?çà?iHé€r$)r rMrr1r3Ú batch_sizer6r7ÚDropoutÚdropout1Údropout2r8r9r:)rrrrr˜s  z 
Net.__init__cCs�| |¡}t ¡|ƒ}t dd¡|ƒ}| |¡}| |¡}t ¡|ƒ}t dd¡|ƒ}| |¡}t |d¡}|  |¡}t ¡|ƒ}|  |¡}t j |dd�}|S)Nr.r#©r=) r6r1r4r5rRr7rSr?r@r9r:ÚFÚ log_softmax©rrÚoutputrrrr¡s          z Net.forwardrrrrrrM—s rMcs&eZdZd‡fdd„ Zdd„Z‡ZS)Ú Mclr_LogisticrBr$cs tt|ƒ ¡t ||¡|_dSr )r rYrr1r8r:)rÚ input_dimr<rrrr´szMclr_Logistic.__init__cCs(t |d¡}| |¡}tj|dd�}|S©Nr#rT)r?r@r:rUrVrWrrrr¸s  zMclr_Logistic.forward)rBr$rrrrrrY³srYcs&eZdZd‡fdd„ Zdd„Z‡ZS) ÚDNNrBédr$cs.tt|ƒ ¡t ||¡|_t ||¡|_dSr )r r\rr1r8r9r:)rrZZmid_dimr<rrrrÁsz DNN.__init__cCs8t |d¡}t | |¡¡}| |¡}tj|dd�}|Sr[)r?r@rUÚrelur9r:rVrLrrrrÆs   z DNN.forward)rBr]r$rrrrrr\Àsr\cs&eZdZd‡fdd„ Zdd„Z‡ZS)ÚCifarNetr$csrtt|ƒ ¡t ddd¡|_t dd¡|_t dtd¡|_ t  tddd¡|_ t  dd¡|_ t  d|¡|_ dS)Néér'r.éxéT)r r_rr1r3r6r5ÚpoolrPr7r8r9rDr:)rr<rrrrĞszCifarNet.__init__cCs|| t | |¡¡¡}| t | |¡¡¡}| dtdd¡}t | |¡¡}t | |¡¡}|  |¡}tj |dd�}|S)NrHr'r#rT) rdrUr^r6r7rJrPr9rDr:rVrLrrrrÙs zCifarNet.forward)r$rrrrrr_Ïs r_cCs |jj}| d¡dks$| d¡dkrBtj |j¡tj |j¡nZ| d¡dkrrtj  |jdd¡tj |j¡n*| d¡dkrœtj  |j¡tj |j¡dS)Nr3rHÚConvTranspose2dZ BatchNormgğ?g{®Gáz”?r8) rrÚfindr1ÚinitÚkaiming_uniform_ÚweightÚzeros_r+Únormal_Úxavier_normal_)ÚmÚ classnamerrrÚ init_weightssrocs&eZdZd‡fdd„ Zdd„Z‡ZS) Ú PadPrompterr#éNc s´tt|ƒ ¡|}|}||_||_||d|_t t  d|j||g¡¡|_ t t  d|j||g¡¡|_ t t  d|j||d|g¡¡|_ t t  d|j||d|g¡¡|_ dS)Nr.r#)r rprÚargsÚinchanelÚ base_sizer1Ú Parameterr?ÚzerosÚpad_upÚpad_downÚpad_leftÚ pad_right©rrsÚpad_sizeÚ image_sizerrrrrrs$zPadPrompter.__init__cCsnt d|j|j|j¡ |jj¡}tj|j||j gdd�}tj|j ||j gdd�}t |  d¡|g¡}||S)Nr#r`rTr.r) r?rvrsrtÚtorrÚdeviceÚcatryrzrwrxrK)rrrÚpromptrrrr)s  zPadPrompter.forward)r#r#rqNrrrrrrps rpcs&eZdZd‡fdd„ Zdd„Z‡ZS) ÚFixedPatchPrompterr#rqNcsJtt|ƒ ¡||_||_||_t t  d|j|j|jg¡¡|_ ||_ dSr>) r r‚rÚisizeÚpsizersr1rur?ÚrandnÚpatchrrr{rrrr4s  zFixedPatchPrompter.__init__cCsPt d|j|j|jg¡ |jj¡}|j|dd…dd…d|j…d|j…f<||Sr>) r?rvrsrƒr~rrrr†r„)rrr�rrrr=s"&zFixedPatchPrompter.forward)r#r#rqNrrrrrr‚3s r‚cs&eZdZd‡fdd„ Zdd„Z‡ZS) ÚRandomPatchPrompterr#rqNcsJtt|ƒ ¡||_||_||_t t  d|j|j|jg¡¡|_ ||_ dSr>) r 
r‡rrƒr„rsr1rur?r…r†rrr{rrrrEs  zRandomPatchPrompter.__init__cCs€tj |j|j¡}tj |j|j¡}t d|j|j|jg¡ |j j ¡}|j |dd…dd…|||j…|||j…f<||Sr>) ÚnpÚrandomÚchoicerƒr„r?rvrsr~rrrr†)rrÚx_Zy_r�rrrrNs ".zRandomPatchPrompter.forward)r#r#rqNrrrrrr‡Ds r‡cs&eZdZd ‡fdd„ Zdd„Z‡ZS) ÚLeNeté ér$Nc sÌtt|ƒ ¡t tjdddd�t d¡t ¡tjdddd�tjdd�t d¡t ¡¡|_ tj |d d �|_ tj dd�|_ t ||¡|_|j t¡t ||¡|_|d kr¼tjj|jd d �|_|j t¡dS)Nr#ér'r/r.é2rN©ÚpT)ÚaffineÚwnri)Úname)r rŒrr1r2r3r5r4Ú Dropout2dÚ conv_paramsÚ BatchNorm1dÚbnrQÚdropoutr8Ú bottleneckÚapplyror:ÚutilsÚ weight_norm)rÚ feature_dimZbottleneck_dimr<ZiswnrrrrXs$ ù  zLeNet.__init__cCsV| |¡}| | d¡d¡}| |¡}| |¡}| |¡}| |¡}tj|dd�}|S©NrrHr#rT) r—rJrKr›r™ršr:rUrVrLrrrrms     z LeNet.forward)r�r�r$NrrrrrrŒWsrŒcs&eZdZd ‡fdd„ Zd d „Z‡ZS) ÚLSTMNetr.Fçš™™™™™É?réK�r$c s`tƒ ¡t |¡|_t |||¡|_tj|||||dd�|_|rJ|dn|}t  ||¡|_ dS)NT)Ú input_sizeÚ hidden_sizeÚ num_layersÚ bidirectionalršÚ batch_firstr.) r rr1rQršÚ EmbeddingÚ embeddingÚLSTMÚlstmr8r:) rrFr¦r§ršÚ padding_idxÚ vocab_sizer<Údimsrrrr—s  ûzLSTMNet.__init__c Cs’|\}}| |¡}tjjj||ddd�}| |¡\}\}}tjjj|dd�\} } t | dd…ddd…f¡} |  | ¡} |  | ¡} t j | dd�} | S)NTF)r¨Úenforce_sorted)r¨rHr#rT) rªr1r�ÚrnnÚpack_padded_sequencer¬Úpad_packed_sequencer?Úrelu_ršr:rUrV) rrÚtextÚ text_lengthsZembeddedZpacked_embeddedZ packed_outputÚhiddenÚcellrZ out_lengthsrrrr¦s   zLSTMNet.forward)r.Fr¢rr£r$rrrrrr¡–s ÿr¡cs&eZdZd‡fdd„ Zdd„Z‡ZS) ÚfastTextrr£r$cs>tt|ƒ ¡t |||¡|_t ||¡|_t ||¡|_dSr ) r r¹rr1r©rªr8r9r:)rrFr­r®r<rrrr¼szfastText.__init__cCs>|\}}| |¡}| | d¡¡}| |¡}tj|dd�}|Sr[)rªr9Úmeanr:rUrV)rrrµr¶Ú embedded_sentÚhÚzrrrrrÈs   zfastText.forward)rr£r$rrrrrr¹»s r¹cs:eZdZddddgddddd f‡fd d „ Zd d „Z‡ZS)ÚTextCNNr]r`rGr'rCgš™™™™™é?rr£r$c sætt|ƒ ¡t |||¡|_t tj|||dd�t ¡t  ||dd¡¡|_ t tj|||dd�t ¡t  ||dd¡¡|_ t tj|||dd�t ¡t  ||dd¡¡|_ t  |¡|_t |t|ƒ|¡|_dS)Nr)Ú in_channelsÚ out_channelsr(r#r.)r r¾rr1r©rªr2ÚConv1dr4Ú MaxPool1dr6r7Úconv3rQršr8Úlenr:) rrFÚ num_channelsr(Úmax_lenršr­r®r<rrrrÕs&ııı zTextCNN.__init__c Cs„|\}}| |¡ ddd¡}| |¡ d¡}| |¡ d¡}| |¡ d¡}t |||fd¡}| |¡} |  | ¡} 
t j | dd�} | S)Nrr.r#rT) rªÚpermuter6Úsqueezer7rÃr?r€ršr:rUrV) rrrµr¶r»Z conv_out1Z conv_out2Z conv_out3Zall_outZfinal_feature_maprrrrrós  zTextCNN.forwardrrrrrr¾Ôs ÿr¾cs*eZdZdZd ‡fdd„ Zdd „Z‡ZS) Ú PatchEmbedz{ Image to Patch Embedding CNN_proj + Rearrange: [B, C, W, H] -> [B, D, Pn_W, Pn_H] -> [B, D, Pn] -> [B, Pn, D] éàr r`écsdtƒ ¡t|ƒ}t|ƒ}|d|d|d|d}||_||_||_tj||||d�|_dS)Nr#r)r(r*) r rrÚimg_sizeÚ patch_sizeÚ num_patchesr1r3Úproj)rrÌrÍÚin_chansÚ embed_dimrÎrrrr s  zPatchEmbed.__init__c Cst|j\}}}}||jdkr*||jdksXtd|›d|›d|jd›d|jd›d� ƒ‚| |¡ d¡ dd¡}|S)Nrr#zInput image size (Ú*z) doesn't match model (z).r.)ÚshaperÌÚAssertionErrorrÏr@Ú transpose)rrÚBÚCÚHÚWrrrrs (ÿzPatchEmbed.forward)rÊr r`rË©rrrÚ__doc__rrrrrrrrÉs rÉcs4eZdZdZddejdf‡fdd„ Zdd„Z‡ZS)ÚFFNz� FFN (from timm) :param in_features: :param hidden_features: :param out_features: :param act_layer: :param drop: NçcsNtƒ ¡|p|}|p|}t ||¡|_|ƒ|_t ||¡|_t |¡|_dSr ) r rr1r8r9rErDrQÚdrop)rr;Úhidden_featuresÚ out_featuresÚ act_layerrŞrrrr,s z FFN.__init__cCs6| |¡}| |¡}| |¡}| |¡}| |¡}|Sr )r9rErŞrDrLrrrr8s      z FFN.forward) rrrrÛr1ÚGELUrrrrrrrrÜ"s  rÜcs*eZdZdZd ‡fdd„ Zdd „Z‡ZS) Ú Attentiona� qkv Transform + MSA(MHSA) (from timm) # input x.shape = batch, patch_number, patch_dim # output x.shape = batch, patch_number, patch_dim :param dim: dim=CNN feature dim, because the patch size is 1x1 :param num_heads: :param qkv_bias: :param qk_scale: by default head_dim ** -0.5 (squre root) :param attn_drop: dropout rate after MHSA :param proj_drop: éFNrİcsftƒ ¡||_||}|p"|d|_tj||d|d�|_t |¡|_t ||¡|_ t |¡|_ dS)Ngà¿r`)r+) r rÚ num_headsÚscaler1r8ÚqkvrQÚ attn_droprÏÚ proj_drop)rr=råÚqkv_biasÚqk_scalerèréÚhead_dimrrrrQs  zAttention.__init__c Cs´|j\}}}| |¡ ||d|j||j¡ ddddd¡}|d|d|d}}}|| dd¡|j} | jdd�} | | ¡} | | dd¡ |||¡}|  |¡}|  |¡}|S) Nr`r.rr#rGéşÿÿÿrHrT) rÓrçÚreshaperårÇrÕræÚsoftmaxrèrÏré) rrÚbatchZ patch_numberZ patch_dimrçÚqÚkÚvÚattnrrrr^s$ ÿÿ    zAttention.forward)räFNrİrİrÚrrrrrãDs  rãcs:eZdZddddddejejf‡fdd„ Zdd„Z‡ZS) ÚBlockg@FNrİc sptƒ ¡| 
|ƒ|_t||||||d�|_|dkr:t|ƒnt ¡|_| |ƒ|_ t ||ƒ} t || | |d�|_ dS)aê # input x.shape = batch, patch_number, patch_dim # output x.shape = batch, patch_number, patch_dim :param dim: dim :param num_heads: :param mlp_ratio: FFN :param qkv_bias: :param qk_scale: by default head_dim ** -0.5 (squre root) :param drop: :param attn_drop: dropout rate after Attention :param drop_path: dropout rate after sd :param act_layer: FFN act :param norm_layer: Pre Norm )rårêrërèrérİ)r;rßrárŞN) r rÚnorm1rãrôrr1ÚIdentityÚ drop_pathÚnorm2ÚintrÜÚmlp) rr=råÚ mlp_ratiorêrërŞrèrøráÚ norm_layerZmlp_hidden_dimrrrr{s   ÿ  zBlock.__init__cCs8|| | | |¡¡¡}|| | | |¡¡¡}|Sr )rørôrörûrùrLrrrršsz Block.forward) rrrr1râÚ LayerNormrrrrrrrrõys  ÿrõcsReZdZdZddddddddd d d d d ed d f‡fd d „ Zdd„Zdd„Z‡ZS)ÚVisionTransformerz¬ Vision Transformer A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` - https://arxiv.org/abs/2010.11929 r&rGr`r$rOräg@TNrİc sXtƒ ¡||_ˆ|_|_d|_ˆp2ttjdd�‰ˆp<tj ‰||||ˆd�|_ |j j }t  t  ddˆ¡¡|_t  t  d||jˆ¡¡|_tjˆd�|_dd„t  d| |¡Dƒ‰tj‡‡‡‡‡‡‡‡‡f d d„t|ƒDƒ�|_ˆˆƒ|_| �r| |_t td t ˆ| ¡fd t ¡fgƒ¡|_n t ¡|_|jdk�rDt |j|j¡nt ¡|_d |_d S) aš Args: img_size (int, tuple): input image size patch_size (int, tuple): patch size in_chans (int): number of input channels num_classes (int): number of classes for classification head embed_dim (int): embedding dimension depth (int): depth of transformer num_heads (int): number of attention heads mlp_ratio (int): ratio of mlp hidden dim to embedding dim qkv_bias (bool): enable bias for qkv if True representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set drop_rate (float): dropout rate attn_drop_rate (float): attention dropout rate drop_path_rate (float): stochastic depth rate embed_layer (nn.Module): patch embedding layer norm_layer: (nn.Module): normalization layer r#g�íµ ÷ư>)Úeps)rÌrÍrĞrÑr‘cSsg|] }| ¡‘qSr)Úitem)Ú.0rrrrÚ <listcomp>Òsz.VisionTransformer.__init__.<locals>.<listcomp>rc 
s*g|]"}tˆˆˆˆˆˆˆ|ˆˆd� ‘qS)) r=rårürêrŞrèrørırá)rõ)rÚi© ráÚattn_drop_rateZdprÚ drop_raterÑrürırårêrrrÕsş ÿr:rEN)r rr<Ú num_featuresrÑZ num_tokensrr1rşrâÚ patch_embedrÎrur?rvÚ cls_tokenÚ pos_embedrQÚpos_dropÚlinspacer2ÚrangeÚblocksÚnormrr8ÚTanhÚ pre_logitsr÷r:Z head_dist)rrÌrÍrĞr<rÑÚdepthrårürêÚrepresentation_sizerrZdrop_path_rateZ embed_layerrırárÎrrrrªs<   ÿı   ş  &zVisionTransformer.__init__cCsZ| |¡}|j |jddd¡}tj||fdd�}| ||j¡}| |¡}|  |¡}|Sr ) r r ÚexpandrÓr?r€r r rr)rrr rrrÚforward_featuresës   z"VisionTransformer.forward_featurescCs.| |¡}| |dd…df¡}| |¡}|S)Nr)rrr:rLrrrrös  zVisionTransformer.forward) rrrrÛrÉrrrrrrrrrÿ£s şA rÿ).r?Útorch.nnr1Útorch.nn.functionalÚ functionalrUÚmathÚloggingÚ functoolsrÚ collectionsrÚnumpyrˆZ timm.datarrZtimm.models.layersrrrr rPÚModuler rr!r"rArMrYr\r_rorpr‚r‡rŒr¡r¹r¾rÉrÜrãrõrÿrrrrÚ<module>s@    0( @ ?%0"5*
21,084
Python
.py
210
97.457143
663
0.359138
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,742
mobilenet_v2.cpython-37.pyc
hkgdifyu_pFedPT/system/flcore/trainmodel/__pycache__/mobilenet_v2.cpython-37.pyc
B Ê:ccã@svddlmZddlmZddgZddiZddd„ZGd d „d ejƒZGd d „d ej ƒZ Gd d„dej ƒZ ddd„Z dS)é)Únn)Úload_state_dict_from_urlÚ MobileNetV2Ú mobilenet_v2z=https://download.pytorch.org/models/mobilenet_v2-b0353104.pthNcCsB|dkr |}t|t||dƒ||ƒ}|d|kr>||7}|S)aD This function is taken from the original tf repo. It ensures that all layers have a channel number that is divisible by 8 It can be seen here: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py :param v: :param divisor: :param min_value: :return: NégÍÌÌÌÌÌì?)ÚmaxÚint)ÚvÚdivisorÚ min_valueÚnew_v©r úL/root/autodl-tmp/PFL-Non-IID-master/system/flcore/trainmodel/mobilenet_v2.pyÚ_make_divisible s  rcseZdZd‡fdd„ Z‡ZS)Ú ConvBNReLUééNc sR|dd}|dkrtj}tt|ƒ tj||||||dd�||ƒtjdd�¡dS)NrrF)ÚgroupsÚbiasT)Úinplace)rÚ BatchNorm2dÚsuperrÚ__init__ÚConv2dÚReLU6)ÚselfÚ in_planesÚ out_planesÚ kernel_sizeÚstriderÚ norm_layerÚpadding)Ú __class__r rr"s  zConvBNReLU.__init__)rrrN)Ú__name__Ú __module__Ú __qualname__rÚ __classcell__r r )r"rr!srcs&eZdZd‡fdd„ Zdd„Z‡ZS)ÚInvertedResidualNc s¸tt|ƒ ¡||_|dks t‚|dkr.tj}tt||ƒƒ}|jdkoN||k|_ g}|dkrt|  t ||d|d�¡|  t |||||d�tj ||ddddd�||ƒg¡tj|�|_dS)N)rrr)rr )rrr rF)r)rr'rrÚAssertionErrorrrrÚroundÚuse_res_connectÚappendrÚextendrÚ SequentialÚconv)rÚinpÚouprÚ expand_ratior Ú hidden_dimÚlayers)r"r rr.s  zInvertedResidual.__init__cCs"|jr|| |¡S| |¡SdS)N)r*r.)rÚxr r rÚforwardFszInvertedResidual.forward)N)r#r$r%rr5r&r r )r"rr'-sr'cs.eZdZd ‡fdd„ Zdd„Zd d „Z‡ZS) réèçğ?Néc s<tt|ƒ ¡|dkrt}|dkr(tj}d}d}|dkr‚ddddgddddgddd dgdd d dgdd d dgdd d dgddddgg}t|ƒdks�t|dƒd kr¬td |¡ƒ‚t |||ƒ}t |t d|ƒ|ƒ|_ t d |d|d�g} xd|D]\\} } } } t | ||ƒ}x@t | ƒD]4}|dk�r| nd}|  ||||| |d�¡|}�q WqèW|  t ||j d|d�¡tj| �|_t d¡|_t |j |¡|_x®| ¡D]¢}t|tjƒ�rÒtjj|jdd�|jdk �r2tj |j¡n`t|tjtjfƒ�rtj |j¡tj |j¡n.t|tjƒ�r’tj |jdd¡tj |j¡�q’WdS)aA MobileNet V2 main class Args: num_classes (int): Number of classes width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount inverted_residual_setting: Network 
structure round_nearest (int): Round the number of channels in each layer to be a multiple of this number Set to 1 to turn off rounding block: Module specifying inverted residual building block for mobilenet norm_layer: Module specifying the normalization layer to use Né irééérré@éé`é i@rzIinverted_residual_setting should be non-empty or a 4-element list, got {}gğ?)rr )r1r )rr gš™™™™™É?Úfan_out)Úmodeg{®Gáz„?) rrrr'rrÚlenÚ ValueErrorÚformatrrÚ last_channelrÚranger+r-ÚfeaturesÚDropoutÚdropoutÚLinearÚfcÚmodulesÚ isinstancerÚinitÚkaiming_normal_ÚweightrÚzeros_Ú GroupNormÚones_Únormal_)rÚ num_classesÚ width_multÚinverted_residual_settingÚ round_nearestÚblockr Ú input_channelrFrHÚtÚcÚnÚsÚoutput_channelÚirÚm)r"r rrNsT         zMobileNetV2.__init__cCs>| |¡}tj |d¡ |jdd¡}| |¡}| |¡}|S)Nrréÿÿÿÿ)rHrÚ functionalÚadaptive_avg_pool2dÚreshapeÚshaperJrL)rr4r r rÚ _forward_implŸs    zMobileNetV2._forward_implcCs | |¡S)N)rh)rr4r r rr5©szMobileNetV2.forward)r6r7Nr8NN)r#r$r%rrhr5r&r r )r"rrMsK FTcKshtf|�}|rdttd|d�}i}x6| ¡D]*\}}d|krF|||<q,||| dd¡<q,W| |¡|S)aC Constructs a MobileNetV2 architecture from `"MobileNetV2: Inverted Residuals and Linear Bottlenecks" <https://arxiv.org/abs/1801.04381>`_. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr r)ÚprogressÚ classifierz classifier.1rL)rrÚ model_urlsÚitemsÚreplaceÚload_state_dict)Ú pretrainedriÚkwargsÚmodelÚ state_dictÚnew_dictÚkr r r rr­s   )N)FT) ÚtorchrZ torch.hubrÚ__all__rkrr-rÚModuler'rrr r r rÚ<module>s     `
5,660
Python
.py
67
80.761194
395
0.473618
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,743
alexnet.cpython-38.pyc
hkgdifyu_pFedPT/system/flcore/trainmodel/__pycache__/alexnet.cpython-38.pyc
U ”jfc) ã@sPddlZddlmZddlmZddgZddiZGdd„dejƒZd d d„Z dS) éN)Úload_state_dict_from_urlÚAlexNetÚalexnetz<https://download.pytorch.org/models/alexnet-owt-4df8aa71.pthcs&eZdZd‡fdd„ Zdd„Z‡ZS)réècstt|ƒ ¡t tjdddddd�tjdd�tjddd �tjdd d dd �tjdd�tjddd �tjd d ddd �tjdd�tjd dddd �tjdd�tjddddd �tjdd�tjddd �¡ |_t  d¡|_ t t  ¡t  dd¡tjdd�t  ¡t  dd¡tjdd�¡|_ t  d|¡|_dS)Néé@é éé)Ú kernel_sizeÚstrideÚpaddingT)Úinplace)r r éÀé)r r i€éé)éri$i)ÚsuperrÚ__init__ÚnnÚ SequentialÚConv2dÚReLUÚ MaxPool2dÚfeaturesÚAdaptiveAvgPool2dÚavgpoolÚDropoutÚLinearÚ classifierÚfc)ÚselfÚ num_classes©Ú __class__©õLD:\京东\promot\cifar\cifar\Cifar10_iid\system\flcore\trainmodel\alexnet.pyrs4        ó     úzAlexNet.__init__cCs8| |¡}| |¡}t |d¡}| |¡}| |¡}|S)Nr)rrÚtorchÚflattenr r!)r"Úxr&r&r'Úforward+s      zAlexNet.forward)r)Ú__name__Ú __module__Ú __qualname__rr+Ú __classcell__r&r&r$r'r sFTcKsdtf|�}|r`ttd|d�}i}| ¡D]*\}}d|krD|||<q*||| dd¡<q*| |¡|S)aAlexNet model architecture from the `"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr r)Úprogressz classifier.6r!)rrÚ model_urlsÚitemsÚreplaceÚload_state_dict)Ú pretrainedr0ÚkwargsÚmodelÚ state_dictÚnew_dictÚkÚvr&r&r'r4s ÿ  )FT) r(Útorch.nnrZ torch.hubrÚ__all__r1ÚModulerrr&r&r&r'Ú<module>s  ÿ'
2,184
Python
.py
34
62.352941
303
0.465581
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,744
models.cpython-37.pyc
hkgdifyu_pFedPT/system/flcore/trainmodel/__pycache__/models.cpython-37.pyc
B éecTgã@säddlZddlmZddlmmZddlZddlZddlm Z ddl m Z ddl Z ddlmZmZddlmZmZmZmZdZGdd„dejƒZGd d „d ejƒZGd d „d ejƒZGd d„dejƒZGdd„dejƒZGdd„dejƒZGdd„dejƒZGdd„dejƒZGdd„dejƒZ dd„Z!Gdd„dejƒZ"Gdd„dejƒZ#Gdd „d ejƒZ$Gd!d"„d"ejƒZ%Gd#d$„d$ejƒZ&Gd%d&„d&ejƒZ'Gd'd(„d(ejƒZ(Gd)d*„d*ejƒZ)Gd+d,„d,ejƒZ*Gd-d.„d.ejƒZ+dS)/éN)Úpartial)Ú OrderedDict)ÚIMAGENET_DEFAULT_MEANÚIMAGENET_DEFAULT_STD)Ú StdConv2dSameÚDropPathÚ to_2tupleÚ trunc_normal_écs$eZdZ‡fdd„Zdd„Z‡ZS)Ú LocalModelcstt|ƒ ¡||_||_dS)N)Úsuperr Ú__init__ÚbaseÚ predictor)Úselfrr)Ú __class__©úF/root/autodl-tmp/PFL-Non-IID-master/system/flcore/trainmodel/models.pyr szLocalModel.__init__cCs| |¡}| |¡}|S)N)rr)rÚxÚoutrrrÚforwards  zLocalModel.forward)Ú__name__Ú __module__Ú __qualname__r rÚ __classcell__rr)rrr s r cs$eZdZ‡fdd„Zdd„Z‡ZS)Ú LocalModel_ptcstt|ƒ ¡||_||_dS)N)r rr Ú generatorr)rrr)rrrr #szLocalModel_pt.__init__cCs| |¡}| |¡}|S)N)rr)rrrrrrr)s  zLocalModel_pt.forward)rrrr rrrr)rrr"s rcs$eZdZ‡fdd„Zdd„Z‡ZS)ÚLocalModel_repptcs$tt|ƒ ¡||_||_||_dS)N)r rr rrr)rrrr)rrrr 0szLocalModel_reppt.__init__cCs"| |¡}| |¡}| |¡}|S)N)rrr)rrrrrrr7s   zLocalModel_reppt.forward)rrrr rrrr)rrr/s rcs&eZdZd‡fdd„ Zdd„Z‡ZS) Ú FedAvgCNNéé éc s�tƒ ¡t tj|dddddd�tjdd�tjdd �¡|_t tjdd ddddd�tjdd�tjdd �¡|_t t  |d ¡tjdd�¡|_ t  d |¡|_ dS) Né érrT)Ú kernel_sizeÚpaddingÚstrideÚbias)Úinplace)ér))r$é@i) r r ÚnnÚ SequentialÚConv2dÚReLUÚ MaxPool2dÚconv1Úconv2ÚLinearÚfc1Úfc)rÚ in_featuresÚ num_classesÚdim)rrrr `s.    
zFedAvgCNN.__init__cCs8| |¡}| |¡}t |d¡}| |¡}| |¡}|S)Nr)r0r1ÚtorchÚflattenr3r4)rrrrrrr|s      zFedAvgCNN.forward)rr r!)rrrr rrrr)rrr_srcs&eZdZd‡fdd„ Zdd„Z‡ZS) Ú FedAvgMLPér éÈcs8tƒ ¡t ||¡|_t ||¡|_tjdd�|_dS)NT)r()r r r+r2r3Úfc2r.Úact)rr5r6Ú hidden_dim)rrrr ˆs zFedAvgMLP.__init__cCs:|jdkr| | d¡d¡}| | |¡¡}| |¡}|S)Néréÿÿÿÿ)ÚndimÚviewÚsizer>r3r=)rrrrrr�s   zFedAvgMLP.forward)r;r r<)rrrr rrrr)rrr:‡sr:cs$eZdZ‡fdd„Zdd„Z‡ZS)ÚNetcsjtt|ƒ ¡t dtdd¡|_t tddd¡|_t d¡|_ t d¡|_ t  dd¡|_ t  dd¡|_ dS) Nrr)r"gĞ?gà?iHé€r )r rEr r+r-Ú batch_sizer0r1ÚDropoutÚdropout1Údropout2r2r3r4)r)rrrr ˜s  z Net.__init__cCs�| |¡}t ¡|ƒ}t dd¡|ƒ}| |¡}| |¡}t ¡|ƒ}t dd¡|ƒ}| |¡}t |d¡}|  |¡}t ¡|ƒ}|  |¡}t j |dd�}|S)Nr)r)r7) r0r+r.r/rIr1rJr8r9r3r4ÚFÚ log_softmax)rrÚoutputrrrr¡s          z Net.forward)rrrr rrrr)rrrE—s rEcs&eZdZd‡fdd„ Zdd„Z‡ZS)Ú Mclr_Logisticér cs tt|ƒ ¡t ||¡|_dS)N)r rNr r+r2r4)rÚ input_dimr6)rrrr ´szMclr_Logistic.__init__cCs(t |d¡}| |¡}tj|dd�}|S)Nr)r7)r8r9r4rKrL)rrrMrrrr¸s  zMclr_Logistic.forward)rOr )rrrr rrrr)rrrN³srNcs&eZdZd‡fdd„ Zdd„Z‡ZS) ÚDNNéédr cs.tt|ƒ ¡t ||¡|_t ||¡|_dS)N)r rQr r+r2r3r4)rrPZmid_dimr6)rrrr Ász DNN.__init__cCs8t |d¡}t | |¡¡}| |¡}tj|dd�}|S)Nr)r7)r8r9rKÚrelur3r4rL)rrrrrrÆs   z DNN.forward)rRrSr )rrrr rrrr)rrrQÀsrQcs&eZdZd‡fdd„ Zdd„Z‡ZS)ÚCifarNetr csrtt|ƒ ¡t ddd¡|_t dd¡|_t dtd¡|_ t  tddd¡|_ t  dd¡|_ t  d|¡|_ dS)Néér#r)éxéT)r rUr r+r-r0r/ÚpoolrGr1r2r3r=r4)rr6)rrrr ĞszCifarNet.__init__cCs|| t | |¡¡¡}| t | |¡¡¡}| dtdd¡}t | |¡¡}t | |¡¡}|  |¡}tj |dd�}|S)NrAr#r)r7) rZrKrTr0r1rCrGr3r=r4rL)rrrrrrÙs zCifarNet.forward)r )rrrr rrrr)rrrUÏs rUcCs |jj}| d¡dks$| d¡dkrBtj |j¡tj |j¡nZ| d¡dkrrtj  |jdd¡tj |j¡n*| d¡dkrœtj  |j¡tj |j¡dS)Nr-rAÚConvTranspose2dZ BatchNormgğ?g{®Gáz”?r2) rrÚfindr+ÚinitÚkaiming_uniform_ÚweightÚzeros_r'Únormal_Úxavier_normal_)ÚmÚ classnamerrrÚ init_weightssrecs&eZdZd‡fdd„ Zdd„Z‡ZS) Ú PadPrompterréNc s´tt|ƒ ¡|}|}||_||_||d|_t t  d|j||g¡¡|_ t t  d|j||g¡¡|_ t t  d|j||d|g¡¡|_ t t  d|j||d|g¡¡|_ dS)Nr)r)r rfr ÚargsÚinchanelÚ base_sizer+Ú Parameterr8ÚrandnÚpad_upÚpad_downÚpad_leftÚ 
pad_right)rriÚpad_sizeÚ image_sizerh)rrrr s$zPadPrompter.__init__cCsnt d|j|j|j¡ |jj¡}tj|j||j gdd�}tj|j ||j gdd�}t |  d¡|g¡}||S)NrrV)r7r)r) r8ÚzerosrirjÚtorhÚdeviceÚcatrorprmrnrD)rrrÚpromptrrrr)s  zPadPrompter.forward)rrrgN)rrrr rrrr)rrrfs rfcs&eZdZd ‡fdd„ Zdd„Z‡ZS) ÚLeNeté ér Nc sÌtt|ƒ ¡t tjdddd�t d¡t ¡tjdddd�tjdd�t d¡t ¡¡|_ tj |d d �|_ tj dd�|_ t ||¡|_|j t¡t ||¡|_|d kr¼tjj|jd d �|_|j t¡dS)Nrér#)r$r)é2gà?)ÚpT)ÚaffineÚwnr_)Úname)r rxr r+r,r-r/r.Ú Dropout2dÚ conv_paramsÚ BatchNorm1dÚbnrHÚdropoutr2Ú bottleneckÚapplyrer4ÚutilsÚ weight_norm)rÚ feature_dimZbottleneck_dimr6Ziswn)rrrr 4s"   zLeNet.__init__cCsV| |¡}| | d¡d¡}| |¡}| |¡}| |¡}| |¡}tj|dd�}|S)NrrAr)r7) r‚rCrDr†r„r…r4rKrL)rrrrrrIs     z LeNet.forward)ryrzr N)rrrr rrrr)rrrx3srxcs&eZdZd ‡fdd„ Zd d „Z‡ZS) ÚLSTMNetr)Fçš™™™™™É?réK�r c s`tƒ ¡t |¡|_t |||¡|_tj|||||dd�|_|rJ|dn|}t  ||¡|_ dS)NT)Ú input_sizeÚ hidden_sizeÚ num_layersÚ bidirectionalr…Ú batch_firstr)) r r r+rHr…Ú EmbeddingÚ embeddingÚLSTMÚlstmr2r4) rr?r�r‘r…Ú padding_idxÚ vocab_sizer6Údims)rrrr ss   zLSTMNet.__init__c Cs’|\}}| |¡}tjjj||ddd�}| |¡\}\}}tjjj|dd�\} } t | dd…ddd…f¡} |  | ¡} |  | ¡} t j | dd�} | S)NTF)r’Úenforce_sorted)r’rAr)r7) r”r+rˆÚrnnÚpack_padded_sequencer–Úpad_packed_sequencer8Úrelu_r…r4rKrL) rrÚtextÚ text_lengthsZembeddedZpacked_embeddedZ packed_outputÚhiddenÚcellrZ out_lengthsrrrr‚s   zLSTMNet.forward)r)FrŒrr�r )rrrr rrrr)rrr‹rsr‹cs&eZdZd‡fdd„ Zdd„Z‡ZS) ÚfastTextréK�r cs>tt|ƒ ¡t |||¡|_t ||¡|_t ||¡|_dS)N) r r£r r+r“r”r2r3r4)rr?r—r˜r6)rrrr ˜szfastText.__init__cCs>|\}}| |¡}| | d¡¡}| |¡}tj|dd�}|S)Nr)r7)r”r3Úmeanr4rKrL)rrrŸr Ú embedded_sentÚhÚzrrrrr¤s   zfastText.forward)rr¤r )rrrr rrrr)rrr£—s r£cs:eZdZddddgddddd f‡fd d „ Zd d „Z‡ZS)ÚTextCNNrSrVr@r#r<gš™™™™™é?riK�r c sætt|ƒ ¡t |||¡|_t tj|||dd�t ¡t  ||dd¡¡|_ t tj|||dd�t ¡t  ||dd¡¡|_ t tj|||dd�t ¡t  ||dd¡¡|_ t  |¡|_t |t|ƒ|¡|_dS)Nr)Ú in_channelsÚ out_channelsr$rr))r r©r r+r“r”r,ÚConv1dr.Ú MaxPool1dr0r1Úconv3rHr…r2Úlenr4) rr?Ú num_channelsr$Úmax_lenr…r—r˜r6)rrrr ±s  zTextCNN.__init__c Cs„|\}}| |¡ ddd¡}| 
|¡ d¡}| |¡ d¡}| |¡ d¡}t |||fd¡}| |¡} |  | ¡} t j | dd�} | S)Nrr)r)r7) r”Úpermuter0Úsqueezer1r®r8rvr…r4rKrL) rrrŸr r¦Z conv_out1Z conv_out2Z conv_out3Úall_outZfinal_feature_maprrrrrÏs  zTextCNN.forward)rrrr rrrr)rrr©°sr©cs*eZdZdZd ‡fdd„ Zdd „Z‡ZS) Ú PatchEmbedz{ Image to Patch Embedding CNN_proj + Rearrange: [B, C, W, H] -> [B, D, Pn_W, Pn_H] -> [B, D, Pn] -> [B, Pn, D] éàr rVécsdtƒ ¡t|ƒ}t|ƒ}|d|d|d|d}||_||_||_tj||||d�|_dS)Nrr)r$r&) r r rÚimg_sizeÚ patch_sizeÚ num_patchesr+r-Úproj)rr¸r¹Úin_chansÚ embed_dimrº)rrrr æs  zPatchEmbed.__init__c Cst|j\}}}}||jdkr*||jdksXtd|›d|›d|jd›d|jd›d� ƒ‚| |¡ d¡ dd¡}|S)NrrzInput image size (Ú*z) doesn't match model (z).r))Úshaper¸ÚAssertionErrorr»r9Ú transpose)rrÚBÚCÚHÚWrrrrñs ,zPatchEmbed.forward)r¶r rVr·)rrrÚ__doc__r rrrr)rrrµàs rµcs4eZdZdZddejdf‡fdd„ Zdd„Z‡ZS)ÚFFNz� FFN (from timm) :param in_features: :param hidden_features: :param out_features: :param act_layer: :param drop: NgcsNtƒ ¡|p|}|p|}t ||¡|_|ƒ|_t ||¡|_t |¡|_dS)N) r r r+r2r3r>r=rHÚdrop)rr5Úhidden_featuresÚ out_featuresÚ act_layerrÈ)rrrr s z FFN.__init__cCs6| |¡}| |¡}| |¡}| |¡}| |¡}|S)N)r3r>rÈr=)rrrrrrs      z FFN.forward) rrrrÆr+ÚGELUr rrrr)rrrÇşs rÇcs*eZdZdZd ‡fdd„ Zdd „Z‡ZS) Ú Attentiona� qkv Transform + MSA(MHSA) (from timm) # input x.shape = batch, patch_number, patch_dim # output x.shape = batch, patch_number, patch_dim :param dim: dim=CNN feature dim, because the patch size is 1x1 :param num_heads: :param qkv_bias: :param qk_scale: by default head_dim ** -0.5 (squre root) :param attn_drop: dropout rate after MHSA :param proj_drop: éFNçcsftƒ ¡||_||}|p"|d|_tj||d|d�|_t |¡|_t ||¡|_ t |¡|_ dS)Ngà¿rV)r') r r Ú num_headsÚscaler+r2ÚqkvrHÚ attn_dropr»Ú proj_drop)rr7rĞÚqkv_biasÚqk_scalerÓrÔÚhead_dim)rrrr -s  zAttention.__init__c Cs´|j\}}}| |¡ ||d|j||j¡ ddddd¡}|d|d|d}}}|| dd¡|j} | jdd�} | | ¡} | | dd¡ |||¡}|  |¡}|  |¡}|S) NrVr)rrr@éşÿÿÿrA)r7) r¿rÒÚreshaperĞr²rÁrÑÚsoftmaxrÓr»rÔ) rrÚbatchZ patch_numberZ patch_dimrÒÚqÚkÚvÚattnrrrr:s     zAttention.forward)rÎFNrÏrÏ)rrrrÆr rrrr)rrrÍ s  
rÍcs:eZdZddddddejejf‡fdd„ Zdd„Z‡ZS) ÚBlockg@FNgc sptƒ ¡| |ƒ|_t||||||d�|_|dkr:t|ƒnt ¡|_| |ƒ|_ t ||ƒ} t || | |d�|_ dS)aê # input x.shape = batch, patch_number, patch_dim # output x.shape = batch, patch_number, patch_dim :param dim: dim :param num_heads: :param mlp_ratio: FFN :param qkv_bias: :param qk_scale: by default head_dim ** -0.5 (squre root) :param drop: :param attn_drop: dropout rate after Attention :param drop_path: dropout rate after sd :param act_layer: FFN act :param norm_layer: Pre Norm )rĞrÕrÖrÓrÔg)r5rÉrËrÈN) r r Únorm1rÍrßrr+ÚIdentityÚ drop_pathÚnorm2ÚintrÇÚmlp) rr7rĞÚ mlp_ratiorÕrÖrÈrÓrãrËÚ norm_layerZmlp_hidden_dim)rrrr Ws      zBlock.__init__cCs8|| | | |¡¡¡}|| | | |¡¡¡}|S)N)rãrßrárærä)rrrrrrvsz Block.forward) rrrr+rÌÚ LayerNormr rrrr)rrràUs ràcsReZdZdZddddddddd d d d d ed d f‡fd d „ Zdd„Zdd„Z‡ZS)ÚVisionTransformerz¬ Vision Transformer A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` - https://arxiv.org/abs/2010.11929 r"r rVr rFrÎg@TNgc sXtƒ ¡||_ˆ|_|_d|_ˆp2ttjdd�‰ˆp<tj ‰||||ˆd�|_ |j j }t  t  ddˆ¡¡|_t  t  d||jˆ¡¡|_tjˆd�|_dd„t  d| |¡Dƒ‰tj‡‡‡‡‡‡‡‡‡f d d„t|ƒDƒ�|_ˆˆƒ|_| �r| |_t td t ˆ| ¡fd t ¡fgƒ¡|_n t ¡|_|jdk�rDt |j|j¡nt ¡|_d |_d S) aš Args: img_size (int, tuple): input image size patch_size (int, tuple): patch size in_chans (int): number of input channels num_classes (int): number of classes for classification head embed_dim (int): embedding dimension depth (int): depth of transformer num_heads (int): number of attention heads mlp_ratio (int): ratio of mlp hidden dim to embedding dim qkv_bias (bool): enable bias for qkv if True representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set drop_rate (float): dropout rate attn_drop_rate (float): attention dropout rate drop_path_rate (float): stochastic depth rate embed_layer (nn.Module): patch embedding layer norm_layer: (nn.Module): normalization layer rg�íµ ÷ư>)Úeps)r¸r¹r¼r½)r}cSsg|] }| ¡‘qSr)Úitem)Ú.0rrrrú 
<listcomp>®sz.VisionTransformer.__init__.<locals>.<listcomp>rc s*g|]"}tˆˆˆˆˆˆˆ|ˆˆd� ‘qS)) r7rĞrçrÕrÈrÓrãrèrË)rà)ríÚi) rËÚattn_drop_rateÚdprÚ drop_rater½rçrèrĞrÕrrrî²sr4r>N)r r r6Ú num_featuresr½Z num_tokensrr+rérÌÚ patch_embedrºrkr8rsÚ cls_tokenÚ pos_embedrHÚpos_dropÚlinspacer,ÚrangeÚblocksÚnormrr2ÚTanhÚ pre_logitsrâr4Z head_dist)rr¸r¹r¼r6r½ÚdepthrĞrçrÕÚrepresentation_sizeròrğZdrop_path_rateZ embed_layerrèrËrº)r) rËrğrñròr½rçrèrĞrÕrr †s2     &zVisionTransformer.__init__cCsZ| |¡}|j |jddd¡}tj||fdd�}| ||j¡}| |¡}|  |¡}|S)NrrAr)r7) rôrõÚexpandr¿r8rvr÷rörúrû)rrrõrrrÚforward_featuresÇs   z"VisionTransformer.forward_featurescCs.| |¡}| |dd…df¡}| |¡}|S)Nr)rrır4)rrrrrrÒs  zVisionTransformer.forward) rrrrÆrµr rrrrr)rrrês   ? rê),r8Útorch.nnr+Útorch.nn.functionalÚ functionalrKÚmathÚloggingÚ functoolsrÚ collectionsrÚnumpyÚnpZ timm.datarrZtimm.models.layersrrrr rGÚModuler rrrr:rErNrQrUrerfrxr‹r£r©rµrÇrÍràrêrrrrÚ<module>s<    0( @ ?%0"5*
19,905
Python
.py
200
96.485
658
0.361362
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,745
alexnet.cpython-37.pyc
hkgdifyu_pFedPT/system/flcore/trainmodel/__pycache__/alexnet.cpython-37.pyc
B Ê:cc) ã@sPddlZddlmZddlmZddgZddiZGdd„dejƒZd d d„Z dS) éN)Úload_state_dict_from_urlÚAlexNetÚalexnetz<https://download.pytorch.org/models/alexnet-owt-4df8aa71.pthcs&eZdZd‡fdd„ Zdd„Z‡ZS)réècstt|ƒ ¡t tjdddddd�tjdd�tjddd �tjdd d dd �tjdd�tjddd �tjd d ddd �tjdd�tjd dddd �tjdd�tjddddd �tjdd�tjddd �¡ |_t  d¡|_ t t  ¡t  dd¡tjdd�t  ¡t  dd¡tjdd�¡|_ t  d|¡|_dS)Néé@é éé)Ú kernel_sizeÚstrideÚpaddingT)Úinplace)r r éÀé)r r i€éé)éri$i)ÚsuperrÚ__init__ÚnnÚ SequentialÚConv2dÚReLUÚ MaxPool2dÚfeaturesÚAdaptiveAvgPool2dÚavgpoolÚDropoutÚLinearÚ classifierÚfc)ÚselfÚ num_classes)Ú __class__©úG/root/autodl-tmp/PFL-Non-IID-master/system/flcore/trainmodel/alexnet.pyrs0           zAlexNet.__init__cCs8| |¡}| |¡}t |d¡}| |¡}| |¡}|S)Nr)rrÚtorchÚflattenr r!)r"Úxr%r%r&Úforward+s      zAlexNet.forward)r)Ú__name__Ú __module__Ú __qualname__rr*Ú __classcell__r%r%)r$r&r sFTcKshtf|�}|rdttd|d�}i}x6| ¡D]*\}}d|krF|||<q,||| dd¡<q,W| |¡|S)aAlexNet model architecture from the `"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr r)Úprogressz classifier.6r!)rrÚ model_urlsÚitemsÚreplaceÚload_state_dict)Ú pretrainedr/ÚkwargsÚmodelÚ state_dictÚnew_dictÚkÚvr%r%r&r4s   )FT) r'Útorch.nnrZ torch.hubrÚ__all__r0ÚModulerrr%r%r%r&Ú<module>s   '
2,152
Python
.py
33
63.30303
295
0.470505
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,746
bilstm.cpython-38.pyc
hkgdifyu_pFedPT/system/flcore/trainmodel/__pycache__/bilstm.cpython-38.pyc
U ”jfcÁã@s*ddlZddlmZGdd„dejƒZdS)éN)Únncs.eZdZd ‡fdd„ Zdd„Zdd„Z‡ZS) ÚBiLSTM_TextClassificationFNc sútt|ƒ ¡||_||_||_||_||_||_||_ | |_ ||_ | dk rbt j  t | ¡¡|_nt   |j|j ¡|_t j|jd�|_|j r´t  |jd|jd¡|_t j|j d�|_t j|j |j|j|dd�|_t j|jd�|_t  |jd|j¡|_dS)N)ÚpééT)ÚdropoutÚ bidirectional)ÚsuperrÚ__init__Ú input_sizeÚ hidden_sizeÚ output_sizeÚ num_layersÚembedding_dropoutÚ lstm_dropoutÚattention_dropoutÚ attentionÚembedding_lengthrÚ EmbeddingÚfrom_pretrainedÚtorchÚtensorÚword_embeddingsÚDropoutÚembedding_dropout_layerÚLinearÚattention_layerÚattention_dropout_layerÚLSTMÚ lstm_layerÚlstm_dropout_layerÚ output_layer) Úselfr r r rrrrrrZembedding_weights©Ú __class__©õKD:\京东\promot\cifar\cifar\Cifar10_iid\system\flcore\trainmodel\bilstm.pyr s,ÿz"BiLSTM_TextClassification.__init__c Cs¨| d¡}t ||¡ d¡}g}t|ƒD]N\}}t ||d|…d¡} t |  d¡||d|…dd…f¡} | | ¡q(t t |d¡|fd¡} |  | ¡} |  | ¡} | S)Nrré) Ú unsqueezerÚbmmÚsqueezeÚ enumerateÚsoftmaxÚmatmulÚappendÚcatrr) r"Z lstm_outputÚstateÚseq_lensÚhiddenZ attn_weightsZ new_hiddensÚiÚseq_lenZsoft_attn_weightsZ new_hiddenZ concat_hiddenZ output_hiddenr%r%r&Úattention_forward"s $   z+BiLSTM_TextClassification.attention_forwardc sê|\}}t|ƒ}| |¡}| |¡}t |jd||jf¡jdd�}t |jd||jf¡jdd�}| ddd¡}|  |||f¡\‰\}}ˆ ddd¡‰tj ‡fdd„t |ƒDƒdd�} |  | ¡} |j rØ| ˆ| |¡‰n| ‰| ˆ¡} | S) NrÚcuda)Údevicer'rcs,g|]$\}}ˆ||ddd…f d¡‘qS)r'Nr)r()Ú.0r3r4©Úoutputr%r&Ú <listcomp>Jsz5BiLSTM_TextClassification.forward.<locals>.<listcomp>)Údim)ÚlenrrrÚzerosrr ÚtoÚpermuterr/r+r rr5r!) r"ÚxZ input_seqr1Ú batch_sizeZh_0Zc_0Zfinal_hidden_stateZfinal_cell_stater0Úlogitsr%r9r&Úforward8s        z!BiLSTM_TextClassification.forward)FN)Ú__name__Ú __module__Ú __qualname__r r5rDÚ __classcell__r%r%r#r&rs ÿr)rrÚModulerr%r%r%r&Ú<module>s 
2,695
Python
.py
30
88.766667
247
0.46099
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,747
mobilenet_v2.cpython-38.pyc
hkgdifyu_pFedPT/system/flcore/trainmodel/__pycache__/mobilenet_v2.cpython-38.pyc
U ”jfcã@svddlmZddlmZddgZddiZddd„ZGd d „d ejƒZGd d „d ej ƒZ Gd d„dej ƒZ ddd„Z dS)é)Únn)Úload_state_dict_from_urlÚ MobileNetV2Ú mobilenet_v2z=https://download.pytorch.org/models/mobilenet_v2-b0353104.pthNcCsB|dkr |}t|t||dƒ||ƒ}|d|kr>||7}|S)aD This function is taken from the original tf repo. It ensures that all layers have a channel number that is divisible by 8 It can be seen here: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py :param v: :param divisor: :param min_value: :return: NégÍÌÌÌÌÌì?)ÚmaxÚint)ÚvÚdivisorÚ min_valueÚnew_v©r õQD:\京东\promot\cifar\cifar\Cifar10_iid\system\flcore\trainmodel\mobilenet_v2.pyÚ_make_divisible s  rcseZdZd‡fdd„ Z‡ZS)Ú ConvBNReLUééNc sR|dd}|dkrtj}tt|ƒ tj||||||dd�||ƒtjdd�¡dS)NrrF)ÚgroupsÚbiasT)Úinplace)rÚ BatchNorm2dÚsuperrÚ__init__ÚConv2dÚReLU6)ÚselfÚ in_planesÚ out_planesÚ kernel_sizeÚstriderÚ norm_layerÚpadding©Ú __class__r rr"s   ýzConvBNReLU.__init__)rrrN)Ú__name__Ú __module__Ú __qualname__rÚ __classcell__r r r"rr!srcs&eZdZd‡fdd„ Zdd„Z‡ZS)ÚInvertedResidualNc s¸tt|ƒ ¡||_|dks t‚|dkr.tj}tt||ƒƒ}|jdkoN||k|_ g}|dkrt|  t ||d|d�¡|  t |||||d�tj ||ddddd�||ƒg¡tj|Ž|_dS)N)rrr©rr )rrr rF)r)rr(rrÚAssertionErrorrrrÚroundÚuse_res_connectÚappendrÚextendrÚ SequentialÚconv)rÚinpÚouprÚ expand_ratior Ú hidden_dimÚlayersr"r rr.s  ûzInvertedResidual.__init__cCs"|jr|| |¡S| |¡SdS©N)r,r0©rÚxr r rÚforwardFszInvertedResidual.forward)N)r$r%r&rr9r'r r r"rr(-sr(cs.eZdZd ‡fdd„ Zdd„Zd d „Z‡ZS) réèçð?Néc s0tt|ƒ ¡|dkrt}|dkr(tj}d}d}|dkr‚ddddgddddgddd dgdd d dgdd d dgdd d dgddddgg}t|ƒdksžt|dƒd kr¬td |¡ƒ‚t |||ƒ}t |t d|ƒ|ƒ|_ t d |d|d�g} |D]X\} } } } t | ||ƒ}t | ƒD]4}|dk�r| nd}|  ||||| |d�¡|}�qqæ|  t ||j d|d�¡tj| Ž|_t d¡|_t |j |¡|_| ¡D]¢}t|tjƒ�rÈtjj|jdd�|jdk �r(tj |j¡n`t|tjtjfƒ�rútj |j¡tj |j¡n.t|tjƒ�rˆtj |jdd¡tj |j¡�qˆdS)aA MobileNet V2 main class Args: num_classes (int): Number of classes width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount inverted_residual_setting: Network structure 
round_nearest (int): Round the number of channels in each layer to be a multiple of this number Set to 1 to turn off rounding block: Module specifying inverted residual building block for mobilenet norm_layer: Module specifying the normalization layer to use Né irééérré@éé`é i@rzIinverted_residual_setting should be non-empty or a 4-element list, got {}r;)rr )r3r r)gš™™™™™É?Úfan_out)Úmodeg{®Gáz„?) rrrr(rrÚlenÚ ValueErrorÚformatrrÚ last_channelrÚranger-r/ÚfeaturesÚDropoutÚdropoutÚLinearÚfcÚmodulesÚ isinstancerÚinitÚkaiming_normal_ÚweightrÚzeros_Ú GroupNormÚones_Únormal_)rÚ num_classesÚ width_multÚinverted_residual_settingÚ round_nearestÚblockr Ú input_channelrJrLÚtÚcÚnÚsÚoutput_channelÚirÚmr"r rrNsX       ø ÿ      zMobileNetV2.__init__cCs>| |¡}tj |d¡ |jdd¡}| |¡}| |¡}|S)Nrréÿÿÿÿ)rLrÚ functionalÚadaptive_avg_pool2dÚreshapeÚshaperNrPr7r r rÚ _forward_implŸs    zMobileNetV2._forward_implcCs | |¡Sr6)rlr7r r rr9©szMobileNetV2.forward)r:r;Nr<NN)r$r%r&rrlr9r'r r r"rrMsúQ FTcKsdtf|Ž}|r`ttd|d�}i}| ¡D]*\}}d|krD|||<q*||| dd¡<q*| |¡|S)aC Constructs a MobileNetV2 architecture from `"MobileNetV2: Inverted Residuals and Linear Bottlenecks" <https://arxiv.org/abs/1801.04381>`_. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr r)ÚprogressÚ classifierz classifier.1rP)rrÚ model_urlsÚitemsÚreplaceÚload_state_dict)Ú pretrainedrmÚkwargsÚmodelÚ state_dictÚnew_dictÚkr r r rr­s ÿ  )N)FT) ÚtorchrZ torch.hubrÚ__all__rorr/rÚModuler(rrr r r rÚ<module>s  ÿ   `
5,683
Python
.py
70
77.585714
401
0.472287
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,748
models.cpython-39.pyc
hkgdifyu_pFedPT/system/flcore/trainmodel/__pycache__/models.cpython-39.pyc
a °“bcZgã@säddlZddlmZddlmmZddlZddlZddlm Z ddl m Z ddl Z ddlmZmZddlmZmZmZmZdZGdd„dejƒZGd d „d ejƒZGd d „d ejƒZGd d„dejƒZGdd„dejƒZGdd„dejƒZGdd„dejƒZGdd„dejƒZGdd„dejƒZ dd„Z!Gdd„dejƒZ"Gdd„dejƒZ#Gdd „d ejƒZ$Gd!d"„d"ejƒZ%Gd#d$„d$ejƒZ&Gd%d&„d&ejƒZ'Gd'd(„d(ejƒZ(Gd)d*„d*ejƒZ)Gd+d,„d,ejƒZ*Gd-d.„d.ejƒZ+dS)/éN)Úpartial)Ú OrderedDict)ÚIMAGENET_DEFAULT_MEANÚIMAGENET_DEFAULT_STD)Ú StdConv2dSameÚDropPathÚ to_2tupleÚ trunc_normal_écs$eZdZ‡fdd„Zdd„Z‡ZS)Ú LocalModelcstt|ƒ ¡||_||_dS©N)Úsuperr Ú__init__ÚbaseÚ predictor)Úselfrr©Ú __class__©úr/media/sim812/391e55df-b6f2-4fe9-a920-53434a8506fa/lgh/pdept/PFL-Non-IID-master/system/flcore/trainmodel/models.pyrszLocalModel.__init__cCs| |¡}| |¡}|Sr )rr©rÚxÚoutrrrÚforwards  zLocalModel.forward©Ú__name__Ú __module__Ú __qualname__rrÚ __classcell__rrrrr s r cs$eZdZ‡fdd„Zdd„Z‡ZS)Ú LocalModel_ptcstt|ƒ ¡||_||_dSr )r rrÚ generatorr)rr rrrrr#szLocalModel_pt.__init__cCs| |¡}| |¡}|Sr )r rrrrrr)s  zLocalModel_pt.forwardrrrrrr"s rcs$eZdZ‡fdd„Zdd„Z‡ZS)ÚLocalModel_repptcs$tt|ƒ ¡||_||_||_dSr )r r!rr rr)rr rrrrrr0szLocalModel_reppt.__init__cCs"| |¡}| |¡}| |¡}|Sr )r rrrrrrr7s   zLocalModel_reppt.forwardrrrrrr!/s r!cs&eZdZd‡fdd„ Zdd„Z‡ZS) Ú FedAvgCNNéé éc s�tƒ ¡t tj|dddddd�tjdd�tjdd �¡|_t tjdd ddddd�tjdd�tjdd �¡|_t t  |d ¡tjdd�¡|_ t  d |¡|_ dS) Né érr#T)Ú kernel_sizeÚpaddingÚstrideÚbias©Úinplace)ér.©r(é@i) r rÚnnÚ SequentialÚConv2dÚReLUÚ MaxPool2dÚconv1Úconv2ÚLinearÚfc1Úfc)rÚ in_featuresÚ num_classesÚdimrrrr`s8 û  ø û  ø   şzFedAvgCNN.__init__cCs8| |¡}| |¡}t |d¡}| |¡}| |¡}|S)Nr#)r6r7ÚtorchÚflattenr9r:rrrrr|s      zFedAvgCNN.forward)r#r$r%rrrrrr"_sr"cs&eZdZd‡fdd„ Zdd„Z‡ZS) Ú FedAvgMLPér$éÈcs8tƒ ¡t ||¡|_t ||¡|_tjdd�|_dS)NTr,)r rr1r8r9Úfc2r4Úact)rr;r<Ú hidden_dimrrrrˆs zFedAvgMLP.__init__cCs:|jdkr| | d¡d¡}| | |¡¡}| |¡}|S)Néréÿÿÿÿ)ÚndimÚviewÚsizerDr9rC©rrrrrr�s   zFedAvgMLP.forward)rAr$rBrrrrrr@‡sr@cs$eZdZ‡fdd„Zdd„Z‡ZS)ÚNetcsjtt|ƒ ¡t dtdd¡|_t tddd¡|_t d¡|_ t d¡|_ t  dd¡|_ t  dd¡|_ dS) Nr#r.r&gĞ?çà?iHé€r$)r rLrr1r3Ú 
batch_sizer6r7ÚDropoutÚdropout1Údropout2r8r9r:)rrrrr˜s  z Net.__init__cCs�| |¡}t ¡|ƒ}t dd¡|ƒ}| |¡}| |¡}t ¡|ƒ}t dd¡|ƒ}| |¡}t |d¡}|  |¡}t ¡|ƒ}|  |¡}t j |dd�}|S)Nr.r#©r=) r6r1r4r5rQr7rRr>r?r9r:ÚFÚ log_softmax©rrÚoutputrrrr¡s          z Net.forwardrrrrrrL—s rLcs&eZdZd‡fdd„ Zdd„Z‡ZS)Ú Mclr_LogisticrAr$cs tt|ƒ ¡t ||¡|_dSr )r rXrr1r8r:)rÚ input_dimr<rrrr´szMclr_Logistic.__init__cCs(t |d¡}| |¡}tj|dd�}|S©Nr#rS)r>r?r:rTrUrVrrrr¸s  zMclr_Logistic.forward)rAr$rrrrrrX³srXcs&eZdZd‡fdd„ Zdd„Z‡ZS) ÚDNNrAédr$cs.tt|ƒ ¡t ||¡|_t ||¡|_dSr )r r[rr1r8r9r:)rrYZmid_dimr<rrrrÁsz DNN.__init__cCs8t |d¡}t | |¡¡}| |¡}tj|dd�}|SrZ)r>r?rTÚrelur9r:rUrKrrrrÆs   z DNN.forward)rAr\r$rrrrrr[Àsr[cs&eZdZd‡fdd„ Zdd„Z‡ZS)ÚCifarNetr$csrtt|ƒ ¡t ddd¡|_t dd¡|_t dtd¡|_ t  tddd¡|_ t  dd¡|_ t  d|¡|_ dS)Néér'r.éxéT)r r^rr1r3r6r5ÚpoolrOr7r8r9rCr:)rr<rrrrĞszCifarNet.__init__cCs|| t | |¡¡¡}| t | |¡¡¡}| dtdd¡}t | |¡¡}t | |¡¡}|  |¡}tj |dd�}|S)NrGr'r#rS) rcrTr]r6r7rIrOr9rCr:rUrKrrrrÙs zCifarNet.forward)r$rrrrrr^Ïs r^cCs |jj}| d¡dks$| d¡dkrBtj |j¡tj |j¡nZ| d¡dkrrtj  |jdd¡tj |j¡n*| d¡dkrœtj  |j¡tj |j¡dS)Nr3rGÚConvTranspose2dZ BatchNormgğ?g{®Gáz”?r8) rrÚfindr1ÚinitÚkaiming_uniform_ÚweightÚzeros_r+Únormal_Úxavier_normal_)ÚmÚ classnamerrrÚ init_weightssrncs&eZdZd‡fdd„ Zdd„Z‡ZS) Ú PadPrompterr#éNc s´tt|ƒ ¡|}|}||_||_||d|_t t  d|j||g¡¡|_ t t  d|j||g¡¡|_ t t  d|j||d|g¡¡|_ t t  d|j||d|g¡¡|_ dS)Nr.r#)r rorÚargsÚinchanelÚ base_sizer1Ú Parameterr>ÚrandnÚpad_upÚpad_downÚpad_leftÚ pad_right)rrrÚpad_sizeÚ image_sizerqrrrrs$zPadPrompter.__init__cCsnt d|j|j|j¡ |jj¡}tj|j||j gdd�}tj|j ||j gdd�}t |  d¡|g¡}||S)Nr#r_rSr.r) r>ÚzerosrrrsÚtorqÚdeviceÚcatrxryrvrwrJ)rrrÚpromptrrrr)s  zPadPrompter.forward)r#r#rpNrrrrrros rocs&eZdZd ‡fdd„ Zdd„Z‡ZS) ÚLeNeté ér$Nc sÌtt|ƒ ¡t tjdddd�t d¡t ¡tjdddd�tjdd�t d¡t ¡¡|_ tj |d d �|_ tj dd�|_ t ||¡|_|j t¡t ||¡|_|d kr¼tjj|jd d �|_|j t¡dS)Nr#ér'r/r.é2rM©ÚpT)ÚaffineÚwnrh)Úname)r r�rr1r2r3r5r4Ú Dropout2dÚ conv_paramsÚ BatchNorm1dÚbnrPÚdropoutr8Ú bottleneckÚapplyrnr:ÚutilsÚ weight_norm)rÚ 
feature_dimZbottleneck_dimr<Ziswnrrrr4s$ ù  zLeNet.__init__cCsV| |¡}| | d¡d¡}| |¡}| |¡}| |¡}| |¡}tj|dd�}|S©NrrGr#rS) rŒrIrJr�r�r�r:rTrUrKrrrrIs     z LeNet.forward)r‚rƒr$Nrrrrrr�3sr�cs&eZdZd ‡fdd„ Zd d „Z‡ZS) ÚLSTMNetr.Fçš™™™™™É?réK�r$c s`tƒ ¡t |¡|_t |||¡|_tj|||||dd�|_|rJ|dn|}t  ||¡|_ dS)NT)Ú input_sizeÚ hidden_sizeÚ num_layersÚ bidirectionalr�Ú batch_firstr.) r rr1rPr�Ú EmbeddingÚ embeddingÚLSTMÚlstmr8r:) rrEr›rœr�Ú padding_idxÚ vocab_sizer<Údimsrrrrss  ûzLSTMNet.__init__c Cs’|\}}| |¡}tjjj||ddd�}| |¡\}\}}tjjj|dd�\} } t | dd…ddd…f¡} |  | ¡} |  | ¡} t j | dd�} | S)NTF)r�Úenforce_sorted)r�rGr#rS) rŸr1r’ÚrnnÚpack_padded_sequencer¡Úpad_packed_sequencer>Úrelu_r�r:rTrU) rrÚtextÚ text_lengthsZembeddedZpacked_embeddedZ packed_outputÚhiddenÚcellrZ out_lengthsrrrr‚s   zLSTMNet.forward)r.Fr—rr˜r$rrrrrr–rsÿr–cs&eZdZd‡fdd„ Zdd„Z‡ZS) ÚfastTextrr˜r$cs>tt|ƒ ¡t |||¡|_t ||¡|_t ||¡|_dSr ) r r®rr1r�rŸr8r9r:)rrEr¢r£r<rrrr˜szfastText.__init__cCs>|\}}| |¡}| | d¡¡}| |¡}tj|dd�}|SrZ)rŸr9Úmeanr:rTrU)rrrªr«Ú embedded_sentÚhÚzrrrrr¤s   zfastText.forward)rr˜r$rrrrrr®—s r®cs8eZdZdgd¢dddddf‡fdd „ Zd d „Z‡ZS) ÚTextCNNr\)r_rFr'rBgš™™™™™é?rr˜r$c sætt|ƒ ¡t |||¡|_t tj|||dd�t ¡t  ||dd¡¡|_ t tj|||dd�t ¡t  ||dd¡¡|_ t tj|||dd�t ¡t  ||dd¡¡|_ t  |¡|_t |t|ƒ|¡|_dS)Nr)Ú in_channelsÚ out_channelsr(r#r.)r r³rr1r�rŸr2ÚConv1dr4Ú MaxPool1dr6r7Úconv3rPr�r8Úlenr:) rrEÚ num_channelsr(Úmax_lenr�r¢r£r<rrrr±s&ııı zTextCNN.__init__c Cs„|\}}| |¡ ddd¡}| |¡ d¡}| |¡ d¡}| |¡ d¡}t |||fd¡}| |¡} |  | ¡} t j | dd�} | S)Nrr.r#rS) rŸÚpermuter6Úsqueezer7r¸r>rr�r:rTrU) rrrªr«r°Z conv_out1Z conv_out2Z conv_out3Zall_outZfinal_feature_maprrrrrÏs  zTextCNN.forwardrrrrrr³°s ÿr³cs*eZdZdZd ‡fdd„ Zdd „Z‡ZS) Ú PatchEmbedz{ Image to Patch Embedding CNN_proj + Rearrange: [B, C, W, H] -> [B, D, Pn_W, Pn_H] -> [B, D, Pn] -> [B, Pn, D] éàr r_écsdtƒ ¡t|ƒ}t|ƒ}|d|d|d|d}||_||_||_tj||||d�|_dS)Nr#r)r(r*) r rrÚimg_sizeÚ patch_sizeÚ num_patchesr1r3Úproj)rrÁrÂÚin_chansÚ embed_dimrÃrrrræs  zPatchEmbed.__init__c Cst|j\}}}}||jdkr*||jdksXJd|›d|›d|jd›d|jd›d� ƒ‚| 
|¡ d¡ dd¡}|S)Nrr#zInput image size (Ú*z) doesn't match model (z).r.)ÚshaperÁrÄr?Ú transpose)rrÚBÚCÚHÚWrrrrñs (ÿzPatchEmbed.forward)r¿r r_rÀ©rrrÚ__doc__rrrrrrrr¾às r¾cs4eZdZdZddejdf‡fdd„ Zdd„Z‡ZS)ÚFFNz� FFN (from timm) :param in_features: :param hidden_features: :param out_features: :param act_layer: :param drop: NçcsNtƒ ¡|p|}|p|}t ||¡|_|ƒ|_t ||¡|_t |¡|_dSr ) r rr1r8r9rDrCrPÚdrop)rr;Úhidden_featuresÚ out_featuresÚ act_layerrÒrrrrs z FFN.__init__cCs6| |¡}| |¡}| |¡}| |¡}| |¡}|Sr )r9rDrÒrCrKrrrrs      z FFN.forward) rrrrÏr1ÚGELUrrrrrrrrĞşs  rĞcs*eZdZdZd ‡fdd„ Zdd „Z‡ZS) Ú Attentiona� qkv Transform + MSA(MHSA) (from timm) # input x.shape = batch, patch_number, patch_dim # output x.shape = batch, patch_number, patch_dim :param dim: dim=CNN feature dim, because the patch size is 1x1 :param num_heads: :param qkv_bias: :param qk_scale: by default head_dim ** -0.5 (squre root) :param attn_drop: dropout rate after MHSA :param proj_drop: éFNrÑcsftƒ ¡||_||}|p"|d|_tj||d|d�|_t |¡|_t ||¡|_ t |¡|_ dS)Ngà¿r_)r+) r rÚ num_headsÚscaler1r8ÚqkvrPÚ attn_droprÄÚ proj_drop)rr=rÙÚqkv_biasÚqk_scalerÜrİÚhead_dimrrrr-s  zAttention.__init__c Cs´|j\}}}| |¡ ||d|j||j¡ ddddd¡}|d|d|d}}}|| dd¡|j} | jdd�} | | ¡} | | dd¡ |||¡}|  |¡}|  |¡}|S) Nr_r.rr#rFéşÿÿÿrGrS) rÈrÛÚreshaperÙr¼rÉrÚÚsoftmaxrÜrÄrİ) rrÚbatchZ patch_numberZ patch_dimrÛÚqÚkÚvÚattnrrrr:s ÿ ÿ    zAttention.forward)rØFNrÑrÑrÎrrrrr× s  r×cs:eZdZddddddejejf‡fdd„ Zdd„Z‡ZS) ÚBlockç@FNrÑc sptƒ ¡| |ƒ|_t||||||d�|_|dkr:t|ƒnt ¡|_| |ƒ|_ t ||ƒ} t || | |d�|_ dS)aê # input x.shape = batch, patch_number, patch_dim # output x.shape = batch, patch_number, patch_dim :param dim: dim :param num_heads: :param mlp_ratio: FFN :param qkv_bias: :param qk_scale: by default head_dim ** -0.5 (squre root) :param drop: :param attn_drop: dropout rate after Attention :param drop_path: dropout rate after sd :param act_layer: FFN act :param norm_layer: Pre Norm )rÙrŞrßrÜrİrÑ)r;rÓrÕrÒN) r rÚnorm1r×rèrr1ÚIdentityÚ drop_pathÚnorm2ÚintrĞÚmlp) rr=rÙÚ mlp_ratiorŞrßrÒrÜrírÕÚ 
norm_layerZmlp_hidden_dimrrrrWs   ÿ  zBlock.__init__cCs8|| | | |¡¡¡}|| | | |¡¡¡}|Sr )rírèrërğrîrKrrrrvsz Block.forward) rrrr1rÖÚ LayerNormrrrrrrrréUs  ÿrécsReZdZdZddddddddd d d d d ed d f‡fd d „ Zdd„Zdd„Z‡ZS)ÚVisionTransformerz¬ Vision Transformer A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` - https://arxiv.org/abs/2010.11929 r&r r_r$rÀé rêTNrÑc sXtƒ ¡||_ˆ|_|_d|_ˆp2ttjdd�‰ˆp<tj ‰||||ˆd�|_ |j j }t  t  ddˆ¡¡|_t  t  d||jˆ¡¡|_tjˆd�|_dd„t  d| |¡Dƒ‰tj‡‡‡‡‡‡‡‡‡f d d„t|ƒDƒ�|_ˆˆƒ|_| �r| |_t td t ˆ| ¡fd t ¡fgƒ¡|_n t ¡|_|jdk�rDt |j|j¡nt ¡|_d |_d S) aš Args: img_size (int, tuple): input image size patch_size (int, tuple): patch size in_chans (int): number of input channels num_classes (int): number of classes for classification head embed_dim (int): embedding dimension depth (int): depth of transformer num_heads (int): number of attention heads mlp_ratio (int): ratio of mlp hidden dim to embedding dim qkv_bias (bool): enable bias for qkv if True representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set drop_rate (float): dropout rate attn_drop_rate (float): attention dropout rate drop_path_rate (float): stochastic depth rate embed_layer (nn.Module): patch embedding layer norm_layer: (nn.Module): normalization layer r#g�íµ ÷ư>)Úeps)rÁrÂrÅrÆr†cSsg|] }| ¡‘qSr)Úitem)Ú.0rrrrÚ <listcomp>®óz.VisionTransformer.__init__.<locals>.<listcomp>rc s*g|]"}tˆˆˆˆˆˆˆ|ˆˆd� ‘qS)) r=rÙrñrŞrÒrÜríròrÕ)ré)røÚi© rÕÚattn_drop_rateZdprÚ drop_raterÆrñròrÙrŞrrrù±sş  ÿr:rDN)r rr<Ú num_featuresrÆZ num_tokensrr1rórÖÚ patch_embedrÃrtr>r|Ú cls_tokenÚ pos_embedrPÚpos_dropÚlinspacer2ÚrangeÚblocksÚnormrr8ÚTanhÚ pre_logitsrìÚheadZ head_dist)rrÁrÂrÅr<rÆÚdepthrÙrñrŞÚrepresentation_sizerşrıZdrop_path_rateZ embed_layerròrÕrÃrrürr†s6   ÿı   ş  &zVisionTransformer.__init__cCsZ| |¡}|j |jddd¡}tj||fdd�}| ||j¡}| |¡}|  |¡}|Sr•) rrÚexpandrÈr>rrrrr)rrrrrrÚforward_featuresÇs   z"VisionTransformer.forward_featurescCs.| |¡}| 
|dd…df¡}| |¡}|S)Nr)rr r rKrrrrÒs  zVisionTransformer.forward) rrrrÏr¾rrrrrrrrrôs  şA rô),r>Útorch.nnr1Útorch.nn.functionalÚ functionalrTÚmathÚloggingÚ functoolsrÚ collectionsrÚnumpyÚnpZ timm.datarrZtimm.models.layersrrrr rOÚModuler rr!r"r@rLrXr[r^rnror�r–r®r³r¾rĞr×rérôrrrrÚ<module>s<    0( @ ?%0"5*
19,577
Python
.py
207
91.603865
633
0.364153
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,749
vit-checkpoint.py
hkgdifyu_pFedPT/system/flcore/trainmodel/.ipynb_checkpoints/vit-checkpoint.py
import torch from torch import nn from einops import rearrange, repeat from einops.layers.torch import Rearrange # helpers def pair(t): return t if isinstance(t, tuple) else (t, t) # classes class PreNorm(nn.Module): def __init__(self, dim, fn): super().__init__() self.norm = nn.LayerNorm(dim) self.fn = fn def forward(self, x, **kwargs): return self.fn(self.norm(x), **kwargs) class FeedForward(nn.Module): def __init__(self, dim, hidden_dim, dropout = 0.): super().__init__() self.net = nn.Sequential( nn.Linear(dim, hidden_dim), nn.GELU(), nn.Dropout(dropout), nn.Linear(hidden_dim, dim), nn.Dropout(dropout) ) def forward(self, x): return self.net(x) class Attention(nn.Module): def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.1): super().__init__() inner_dim = dim_head * heads project_out = not (heads == 1 and dim_head == dim) self.heads = heads self.scale = dim_head ** -0.5 self.attend = nn.Softmax(dim = -1) self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False) self.to_out = nn.Sequential( nn.Linear(inner_dim, dim), nn.Dropout(dropout) ) if project_out else nn.Identity() def forward(self, x): qkv = self.to_qkv(x).chunk(3, dim = -1) q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), qkv) dots = torch.matmul(q, k.transpose(-1, -2)) * self.scale attn = self.attend(dots) out = torch.matmul(attn, v) # print("out: ", out) # print("QKV output dimension: ",out.shape) # for i in range(8): # print("Head "+str(i+1)+": ") # print("Total mean: ",torch.mean(out[0][i])) # print("QKV output: ",out[0][i]) # print("Isinstance mean: ",torch.mean(out[0][i],dim=0)) # print("Whole activation: ",out) out = rearrange(out, 'b h n d -> b n (h d)') return self.to_out(out) class Transformer(nn.Module): def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout = 0.): super().__init__() self.layers = nn.ModuleList([]) for _ in range(depth): self.layers.append(nn.ModuleList([ PreNorm(dim, Attention(dim, heads = heads, dim_head = dim_head, dropout 
= dropout)), PreNorm(dim, FeedForward(dim, mlp_dim, dropout = dropout)) ])) def forward(self, x): ll = 0 for attn, ff in self.layers: ll += 1 x = attn(x) + x x = ff(x) + x return x class ViT(nn.Module): def __init__(self, *, image_size, patch_size, num_classes, dim, depth, heads, mlp_dim, pool = 'cls', channels = 3, dim_head = 64, dropout = 0., emb_dropout = 0.): super().__init__() image_height, image_width = pair(image_size) patch_height, patch_width = pair(patch_size) assert image_height % patch_height == 0 and image_width % patch_width == 0, 'Image dimensions must be divisible by the patch size.' num_patches = (image_height // patch_height) * (image_width // patch_width) patch_dim = channels * patch_height * patch_width assert pool in {'cls', 'mean'}, 'pool type must be either cls (cls token) or mean (mean pooling)' self.to_patch_embedding = nn.Sequential( Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = patch_height, p2 = patch_width), nn.Linear(patch_dim, dim), ) self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, dim)) self.cls_token = nn.Parameter(torch.randn(1, 1, dim)) self.dropout = nn.Dropout(emb_dropout) self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim, dropout) self.pool = pool self.to_latent = nn.Identity() self.norm = nn.LayerNorm(dim) self.fc = nn.Linear(dim, num_classes) def forward(self, img): x = self.to_patch_embedding(img) b, n, _ = x.shape cls_tokens = repeat(self.cls_token, '() n d -> b n d', b = b) x = torch.cat((cls_tokens, x), dim=1) x += self.pos_embedding[:, :(n + 1)] x = self.dropout(x) x = self.transformer(x) x = x.mean(dim = 1) if self.pool == 'mean' else x[:, 0] x = self.to_latent(x) x = self.norm(x) x = self.fc(x) return x def produce_feature(self, img): x = self.to_patch_embedding(img) b, n, _ = x.shape cls_tokens = repeat(self.cls_token, '() n d -> b n d', b = b) x = torch.cat((cls_tokens, x), dim=1) x += self.pos_embedding[:, :(n + 1)] x = self.dropout(x) x = self.transformer(x) x = 
x.mean(dim = 1) if self.pool == 'mean' else x[:, 0] x = self.to_latent(x) return x def cal_feature(self, img): x = self.to_patch_embedding(img) b, n, _ = x.shape cls_tokens = repeat(self.cls_token, '() n d -> b n d', b = b) x = torch.cat((cls_tokens, x), dim=1) x += self.pos_embedding[:, :(n + 1)] x = self.dropout(x) x = self.transformer(x) return x def Show_detail(self, img): x = self.to_patch_embedding(img) b, n, _ = x.shape cls_tokens = repeat(self.cls_token, '() n d -> b n d', b = b) x = torch.cat((cls_tokens, x), dim=1) x += self.pos_embedding[:, :(n + 1)] x = self.dropout(x) x = self.transformer(x) return x
5,687
Python
.py
135
33.274074
166
0.548481
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,750
models-checkpoint.py
hkgdifyu_pFedPT/system/flcore/trainmodel/.ipynb_checkpoints/models-checkpoint.py
import torch import torch.nn as nn import torch.nn.functional as F import math import logging from functools import partial from collections import OrderedDict import numpy as np from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_ batch_size = 16 class LocalModel(nn.Module): def __init__(self, base, predictor): super(LocalModel, self).__init__() self.base = base self.predictor = predictor def forward(self, x): out = self.base(x) out = self.predictor(out) return out class LocalModel_pt(nn.Module): def __init__(self, generator, base): super(LocalModel_pt, self).__init__() self.generator = generator self.base = base def forward(self, x): out = self.generator(x) out = self.base(out) return out class LocalModel_reppt(nn.Module): def __init__(self, generator, base, predictor): super(LocalModel_reppt, self).__init__() self.generator = generator self.base = base self.predictor = predictor def forward(self, x): out = self.generator(x) out = self.base(out) out = self.predictor(out) return out # # https://github.com/katsura-jp/fedavg.pytorch/blob/master/src/models/cnn.py # class FedAvgCNN(nn.Module): # def __init__(self, in_features=1, num_classes=10, dim=1024): # super().__init__() # self.conv1 = nn.Conv2d(in_features, # 32, # kernel_size=5, # padding=0, # stride=1, # bias=True) # self.conv2 = nn.Conv2d(32, # 64, # kernel_size=5, # padding=0, # stride=1, # bias=True) # self.fc1 = nn.Linear(dim, 512) # self.fc = nn.Linear(512, num_classes) # self.act = nn.ReLU(inplace=True) # self.maxpool = nn.MaxPool2d(kernel_size=(2, 2)) # def forward(self, x): # x = self.act(self.conv1(x)) # x = self.maxpool(x) # x = self.act(self.conv2(x)) # x = self.maxpool(x) # x = torch.flatten(x, 1) # x = self.act(self.fc1(x)) # x = self.fc(x) # return x class FedAvgCNN(nn.Module): def __init__(self, in_features=1, num_classes=10, dim=1024): super().__init__() self.conv1 = nn.Sequential( 
nn.Conv2d(in_features, 32, kernel_size=5, padding=0, stride=1, bias=True), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=(2, 2)) ) self.conv2 = nn.Sequential( nn.Conv2d(32, 64, kernel_size=5, padding=0, stride=1, bias=True), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=(2, 2)) ) self.fc1 = nn.Sequential( nn.Linear(dim, 512), nn.ReLU(inplace=True) ) self.fc = nn.Linear(512, num_classes) def forward(self, x): out = self.conv1(x) out = self.conv2(out) out = torch.flatten(out, 1) out = self.fc1(out) out = self.fc(out) return out # ==================================================================================================================== # https://github.com/katsura-jp/fedavg.pytorch/blob/master/src/models/mlp.py class FedAvgMLP(nn.Module): def __init__(self, in_features=784, num_classes=10, hidden_dim=200): super().__init__() self.fc1 = nn.Linear(in_features, hidden_dim) self.fc2 = nn.Linear(hidden_dim, num_classes) self.act = nn.ReLU(inplace=True) def forward(self, x): if x.ndim == 4: x = x.view(x.size(0), -1) x = self.act(self.fc1(x)) x = self.fc2(x) return x # ==================================================================================================================== class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(1, batch_size, 2, 1) self.conv2 = nn.Conv2d(batch_size, 32, 2, 1) self.dropout1 = nn.Dropout(0.25) self.dropout2 = nn.Dropout(0.5) self.fc1 = nn.Linear(18432, 128) self.fc = nn.Linear(128, 10) def forward(self, x): x = self.conv1(x) x = nn.ReLU()(x) x = nn.MaxPool2d(2, 1)(x) x = self.dropout1(x) x = self.conv2(x) x = nn.ReLU()(x) x = nn.MaxPool2d(2, 1)(x) x = self.dropout2(x) x = torch.flatten(x, 1) x = self.fc1(x) x = nn.ReLU()(x) x = self.fc(x) output = F.log_softmax(x, dim=1) return output # ==================================================================================================================== class Mclr_Logistic(nn.Module): def __init__(self, input_dim=1*28*28, 
num_classes=10): super(Mclr_Logistic, self).__init__() self.fc = nn.Linear(input_dim, num_classes) def forward(self, x): x = torch.flatten(x, 1) x = self.fc(x) output = F.log_softmax(x, dim=1) return output # ==================================================================================================================== class DNN(nn.Module): def __init__(self, input_dim=1*28*28, mid_dim=100, num_classes=10): super(DNN, self).__init__() self.fc1 = nn.Linear(input_dim, mid_dim) self.fc = nn.Linear(mid_dim, num_classes) def forward(self, x): x = torch.flatten(x, 1) x = F.relu(self.fc1(x)) x = self.fc(x) x = F.log_softmax(x, dim=1) return x # ==================================================================================================================== class CifarNet(nn.Module): def __init__(self, num_classes=10): super(CifarNet, self).__init__() self.conv1 = nn.Conv2d(3, 6, 5) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(6, batch_size, 5) self.fc1 = nn.Linear(batch_size * 5 * 5, 120) self.fc2 = nn.Linear(120, 84) self.fc = nn.Linear(84, num_classes) def forward(self, x): x = self.pool(F.relu(self.conv1(x))) x = self.pool(F.relu(self.conv2(x))) x = x.view(-1, batch_size * 5 * 5) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc(x) x = F.log_softmax(x, dim=1) return x # ==================================================================================================================== # cfg = { # 'VGG11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], # 'VGG13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], # 'VGGbatch_size': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'], # 'VGG19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'], # } # class VGG(nn.Module): # def __init__(self, vgg_name): # super(VGG, self).__init__() # self.features = self._make_layers(cfg[vgg_name]) # self.classifier = 
nn.Sequential( # nn.Linear(512, 512), # nn.ReLU(True), # nn.Linear(512, 512), # nn.ReLU(True), # nn.Linear(512, 10) # ) # def forward(self, x): # out = self.features(x) # out = out.view(out.size(0), -1) # out = self.classifier(out) # output = F.log_softmax(out, dim=1) # return output # def _make_layers(self, cfg): # layers = [] # in_channels = 3 # for x in cfg: # if x == 'M': # layers += [nn.MaxPool2d(kernel_size=2, stride=2)] # else: # layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1), # nn.BatchNorm2d(x), # nn.ReLU(inplace=True)] # in_channels = x # layers += [nn.AvgPool2d(kernel_size=1, stride=1)] # return nn.Sequential(*layers) # ==================================================================================================================== def init_weights(m): classname = m.__class__.__name__ if classname.find('Conv2d') != -1 or classname.find('ConvTranspose2d') != -1: nn.init.kaiming_uniform_(m.weight) nn.init.zeros_(m.bias) elif classname.find('BatchNorm') != -1: nn.init.normal_(m.weight, 1.0, 0.02) nn.init.zeros_(m.bias) elif classname.find('Linear') != -1: nn.init.xavier_normal_(m.weight) nn.init.zeros_(m.bias) class PadPrompter(nn.Module): def __init__(self,inchanel = 1,pad_size=1,image_size=28,args =None): super(PadPrompter, self).__init__() pad_size = pad_size image_size = image_size self.args = args self.inchanel =inchanel self.base_size = image_size - pad_size*2 self.pad_up = nn.Parameter(torch.randn([1, self.inchanel, pad_size, image_size])) self.pad_down = nn.Parameter(torch.randn([1, self.inchanel, pad_size, image_size])) self.pad_left = nn.Parameter(torch.randn([1, self.inchanel, image_size - pad_size*2, pad_size])) self.pad_right = nn.Parameter(torch.randn([1,self.inchanel, image_size - pad_size*2, pad_size])) def forward(self, x): base = torch.zeros(1, self.inchanel, self.base_size, self.base_size).to(self.args.device) prompt = torch.cat([self.pad_left, base, self.pad_right], dim=3) prompt = torch.cat([self.pad_up, prompt, 
self.pad_down], dim=2) prompt = torch.cat(x.size(0) * [prompt]) return x + prompt class LeNet(nn.Module): def __init__(self, feature_dim=50*4*4, bottleneck_dim=256, num_classes=10, iswn=None): super(LeNet, self).__init__() self.conv_params = nn.Sequential( nn.Conv2d(1, 20, kernel_size=5), nn.MaxPool2d(2), nn.ReLU(), nn.Conv2d(20, 50, kernel_size=5), nn.Dropout2d(p=0.5), nn.MaxPool2d(2), nn.ReLU(), ) self.bn = nn.BatchNorm1d(bottleneck_dim, affine=True) self.dropout = nn.Dropout(p=0.5) self.bottleneck = nn.Linear(feature_dim, bottleneck_dim) self.bottleneck.apply(init_weights) self.fc = nn.Linear(bottleneck_dim, num_classes) if iswn == "wn": self.fc = nn.utils.weight_norm(self.fc, name="weight") self.fc.apply(init_weights) def forward(self, x): x = self.conv_params(x) x = x.view(x.size(0), -1) x = self.bottleneck(x) x = self.bn(x) x = self.dropout(x) x = self.fc(x) x = F.log_softmax(x, dim=1) return x # ==================================================================================================================== # class CNNCifar(nn.Module): # def __init__(self, num_classes=10): # super(CNNCifar, self).__init__() # self.conv1 = nn.Conv2d(3, 6, 5) # self.pool = nn.MaxPool2d(2, 2) # self.conv2 = nn.Conv2d(6, batch_size, 5) # self.fc1 = nn.Linear(batch_size * 5 * 5, 120) # self.fc2 = nn.Linear(120, 100) # self.fc3 = nn.Linear(100, num_classes) # # self.weight_keys = [['fc1.weight', 'fc1.bias'], # # ['fc2.weight', 'fc2.bias'], # # ['fc3.weight', 'fc3.bias'], # # ['conv2.weight', 'conv2.bias'], # # ['conv1.weight', 'conv1.bias'], # # ] # def forward(self, x): # x = self.pool(F.relu(self.conv1(x))) # x = self.pool(F.relu(self.conv2(x))) # x = x.view(-1, batch_size * 5 * 5) # x = F.relu(self.fc1(x)) # x = F.relu(self.fc2(x)) # x = self.fc3(x) # x = F.log_softmax(x, dim=1) # return x # ==================================================================================================================== class LSTMNet(nn.Module): def __init__(self, hidden_dim, 
num_layers=2, bidirectional=False, dropout=0.2, padding_idx=0, vocab_size=98635, num_classes=10): super().__init__() self.dropout = nn.Dropout(dropout) self.embedding = nn.Embedding(vocab_size, hidden_dim, padding_idx) self.lstm = nn.LSTM(input_size=hidden_dim, hidden_size=hidden_dim, num_layers=num_layers, bidirectional=bidirectional, dropout=dropout, batch_first=True) dims = hidden_dim*2 if bidirectional else hidden_dim self.fc = nn.Linear(dims, num_classes) def forward(self, x): text, text_lengths = x embedded = self.embedding(text) #pack sequence packed_embedded = nn.utils.rnn.pack_padded_sequence(embedded, text_lengths, batch_first=True, enforce_sorted=False) packed_output, (hidden, cell) = self.lstm(packed_embedded) #unpack sequence out, out_lengths = nn.utils.rnn.pad_packed_sequence(packed_output, batch_first=True) out = torch.relu_(out[:,-1,:]) out = self.dropout(out) out = self.fc(out) out = F.log_softmax(out, dim=1) return out # ==================================================================================================================== class fastText(nn.Module): def __init__(self, hidden_dim, padding_idx=0, vocab_size=98635, num_classes=10): super(fastText, self).__init__() # Embedding Layer self.embedding = nn.Embedding(vocab_size, hidden_dim, padding_idx) # Hidden Layer self.fc1 = nn.Linear(hidden_dim, hidden_dim) # Output Layer self.fc = nn.Linear(hidden_dim, num_classes) def forward(self, x): text, text_lengths = x embedded_sent = self.embedding(text) h = self.fc1(embedded_sent.mean(1)) z = self.fc(h) out = F.log_softmax(z, dim=1) return out # ==================================================================================================================== class TextCNN(nn.Module): def __init__(self, hidden_dim, num_channels=100, kernel_size=[3,4,5], max_len=200, dropout=0.8, padding_idx=0, vocab_size=98635, num_classes=10): super(TextCNN, self).__init__() # Embedding Layer self.embedding = nn.Embedding(vocab_size, hidden_dim, padding_idx) # 
This stackoverflow thread clarifies how conv1d works # https://stackoverflow.com/questions/46503816/keras-conv1d-layer-parameters-filters-and-kernel-size/46504997 self.conv1 = nn.Sequential( nn.Conv1d(in_channels=hidden_dim, out_channels=num_channels, kernel_size=kernel_size[0]), nn.ReLU(), nn.MaxPool1d(max_len - kernel_size[0]+1) ) self.conv2 = nn.Sequential( nn.Conv1d(in_channels=hidden_dim, out_channels=num_channels, kernel_size=kernel_size[1]), nn.ReLU(), nn.MaxPool1d(max_len - kernel_size[1]+1) ) self.conv3 = nn.Sequential( nn.Conv1d(in_channels=hidden_dim, out_channels=num_channels, kernel_size=kernel_size[2]), nn.ReLU(), nn.MaxPool1d(max_len - kernel_size[2]+1) ) self.dropout = nn.Dropout(dropout) # Fully-Connected Layer self.fc = nn.Linear(num_channels*len(kernel_size), num_classes) def forward(self, x): text, text_lengths = x embedded_sent = self.embedding(text).permute(0,2,1) conv_out1 = self.conv1(embedded_sent).squeeze(2) conv_out2 = self.conv2(embedded_sent).squeeze(2) conv_out3 = self.conv3(embedded_sent).squeeze(2) all_out = torch.cat((conv_out1, conv_out2, conv_out3), 1) final_feature_map = self.dropout(all_out) out = self.fc(final_feature_map) out = F.log_softmax(out, dim=1) return out # ==================================================================================================================== class PatchEmbed(nn.Module): # PatchEmbed from timm """ Image to Patch Embedding CNN_proj + Rearrange: [B, C, W, H] -> [B, D, Pn_W, Pn_H] -> [B, D, Pn] -> [B, Pn, D] """ def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768): super().__init__() img_size = to_2tuple(img_size) patch_size = to_2tuple(patch_size) num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0]) self.img_size = img_size self.patch_size = patch_size self.num_patches = num_patches self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) def forward(self, x): B, C, H, W = x.shape # FIXME look at relaxing size 
constraints assert H == self.img_size[0] and W == self.img_size[1], \ f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." # CNN_proj + Rearrange: [B, C, W, H] -> [B, D, Pn_W, Pn_H] -> [B, D, Pn] -> [B, Pn, D] x = self.proj(x).flatten(2).transpose(1, 2) # x: (B, 14*14, 768) return x class FFN(nn.Module): # Mlp from timm """ FFN (from timm) :param in_features: :param hidden_features: :param out_features: :param act_layer: :param drop: """ def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Linear(in_features, hidden_features) self.act = act_layer() self.fc2 = nn.Linear(hidden_features, out_features) self.drop = nn.Dropout(drop) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.drop(x) x = self.fc2(x) x = self.drop(x) return x # MHSA class Attention(nn.Module): # qkv Transform + MSA(MHSA) (Attention from timm) """ qkv Transform + MSA(MHSA) (from timm) # input x.shape = batch, patch_number, patch_dim # output x.shape = batch, patch_number, patch_dim :param dim: dim=CNN feature dim, because the patch size is 1x1 :param num_heads: :param qkv_bias: :param qk_scale: by default head_dim ** -0.5 (squre root) :param attn_drop: dropout rate after MHSA :param proj_drop: """ def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights self.scale = qk_scale or head_dim ** -0.5 self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): # input x.shape = batch, patch_number, patch_dim batch, patch_number, patch_dim = x.shape # mlp 
transform + head split [B, Pn, D] -> [B, Pn, 3D] -> [B, Pn, 3, H, D/H] -> [3, B, H, Pn, D/H] qkv = self.qkv(x).reshape(batch, patch_number, 3, self.num_heads, patch_dim // self.num_heads).permute(2, 0, 3, 1, 4) # 3 [B, H, Pn, D/H] q, k, v = qkv[0], qkv[1], qkv[2] # [B, H, Pn, D/H] -> [B, H, Pn, D/H] attn = (q @ k.transpose(-2, -1)) * self.scale attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) # Dropout # head fusion: [B, H, Pn, D/H] -> [B, Pn, H, D/H] -> [B, Pn, D] x = (attn @ v).transpose(1, 2).reshape(batch, patch_number, patch_dim) x = self.proj(x) x = self.proj_drop(x) # mlp # output x.shape = batch, patch_number, patch_dim return x # Encoder_Block class Block(nn.Module): # teansformer Block from timm def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): """ # input x.shape = batch, patch_number, patch_dim # output x.shape = batch, patch_number, patch_dim :param dim: dim :param num_heads: :param mlp_ratio: FFN :param qkv_bias: :param qk_scale: by default head_dim ** -0.5 (squre root) :param drop: :param attn_drop: dropout rate after Attention :param drop_path: dropout rate after sd :param act_layer: FFN act :param norm_layer: Pre Norm """ super().__init__() # Pre Norm self.norm1 = norm_layer(dim) # Transformer used the nn.LayerNorm self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) # NOTE from timm: drop path for stochastic depth, we shall see if this is better than dropout here self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() # stochastic depth # Add & Norm self.norm2 = norm_layer(dim) # FFN mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = FFN(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) def forward(self, x): # MHSA + Res_connection x = x + self.drop_path(self.attn(self.norm1(x))) # FFN + Res_connection x = x + self.drop_path(self.mlp(self.norm2(x))) return x class VisionTransformer(nn.Module): # From timm """ Vision Transformer A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` - https://arxiv.org/abs/2010.11929 """ def __init__(self, img_size=32, patch_size=16, in_chans=3, num_classes=10, embed_dim=128, depth=8, num_heads=8, mlp_ratio=2., qkv_bias=True, representation_size=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., embed_layer=PatchEmbed, norm_layer=None, act_layer=None): """ Args: img_size (int, tuple): input image size patch_size (int, tuple): patch size in_chans (int): number of input channels num_classes (int): number of classes for classification head embed_dim (int): embedding dimension depth (int): depth of transformer num_heads (int): number of attention heads mlp_ratio (int): ratio of mlp hidden dim to embedding dim qkv_bias (bool): enable bias for qkv if True representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set drop_rate (float): dropout rate attn_drop_rate (float): attention dropout rate drop_path_rate (float): stochastic depth rate embed_layer (nn.Module): patch embedding layer norm_layer: (nn.Module): normalization layer """ super().__init__() self.num_classes = num_classes self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models self.num_tokens = 1 norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) act_layer = act_layer or nn.GELU self.patch_embed = embed_layer( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) num_patches = 
self.patch_embed.num_patches # Embedding tokens self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim)) self.pos_drop = nn.Dropout(p=drop_rate) # Stochastic Depth Decay rule dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # Encoders self.blocks = nn.Sequential(*[ Block(dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer) for i in range(depth)]) # last norm self.norm = norm_layer(embed_dim) # Representation layer if representation_size: self.num_features = representation_size self.pre_logits = nn.Sequential(OrderedDict([ ('fc', nn.Linear(embed_dim, representation_size)), ('act', nn.Tanh()) ])) else: self.pre_logits = nn.Identity() # Classifier head(s) self.fc = nn.Linear(self.num_features, self.num_classes) if self.num_classes > 0 else nn.Identity() self.head_dist = None def forward_features(self, x): x = self.patch_embed(x) # print(x.shape,self.pos_embed.shape) cls_token = self.cls_token.expand(x.shape[0], -1, -1) x = torch.cat((cls_token, x), dim=1) x = self.pos_drop(x + self.pos_embed) x = self.blocks(x) x = self.norm(x) return x def forward(self, x): x = self.forward_features(x) x = self.pre_logits(x[:, 0]) # use cls token for cls head [B,1,768] x = self.fc(x) return x # class linear(Function): # @staticmethod # def forward(ctx, input): # return input # @staticmethod # def backward(ctx, grad_output): # return grad_output
26,452
Python
.py
595
36.364706
123
0.531646
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,751
serverpfedpt.py
hkgdifyu_pFedPT/system/flcore/servers/serverpfedpt.py
from flcore.clients.clientpt import clientPT from flcore.servers.serverbase import Server from threading import Thread import time import torch import os import h5py import copy import numpy as np class PFedPT(Server): def __init__(self, args, times): super().__init__(args, times) # select slow clients self.set_slow_clients() self.args = args self.set_clients(args, clientPT) self.global_model = copy.deepcopy(args.model.base) self.diff_pro = [] self.clients_diverge = [] print(f"\nJoin ratio / total clients: {self.join_ratio} / {self.num_clients}") print("Finished creating server and clients.") # self.load_model() self.Budget = [] def train(self): local_acc = [] for i in range(self.global_rounds+1): s_t = time.time() self.selected_clients = self.select_clients() self.send_models() if i%self.eval_gap == 0: print(f"\n-------------Round number: {i}-------------") print("\nEvaluate global model") self.evaluate() temp_diff_pro = 0 for client in self.selected_clients: temp_diff_pro_client = client.train() temp_diff_pro = temp_diff_pro +temp_diff_pro_client.item() print("Averaged prompr difference: {:.4f}".format(temp_diff_pro)) self.diff_pro.append(temp_diff_pro) diverge_clents =0 for new_param, old_param in zip(self.clients[0].model.generator.parameters(), self.clients[1].model.generator.parameters()): diff_pro = new_param - old_param diff_pro = torch.where(diff_pro > 0, diff_pro, torch.zeros_like(diff_pro) - diff_pro) diverge_clents = diverge_clents+torch.sum(diff_pro) print("0 and 1 clients difference: {:.4f}".format(diverge_clents.item())) self.clients_diverge.append(diverge_clents.item()) if i%self.eval_gap == 0: print("\nEvaluate local model") self.evaluate(acc=local_acc) # threads = [Thread(target=client.train) # for client in self.selected_clients] # [t.start() for t in threads] # [t.join() for t in threads] self.receive_models() self.aggregate_parameters() self.Budget.append(time.time() - s_t) print('-'*25, 'time cost', '-'*25, self.Budget[-1]) print("\nBest 
global accuracy.") # self.print_(max(self.rs_test_acc), max( # self.rs_train_acc), min(self.rs_train_loss)) print(max(self.rs_test_acc)) print("\nAverage time cost per round.") print(sum(self.Budget[1:])/len(self.Budget[1:])) print("\nBest local accuracy.") print(max(local_acc)) self.save_results() self.save_global_model() self.save_client_model() def receive_models(self): assert (len(self.selected_clients) > 0) active_train_samples = 0 for client in self.selected_clients: active_train_samples += client.train_samples self.uploaded_weights = [] self.uploaded_ids = [] self.uploaded_models = [] for client in self.selected_clients: self.uploaded_weights.append(client.train_samples / active_train_samples) self.uploaded_ids.append(client.id) self.uploaded_models.append(copy.deepcopy(client.model.base)) def save_client_model(self): model_path = os.path.join("models", self.dataset,"client",self.args.arv1+"*"+self.args.arv2+"*"+self.args.arv3+"*"+self.args.arv4+"*"+self.args.arv5+"*"+self.args.arv6 ) if not os.path.exists(model_path): os.makedirs(model_path) for c_idx,c in enumerate(self.clients): model_path_save = os.path.join(model_path, self.algorithm + "_client" +str(c_idx)+ "_" + str(self.args.num_prompt) + "_" + str(self.args.join_ratio) + "_" + str(self.args.num_clients)+ "_" + str(self.args.plocal_steps) + "_" + str(self.args.global_rounds)+ ".pt") torch.save(c.model, model_path_save) def save_results(self): algo = self.dataset + "_" + self.algorithm result_path = "../results/"+ self.args.arv1+"*"+self.args.arv2+"*"+self.args.arv3+"*"+self.args.arv4+"*"+self.args.arv5+"*"+self.args.arv6 +"/" if not os.path.exists(result_path): os.makedirs(result_path) if (len(self.rs_test_acc)): algo =algo + "_" + self.goal + "_" + str(self.times)+ "_" + str(self.args.num_prompt) + "_" + str(self.args.join_ratio) + "_" + str(self.args.num_clients)+ "_" + str(self.args.plocal_steps) + "_" + str(self.args.global_rounds) file_path = result_path + "{}.h5".format(algo) print("File path: 
" + file_path) with h5py.File(file_path, 'w') as hf: hf.create_dataset('rs_test_acc', data=self.rs_test_acc) hf.create_dataset('rs_test_acc_std', data=self.rs_test_acc_std) hf.create_dataset('rs_test_auc', data=self.rs_test_auc) hf.create_dataset('rs_train_loss', data=self.rs_train_loss) hf.create_dataset('diff_pro', data=self.diff_pro) hf.create_dataset('clients_diverge', data=self.clients_diverge) def evaluate(self, acc=None, loss=None): stats = self.test_metrics() stats_train = self.train_metrics() test_acc = sum(stats[2]) * 1.0 / sum(stats[1]) test_acc2 = sum(stats[4]) * 1.0 / sum(stats[1]) test_auc = sum(stats[3]) * 1.0 / sum(stats[1]) train_loss = sum(stats_train[2]) * 1.0 / sum(stats_train[1]) accs = [a / n for a, n in zip(stats[2], stats[1])] aucs = [a / n for a, n in zip(stats[3], stats[1])] if acc == None: self.rs_test_acc.append(test_acc) else: acc.append(test_acc) self.rs_test_auc.append(test_auc) self.rs_test_acc_std.append(np.std(accs)) if loss == None: self.rs_train_loss.append(train_loss) else: loss.append(train_loss) print("Averaged Train Loss: {:.4f}".format(train_loss)) print("Averaged Test Accurancy: {:.4f}".format(test_acc)) print("Averaged Test oral Accurancy: {:.4f}".format(test_acc2)) print("Averaged Test AUC: {:.4f}".format(test_auc)) # self.print_(test_acc, train_acc, train_loss) print("Std Test Accurancy: {:.4f}".format(np.std(accs))) print("Std Test AUC: {:.4f}".format(np.std(aucs))) def test_metrics(self): num_samples = [] tot_correct = [] tot_correct2 = [] tot_auc = [] for c in self.clients: ct, ct2, ns, auc = c.test_metrics() tot_correct.append(ct * 1.0) tot_correct2.append(ct2 * 1.0) tot_auc.append(auc * ns) num_samples.append(ns) ids = [c.id for c in self.clients] return ids, num_samples, tot_correct, tot_auc, tot_correct2
7,153
Python
.py
142
39.802817
275
0.580849
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,752
serverbase.py
hkgdifyu_pFedPT/system/flcore/servers/serverbase.py
import torch import os import numpy as np import h5py import copy import time import random from utils.data_utils import read_client_data class Server(object): def __init__(self, args, times): # Set up the main attributes self.device = args.device self.dataset = args.dataset self.global_rounds = args.global_rounds self.local_steps = args.local_steps self.batch_size = args.batch_size self.learning_rate = args.local_learning_rate self.global_model = copy.deepcopy(args.model) self.num_clients = args.num_clients self.join_ratio = args.join_ratio self.join_clients = int(self.num_clients * self.join_ratio) self.algorithm = args.algorithm self.time_select = args.time_select self.goal = args.goal self.time_threthold = args.time_threthold self.save_folder_name = args.save_folder_name self.num_prompt = args.num_prompt self.args = args self.plocal_steps = args.plocal_steps self.top_cnt = 100 self.clients = [] self.selected_clients = [] self.train_slow_clients = [] self.send_slow_clients = [] self.uploaded_weights = [] self.uploaded_ids = [] self.uploaded_models = [] self.rs_test_acc = [] self.rs_test_acc_std = [] self.rs_test_auc = [] self.rs_train_loss = [] self.times = times self.eval_gap = args.eval_gap self.client_drop_rate = args.client_drop_rate self.train_slow_rate = args.train_slow_rate self.send_slow_rate = args.send_slow_rate def set_clients(self, args, clientObj): for i, train_slow, send_slow in zip(range(self.num_clients), self.train_slow_clients, self.send_slow_clients): train_data = read_client_data(self.dataset, i, args,is_train=True) test_data = read_client_data(self.dataset, i, args,is_train=False) client = clientObj(args, id=i, train_samples=len(train_data), test_samples=len(test_data), train_slow=train_slow, send_slow=send_slow) self.clients.append(client) # random select slow clients def select_slow_clients(self, slow_rate): slow_clients = [False for i in range(self.num_clients)] idx = [i for i in range(self.num_clients)] idx_ = np.random.choice(idx, 
int(slow_rate * self.num_clients)) for i in idx_: slow_clients[i] = True return slow_clients def set_slow_clients(self): self.train_slow_clients = self.select_slow_clients( self.train_slow_rate) self.send_slow_clients = self.select_slow_clients( self.send_slow_rate) def select_clients(self): selected_clients = list(np.random.choice(self.clients, self.join_clients, replace=False)) return selected_clients def send_models(self): assert (len(self.clients) > 0) for client in self.clients: client.set_parameters(self.global_model) def receive_models(self): assert (len(self.selected_clients) > 0) self.uploaded_weights = [] tot_samples = 0 self.uploaded_ids = [] self.uploaded_models = [] for client in self.selected_clients: self.uploaded_weights.append(client.train_samples) tot_samples += client.train_samples self.uploaded_ids.append(client.id) self.uploaded_models.append(client.model) for i, w in enumerate(self.uploaded_weights): self.uploaded_weights[i] = w / tot_samples def aggregate_parameters(self): assert (len(self.uploaded_models) > 0) self.global_model = copy.deepcopy(self.uploaded_models[0]) for param in self.global_model.parameters(): param.data.zero_() for w, client_model in zip(self.uploaded_weights, self.uploaded_models): self.add_parameters(w, client_model) def add_parameters(self, w, client_model): for server_param, client_param in zip(self.global_model.parameters(), client_model.parameters()): server_param.data += client_param.data.clone() * w def save_global_model(self): model_path = os.path.join("models", self.dataset, self.args.arv1+"*"+self.args.arv2+"*"+self.args.arv3+"*"+self.args.arv4+"*"+self.args.arv5+"*"+self.args.arv6) if not os.path.exists(model_path): os.makedirs(model_path) model_path = os.path.join(model_path, self.algorithm + "_server"+ "_" + str(self.num_prompt) + "_" + str(self.join_ratio) + "_" + str(self.num_clients)+ "_" + str(self.plocal_steps) + "_" + str(self.global_rounds) + ".pt") torch.save(self.global_model, model_path) def 
load_model(self): model_path = os.path.join("models", self.dataset, self.args.arv1+"*"+self.args.arv2+"*"+self.args.arv3+"*"+self.args.arv4+"*"+self.args.arv5+"*"+self.args.arv6) model_path = os.path.join(model_path, self.algorithm + "_server" + "_" + str(self.num_prompt) + "_" + str(self.join_ratio) + "_" + str(self.num_clients)+ "_" + str(self.plocal_steps) + "_" + str(self.global_rounds) +".pt") assert (os.path.exists(model_path)) self.global_model = torch.load(model_path) def model_exists(self): model_path = os.path.join("models", self.dataset, self.args.arv1+"*"+self.args.arv2+"*"+self.args.arv3+"*"+self.args.arv4+"*"+self.args.arv5+"*"+self.args.arv6) model_path = os.path.join(model_path, self.algorithm + "_" + str(self.num_prompt) + "_" + str(self.join_ratio) + "_" + str(self.num_clients)+ "_" + str(self.plocal_steps) + "_" + str(self.global_rounds)+".pt") return os.path.exists(model_path) def save_results(self): algo = self.dataset + "_" + self.algorithm result_path = "../results/"+ self.args.arv1+"*"+self.args.arv2+"*"+self.args.arv3+"*"+self.args.arv4+"*"+self.args.arv5+"*"+self.args.arv6 +"/" if not os.path.exists(result_path): os.makedirs(result_path) if (len(self.rs_test_acc)): algo = algo + "_" + self.goal + "_" + str(self.times)+ "_" + str(self.num_prompt) + "_" + str(self.join_ratio) + "_" + str(self.num_clients)+ "_" + str(self.plocal_steps) + "_" + str(self.global_rounds) file_path = result_path + "{}.h5".format(algo) print("File path: " + file_path) with h5py.File(file_path, 'w') as hf: hf.create_dataset('rs_test_acc', data=self.rs_test_acc) hf.create_dataset('rs_test_acc_std', data=self.rs_test_acc_std) hf.create_dataset('rs_test_auc', data=self.rs_test_auc) hf.create_dataset('rs_train_loss', data=self.rs_train_loss) def save_item(self, item, item_name): if not os.path.exists(self.save_folder_name): os.makedirs(self.save_folder_name) torch.save(item, os.path.join(self.save_folder_name, "server_" + item_name + "_" + str(self.num_prompt) + "_" + 
str(self.join_ratio) + "_" + str(self.num_clients)+ "_" + str(self.plocal_steps) + "_" + str(self.global_rounds) + ".pt")) def load_item(self, item_name): return torch.load(os.path.join(self.save_folder_name, "server_" + item_name + "_" + str(self.num_prompt) + "_" + str(self.join_ratio) + "_" + str(self.num_clients)+ "_" + str(self.plocal_steps) + "_" + str(self.global_rounds) + ".pt")) def test_metrics(self): num_samples = [] tot_correct = [] tot_auc = [] for c in self.clients: ct, ns, auc = c.test_metrics() tot_correct.append(ct*1.0) tot_auc.append(auc*ns) num_samples.append(ns) ids = [c.id for c in self.clients] return ids, num_samples, tot_correct, tot_auc def test_metrics2(self): num_samples = [] tot_correct = [] tot_auc = [] for c in self.selected_clients: ct, ns, auc = c.test_metrics() tot_correct.append(ct*1.0) tot_auc.append(auc*ns) num_samples.append(ns) ids = [c.id for c in self.clients] return ids, num_samples, tot_correct, tot_auc def train_metrics(self): num_samples = [] losses = [] for c in self.clients: cl, ns = c.train_metrics() num_samples.append(ns) losses.append(cl*1.0) ids = [c.id for c in self.clients] return ids, num_samples, losses # evaluate selected clients def evaluate(self, acc=None, loss=None): if acc == None: stats = self.test_metrics() else: stats = self.test_metrics() stats_train = self.train_metrics() test_acc = sum(stats[2])*1.0 / sum(stats[1]) test_auc = sum(stats[3])*1.0 / sum(stats[1]) train_loss = sum(stats_train[2])*1.0 / sum(stats_train[1]) accs = [a / n for a, n in zip(stats[2], stats[1])] aucs = [a / n for a, n in zip(stats[3], stats[1])] if acc == None: self.rs_test_acc.append(test_acc) else: acc.append(test_acc) self.rs_test_auc.append(test_auc) self.rs_test_acc_std.append(np.std(accs)) if loss == None: self.rs_train_loss.append(train_loss) else: loss.append(train_loss) print("Averaged Train Loss: {:.4f}".format(train_loss)) print("Averaged Test Accurancy: {:.4f}".format(test_acc)) print("Averaged Test AUC: 
{:.4f}".format(test_auc)) # self.print_(test_acc, train_acc, train_loss) print("Std Test Accurancy: {:.4f}".format(np.std(accs))) print("Std Test AUC: {:.4f}".format(np.std(aucs))) def print_(self, test_acc, test_auc, train_loss): print("Average Test Accurancy: {:.4f}".format(test_acc)) print("Average Test AUC: {:.4f}".format(test_auc)) print("Average Train Loss: {:.4f}".format(train_loss)) def check_done(self, acc_lss, top_cnt=None, div_value=None): for acc_ls in acc_lss: if top_cnt != None and div_value != None: find_top = len(acc_ls) - torch.topk(torch.tensor(acc_ls), 1).indices[0] > top_cnt find_div = len(acc_ls) > 1 and np.std(acc_ls[-top_cnt:]) < div_value if find_top and find_div: pass else: return False elif top_cnt != None: find_top = len(acc_ls) - torch.topk(torch.tensor(acc_ls), 1).indices[0] > top_cnt if find_top: pass else: return False elif div_value != None: find_div = len(acc_ls) > 1 and np.std(acc_ls[-top_cnt:]) < div_value if find_div: pass else: return False else: raise NotImplementedError return True
11,178
Python
.py
222
39.572072
243
0.583571
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,753
serveravg.py
hkgdifyu_pFedPT/system/flcore/servers/serveravg.py
import time import torch from flcore.clients.clientavg import clientAVG from flcore.servers.serverbase import Server from threading import Thread class FedAvg(Server): def __init__(self, args, times): super().__init__(args, times) # select slow clients self.set_slow_clients() self.set_clients(args, clientAVG) print(f"\nJoin ratio / total clients: {self.join_ratio} / {self.num_clients}") print("Finished creating server and clients.") # self.load_model() self.Budget = [] def train(self): local_acc = [] for i in range(self.global_rounds+1): s_t = time.time() self.selected_clients = self.select_clients() self.send_models() if i%self.eval_gap == 0: print(f"\n-------------Round number: {i}-------------") print("\nEvaluate global model") self.evaluate() for client in self.selected_clients: client.train() # threads = [Thread(target=client.train) # for client in self.selected_clients] # [t.start() for t in threads] # [t.join() for t in threads] if i%self.eval_gap == 0: print("\nEvaluate local model") self.evaluate(acc=local_acc) self.receive_models() self.aggregate_parameters() self.Budget.append(time.time() - s_t) print('-'*25, 'time cost', '-'*25, self.Budget[-1]) # i= i+1 # # s_t = time.time() # self.selected_clients = self.clients # self.send_models() # # if i % self.eval_gap == 0: # print(f"\n-------------Round number: {i}-------------") # print("\nEvaluate global model") # self.evaluate() # # for client in self.selected_clients: # client.local_steps = 50 # client.train() # # # threads = [Thread(target=client.train) # # for client in self.selected_clients] # # [t.start() for t in threads] # # [t.join() for t in threads] # if i % self.eval_gap == 0: # print("\nEvaluate local model") # self.evaluate(acc=local_acc) # self.receive_models() # self.aggregate_parameters() # # self.Budget.append(time.time() - s_t) # print('-' * 25, 'time cost', '-' * 25, self.Budget[-1]) print("\nBest global accuracy.") # self.print_(max(self.rs_test_acc), max( # self.rs_train_acc), 
min(self.rs_train_loss)) print(max(self.rs_test_acc)) print("\nBest local accuracy.") print(max(local_acc)) print("\nAverage time cost per round.") print(sum(self.Budget[1:])/len(self.Budget[1:])) self.save_results() self.save_global_model()
2,949
Python
.py
75
29.466667
86
0.528691
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,754
serverrep.cpython-37.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverrep.cpython-37.pyc
B ·:cc ã@sHddlmZddlmZddlmZddlZddlZGdd„deƒZdS)é)Ú clientRep)ÚServer)ÚThreadNcs,eZdZ‡fdd„Zdd„Zdd„Z‡ZS)ÚFedRepcsLtƒ ||¡| ¡| |t¡td|j›d|j›�ƒtdƒg|_dS)Nz Join ratio / total clients: z / z%Finished creating server and clients.) ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚprintÚ join_ratioÚ num_clientsÚBudget)ÚselfÚargsÚtimes)Ú __class__©úF/root/autodl-tmp/PFL-Non-IID-master/system/flcore/servers/serverrep.pyr s  zFedRep.__init__cCs x®t|jdƒD]œ}t ¡}| ¡|_| ¡||jdkr\td|›d�ƒtdƒ| ¡x|jD] }|  ¡qdW|  ¡|  ¡|j   t ¡|¡tddd|j dƒqWtd ƒtt|jƒƒtd ƒtt|j dd…ƒt|j dd…ƒƒ| ¡| ¡dS) Nérz -------------Round number: z -------------z Evaluate global modelz-------------------------z time costéÿÿÿÿz Best global accuracy.z Average time cost per round.)ÚrangeÚ global_roundsÚtimeÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gapr ÚevaluateÚtrainÚreceive_modelsÚaggregate_parametersr ÚappendÚmaxÚ rs_test_accÚsumÚlenÚ save_resultsÚsave_global_model)rÚiÚs_tÚclientrrrrs(   (z FedRep.traincCsŒt|jƒdkst‚d}x|jD]}||j7}qWg|_g|_g|_xD|jD]:}|j |j|¡|j |j¡|j t   |j j ¡¡qJWdS)Nr) r%rÚAssertionErrorÚ train_samplesÚuploaded_weightsÚ uploaded_idsÚuploaded_modelsr!ÚidÚcopyÚdeepcopyÚmodelÚbase)rZactive_train_samplesr*rrrr:s  zFedRep.receive_models)Ú__name__Ú __module__Ú __qualname__rrrÚ __classcell__rr)rrrs #r) Zsystem.flcore.clients.clientreprÚ system.flcore.servers.serverbaserÚ threadingrrr1rrrrrÚ<module>s   
2,146
Python
.py
16
132.6875
338
0.47771
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,755
servermtl.cpython-39.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/servermtl.cpython-39.pyc
a f¾`c ã@s@ddlZddlmZddlmZddlmZGdd„deƒZdS)éN)Ú clientMTL)ÚServer)ÚThreadcs4eZdZ‡fdd„Zdd„Zdd„Zdd„Z‡ZS) ÚFedMTLcsÈtƒ ||¡t| |j¡ƒ|_tj|j|jf|j d�|_ |j |_ t  |j|jf¡}t  |jdf¡}|d|j|  |j ¡d}| |j ¡|_| ¡| |t¡td|j›d|j›�ƒtdƒdS)N©Údeviceééz Join clients / total clients: z / z%Finished creating server and clients.)ÚsuperÚ__init__ÚlenÚflattenÚ global_modelÚdimÚtorchÚzerosÚ join_clientsrÚW_globÚonesÚmmÚTÚtoÚomegaÚset_slow_clientsÚ set_clientsrÚprintÚ num_clients)ÚselfÚargsÚtimesÚIÚir©Ú __class__©úr/media/sim812/391e55df-b6f2-4fe9-a920-53434a8506fa/lgh/pdept/PFL-Non-IID-master/system/flcore/servers/servermtl.pyr s zFedMTL.__init__cCs¬t|jdƒD]r}| ¡|_| ¡||jdkrRtd|›d�ƒtdƒ| ¡t|jƒD]"\}}|  |j |j |¡|  ¡q\qtdƒtt |jƒƒ| ¡| ¡dS)Nrrz -------------Round number: z -------------z Evaluate global modelz Best global accuracy.)ÚrangeÚ global_roundsÚselect_clientsÚselected_clientsÚaggregate_parametersÚeval_gaprÚevaluateÚ enumerateZreceive_valuesrrÚtrainÚmaxÚ rs_test_accÚ save_resultsÚsave_global_model)rr!ÚidxÚclientr$r$r%r.s  z FedMTL.traincs,| ¡‰ˆ ¡}‡fdd„|Dƒ}t |¡S)Ncsg|]}ˆ| ¡‘qSr$)r )Ú.0Úkey©Ú state_dictr$r%Ú <listcomp><óz"FedMTL.flatten.<locals>.<listcomp>)r8ÚkeysrÚcat)rÚmodelr;ÚWr$r7r%r 9szFedMTL.flattencCsLtj|j|jf|jd�|_t|jƒD]"\}}| |j ¡|jdd…|f<q$dS)Nr) rrrrrrr-r)r r=)rr3r4r$r$r%r*?szFedMTL.aggregate_parameters)Ú__name__Ú __module__Ú __qualname__r r.r r*Ú __classcell__r$r$r"r%rs r)rZflcore.clients.clientmtlrÚflcore.servers.serverbaserÚ threadingrrr$r$r$r%Ú<module>s   
2,406
Python
.py
13
183.846154
602
0.442356
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,756
serverdyn.cpython-38.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverdyn.cpython-38.pyc
U Mrgcã@sPddlZddlZddlmZddlmZddlmZddlZGdd„deƒZ dS)éN)Ú clientDyn)ÚServer)ÚThreadcs<eZdZ‡fdd„Zdd„Zdd„Zdd„Zd d „Z‡ZS) ÚFedDyncs€tƒ ||¡| ¡| |t¡td|j›d|j›�ƒtdƒg|_|j |_ t   |j ¡|_ |j  ¡D]}t |j¡|_qhdS)Nz Join ratio / total clients: z / z%Finished creating server and clients.)ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚprintÚ join_ratioÚ num_clientsÚBudgetÚalphaÚcopyÚdeepcopyÚmodelÚ server_stateÚ parametersÚtorchÚ zeros_likeÚdata)ÚselfÚargsÚtimesÚparam©Ú __class__©õDD:\京东\promot\cifar\cifar\tiny\system\flcore\servers\serverdyn.pyr s zFedDyn.__init__cCshg}d|_d}t|jdƒD]Ü}t ¡}| ¡|_| ¡||jdkrhtd|›d�ƒtdƒ|  ¡|jD] }|  ¡qn||jdkržtdƒ|j |d�|  ¡|  ¡|  ¡|j t ¡|¡td |jd ƒ|j|jg|jd �|_|d7}qtd ƒtt|jƒƒtd ƒtt|ƒƒtdƒtt|jdd…ƒt|jdd…ƒƒ| ¡| ¡dS)NFréz -------------Round number: z -------------z Evaluate global modelz Evaluate local model)Úaccz2--------------------------------------------------éÿÿÿÿ)Úacc_lssÚtop_cntz Best global accuracy.z Best local accuracy.z Averaged time per iteration.)ÚdoneÚrangeÚ global_roundsÚtimeÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gapr ÚevaluateÚtrainÚreceive_modelsÚupdate_server_stateÚaggregate_parametersr ÚappendÚ check_doneÚ rs_test_accr#ÚmaxÚsumÚlenÚ save_resultsÚsave_global_model)rZ local_accÚiÚs_tÚclientrrrr-s>      (z FedDyn.traincCs<t|j ¡| ¡ƒD]"\}}|j|j ¡|j7_qdS)N)ÚzipÚ global_modelrrÚcloneÚ join_clients)rÚ client_modelÚ server_paramÚ client_paramrrrÚadd_parametersOszFedDyn.add_parameterscCs”t|jƒdkst‚t |jd¡|_|j ¡D]}t |j ¡|_ q.|jD]}|  |¡qHt |j ¡|j  ¡ƒD] \}}|j d|j |8_ qndS)Nrr)r6Úuploaded_modelsÚAssertionErrorrrr=rrrrrCr<rr)rrr@rAÚ state_paramrrrr0Ss  zFedDyn.aggregate_parameterscCs¾t|jƒdkst‚t |jd¡}| ¡D]}t |j¡|_q*|jD]B}t |j  ¡| ¡| ¡ƒD]"\}}}|j|||j 7_qbqDt |j  ¡| ¡ƒD]\}}|j|j |8_qœdS)Nr)r6rDrErrrrrrr<r=r rr)rZ model_deltarr@rArBZ delta_paramrFrrrr/`s  $zFedDyn.update_server_state) Ú__name__Ú __module__Ú __qualname__rr-rCr0r/Ú __classcell__rrrrr s  1 r) rrZflcore.clients.clientdynrÚflcore.servers.serverbaserÚ threadingrr'rrrrrÚ<module>s    
3,276
Python
.py
30
107.9
399
0.434247
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,757
serverperavg.cpython-38.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverperavg.cpython-38.pyc
U ”jfc‚ã@sHddlZddlZddlmZddlmZddlmZGdd„deƒZdS)éN)Ú clientPerAvg)ÚServer)ÚThreadcs,eZdZ‡fdd„Zdd„Zdd„Z‡ZS)ÚPerAvgcsFtƒ ||¡| ¡| |t¡td|j›d|j›�ƒtdƒdS)Nz Join ratio / total clients: z / z%Finished creating server and clients.)ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚprintÚ join_ratioÚ num_clients)ÚselfÚargsÚtimes©Ú __class__©õND:\京东\promot\cifar\cifar\Cifar10_iid\system\flcore\servers\serverperavg.pyr s  zPerAvg.__init__cCsªt|jdƒD]p}| ¡|_| ¡||jdkrRtd|›d�ƒtdƒ| ¡|jD]}| ¡| ¡qX|  ¡|  ¡qtdƒtt |j ƒƒ|  ¡| ¡dS)Nérz -------------Round number: z -------------z+ Evaluate global model with one step updatez Best global accuracy.)ÚrangeÚ global_roundsÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gapr Úevaluate_one_stepÚtrainÚreceive_modelsÚaggregate_parametersÚmaxÚ rs_test_accÚ save_resultsÚsave_global_model)r ÚiÚclientrrrrs     z PerAvg.traincCs’g}|jD]}| t |j¡¡| ¡q | ¡}t|jƒD]\}}| |||j¡q<t |dƒdt |dƒ}|j  |¡t d  |¡ƒdS)Négğ?rzAverage Test Accurancy: {:.4f}) ÚclientsÚappendÚcopyÚdeepcopyÚmodelZtrain_one_stepÚ test_metricsÚ enumerateÚ clone_modelÚsumr r Úformat)r Z models_tempÚcÚstatsr#Útest_accrrrr4s   zPerAvg.evaluate_one_step)Ú__name__Ú __module__Ú __qualname__rrrÚ __classcell__rrrrrs !r) r(ÚtorchZflcore.clients.clientperavgrÚflcore.servers.serverbaserÚ threadingrrrrrrÚ<module>s    
2,024
Python
.py
22
90.727273
347
0.482776
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,758
serverpFedMe.cpython-39.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverpFedMe.cpython-39.pyc
a f¾`cEã@sPddlZddlZddlZddlmZddlmZddlmZGdd„deƒZ dS)éN)Ú clientpFedMe)ÚServer)ÚThreadcsLeZdZ‡fdd„Zdd„Zdd„Zdd„Zd d „Zd d „Zd d„Z ‡Z S)ÚpFedMecs`tƒ ||¡| ¡| |t¡|j|_g|_g|_g|_t d|j ›d|j ›�ƒt dƒdS)Nz Join ratio / total clients: z / z%Finished creating server and clients.) ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚbetaÚrs_train_acc_perÚrs_train_loss_perÚrs_test_acc_perÚprintÚ join_ratioÚ num_clients)ÚselfÚargsÚtimes©Ú __class__©úu/media/sim812/391e55df-b6f2-4fe9-a920-53434a8506fa/lgh/pdept/PFL-Non-IID-master/system/flcore/servers/serverpFedMe.pyr s zpFedMe.__init__cCsÀt|jdƒD]†}| ¡|_| ¡|jD] }| ¡q*||jdkrftd|›d�ƒtdƒ| ¡t   t |j   ¡ƒ¡|_| ¡| ¡| ¡qtdƒtt|jƒƒ| ¡| ¡dS)Nérz -------------Round number: z -------------z Evaluate personalized modelz Best personalized results.)ÚrangeÚ global_roundsÚselect_clientsÚselected_clientsÚ send_modelsÚtrainÚeval_gaprÚevaluate_personalized_modelÚcopyÚdeepcopyÚlistÚ global_modelÚ parametersÚprevious_global_modelÚreceive_modelsÚaggregate_parametersÚbeta_aggregate_parametersÚmaxr Ú save_resultsÚsave_global_model)rÚiÚclientrrrrs"    z pFedMe.traincCs>t|j|j ¡ƒD]&\}}d|j|j|j|j|_qdS)Nr)Úzipr&r$r%r Údata)rZ pre_paramÚparamrrrr)Dsz pFedMe.beta_aggregate_parameterscCsRg}g}|jD](}| ¡\}}| |d¡| |¡qdd„|jDƒ}|||fS)Nçð?cSsg|] }|j‘qSr©Úid©Ú.0ÚcrrrÚ <listcomp>Póz4pFedMe.test_metrics_personalized.<locals>.<listcomp>)ÚclientsÚtest_metrics_personalizedÚappend)rÚ num_samplesÚ tot_correctr7ÚctÚnsÚidsrrrr;Is   z pFedMe.test_metrics_personalizedc Cshg}g}g}|jD]8}| ¡\}}}| |d¡| |¡| |d¡qdd„|jDƒ}||||fS)Nr2cSsg|] }|j‘qSrr3r5rrrr8^r9z5pFedMe.train_metrics_personalized.<locals>.<listcomp>)r:Útrain_metrics_personalizedr<) rr=r>Úlossesr7r?Úclr@rArrrrBTs  z!pFedMe.train_metrics_personalizedcCsB| ¡}t|dƒdt|dƒ}|j |¡td |¡ƒdS)Nér2rz+Average Personalized Test Accurancy: {:.4f})r;Úsumr r<rÚformat)rÚstatsÚtest_accrrrr bs z"pFedMe.evaluate_personalized_modelcCsº|jd|j}d}tj |¡s*t |¡t|jƒr¶|d|jdt |j ƒ}t   |d  |¡d¡�@}|jd|jd�|jd|jd�|jd|jd�Wdƒn1s¬0YdS) NÚ_z ../results/z{}.h5ÚwÚ rs_test_acc)r0Z 
rs_train_accÚ rs_train_loss)ÚdatasetÚ algorithmÚosÚpathÚexistsÚmakedirsÚlenr ÚgoalÚstrrÚh5pyÚFilerGÚcreate_datasetr r )rÚalgoÚ result_pathZalgo2Úhfrrrr+qs  zpFedMe.save_results) Ú__name__Ú __module__Ú __qualname__rrr)r;rBr r+Ú __classcell__rrrrr s + r) rPr!rWZflcore.clients.clientpFedMerZflcore.servers.serverbaserÚ threadingrrrrrrÚ<module>s    
3,828
Python
.py
28
135.428571
434
0.464596
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,759
serverphp.cpython-39.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverphp.cpython-39.pyc
a f¾`c”ã@sHddlmZddlmZddlmZddlZddlZGdd„deƒZdS)é)Ú clientPHP)ÚServer)ÚThreadNcs,eZdZ‡fdd„Zdd„Zdd„Z‡ZS)ÚFedPHPcsLtƒ ||¡| ¡| |t¡td|j›d|j›�ƒtdƒg|_dS)Nz Join ratio / total clients: z / z%Finished creating server and clients.) ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚprintÚ join_ratioÚ num_clientsÚBudget)ÚselfÚargsÚtimes©Ú __class__©úr/media/sim812/391e55df-b6f2-4fe9-a920-53434a8506fa/lgh/pdept/PFL-Non-IID-master/system/flcore/servers/serverphp.pyr s  zFedPHP.__init__cCs¤t|jdƒD]j}| ¡|_| |¡||jdkrTtd|›d�ƒtdƒ| ¡|jD] }| ¡qZ|  ¡|  ¡qtdƒtt |j ƒƒ|  ¡| ¡dS)Nérz -------------Round number: z -------------z Evaluate global modelz Best global accuracy.)ÚrangeÚ global_roundsÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gapr ÚevaluateÚtrainÚreceive_modelsÚaggregate_parametersÚmaxÚ rs_test_accÚ save_resultsÚsave_global_model)rÚiÚclientrrrrs     z FedPHP.traincCs0t|jƒdksJ‚|jD]}| |j|¡qdS)Nr)ÚlenrÚset_parametersÚ global_model)rÚRr%rrrr5s zFedPHP.send_models)Ú__name__Ú __module__Ú __qualname__rrrÚ __classcell__rrrrrs r) Zflcore.clients.clientphprÚflcore.servers.serverbaserÚ threadingrÚtimeÚcopyrrrrrÚ<module>s   
1,758
Python
.py
17
102.176471
371
0.487371
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,760
serveravg.cpython-37.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serveravg.cpython-37.pyc
B ¿:ccçã@sHddlZddlZddlmZddlmZddlmZGdd„deƒZdS)éN)Ú clientAVG)ÚServer)ÚThreadcs$eZdZ‡fdd„Zdd„Z‡ZS)ÚFedAvgcsLtƒ ||¡| ¡| |t¡td|j›d|j›�ƒtdƒg|_dS)Nz Join ratio / total clients: z / z%Finished creating server and clients.) ÚsuperÚ__init__Zset_slow_clientsZ set_clientsrÚprintÚ join_ratioÚ num_clientsÚBudget)ÚselfÚargsÚtimes)Ú __class__©úF/root/autodl-tmp/PFL-Non-IID-master/system/flcore/servers/serveravg.pyr s  zFedAvg.__init__cCs x®t|jdƒD]œ}t ¡}| ¡|_| ¡||jdkr\td|›d�ƒtdƒ| ¡x|jD] }|  ¡qdW|  ¡|  ¡|j   t ¡|¡tddd|j dƒqWtd ƒtt|jƒƒtd ƒtt|j dd…ƒt|j dd…ƒƒ| ¡| ¡dS) Nérz -------------Round number: z -------------z Evaluate global modelz-------------------------z time costéÿÿÿÿz Best global accuracy.z Average time cost per round.)ÚrangeÚ global_roundsÚtimeZselect_clientsZselected_clientsZ send_modelsÚeval_gaprZevaluateÚtrainZreceive_modelsZaggregate_parametersr ÚappendÚmaxZ rs_test_accÚsumÚlenZ save_resultsZsave_global_model)r ÚiZs_tÚclientrrrrs(   (z FedAvg.train)Ú__name__Ú __module__Ú __qualname__rrÚ __classcell__rr)rrrs r) rÚtorchZsystem.flcore.clients.clientavgrZ system.flcore.servers.serverbaserÚ threadingrrrrrrÚ<module>s    
1,721
Python
.py
14
121.571429
330
0.485948
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,761
servermtlpt.cpython-38.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/servermtlpt.cpython-38.pyc
U Õ şc²ã@shddlZddlmZddlmZddlmZddlZddlZddl Z ddlZddl Z Gdd„deƒZ dS)éN)Ú clientMTLPT)ÚServer)ÚThreadcs^eZdZ‡fdd„Zdd„Zdd„Zdd„Zd d „Zdd d „Zdd„Z dd„Z dd„Z ‡Z S)ÚFedMTLPTcsÚtƒ ||¡||_t| |j¡ƒ|_tj|j|j f|j d�|_ |j |_ t  |j |j f¡}t  |j df¡}|d|j |  |j¡d}| |j ¡|_g|_g|_| ¡| |t¡td|j ›d|j›�ƒtdƒdS)N©Údeviceééz Join clients / total clients: z / z%Finished creating server and clients.)ÚsuperÚ__init__ÚargsÚlenÚflattenÚ global_modelÚdimÚtorchÚzerosÚ join_clientsrÚW_globÚonesÚmmÚTÚtoÚomegaÚclients_divergeÚdiff_proÚset_slow_clientsÚ set_clientsrÚprintÚ num_clients)Úselfr ÚtimesÚIÚir©Ú __class__©õcD:\京东\promot\第二次投稿\å®�验\æœ�务器\native - pro\system\flcore\servers\servermtlpt.pyr s zFedMTLPT.__init__c Cs¢g}t|jdƒD�]F}| ¡|_| ¡||jdkrXtd|›d�ƒtdƒ| ¡d}t|jƒD].\}}|  |j |j |¡|  ¡}||  ¡}qftd |¡ƒ|j |¡d}t|jdjj ¡|jdjj ¡ƒD]:\}} || } t | dk| t | ¡| ¡} |t | ¡}qÚtd |  ¡¡ƒ|j |  ¡¡||jdkrtdƒ|j|d �qtd ƒtt|jƒƒtd ƒtt|ƒƒ| ¡| ¡| ¡dS) Nrrz -------------Round number: z -------------z Evaluate global modelz"Averaged prompr difference: {:.4f}z"0 and 1 clients difference: {:.4f}z Evaluate local model)Úaccz Best global accuracy.z Best local accuracy.) 
ÚrangeÚ global_roundsÚselect_clientsÚselected_clientsÚaggregate_parametersÚeval_gaprÚevaluateÚ enumerateÚreceive_valuesrrÚtrainÚitemÚformatrÚappendÚzipÚclientsÚmodelÚ generatorÚ parametersrÚwhereÚ zeros_likeÚsumrÚmaxÚ rs_test_accÚ save_resultsÚsave_global_modelÚsave_client_model) r Ú local_accr#Ú temp_diff_proÚidxÚclientÚtemp_diff_pro_clientÚdiverge_clentsÚ new_paramÚ old_paramrr&r&r'r2 sD  ÿ  zFedMTLPT.traincCs‚t|jƒdkst‚d}|jD]}||j7}qg|_g|_g|_|jD]8}|j |j|¡|j |j¡|j t   |j ¡¡qDdS)Nr) r r,ÚAssertionErrorÚ train_samplesÚuploaded_weightsÚ uploaded_idsÚuploaded_modelsr5ÚidÚcopyÚdeepcopyr8)r Úactive_train_samplesrFr&r&r'Úreceive_modelsMs   zFedMTLPT.receive_modelscCstj d|jd|jjd|jjd|jjd|jjd|jj d|jj ¡}tj  |¡sjt  |¡t |jƒD]†\}}tj ||jdt|ƒdt|jjƒdt|jjƒdt|jjƒdt|jjƒdt|jjƒd¡}t |j|¡qtdS)NÚmodelsrFÚ*Ú_clientÚ_z.pt)ÚosÚpathÚjoinÚdatasetr Úarv1Úarv2Úarv3Úarv4Úarv5Úarv6ÚexistsÚmakedirsr0r7Ú algorithmÚstrÚ num_promptÚ join_ratiorÚ plocal_stepsr*rÚsaver8)r Ú model_pathÚc_idxÚcÚmodel_path_saver&r&r'rB[s T  pzFedMTLPT.save_client_modelc Cs€|jd|j}d|jjd|jjd|jjd|jjd|jjd|jjd}t j   |¡srt   |¡t |jƒ�r||d|jdt|jƒdt|jjƒdt|jjƒdt|jjƒdt|jjƒdt|jjƒ}|d |¡}td|ƒt |d¡�f}|jd|jd �|jd |jd �|jd |jd �|jd |jd �|jd |jd �|jd|j d �W5QRXdS)NrXz ../results/rVú/z{}.h5z File path: Úwr?)ÚdataÚrs_test_acc_stdÚ rs_test_aucÚ rs_train_lossrr)!r\rer r]r^r_r`rarbrYrZrcrdr r?Úgoalrfr!rgrhrrir*r4rÚh5pyÚFileÚcreate_datasetrrrsrtrr)r ÚalgoÚ result_pathÚ file_pathÚhfr&r&r'r@csL   l zFedMTLPT.save_resultsNc Cs|| ¡}| ¡}t|dƒdt|dƒ}t|dƒdt|dƒ}t|dƒdt|dƒ}t|dƒdt|dƒ}dd„t|d|dƒDƒ} dd„t|d|dƒDƒ} |dkrÎ|j |¡n | |¡|j |¡|j t  | ¡¡|dk�r|j  |¡n | |¡t d   |¡ƒt d   |¡ƒt d   |¡ƒt d   |¡ƒt d   t  | ¡¡ƒt d  t  | ¡¡ƒdS)Nr çğ?réécSsg|]\}}||‘qSr&r&©Ú.0ÚaÚnr&r&r'Ú <listcomp>sz%FedMTLPT.evaluate.<locals>.<listcomp>cSsg|]\}}||‘qSr&r&r€r&r&r'r„€szAveraged Train Loss: {:.4f}zAveraged Test Accurancy: {:.4f}z$Averaged Test oral Accurancy: {:.4f}zAveraged Test AUC: {:.4f}zStd Test Accurancy: {:.4f}zStd Test AUC: {:.4f}) Ú test_metricsÚ 
train_metricsr=r6r?r5rsrrÚnpÚstdrtrr4) r r(ÚlossÚstatsÚ stats_trainÚtest_accÚ test_acc2Útest_aucÚ train_lossÚaccsÚaucsr&r&r'r/vs,    zFedMTLPT.evaluatec Cs~g}g}g}g}|jD]H}| ¡\}}}} | |d¡| |d¡| | |¡| |¡qdd„|jDƒ} | ||||fS)Nr}cSsg|] }|j‘qSr&)rP)r�rmr&r&r'r„¢sz)FedMTLPT.test_metrics.<locals>.<listcomp>)r7r…r5) r Ú num_samplesÚ tot_correctÚ tot_correct2Útot_aucrmÚctÚct2ÚnsÚaucÚidsr&r&r'r…•s  zFedMTLPT.test_metricscs,| ¡‰ˆ ¡}‡fdd„|Dƒ}t |¡S)Ncsg|]}ˆ| ¡‘qSr&)r)r�Úkey©Ú state_dictr&r'r„©sz$FedMTLPT.flatten.<locals>.<listcomp>)r�ÚkeysrÚcat)r r8r�ÚWr&rœr'r¦szFedMTLPT.flattencCsLtj|j|jf|jd�|_t|jƒD]"\}}| |j ¡|jdd…|f<q$dS)Nr) rrrrrrr0r,rr8)r rErFr&r&r'r-¬szFedMTLPT.aggregate_parameters)NN) Ú__name__Ú __module__Ú __qualname__r r2rTrBr@r/r…rr-Ú __classcell__r&r&r$r'r s - r) rZflcore.clients.clientmtlptrÚflcore.servers.serverbaserÚ threadingrrvrQrYÚnumpyr‡rr&r&r&r'Ú<module>s   
6,744
Python
.py
57
117.122807
640
0.419258
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,762
serverperpt.cpython-38.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverperpt.cpython-38.pyc
U Õ şcGã@s`ddlmZddlmZddlmZddlZddlZddlZddl Z ddl Z Gdd„deƒZ dS)é)Ú clientPerPT)ÚServer)ÚThreadNcsNeZdZ‡fdd„Zdd„Zdd„Zdd„Zd d „Zdd d „Zdd„Z ‡Z S)ÚFedPerPTcsRtƒ ||¡| ¡| |t¡g|_g|_td|j›d|j ›�ƒtdƒdS)Nz Join ratio / total clients: z / z%Finished creating server and clients.) ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚclients_divergeÚdiff_proÚprintÚ join_ratioÚ num_clients)ÚselfÚargsÚtimes©Ú __class__©õcD:\京东\promot\第二次投稿\å®�验\æœ�务器\native - pro\system\flcore\servers\serverperpt.pyr s zFedPerPT.__init__c Csšg}t|jdƒD�]>}| ¡|_| ¡||jdkrXtd|›d�ƒtdƒ| ¡d}|jD]}| ¡}||  ¡}qbtd  |¡ƒ|j   |¡d}t |jdjj ¡|jdjj ¡ƒD]:\}}||} t | dk| t | ¡| ¡} |t | ¡}qÀtd  |  ¡¡ƒ|j  |  ¡¡||jdk�rBtdƒ|j|d �| ¡| ¡qtd ƒtt|jƒƒtd ƒtt|ƒƒ| ¡| ¡| ¡dS) Nérz -------------Round number: z -------------z Evaluate global modelz"Averaged prompr difference: {:.4f}z"0 and 1 clients difference: {:.4f}z Evaluate local model)Úaccz Best global accuracy.z Best local accuracy.)ÚrangeÚ global_roundsÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gapr ÚevaluateÚtrainÚitemÚformatr ÚappendÚzipÚclientsÚmodelÚ generatorÚ parametersÚtorchÚwhereÚ zeros_likeÚsumr Úreceive_modelsÚaggregate_parametersÚmaxÚ rs_test_accÚ save_resultsÚsave_global_modelÚsave_client_model) rÚ local_accÚiÚ temp_diff_proÚclientÚtemp_diff_pro_clientÚdiverge_clentsÚ new_paramÚ old_paramr rrrrsF   ÿ    zFedPerPT.traincCstj d|jd|jjd|jjd|jjd|jjd|jj d|jj ¡}tj  |¡sjt  |¡t |jƒD]†\}}tj ||jdt|ƒdt|jjƒdt|jjƒdt|jjƒdt|jjƒdt|jjƒd¡}t |j|¡qtdS)NÚmodelsr6Ú*Ú_clientÚ_z.pt)ÚosÚpathÚjoinÚdatasetrÚarv1Úarv2Úarv3Úarv4Úarv5Úarv6ÚexistsÚmakedirsÚ enumerater$Ú algorithmÚstrÚ num_promptr rÚ plocal_stepsrr(Úsaver%)rÚ model_pathÚc_idxÚcÚmodel_path_saverrrr2Es T  pzFedPerPT.save_client_modelcCs�t|jƒdkst‚g|_d}g|_g|_|jD]8}|j |j¡||j7}|j |j¡|j |j ¡q.t |jƒD]\}}|||j|<qrdS)Nr) ÚlenrÚAssertionErrorÚuploaded_weightsÚ uploaded_idsÚuploaded_modelsr"Ú train_samplesÚidr%rK)rÚ tot_samplesr6r4Úwrrrr,Ms  zFedPerPT.receive_modelsc Cs |jd|j}d|jjd|jjd|jjd|jjd|jjd|jjd}t j   |¡srt   |¡t |jƒ�r|d|jdt|jƒ}|d 
|¡}td|ƒt |d¡�V}|jd|jd �|jd |jd �|jd |jd �|jd |jd �|jd |jd �W5QRXdS)Nr>z ../results/r<ú/z{}.h5z File path: r]r/)ÚdataÚrs_test_acc_stdÚ rs_test_aucÚ rs_train_lossr )rBrLrrCrDrErFrGrHr?r@rIrJrUr/ÚgoalrMrr!r Úh5pyÚFileÚcreate_datasetr`rarbr )rÚalgoÚ result_pathÚ file_pathÚhfrrrr0\sL    zFedPerPT.save_resultsNc Cs|| ¡}| ¡}t|dƒdt|dƒ}t|dƒdt|dƒ}t|dƒdt|dƒ}t|dƒdt|dƒ}dd„t|d|dƒDƒ} dd„t|d|dƒDƒ} |dkrÎ|j |¡n | |¡|j |¡|j t  | ¡¡|dk�r|j  |¡n | |¡t d   |¡ƒt d   |¡ƒt d   |¡ƒt d   |¡ƒt d   t  | ¡¡ƒt d  t  | ¡¡ƒdS)Néçğ?réécSsg|]\}}||‘qSrr©Ú.0ÚaÚnrrrÚ <listcomp>vsz%FedPerPT.evaluate.<locals>.<listcomp>cSsg|]\}}||‘qSrrrorrrrswszAveraged Train Loss: {:.4f}zAveraged Test Accurancy: {:.4f}z$Averaged Test oral Accurancy: {:.4f}zAveraged Test AUC: {:.4f}zStd Test Accurancy: {:.4f}zStd Test AUC: {:.4f}) Ú test_metricsÚ train_metricsr+r#r/r"rar`ÚnpÚstdrbr r!) rrÚlossÚstatsÚ stats_trainÚtest_accÚ test_acc2Útest_aucÚ train_lossÚaccsÚaucsrrrrms,    zFedPerPT.evaluatec Cs~g}g}g}g}|jD]H}| ¡\}}}} | |d¡| |d¡| | |¡| |¡qdd„|jDƒ} | ||||fS)NrlcSsg|] }|j‘qSr)r[)rprSrrrrsšsz)FedPerPT.test_metrics.<locals>.<listcomp>)r$rtr") rÚ num_samplesÚ tot_correctÚ tot_correct2Útot_aucrSÚctÚct2ÚnsÚaucÚidsrrrrt‹s  zFedPerPT.test_metrics)NN) Ú__name__Ú __module__Ú __qualname__rrr2r,r0rrtÚ __classcell__rrrrr s - r) Zflcore.clients.clientperptrÚflcore.servers.serverbaserÚ threadingrrdÚcopyr?r(ÚnumpyrvrrrrrÚ<module>s   
5,687
Python
.py
52
108.153846
496
0.432044
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,763
serverdynpt.cpython-37.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverdynpt.cpython-37.pyc
B ‰½ccÉã@shddlZddlZddlmZddlmZddlmZddlZddl Z ddlZddl Z Gdd„deƒZ dS)éN)Ú clientDynPT)ÚServer)ÚThreadcsLeZdZ‡fdd„Zdd„Zdd„Zdd„Zd d „Zd d „Zd d„Z ‡Z S)ÚFedDynPTcs�tƒ ||¡| ¡| |t¡td|j›d|j›�ƒtdƒg|_g|_ |j |_ g|_ t   |j¡|_x |j ¡D]}t |j¡|_qvWdS)Nz Join ratio / total clients: z / z%Finished creating server and clients.)ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚprintÚ join_ratioÚ num_clientsÚBudgetÚdiff_proÚalphaÚclients_divergeÚcopyÚdeepcopyÚmodelÚ server_stateÚ parametersÚtorchÚ zeros_likeÚdata)ÚselfÚargsÚtimesÚparam)Ú __class__©úH/root/autodl-tmp/PFL-Non-IID-master/system/flcore/servers/serverdynpt.pyr s zFedDynPT.__init__c Cs,g}d|_d}�x¬t|jdƒD�]˜}t ¡}| ¡|_| ¡||jdkrntd|›d�ƒtdƒ|  ¡d}x"|jD]}|  ¡}||  ¡}qzWtd  |¡ƒ|j  |¡d}xdt|jdjj ¡|jdjj ¡ƒD]:\}} || } t | dk| t | ¡| ¡} |t | ¡}qÜWtd  |  ¡¡ƒ|j |  ¡¡||jdk�r`td ƒ|j |d �| ¡| ¡| ¡|j t ¡|¡td |jd ƒ|j|jg|jd �|_|d7}q Wtdƒtt |jƒƒtdƒtt |ƒƒtdƒtt|jdd…ƒt!|jdd…ƒƒ| "¡| #¡dS)NFréz -------------Round number: z -------------z Evaluate global modelz"Averaged prompr difference: {:.4f}z"0 and 1 clients difference: {:.4f}z Evaluate local model)Úaccz2--------------------------------------------------éÿÿÿÿ)Úacc_lssÚtop_cntz Best global accuracy.z Best local accuracy.z Averaged time per iteration.)$ÚdoneÚrangeÚ global_roundsÚtimeÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gapr ÚevaluateÚtrainÚitemÚformatrÚappendÚzipÚclientsrÚ generatorrrÚwhererÚsumrÚreceive_modelsÚupdate_server_stateÚaggregate_parametersr Ú check_doneÚ rs_test_accr$ÚmaxÚlenÚ save_resultsÚsave_global_model) rÚ local_accÚiÚs_tÚ temp_diff_proÚclientÚtemp_diff_pro_clientÚdiverge_clentsÚ new_paramÚ old_paramrrrrr. 
sT   0   (zFedDynPT.traincCs@x:t|j ¡| ¡ƒD]"\}}|j|j ¡|j7_qWdS)N)r2Ú global_modelrrÚcloneÚ join_clients)rÚ client_modelÚ server_paramÚ client_paramrrrÚadd_parameters\szFedDynPT.add_parameterscCs t|jƒdkst‚t |jd¡|_x |j ¡D]}t |j ¡|_ q0Wx|jD]}|  |¡qNWx:t |j ¡|j  ¡ƒD] \}}|j d|j |8_ qxWdS)Nrr )r=Úuploaded_modelsÚAssertionErrorrrrIrrrrrOr2rr)rrrLrMÚ state_paramrrrr9`s  zFedDynPT.aggregate_parameterscCsÎt|jƒdkst‚t |jd¡}x| ¡D]}t |j¡|_q,WxP|jD]F}x@t |j  ¡| ¡| ¡ƒD]"\}}}|j|||j 7_qjWqJWx4t |j  ¡| ¡ƒD]\}}|j|j |8_qªWdS)Nr)r=rPrQrrrrrrr2rIr rr)rÚ model_deltarrLrMrNÚ delta_paramrRrrrr8ms   zFedDynPT.update_server_statecCsŠt|jƒdkst‚d}x|jD]}||j7}qWg|_g|_g|_xB|jD]8}|j |j|¡|j |j¡|j t   |j ¡¡qJWdS)Nr) r=r*rQÚ train_samplesÚuploaded_weightsÚ uploaded_idsrPr1Úidrrr)rÚactive_train_samplesrDrrrr7{s  zFedDynPT.receive_modelsc CsÖ|jd|j}d}tj |¡s*t |¡t|jƒrÒ|d|jdt |j ƒ}|d  |¡}t d|ƒt  |d¡�V}|jd|jd�|jd|jd�|jd |jd�|jd |jd�|jd |jd�WdQRXdS) NÚ_z ../results/z{}.h5z File path: Úwr;)rÚrs_test_acc_stdÚ rs_test_aucÚ rs_train_lossr)ÚdatasetÚ algorithmÚosÚpathÚexistsÚmakedirsr=r;ÚgoalÚstrrr0r Úh5pyÚFileÚcreate_datasetr\r]r^r)rÚalgoÚ result_pathÚ file_pathÚhfrrrr>‰s    zFedDynPT.save_results) Ú__name__Ú __module__Ú __qualname__rr.rOr9r8r7r>Ú __classcell__rr)rrr s < r) rrZ!system.flcore.clients.clientdynptrÚ system.flcore.servers.serverbaserÚ threadingrr(rgrarrrrrÚ<module>s   
4,852
Python
.py
39
123.179487
482
0.439551
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,764
serverproxpt.cpython-38.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverproxpt.cpython-38.pyc
U Õ şcœã@s`ddlmZddlmZddlmZddlZddlZddlZddl Z ddl Z Gdd„deƒZ dS)é)Ú clientProxPT)ÚServer)ÚThreadNcsNeZdZ‡fdd„Zdd„Zdd„Zdd„Zd d „Zdd d „Zdd„Z ‡Z S)Ú FedProxPTcsftƒ ||¡||_| ¡| |t¡g|_g|_t  |j ¡|_ t d|j ›d|j›�ƒt dƒdS)Nz Join ratio / total clients: z / z%Finished creating server and clients.)ÚsuperÚ__init__ÚargsÚset_slow_clientsÚ set_clientsrÚclients_divergeÚdiff_proÚcopyÚdeepcopyÚmodelÚ global_modelÚprintÚ join_ratioÚ num_clients)ÚselfrÚtimes©Ú __class__©õdD:\京东\promot\第二次投稿\å®�验\æœ�务器\native - pro\system\flcore\servers\serverproxpt.pyr s zFedProxPT.__init__c Csšg}t|jdƒD�]>}| ¡|_| ¡||jdkrXtd|›d�ƒtdƒ| ¡d}|jD]}| ¡}||  ¡}qbtd  |¡ƒ|j   |¡d}t |jdjj ¡|jdjj ¡ƒD]:\}}||} t | dk| t | ¡| ¡} |t | ¡}qÀtd  |  ¡¡ƒ|j  |  ¡¡||jdk�rBtdƒ|j|d �| ¡| ¡qtd ƒtt|jƒƒtd ƒtt|ƒƒ| ¡| ¡| ¡dS) Nérz -------------Round number: z -------------z Evaluate global modelz"Averaged prompr difference: {:.4f}z"0 and 1 clients difference: {:.4f}z Evaluate local model)Úaccz Best global accuracy.z Best local accuracy.)ÚrangeÚ global_roundsÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gaprÚevaluateÚtrainÚitemÚformatr ÚappendÚzipÚclientsrÚ generatorÚ parametersÚtorchÚwhereÚ zeros_likeÚsumr Úreceive_modelsÚaggregate_parametersÚmaxÚ rs_test_accÚ save_resultsÚsave_global_modelÚsave_client_model) rÚ local_accÚiZ temp_diff_proÚclientZtemp_diff_pro_clientZdiverge_clentsÚ new_paramÚ old_paramr rrrr#sF   ÿ    zFedProxPT.traincCs‚t|jƒdkst‚d}|jD]}||j7}qg|_g|_g|_|jD]8}|j |j|¡|j |j¡|j t   |j ¡¡qDdS)Nr) ÚlenrÚAssertionErrorÚ train_samplesÚuploaded_weightsÚ uploaded_idsÚuploaded_modelsr&Úidr rr)rZactive_train_samplesr8rrrr/Hs   zFedProxPT.receive_modelscCstj d|jd|jjd|jjd|jjd|jjd|jj d|jj ¡}tj  |¡sjt  |¡t |jƒD]†\}}tj ||jdt|ƒdt|jjƒdt|jjƒdt|jjƒdt|jjƒdt|jjƒd¡}t |j|¡qtdS)NÚmodelsr8Ú*Z_clientÚ_z.pt)ÚosÚpathÚjoinÚdatasetrÚarv1Úarv2Úarv3Úarv4Úarv5Úarv6ÚexistsÚmakedirsÚ enumerater(Ú algorithmÚstrÚ num_promptrrÚ plocal_stepsrr+Úsaver)rÚ model_pathÚc_idxÚcZmodel_path_saverrrr5Vs T  pzFedProxPT.save_client_modelc 
Cs€|jd|j}d|jjd|jjd|jjd|jjd|jjd|jjd}t j   |¡srt   |¡t |jƒ�r||d|jdt|jƒdt|jjƒdt|jjƒdt|jjƒdt|jjƒdt|jjƒ}|d |¡}td|ƒt |d¡�f}|jd|jd �|jd |jd �|jd |jd �|jd |jd �|jd |jd �|jd|j d �W5QRXdS)NrDz ../results/rCú/z{}.h5z File path: Úwr2)ÚdataÚrs_test_acc_stdÚ rs_test_aucÚ rs_train_lossr r )!rHrRrrIrJrKrLrMrNrErFrOrPr;r2ÚgoalrSrrTrrrUrr%rÚh5pyÚFileÚcreate_datasetr]r^r_r r )rÚalgoÚ result_pathÚ file_pathÚhfrrrr3^sL   l zFedProxPT.save_resultsNc Cs|| ¡}| ¡}t|dƒdt|dƒ}t|dƒdt|dƒ}t|dƒdt|dƒ}t|dƒdt|dƒ}dd„t|d|dƒDƒ} dd„t|d|dƒDƒ} |dkrÎ|j |¡n | |¡|j |¡|j t  | ¡¡|dk�r|j  |¡n | |¡t d   |¡ƒt d   |¡ƒt d   |¡ƒt d   |¡ƒt d   t  | ¡¡ƒt d  t  | ¡¡ƒdS)Néçğ?réécSsg|]\}}||‘qSrr©Ú.0ÚaÚnrrrÚ <listcomp>zsz&FedProxPT.evaluate.<locals>.<listcomp>cSsg|]\}}||‘qSrrrlrrrrp{szAveraged Train Loss: {:.4f}zAveraged Test Accurancy: {:.4f}z$Averaged Test oral Accurancy: {:.4f}zAveraged Test AUC: {:.4f}zStd Test Accurancy: {:.4f}zStd Test AUC: {:.4f}) Ú test_metricsÚ train_metricsr.r'r2r&r^r]ÚnpÚstdr_rr%) rrÚlossÚstatsÚ stats_trainÚtest_accZ test_acc2Útest_aucÚ train_lossÚaccsÚaucsrrrr"qs,    zFedProxPT.evaluatec Cs~g}g}g}g}|jD]H}| ¡\}}}} | |d¡| |d¡| | |¡| |¡qdd„|jDƒ} | ||||fS)NricSsg|] }|j‘qSr)rA)rmrYrrrrp�sz*FedProxPT.test_metrics.<locals>.<listcomp>)r(rqr&) rÚ num_samplesÚ tot_correctZ tot_correct2Útot_aucrYÚctÚct2ÚnsÚaucÚidsrrrrq�s  zFedProxPT.test_metrics)NN) Ú__name__Ú __module__Ú __qualname__rr#r/r5r3r"rqÚ __classcell__rrrrr s . r) Zflcore.clients.clientproxptrÚflcore.servers.serverbaserÚ threadingrrar rEr+ÚnumpyrsrrrrrÚ<module>s   
5,886
Python
.py
51
114.156863
497
0.43146
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,765
serverbabu.cpython-39.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverbabu.cpython-39.pyc
a f¾`c•ã@s8ddlmZddlmZddlmZGdd„deƒZdS)é)Ú clientBABU)ÚServer)ÚThreadcs,eZdZ‡fdd„Zdd„Zdd„Z‡ZS)ÚFedBABUcsFtƒ ||¡| ¡| |t¡td|j›d|j›�ƒtdƒdS)Nz Join ratio / total clients: z / z%Finished creating server and clients.)ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚprintÚ join_ratioÚ num_clients)ÚselfÚargsÚtimes©Ú __class__©ús/media/sim812/391e55df-b6f2-4fe9-a920-53434a8506fa/lgh/pdept/PFL-Non-IID-master/system/flcore/servers/serverbabu.pyrs  zFedBABU.__init__cCsÆt|jdƒD]h}| ¡|_| ¡||jdkrRtd|›d�ƒtdƒ| ¡|jD] }| ¡qX|  ¡|  ¡qtdƒtt |j ƒƒ|j D] }| ¡q”tdƒ| ¡| ¡| ¡dS)Nérz -------------Round number: z -------------z Evaluate global modelz Best global accuracy.z4 -------------Evaluate fine-tuned model-------------)ÚrangeÚ global_roundsÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gapr ÚevaluateÚtrainÚreceive_modelsÚaggregate_parametersÚmaxÚ rs_test_accÚclientsZ fine_tuneÚ save_resultsÚsave_global_model)r ÚiÚclientrrrrs&      z FedBABU.traincCs�t|jƒdksJ‚g|_d}g|_g|_|jD]:}|j |j¡||j7}|j |j¡|j |jj ¡q.t |jƒD]\}}|||j|<qtdS)Nr) ÚlenrÚuploaded_weightsÚ uploaded_idsÚuploaded_modelsÚappendÚ train_samplesÚidÚmodelÚbaseÚ enumerate)r Ú tot_samplesr%r$Úwrrrr7s  zFedBABU.receive_models)Ú__name__Ú __module__Ú __qualname__rrrÚ __classcell__rrrrrs #rN)Zflcore.clients.clientbaburÚflcore.servers.serverbaserÚ threadingrrrrrrÚ<module>s   
2,038
Python
.py
22
91.454545
232
0.481904
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,766
serverreppt.cpython-39.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverreppt.cpython-39.pyc
a Òibck ã@sXddlmZddlmZddlmZddlZddlZddlZddl Z Gdd„deƒZ dS)é)Ú clientREPPT)ÚServer)ÚThreadNcs4eZdZ‡fdd„Zdd„Zdd„Zdd„Z‡ZS) Ú PFedRepPTcs\tƒ ||¡| ¡| |t¡t |jj¡|_ t d|j ›d|j ›�ƒt dƒg|_ dS)Nz Join ratio / total clients: z / z%Finished creating server and clients.)ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚcopyÚdeepcopyÚmodelÚbaseÚ global_modelÚprintÚ join_ratioÚ num_clientsÚBudget)ÚselfÚargsÚtimes©Ú __class__©út/media/sim812/391e55df-b6f2-4fe9-a920-53434a8506fa/lgh/pdept/PFL-Non-IID-master/system/flcore/servers/serverreppt.pyr s zPFedRepPT.__init__cCs t|jdƒD]˜}t ¡}| ¡|_| ¡||jdkrZtd|›d�ƒtdƒ| ¡|jD] }|  ¡q`|  ¡|  ¡|j   t ¡|¡tddd|j dƒqtd ƒtt|jƒƒtd ƒtt|j dd…ƒt|j dd…ƒƒ| ¡| ¡| ¡dS) Nérz -------------Round number: z -------------z Evaluate global modelz-------------------------z time costéÿÿÿÿz Best global accuracy.z Average time cost per round.)ÚrangeÚ global_roundsÚtimeÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gaprÚevaluateÚtrainÚreceive_modelsÚaggregate_parametersrÚappendÚmaxÚ rs_test_accÚsumÚlenÚ save_resultsÚsave_global_modelÚsave_client_model)rÚiÚs_tÚclientrrrr$s*   (zPFedRepPT.traincCs„t|jƒdksJ‚d}|jD]}||j7}qg|_g|_g|_|jD]:}|j |j|¡|j |j¡|j t  |j j ¡¡qDdS)Nr) r+r Ú train_samplesÚuploaded_weightsÚ uploaded_idsÚuploaded_modelsr'Úidr r r r )rÚactive_train_samplesr1rrrr%@s   zPFedRepPT.receive_modelscCsntj d|jd¡}tj |¡s(t |¡t|jƒD]6\}}tj ||jdt |ƒd¡}t   |j |¡q2dS)NÚmodelsr1Ú_serverz.pt) ÚosÚpathÚjoinÚdatasetÚexistsÚmakedirsÚ enumerateÚclientsÚ algorithmÚstrÚtorchÚsaver )rÚ model_pathÚc_idxÚcÚmodel_path_saverrrr.Ns    zPFedRepPT.save_client_model)Ú__name__Ú __module__Ú __qualname__rr$r%r.Ú __classcell__rrrrr s &r) Z!system.flcore.clients.clientrepptrÚ system.flcore.servers.serverbaserÚ threadingrrr rDr:rrrrrÚ<module>s   
2,705
Python
.py
27
98.925926
326
0.483763
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,767
serverlocal.cpython-38.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverlocal.cpython-38.pyc
U ºĞıccã@s8ddlmZddlmZddlmZGdd„deƒZdS)é)Ú clientAVG)ÚServer)ÚThreadcs$eZdZ‡fdd„Zdd„Z‡ZS)ÚLocalcsFtƒ ||¡| ¡| |t¡td|j›d|j›�ƒtdƒdS)Nz Join ratio / total clients: z / z%Finished creating server and clients.)ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚprintÚ join_ratioÚ num_clients)ÚselfÚargsÚtimes©Ú __class__©õYD:\京东\promot\第二次投稿\å®�验\native - pro\system\flcore\servers\serverlocal.pyrs  zLocal.__init__cCsÎg}t|jdƒD]|}| ¡|_||jdkrNtd|›d�ƒtdƒ| ¡| ¡|_|jD] }| ¡q^||jdkrtdƒ|j|d�qtdƒtt|j ƒƒtd ƒtt|ƒƒ|  ¡|  ¡dS) Nérz -------------Round number: z -------------z Evaluate global modelz Evaluate local model)Úaccz Best global accuracy.z Best local accuracy.) ÚrangeÚ global_roundsÚselect_clientsÚselected_clientsÚeval_gapr ÚevaluateÚtrainÚmaxÚ rs_test_accÚ save_resultsÚsave_global_model)r Ú local_accÚiÚclientrrrrs&     z Local.train)Ú__name__Ú __module__Ú __qualname__rrÚ __classcell__rrrrrs rN)Úflcore.clients.clientavgrÚflcore.servers.serverbaserÚ threadingrrrrrrÚ<module>s   
1,535
Python
.py
18
84.055556
318
0.480896
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,768
serverfedpt.cpython-37.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverfedpt.cpython-37.pyc
B ÐecÓã@s`ddlmZddlmZddlmZddlZddlZddlZddl Z ddl Z Gdd„deƒZ dS)é)ÚclientT)ÚServer)ÚThreadNcs<eZdZ‡fdd„Zdd„Zdd„Zdd„Zd d „Z‡ZS) ÚFedPTcsntƒ ||¡||_| ¡| |t¡t |jj ¡|_ g|_ g|_ t d|j›d|j›�ƒt dƒg|_dS)Nz Join ratio / total clients: z / z%Finished creating server and clients.)ÚsuperÚ__init__ÚargsÚset_slow_clientsÚ set_clientsrÚcopyÚdeepcopyÚmodelÚbaseÚ global_modelÚdiff_proÚclients_divergeÚprintÚ join_ratioÚ num_clientsÚBudget)ÚselfrÚtimes)Ú __class__©úH/root/autodl-tmp/PFL-Non-IID-master/system/flcore/servers/serverfedpt.pyr s zFedPT.__init__c CsÌ�xft|jdƒD�]R}t ¡}| ¡|_| ¡||jdkr`td|›d�ƒtdƒ| ¡d}x"|jD]}|  ¡}||  ¡}qlWd}xdt |j dj j ¡|j dj j ¡ƒD]:\}}||} t | dk| t | ¡| ¡} |t | ¡}q´Wtd |¡ƒtd |  ¡¡ƒ|j |¡|j |  ¡¡| ¡| ¡|j t ¡|¡tdd d|jd ƒqWtd ƒtt|jƒƒtd ƒtt|jdd…ƒt|jdd…ƒƒ| ¡| ¡|  ¡dS) Nérz -------------Round number: z -------------z Evaluate global modelz"Averaged prompr difference: {:.4f}z"0 and 1 clients difference: {:.4f}z-------------------------z time costéÿÿÿÿz Best global accuracy.z Average time cost per round.)!ÚrangeÚ global_roundsÚtimeÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gaprÚevaluateÚtrainÚitemÚzipÚclientsr Ú generatorÚ parametersÚtorchÚwhereÚ zeros_likeÚsumÚformatrÚappendrÚreceive_modelsÚaggregate_parametersrÚmaxÚ rs_test_accÚlenÚ save_resultsÚsave_global_modelÚsave_client_model) rÚiÚs_tÚ temp_diff_proÚclientÚtemp_diff_pro_clientÚdiverge_clentsÚ new_paramÚ old_paramrrrrr%s@  0 (z FedPT.traincCsŒt|jƒdkst‚d}x|jD]}||j7}qWg|_g|_g|_xD|jD]:}|j |j|¡|j |j¡|j t   |j j ¡¡qJWdS)Nr) r5r!ÚAssertionErrorÚ train_samplesÚuploaded_weightsÚ uploaded_idsÚuploaded_modelsr0Úidr r r r)rÚactive_train_samplesr<rrrr1Os  zFedPT.receive_modelscCsÂtj d|jd¡}tj |¡s(t |¡x”t|jƒD]†\}}tj ||jdt |ƒdt |j j ƒdt |j j ƒdt |j j ƒdt |j jƒdt |j jƒd¡}t |j|¡q4WdS)NÚmodelsr<Ú_clientÚ_z.pt)ÚosÚpathÚjoinÚdatasetÚexistsÚmakedirsÚ enumerater(Ú algorithmÚstrrÚ num_promptrrÚ plocal_stepsrr+Úsaver )rÚ model_pathÚc_idxÚcÚmodel_path_saverrrr8]s   pzFedPT.save_client_modelc Cs8|jd|j}d}tj |¡s*t |¡t|jƒ�r4|d|jdt |j ƒdt |j 
j ƒdt |j j ƒdt |j jƒdt |j jƒdt |j jƒ}|d |¡}td|ƒt |d¡�f}|jd|jd�|jd|jd�|jd |jd�|jd |jd�|jd |jd�|jd |jd�WdQRXdS) NrJz ../results/z{}.h5z File path: Úwr4)ÚdataÚrs_test_acc_stdÚ rs_test_aucÚ rs_train_lossrr)rNrRrKrLrOrPr5r4ÚgoalrSrrrTrrrUrr/rÚh5pyÚFileÚcreate_datasetr]r^r_rr)rÚalgoÚ result_pathÚ file_pathÚhfrrrr6es   l zFedPT.save_results) Ú__name__Ú __module__Ú __qualname__rr%r1r8r6Ú __classcell__rr)rrr s  2r) Zsystem.flcore.clients.clienttrÚ system.flcore.servers.serverbaserÚ threadingrrr+rKrar rrrrrÚ<module>s   
4,029
Python
.py
36
110.666667
397
0.457687
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,769
serverfomo.cpython-39.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverfomo.cpython-39.pyc
a f¾`ciã@s`ddlZddlZddlZddlZddlZddlmZddlm Z ddl m Z Gdd„de ƒZ dS)éN)Ú clientFomo)ÚServer)ÚThreadcs4eZdZ‡fdd„Zdd„Zdd„Zdd„Z‡ZS) ÚFedFomocs€tƒ ||¡| ¡| |t¡t tj|j|j d�¡|_ |j g|_ g|_ t|j|jƒ|_td|j›d|j›�ƒtdƒdS)N)Údevicez Join ratio / total clients: z / z%Finished creating server and clients.)ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚtorchÚdiagÚonesÚ num_clientsrÚPÚ global_modelÚuploaded_modelsÚ uploaded_idsÚminÚMÚ join_clientsÚprintÚ join_ratio)ÚselfÚargsÚtimes©Ú __class__©ús/media/sim812/391e55df-b6f2-4fe9-a920-53434a8506fa/lgh/pdept/PFL-Non-IID-master/system/flcore/servers/serverfomo.pyr s  zFedFomo.__init__cCsšt|jdƒD]`}| ¡|_| ¡||jdkrRtd|›d�ƒtdƒ| ¡|jD] }| ¡qX|  ¡qtdƒtt |j ƒƒ|  ¡|  ¡dS)Nérz -------------Round number: z -------------z Evaluate global modelz Best global accuracy.)ÚrangeÚ global_roundsÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gaprÚevaluateÚtrainÚreceive_modelsÚmaxÚ rs_test_accÚ save_resultsÚsave_global_model)rÚiÚclientrrrr's    z FedFomo.traincCsøt|jƒdksJ‚|jD]Ú}t ¡}|jrDt dt tj ¡¡¡t|j ƒdkrÂt |j t|j ƒƒ}t  |j|j|j |¡j ¡}g}g}|D]$}| |j |¡| |j |¡q�| ||¡|jdd7<|jddt ¡|7<qdS)Nrgš™™™™™¹?Ú num_roundsrÚ total_costé)Úlenr#ÚtimeÚ send_slowÚsleepÚnpÚabsÚrandomÚrandrrrrr ÚtopkrÚidÚindicesÚtolistÚappendr(Úsend_time_cost)rr.Ú start_timeZM_r<rrr-rrrr$9s    zFedFomo.send_modelscCsøt|jƒdksJ‚t |jtd|j|jƒ¡}g|_g|_d}g|_ |D]†}|j d|j d|j d|j d}||j krJ|j  |j¡|j  |j¡||j7}|j   t |j¡¡|j|j|j7<qJt|jƒD]\}}|||j|<qÜdS)Nrrr0r/)r2r#r8ÚsampleÚintÚclient_drop_raterrÚuploaded_weightsrÚtrain_time_costr?Útime_thretholdr>r;Ú train_samplesÚcopyÚdeepcopyÚmodelrZ weight_vectorÚ enumerate)rZactive_clientsÚ tot_samplesr.Zclient_time_costr-Úwrrrr(Ps(ÿÿ  zFedFomo.receive_models)Ú__name__Ú __module__Ú __qualname__rr'r$r(Ú __classcell__rrrrr s r) r r3rHr8Únumpyr6Zflcore.clients.clientfomorÚflcore.servers.serverbaserÚ threadingrrrrrrÚ<module>s   
3,014
Python
.py
26
114.807692
501
0.457009
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,770
serverditto.cpython-39.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverditto.cpython-39.pyc
a f¾`cüã@sHddlZddlmZddlmZddlmZddlZGdd„deƒZdS)éN)Ú clientDitto)ÚServer)ÚThreadcs$eZdZ‡fdd„Zdd„Z‡ZS)ÚDittocsLtƒ ||¡| ¡| |t¡td|j›d|j›�ƒtdƒg|_dS)Nz Join ratio / total clients: z / z%Finished creating server and clients.) ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚprintÚ join_ratioÚ num_clientsÚBudget)ÚselfÚargsÚtimes©Ú __class__©út/media/sim812/391e55df-b6f2-4fe9-a920-53434a8506fa/lgh/pdept/PFL-Non-IID-master/system/flcore/servers/serverditto.pyr s  zDitto.__init__cCs t|jdƒD] }t ¡}| ¡|_| ¡||jdkrZtd|›d�ƒtdƒ| ¡|jD]}|  ¡|  ¡q`|  ¡|  ¡|j  t ¡|¡tddd|j dƒqtd ƒtt|jƒƒtd ƒtt|j dd…ƒt|j dd…ƒƒ| ¡| ¡dS) Nérz -------------Round number: z -------------z Evaluate global modelz-------------------------z time costéÿÿÿÿz Best global accuracy.z Average time cost per round.)ÚrangeÚ global_roundsÚtimeÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gapr ÚevaluateZptrainÚtrainÚreceive_modelsÚaggregate_parametersr ÚappendÚmaxÚ rs_test_accÚsumÚlenÚ save_resultsÚsave_global_model)rÚiÚs_tÚclientrrrrs*   (z Ditto.train)Ú__name__Ú __module__Ú __qualname__rrÚ __classcell__rrrrrs r) ÚcopyZflcore.clients.clientdittorÚflcore.servers.serverbaserÚ threadingrrrrrrrÚ<module>s    
1,777
Python
.py
17
103.235294
343
0.488927
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,771
serverfomo.cpython-37.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverfomo.cpython-37.pyc
B ¸:cciã@s`ddlZddlZddlZddlZddlZddlmZddlm Z ddl m Z Gdd„de ƒZ dS)éN)Ú clientFomo)ÚServer)ÚThreadcs4eZdZ‡fdd„Zdd„Zdd„Zdd„Z‡ZS) ÚFedFomocs€tƒ ||¡| ¡| |t¡t tj|j|j d�¡|_ |j g|_ g|_ t|j|jƒ|_td|j›d|j›�ƒtdƒdS)N)Údevicez Join ratio / total clients: z / z%Finished creating server and clients.)ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚtorchÚdiagÚonesÚ num_clientsrÚPÚ global_modelÚuploaded_modelsÚ uploaded_idsÚminÚMÚ join_clientsÚprintÚ join_ratio)ÚselfÚargsÚtimes)Ú __class__©úG/root/autodl-tmp/PFL-Non-IID-master/system/flcore/servers/serverfomo.pyr s  zFedFomo.__init__cCs¢xvt|jdƒD]d}| ¡|_| ¡||jdkrTtd|›d�ƒtdƒ| ¡x|jD] }| ¡q\W|  ¡qWtdƒtt |j ƒƒ|  ¡|  ¡dS)Nérz -------------Round number: z -------------z Evaluate global modelz Best global accuracy.)ÚrangeÚ global_roundsÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gaprÚevaluateÚtrainÚreceive_modelsÚmaxÚ rs_test_accÚ save_resultsÚsave_global_model)rÚiÚclientrrrr&s    z FedFomo.traincCst|jƒdkst‚xè|jD]Ş}t ¡}|jrFt dt tj  ¡¡¡t|j ƒdkrÈt |j t|j ƒƒ}t |j|j|j |¡j ¡}g}g}x,|D]$}| |j |¡| |j |¡q”W| ||¡|jdd7<|jddt ¡|7<qWdS)Nrgš™™™™™¹?Ú num_roundsrÚ total_costé)Úlenr"ÚAssertionErrorÚtimeÚ send_slowÚsleepÚnpÚabsÚrandomÚrandrrrrr ÚtopkrÚidÚindicesÚtolistÚappendr'Úsend_time_cost)rr-Ú start_timeZM_r<rrr,rrrr#9s     zFedFomo.send_modelscCst|jƒdkst‚t |jtd|j|jƒ¡}g|_g|_ d}g|_ x�|D]†}|j d|j d|j d|j d}||j krL|j |j¡|j  |j¡||j7}|j  t |j¡¡|j|j|j7<qLWx$t|j ƒD]\}}|||j |<qâWdS)Nrrr/r.)r1r"r2r8ÚsampleÚintÚclient_drop_raterrÚuploaded_weightsrÚtrain_time_costr?Útime_thretholdr>r;Ú train_samplesÚcopyÚdeepcopyÚmodelrZ weight_vectorÚ enumerate)rZactive_clientsÚ tot_samplesr-Zclient_time_costr,Úwrrrr'Ps$   zFedFomo.receive_models)Ú__name__Ú __module__Ú __qualname__rr&r#r'Ú __classcell__rr)rrr s r) r r3rHr8Únumpyr6Zflcore.clients.clientfomorÚflcore.servers.serverbaserÚ threadingrrrrrrÚ<module>s   
2,989
Python
.py
26
113.807692
422
0.45614
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,772
serverapfl.cpython-39.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverapfl.cpython-39.pyc
a f¾`c®ã@s8ddlmZddlmZddlmZGdd„deƒZdS)é)Ú clientAPFL)ÚServer)ÚThreadcs$eZdZ‡fdd„Zdd„Z‡ZS)ÚAPFLcsFtƒ ||¡| ¡| |t¡td|j›d|j›�ƒtdƒdS)Nz Join ratio / total clients: z / z%Finished creating server and clients.)ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚprintÚ join_ratioÚ num_clients)ÚselfÚargsÚtimes©Ú __class__©ús/media/sim812/391e55df-b6f2-4fe9-a920-53434a8506fa/lgh/pdept/PFL-Non-IID-master/system/flcore/servers/serverapfl.pyrs  z APFL.__init__cCs¢t|jdƒD]h}| ¡|_| ¡||jdkrRtd|›d�ƒtdƒ| ¡|jD] }| ¡qX|  ¡|  ¡qtdƒtt |j ƒƒ|  ¡| ¡dS)Nérz -------------Round number: z -------------z Evaluate global modelz Best global accuracy.)ÚrangeÚ global_roundsÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gapr ÚevaluateÚtrainÚreceive_modelsÚaggregate_parametersÚmaxÚ rs_test_accÚ save_resultsÚsave_global_model)r ÚiÚclientrrrrs    z APFL.train)Ú__name__Ú __module__Ú __qualname__rrÚ __classcell__rrrrrs rN)Zflcore.clients.clientapflrÚflcore.servers.serverbaserÚ threadingrrrrrrÚ<module>s   
1,492
Python
.py
17
86.529412
225
0.500678
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,773
serverbn.cpython-38.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverbn.cpython-38.pyc
U ”jfcãã@sLddlmZddlmZddlmZddlmZddlZGdd„deƒZ dS)é)ÚclientBN)ÚServer)Úread_client_data)ÚThreadNcs$eZdZ‡fdd„Zdd„Z‡ZS)ÚFedBNcsFtƒ ||¡| ¡| |t¡td|j›d|j›�ƒtdƒdS)Nz Join ratio / total clients: z / z%Finished creating server and clients.)ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚprintÚ join_ratioÚ num_clients)ÚselfÚargsÚtimes©Ú __class__©õJD:\京东\promot\cifar\cifar\Cifar10_iid\system\flcore\servers\serverbn.pyr s  zFedBN.__init__cCs¢t|jdƒD]h}| ¡|_| ¡||jdkrRtd|›d�ƒtdƒ| ¡|jD] }| ¡qX|  ¡|  ¡qtdƒtt |j ƒƒ|  ¡| ¡dS)Nérz -------------Round number: z -------------z Evaluate global modelz Best global accuracy.)ÚrangeÚ global_roundsÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gapr ÚevaluateÚtrainÚreceive_modelsÚaggregate_parametersÚmaxÚ rs_test_accÚ save_resultsÚsave_global_model)rÚiÚclientrrrrs    z FedBN.train)Ú__name__Ú __module__Ú __qualname__rrÚ __classcell__rrrrrs r) Zflcore.clients.clientbnrÚflcore.servers.serverbaserÚutils.data_utilsrÚ threadingrÚtimerrrrrÚ<module>s    
1,526
Python
.py
16
94.125
358
0.494375
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,774
serverreppt.cpython-38.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverreppt.cpython-38.pyc
U Õ şc¡ã@spddlmZddlmZddlmZddlZddlZddlZddl Z ddl Z ddlZddl Z Gdd„deƒZ dS)é)Ú clientREPPT)ÚServer)ÚThreadNcsNeZdZ‡fdd„Zdd„Zdd„Zdd„Zd d „Zdd d „Zdd„Z ‡Z S)Ú PFedRepPTcsntƒ ||¡||_| ¡| |t¡t |jj ¡|_ g|_ g|_ t d|j›d|j›�ƒt dƒg|_dS)Nz Join ratio / total clients: z / z%Finished creating server and clients.)ÚsuperÚ__init__ÚargsÚset_slow_clientsÚ set_clientsrÚcopyÚdeepcopyÚmodelÚbaseÚ global_modelÚdiff_proÚclients_divergeÚprintÚ join_ratioÚ num_clientsÚBudget)ÚselfrÚtimes©Ú __class__©õcD:\京东\promot\第二次投稿\å®�验\æœ�务器\native - pro\system\flcore\servers\serverreppt.pyr s zPFedRepPT.__init__c Csúg}t|jdƒD�]n}t ¡}| ¡|_| ¡||jdkr`td|›d�ƒtdƒ| ¡d}|jD]}|  ¡}||  ¡}qjd}t |j dj j ¡|j dj j ¡ƒD]:\}} || } t | dk| t | ¡| ¡} |t | ¡}q®td |  ¡¡ƒ|j |  ¡¡td |¡ƒ|j |¡||jdk�rJtdƒ|j|d �| ¡| ¡|j t ¡|¡td d d |jd ƒqtd ƒtt|jƒƒtdƒtt|jdd…ƒt|jdd…ƒƒtdƒtt|ƒƒ| ¡| ¡|  ¡dS)Nérz -------------Round number: z -------------z Evaluate global modelz"0 and 1 clients difference: {:.4f}z"Averaged prompr difference: {:.4f}z Evaluate local model)Úaccz-------------------------z time costéÿÿÿÿz Best global accuracy.z Average time cost per round.z Best local accuracy.)!ÚrangeÚ global_roundsÚtimeÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gaprÚevaluateÚtrainÚitemÚzipÚclientsr Ú generatorÚ parametersÚtorchÚwhereÚ zeros_likeÚsumÚformatrÚappendrÚreceive_modelsÚaggregate_parametersrÚmaxÚ rs_test_accÚlenÚ save_resultsÚsave_global_modelÚsave_client_model) rÚ local_accÚiÚs_tÚ temp_diff_proÚclientÚtemp_diff_pro_clientÚdiverge_clentsÚ new_paramÚ old_paramrrrrr'sL  .  
( zPFedRepPT.traincCs„t|jƒdkst‚d}|jD]}||j7}qg|_g|_g|_|jD]:}|j |j|¡|j |j¡|j t   |j j ¡¡qDdS)Nr) r7r#ÚAssertionErrorÚ train_samplesÚuploaded_weightsÚ uploaded_idsÚuploaded_modelsr2Úidr r r r)rÚactive_train_samplesr?rrrr3Qs   zPFedRepPT.receive_modelscCstj d|jd|jjd|jjd|jjd|jjd|jj d|jj ¡}tj  |¡sjt  |¡t |jƒD]†\}}tj ||jdt|ƒdt|jjƒdt|jjƒdt|jjƒdt|jjƒdt|jjƒd¡}t |j|¡qtdS)NÚmodelsr?Ú*Ú_clientÚ_z.pt)ÚosÚpathÚjoinÚdatasetrÚarv1Úarv2Úarv3Úarv4Úarv5Úarv6ÚexistsÚmakedirsÚ enumerater*Ú algorithmÚstrÚ num_promptrrÚ plocal_stepsr r-Úsaver )rÚ model_pathÚc_idxÚcÚmodel_path_saverrrr:_s T  pzPFedRepPT.save_client_modelc Cs€|jd|j}d|jjd|jjd|jjd|jjd|jjd|jjd}t j   |¡srt   |¡t |jƒ�r||d|jdt|jƒdt|jjƒdt|jjƒdt|jjƒdt|jjƒdt|jjƒ}|d |¡}td|ƒt |d¡�f}|jd|jd �|jd |jd �|jd |jd �|jd |jd �|jd |jd �|jd|j d �W5QRXdS)NrNz ../results/rLú/z{}.h5z File path: Úwr6)ÚdataÚrs_test_acc_stdÚ rs_test_aucÚ rs_train_lossrr)!rRr\rrSrTrUrVrWrXrOrPrYrZr7r6Úgoalr]rr^rrr_r r1rÚh5pyÚFileÚcreate_datasetrhrirjrr)rÚalgoÚ result_pathÚ file_pathÚhfrrrr8gsL   l zPFedRepPT.save_resultsNc Cs|| ¡}| ¡}t|dƒdt|dƒ}t|dƒdt|dƒ}t|dƒdt|dƒ}t|dƒdt|dƒ}dd„t|d|dƒDƒ} dd„t|d|dƒDƒ} |dkrÎ|j |¡n | |¡|j |¡|j t  | ¡¡|dk�r|j  |¡n | |¡t d   |¡ƒt d   |¡ƒt d   |¡ƒt d   |¡ƒt d   t  | ¡¡ƒt d  t  | ¡¡ƒdS)Néçğ?réécSsg|]\}}||‘qSrr©Ú.0ÚaÚnrrrÚ <listcomp>‚sz&PFedRepPT.evaluate.<locals>.<listcomp>cSsg|]\}}||‘qSrrrwrrrr{ƒszAveraged Train Loss: {:.4f}zAveraged Test Accurancy: {:.4f}z$Averaged Test oral Accurancy: {:.4f}zAveraged Test AUC: {:.4f}zStd Test Accurancy: {:.4f}zStd Test AUC: {:.4f}) Ú test_metricsÚ train_metricsr0r)r6r2rirhÚnpÚstdrjrr1) rrÚlossÚstatsÚ stats_trainÚtest_accÚ test_acc2Útest_aucÚ train_lossÚaccsÚaucsrrrr&ys,    zPFedRepPT.evaluatec Cs~g}g}g}g}|jD]H}| ¡\}}}} | |d¡| |d¡| | |¡| |¡qdd„|jDƒ} | ||||fS)NrtcSsg|] }|j‘qSr)rI)rxrcrrrr{¦sz*PFedRepPT.test_metrics.<locals>.<listcomp>)r*r|r2) rÚ num_samplesÚ tot_correctÚ tot_correct2Útot_aucrcÚctÚct2ÚnsÚaucÚidsrrrr|—s  zPFedRepPT.test_metrics)NN) Ú__name__Ú __module__Ú __qualname__rr'r3r:r8r&r|Ú 
__classcell__rrrrr s 4 r)Zflcore.clients.clientrepptrÚflcore.servers.serverbaserÚ threadingrr!r r-rOrlÚnumpyr~rrrrrÚ<module>s   
6,136
Python
.py
58
104.534483
497
0.430005
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,775
serverper.cpython-37.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverper.cpython-37.pyc
B ¿:ccîã@s8ddlmZddlmZddlmZGdd„deƒZdS)é)Ú clientPer)ÚServer)ÚThreadcs,eZdZ‡fdd„Zdd„Zdd„Z‡ZS)ÚFedPercsFtƒ ||¡| ¡| |t¡td|j›d|j›�ƒtdƒdS)Nz Join ratio / total clients: z / z%Finished creating server and clients.)ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚprintÚ join_ratioÚ num_clients)ÚselfÚargsÚtimes)Ú __class__©úF/root/autodl-tmp/PFL-Non-IID-master/system/flcore/servers/serverper.pyrs  zFedPer.__init__cCsªx~t|jdƒD]l}| ¡|_| ¡||jdkrTtd|›d�ƒtdƒ| ¡x|jD] }| ¡q\W|  ¡|  ¡qWtdƒtt |j ƒƒ|  ¡| ¡dS)Nérz -------------Round number: z -------------z Evaluate global modelz Best global accuracy.)ÚrangeÚ global_roundsÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gapr ÚevaluateÚtrainÚreceive_modelsÚaggregate_parametersÚmaxÚ rs_test_accÚ save_resultsÚsave_global_model)r ÚiÚclientrrrrs    z FedPer.traincCs˜t|jƒdkst‚g|_d}g|_g|_xD|jD]:}|j |j¡||j7}|j |j¡|j |j j ¡q0Wx$t |jƒD]\}}|||j|<qzWdS)Nr) ÚlenrÚAssertionErrorÚuploaded_weightsÚ uploaded_idsÚuploaded_modelsÚappendÚ train_samplesÚidÚmodelÚbaseÚ enumerate)r Ú tot_samplesr#r"Úwrrrr2s  zFedPer.receive_models)Ú__name__Ú __module__Ú __qualname__rrrÚ __classcell__rr)rrrs rN)Zflcore.clients.clientperrÚflcore.servers.serverbaserÚ threadingrrrrrrÚ<module>s   
1,883
Python
.py
14
133.214286
315
0.483422
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,776
serverdynpt.cpython-38.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverdynpt.cpython-38.pyc
U œMgc|ã@shddlZddlZddlmZddlmZddlmZddlZddl Z ddlZddl Z Gdd„deƒZ dS)éN)Ú clientDynPT)ÚServer)ÚThreadcsDeZdZ‡fdd„Zdd„Zdd„Zdd„Zd d „Zd d „Z‡Z S) ÚFedDynPTcsŒtƒ ||¡| ¡| |t¡td|j›d|j›�ƒtdƒg|_g|_ |j |_ g|_ t   |j¡|_|j ¡D]}t |j¡|_qtdS)Nz Join ratio / total clients: z / z%Finished creating server and clients.)ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚprintÚ join_ratioÚ num_clientsÚBudgetÚdiff_proÚalphaÚclients_divergeÚcopyÚdeepcopyÚmodelÚ server_stateÚ parametersÚtorchÚ zeros_likeÚdata)ÚselfÚargsÚtimesÚparam©Ú __class__©õFD:\京东\promot\cifar\cifar\tiny\system\flcore\servers\serverdynpt.pyr s zFedDynPT.__init__c Csg}d|_d}t|jdƒD�]�}t ¡}| ¡|_| ¡||jdkrjtd|›d�ƒtdƒ|  ¡d}|jD]}|  ¡}||  ¡}qttd  |¡ƒ|j  |¡d}t|jdjj ¡|jdjj ¡ƒD]:\}} || } t | dk| t | ¡| ¡} |t | ¡}qÒtd  |  ¡¡ƒ|j |  ¡¡||jdk�rTtd ƒ|j |d �| ¡| ¡| ¡|j t ¡|¡td |jd ƒ|j|jg|jd �|_|d7}qtdƒtt |jƒƒtdƒtt |ƒƒtdƒtt|jdd…ƒt!|jdd…ƒƒ| "¡| #¡dS)NFréz -------------Round number: z -------------z Evaluate global modelz"Averaged prompr difference: {:.4f}z"0 and 1 clients difference: {:.4f}z Evaluate local model)Úaccz2--------------------------------------------------éÿÿÿÿ)Úacc_lssÚtop_cntz Best global accuracy.z Best local accuracy.z Averaged time per iteration.)$ÚdoneÚrangeÚ global_roundsÚtimeÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gapr ÚevaluateÚtrainÚitemÚformatrÚappendÚzipÚclientsrÚ generatorrrÚwhererÚsumrÚreceive_modelsÚupdate_server_stateÚaggregate_parametersr Ú check_doneÚ rs_test_accr%ÚmaxÚlenÚ save_resultsÚsave_global_model) rÚ local_accÚiÚs_tÚ temp_diff_proÚclientÚtemp_diff_pro_clientÚdiverge_clentsÚ new_paramÚ old_paramrrrr r/ sT   .   
(zFedDynPT.traincCs<t|j ¡| ¡ƒD]"\}}|j|j ¡|j7_qdS)N)r3Ú global_modelrrÚcloneÚ join_clients)rÚ client_modelÚ server_paramÚ client_paramrrr Úadd_parameters\szFedDynPT.add_parameterscCs”t|jƒdkst‚t |jd¡|_|j ¡D]}t |j ¡|_ q.|jD]}|  |¡qHt |j ¡|j  ¡ƒD] \}}|j d|j |8_ qndS)Nrr!)r>Úuploaded_modelsÚAssertionErrorrrrJrrrrrPr3rr)rrrMrNÚ state_paramrrr r:`s  zFedDynPT.aggregate_parameterscCs¾t|jƒdkst‚t |jd¡}| ¡D]}t |j¡|_q*|jD]B}t |j  ¡| ¡| ¡ƒD]"\}}}|j|||j 7_qbqDt |j  ¡| ¡ƒD]\}}|j|j |8_qœdS)Nr)r>rQrRrrrrrrr3rJr rr)rÚ model_deltarrMrNrOÚ delta_paramrSrrr r9ms   ÿzFedDynPT.update_server_statec CsÖ|jd|j}d}tj |¡s*t |¡t|jƒrÒ|d|jdt |j ƒ}|d  |¡}t d|ƒt  |d¡�V}|jd|jd�|jd|jd�|jd |jd�|jd |jd�|jd |jd�W5QRXdS) NÚ_z ../results/z{}.h5z File path: Úwr<)rÚrs_test_acc_stdÚ rs_test_aucÚ rs_train_lossr)ÚdatasetÚ algorithmÚosÚpathÚexistsÚmakedirsr>r<ÚgoalÚstrrr1r Úh5pyÚFileÚcreate_datasetrXrYrZr)rÚalgoÚ result_pathÚ file_pathÚhfrrr r?|s    zFedDynPT.save_results) Ú__name__Ú __module__Ú __qualname__rr/rPr:r9r?Ú __classcell__rrrr r s  < r) rrZflcore.clients.clientdynptrÚflcore.servers.serverbaserÚ threadingrr)rcr]rrrrr Ú<module>s   
4,433
Python
.py
43
101.930233
403
0.438169
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,777
serverfedpt.cpython-38.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverfedpt.cpython-38.pyc
U [ÐýcÇã@s`ddlmZddlmZddlmZddlZddlZddlZddl Z ddl Z Gdd„deƒZ dS)é)ÚclientT)ÚServer)ÚThreadNcs<eZdZ‡fdd„Zdd„Zdd„Zdd„Zd d „Z‡ZS) ÚFedPTcsntƒ ||¡||_| ¡| |t¡t |jj ¡|_ g|_ g|_ t d|j›d|j›�ƒt dƒg|_dS)Nz Join ratio / total clients: z / z%Finished creating server and clients.)ÚsuperÚ__init__ÚargsÚset_slow_clientsÚ set_clientsrÚcopyÚdeepcopyÚmodelÚbaseÚ global_modelÚdiff_proÚclients_divergeÚprintÚ join_ratioÚ num_clientsÚBudget)ÚselfrÚtimes©Ú __class__©õYD:\京东\promot\第二次投稿\实验\native - pro\system\flcore\servers\serverfedpt.pyr s zFedPT.__init__c Cs¾t|jdƒD�]J}t ¡}| ¡|_| ¡||jdkr\td|›d�ƒtdƒ| ¡d}|jD]}|  ¡}||  ¡}qfd}t |j dj j ¡|j dj j ¡ƒD]:\}}||} t | dk| t | ¡| ¡} |t | ¡}qªtd |¡ƒtd |  ¡¡ƒ|j |¡|j |  ¡¡| ¡| ¡|j t ¡|¡tdd d|jd ƒqtd ƒtt|jƒƒtd ƒtt|jdd…ƒt|jdd…ƒƒ| ¡| ¡|  ¡dS) Nérz -------------Round number: z -------------z Evaluate global modelz"Averaged prompr difference: {:.4f}z"0 and 1 clients difference: {:.4f}z-------------------------z time costéÿÿÿÿz Best global accuracy.z Average time cost per round.)!ÚrangeÚ global_roundsÚtimeÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gaprÚevaluateÚtrainÚitemÚzipÚclientsr Ú generatorÚ parametersÚtorchÚwhereÚ zeros_likeÚsumÚformatrÚappendrÚreceive_modelsÚaggregate_parametersrÚmaxÚ rs_test_accÚlenÚ save_resultsÚsave_global_modelÚsave_client_model) rÚiÚs_tÚ temp_diff_proÚclientÚtemp_diff_pro_clientÚdiverge_clentsÚ new_paramÚ old_paramrrrrr&s@  . 
(z FedPT.traincCs„t|jƒdkst‚d}|jD]}||j7}qg|_g|_g|_|jD]:}|j |j|¡|j |j¡|j t   |j j ¡¡qDdS)Nr) r6r"ÚAssertionErrorÚ train_samplesÚuploaded_weightsÚ uploaded_idsÚuploaded_modelsr1Úidr r r r)rÚactive_train_samplesr=rrrr2Ps   zFedPT.receive_modelscCs¾tj d|jd¡}tj |¡s(t |¡t|jƒD]†\}}tj ||jdt |ƒdt |j j ƒdt |j j ƒdt |j j ƒdt |j jƒdt |j jƒd¡}t |j|¡q2dS)NÚmodelsr=Ú_clientÚ_z.pt)ÚosÚpathÚjoinÚdatasetÚexistsÚmakedirsÚ enumerater)Ú algorithmÚstrrÚ num_promptrrÚ plocal_stepsrr,Úsaver )rÚ model_pathÚc_idxÚcÚmodel_path_saverrrr9^s   pzFedPT.save_client_modelc Cs8|jd|j}d}tj |¡s*t |¡t|jƒ�r4|d|jdt |j ƒdt |j j ƒdt |j j ƒdt |j jƒdt |j jƒdt |j jƒ}|d |¡}td|ƒt |d¡�f}|jd|jd�|jd|jd�|jd |jd�|jd |jd�|jd |jd�|jd |jd�W5QRXdS) NrKz ../results/z{}.h5z File path: Úwr5)ÚdataÚrs_test_acc_stdÚ rs_test_aucÚ rs_train_lossrr)rOrSrLrMrPrQr6r5ÚgoalrTrrrUrrrVrr0rÚh5pyÚFileÚcreate_datasetr^r_r`rr)rÚalgoÚ result_pathÚ file_pathÚhfrrrr7fs   l zFedPT.save_results) Ú__name__Ú __module__Ú __qualname__rr&r2r9r7Ú __classcell__rrrrr s  3r) Zflcore.clients.clienttrÚflcore.servers.serverbaserÚ threadingrr r,rLrbr rrrrrÚ<module>s   
4,035
Python
.py
39
102.230769
397
0.451839
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,778
serverdynpt.cpython-39.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverdynpt.cpython-39.pyc
a »¢bcã@sPddlZddlZddlmZddlmZddlmZddlZGdd„deƒZ dS)éN)Ú clientDynPT)ÚServer)ÚThreadcsDeZdZ‡fdd„Zdd„Zdd„Zdd„Zd d „Zd d „Z‡Z S) ÚFedDynPTcs€tƒ ||¡| ¡| |t¡td|j›d|j›�ƒtdƒg|_|j |_ t   |j ¡|_ |j  ¡D]}t |j¡|_qhdS)Nz Join ratio / total clients: z / z%Finished creating server and clients.)ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚprintÚ join_ratioÚ num_clientsÚBudgetÚalphaÚcopyÚdeepcopyÚmodelÚ server_stateÚ parametersÚtorchÚ zeros_likeÚdata)ÚselfÚargsÚtimesÚparam©Ú __class__©út/media/sim812/391e55df-b6f2-4fe9-a920-53434a8506fa/lgh/pdept/PFL-Non-IID-master/system/flcore/servers/serverdynpt.pyr s zFedDynPT.__init__cCshg}d|_d}t|jdƒD]Ü}t ¡}| ¡|_| ¡||jdkrhtd|›d�ƒtdƒ|  ¡|jD] }|  ¡qn||jdkržtdƒ|j |d�|  ¡|  ¡|  ¡|j t ¡|¡td |jd ƒ|j|jg|jd �|_|d7}qtd ƒtt|jƒƒtd ƒtt|ƒƒtdƒtt|jdd…ƒt|jdd…ƒƒ| ¡| ¡dS)NFréz -------------Round number: z -------------z Evaluate global modelz Evaluate local model)Úaccz2--------------------------------------------------éÿÿÿÿ)Úacc_lssÚtop_cntz Best global accuracy.z Best local accuracy.z Averaged time per iteration.)ÚdoneÚrangeÚ global_roundsÚtimeÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gapr ÚevaluateÚtrainÚreceive_modelsÚupdate_server_stateÚaggregate_parametersr ÚappendÚ check_doneÚ rs_test_accr#ÚmaxÚsumÚlenÚ save_resultsÚsave_global_model)rÚ local_accÚiÚs_tÚclientrrrr-s>      (zFedDynPT.traincCs<t|j ¡| ¡ƒD]"\}}|j|j ¡|j7_qdS)N)ÚzipÚ global_modelrrÚcloneÚ join_clients)rÚ client_modelÚ server_paramÚ client_paramrrrÚadd_parametersOszFedDynPT.add_parameterscCs”t|jƒdksJ‚t |jd¡|_|j ¡D]}t |j¡|_q.|jD]}|  |¡qHt |j ¡|j  ¡ƒD] \}}|jd|j |8_qndS)Nrr) r6Úuploaded_modelsrrr>rrrrrDr=rr)rrrArBÚ state_paramrrrr0Ss  zFedDynPT.aggregate_parameterscCs¾t|jƒdksJ‚t |jd¡}| ¡D]}t |j¡|_q*|jD]B}t|j  ¡| ¡| ¡ƒD]"\}}}|j|||j 7_qbqDt|j  ¡| ¡ƒD]\}}|j|j |8_qœdS©Nr) r6rErrrrrrr=r>r rr)rÚ model_deltarrArBrCÚ delta_paramrFrrrr/`s    ÿzFedDynPT.update_server_statecCs‚t|jƒdksJ‚d}|jD]}||j7}qg|_g|_g|_|jD]8}|j |j|¡|j |j¡|j t  |j ¡¡qDdSrG) r6r)Ú train_samplesÚuploaded_weightsÚ 
uploaded_idsrEr1Úidrrr)rÚactive_train_samplesr<rrrr.ns   zFedDynPT.receive_models) Ú__name__Ú __module__Ú __qualname__rr-rDr0r/r.Ú __classcell__rrrrr s  2 r) rrZ!system.flcore.clients.clientdynptrÚ system.flcore.servers.serverbaserÚ threadingrr'rrrrrÚ<module>s    
3,712
Python
.py
34
107.970588
413
0.437075
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,779
servermtl.cpython-38.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/servermtl.cpython-38.pyc
U ºĞıcå ã@s@ddlZddlmZddlmZddlmZGdd„deƒZdS)éN)Ú clientMTL)ÚServer)ÚThreadcs4eZdZ‡fdd„Zdd„Zdd„Zdd„Z‡ZS) ÚFedMTLcsÈtƒ ||¡t| |j¡ƒ|_tj|j|jf|j d�|_ |j |_ t  |j|jf¡}t  |jdf¡}|d|j|  |j ¡d}| |j ¡|_| ¡| |t¡td|j›d|j›�ƒtdƒdS)N©Údeviceééz Join clients / total clients: z / z%Finished creating server and clients.)ÚsuperÚ__init__ÚlenÚflattenÚ global_modelÚdimÚtorchÚzerosÚ join_clientsrÚW_globÚonesÚmmÚTÚtoÚomegaÚset_slow_clientsÚ set_clientsrÚprintÚ num_clients)ÚselfÚargsÚtimesÚIÚir©Ú __class__©õWD:\京东\promot\第二次投稿\å®�验\native - pro\system\flcore\servers\servermtl.pyr s zFedMTL.__init__cCsæg}t|jdƒD]”}| ¡|_| ¡||jdkrVtd|›d�ƒtdƒ| ¡t|jƒD]"\}}|  |j |j |¡|  ¡q`||jdkrtdƒ|j|d�qtdƒtt |jƒƒtd ƒtt |ƒƒ| ¡| ¡dS) Nrrz -------------Round number: z -------------z Evaluate global modelz Evaluate local model)Úaccz Best global accuracy.z Best local accuracy.)ÚrangeÚ global_roundsÚselect_clientsÚselected_clientsÚaggregate_parametersÚeval_gaprÚevaluateÚ enumerateZreceive_valuesrrÚtrainÚmaxÚ rs_test_accÚ save_resultsÚsave_global_model)rÚ local_accr!ÚidxÚclientr$r$r%r/s(   z FedMTL.traincs,| ¡‰ˆ ¡}‡fdd„|Dƒ}t |¡S)Ncsg|]}ˆ| ¡‘qSr$)r )Ú.0Úkey©Ú state_dictr$r%Ú <listcomp>@sz"FedMTL.flatten.<locals>.<listcomp>)r:ÚkeysrÚcat)rÚmodelr<ÚWr$r9r%r =szFedMTL.flattencCsLtj|j|jf|jd�|_t|jƒD]"\}}| |j ¡|jdd…|f<q$dS)Nr) rrrrrrr.r*r r>)rr5r6r$r$r%r+CszFedMTL.aggregate_parameters)Ú__name__Ú __module__Ú __qualname__r r/r r+Ú __classcell__r$r$r"r%rs !r)rZflcore.clients.clientmtlrÚflcore.servers.serverbaserÚ threadingrrr$r$r$r%Ú<module>s   
2,520
Python
.py
17
147.058824
582
0.436502
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,780
serverpFedMe.cpython-38.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverpFedMe.cpython-38.pyc
U ”jfcEã@sPddlZddlZddlZddlmZddlmZddlmZGdd„deƒZ dS)éN)Ú clientpFedMe)ÚServer)ÚThreadcsLeZdZ‡fdd„Zdd„Zdd„Zdd„Zd d „Zd d „Zd d„Z ‡Z S)ÚpFedMecs`tƒ ||¡| ¡| |t¡|j|_g|_g|_g|_t d|j ›d|j ›�ƒt dƒdS)Nz Join ratio / total clients: z / z%Finished creating server and clients.) ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚbetaÚrs_train_acc_perÚrs_train_loss_perÚrs_test_acc_perÚprintÚ join_ratioÚ num_clients)ÚselfÚargsÚtimes©Ú __class__©õND:\京东\promot\cifar\cifar\Cifar10_iid\system\flcore\servers\serverpFedMe.pyr s zpFedMe.__init__cCsÀt|jdƒD]†}| ¡|_| ¡|jD] }| ¡q*||jdkrftd|›d�ƒtdƒ| ¡t   t |j   ¡ƒ¡|_| ¡| ¡| ¡qtdƒtt|jƒƒ| ¡| ¡dS)Nérz -------------Round number: z -------------z Evaluate personalized modelz Best personalized results.)ÚrangeÚ global_roundsÚselect_clientsÚselected_clientsÚ send_modelsÚtrainÚeval_gaprÚevaluate_personalized_modelÚcopyÚdeepcopyÚlistÚ global_modelÚ parametersÚprevious_global_modelÚreceive_modelsÚaggregate_parametersÚbeta_aggregate_parametersÚmaxr Ú save_resultsÚsave_global_model)rÚiÚclientrrrrs"    z pFedMe.traincCs>t|j|j ¡ƒD]&\}}d|j|j|j|j|_qdS)Nr)Úzipr&r$r%r Údata)rZ pre_paramÚparamrrrr)Dsz pFedMe.beta_aggregate_parameterscCsRg}g}|jD](}| ¡\}}| |d¡| |¡qdd„|jDƒ}|||fS)Nçğ?cSsg|] }|j‘qSr©Úid©Ú.0ÚcrrrÚ <listcomp>Psz4pFedMe.test_metrics_personalized.<locals>.<listcomp>)ÚclientsÚtest_metrics_personalizedÚappend)rÚ num_samplesÚ tot_correctr7ÚctÚnsÚidsrrrr:Is   z pFedMe.test_metrics_personalizedc Cshg}g}g}|jD]8}| ¡\}}}| |d¡| |¡| |d¡qdd„|jDƒ}||||fS)Nr2cSsg|] }|j‘qSrr3r5rrrr8^sz5pFedMe.train_metrics_personalized.<locals>.<listcomp>)r9Útrain_metrics_personalizedr;) rr<r=Úlossesr7r>Úclr?r@rrrrATs  z!pFedMe.train_metrics_personalizedcCsB| ¡}t|dƒdt|dƒ}|j |¡td |¡ƒdS)Nér2rz+Average Personalized Test Accurancy: {:.4f})r:Úsumr r;rÚformat)rÚstatsÚtest_accrrrr bs z"pFedMe.evaluate_personalized_modelc Cs¦|jd|j}d}tj |¡s*t |¡t|jƒr¢|d|jdt |j ƒ}t   |d  |¡d¡�6}|jd|jd�|jd|jd�|jd|jd�W5QRXdS) NÚ_z ../results/z{}.h5ÚwÚ rs_test_acc)r0Z rs_train_accÚ rs_train_loss)ÚdatasetÚ 
algorithmÚosÚpathÚexistsÚmakedirsÚlenr ÚgoalÚstrrÚh5pyÚFilerFÚcreate_datasetr r )rÚalgoÚ result_pathZalgo2Úhfrrrr+qs  zpFedMe.save_results) Ú__name__Ú __module__Ú __qualname__rrr)r:rAr r+Ú __classcell__rrrrr s + r) rOr!rVZflcore.clients.clientpFedMerZflcore.servers.serverbaserÚ threadingrrrrrrÚ<module>s    
3,780
Python
.py
28
133.714286
434
0.460144
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,781
serverprox.cpython-39.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverprox.cpython-39.pyc
a f¾`c²ã@s8ddlmZddlmZddlmZGdd„deƒZdS)é)Ú clientProx)ÚServer)ÚThreadcs$eZdZ‡fdd„Zdd„Z‡ZS)ÚFedProxcsFtƒ ||¡| ¡| |t¡td|j›d|j›�ƒtdƒdS)Nz Join ratio / total clients: z / z%Finished creating server and clients.)ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚprintÚ join_ratioÚ num_clients)ÚselfÚargsÚtimes©Ú __class__©ús/media/sim812/391e55df-b6f2-4fe9-a920-53434a8506fa/lgh/pdept/PFL-Non-IID-master/system/flcore/servers/serverprox.pyrs  zFedProx.__init__cCs¢t|jdƒD]h}| ¡|_| ¡||jdkrRtd|›d�ƒtdƒ| ¡|jD] }| ¡qX|  ¡|  ¡qtdƒtt |j ƒƒ|  ¡| ¡dS)Nérz -------------Round number: z -------------z Evaluate global modelz Best global accuracy.)ÚrangeÚ global_roundsÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gapr ÚevaluateÚtrainÚreceive_modelsÚaggregate_parametersÚmaxÚ rs_test_accÚ save_resultsÚsave_global_model)r ÚiÚclientrrrrs    z FedProx.train)Ú__name__Ú __module__Ú __qualname__rrÚ __classcell__rrrrrs rN)Zflcore.clients.clientproxrÚflcore.servers.serverbaserÚ threadingrrrrrrÚ<module>s   
1,501
Python
.py
16
92.5625
225
0.504038
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,782
serverlocalpt.cpython-38.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverlocalpt.cpython-38.pyc
U şc4ã@s`ddlmZddlmZddlmZddlZddlZddlZddl Z ddl Z Gdd„deƒZ dS)é)Ú clientAVGPT)ÚServer)ÚThreadNcsNeZdZ‡fdd„Zdd„Zdd„Zdd„Zd d „Zdd d „Zdd„Z ‡Z S)ÚLocalPTcsXtƒ ||¡||_| ¡| |t¡g|_g|_td|j ›d|j ›�ƒtdƒdS)Nz Join ratio / total clients: z / z%Finished creating server and clients.) ÚsuperÚ__init__ÚargsÚset_slow_clientsÚ set_clientsrÚdiff_proÚclients_divergeÚprintÚ join_ratioÚ num_clients)ÚselfrÚtimes©Ú __class__©õeD:\京东\promot\第二次投稿\å®�验\æœ�务器\native - pro\system\flcore\servers\serverlocalpt.pyr s zLocalPT.__init__c CsŠg}t|jdƒD�].}| ¡|_||jdkrPtd|›d�ƒtdƒ| ¡| ¡|_d}|jD]}| ¡}|| ¡}qdtd  |¡ƒ|j   |¡d}t |j djj ¡|j djj ¡ƒD]:\}}||} t | dk| t | ¡| ¡} |t | ¡}qÂtd  | ¡¡ƒ|j  | ¡¡||jdkrtdƒ|j|d �qtd ƒtt|jƒƒtd ƒtt|ƒƒ| ¡| ¡| ¡dS) Nérz -------------Round number: z -------------z Evaluate global modelz"Averaged prompr difference: {:.4f}z"0 and 1 clients difference: {:.4f}z Evaluate local model)Úaccz Best global accuracy.z Best local accuracy.)ÚrangeÚ global_roundsÚselect_clientsÚselected_clientsÚeval_gapr ÚevaluateÚtrainÚitemÚformatr ÚappendÚzipÚclientsÚmodelÚ generatorÚ parametersÚtorchÚwhereÚ zeros_likeÚsumr ÚmaxÚ rs_test_accÚ save_resultsÚsave_global_modelÚsave_client_model) rÚ local_accÚiÚ temp_diff_proÚclientÚtemp_diff_pro_clientÚdiverge_clentsÚ new_paramÚ old_paramr rrrrsB    ÿ  z LocalPT.traincCs„t|jƒdkst‚d}|jD]}||j7}qg|_g|_g|_|jD]:}|j |j|¡|j |j¡|j t   |j j ¡¡qDdS)Nr) ÚlenrÚAssertionErrorÚ train_samplesÚuploaded_weightsÚ uploaded_idsÚuploaded_modelsr!ÚidÚcopyÚdeepcopyr$Úbase)rÚactive_train_samplesr3rrrÚreceive_modelsDs   zLocalPT.receive_modelscCstj d|jd|jjd|jjd|jjd|jjd|jj d|jj ¡}tj  |¡sjt  |¡t |jƒD]†\}}tj ||jdt|ƒdt|jjƒdt|jjƒdt|jjƒdt|jjƒdt|jjƒd¡}t |j|¡qtdS)NÚmodelsr3Ú*Ú_clientÚ_z.pt)ÚosÚpathÚjoinÚdatasetrÚarv1Úarv2Úarv3Úarv4Úarv5Úarv6ÚexistsÚmakedirsÚ enumerater#Ú algorithmÚstrÚ num_promptrrÚ plocal_stepsrr'Úsaver$)rÚ model_pathÚc_idxÚcÚmodel_path_saverrrr/Rs T  pzLocalPT.save_client_modelc Cs€|jd|j}d|jjd|jjd|jjd|jjd|jjd|jjd}t j   |¡srt   |¡t |jƒ�r||d|jdt|jƒdt|jjƒdt|jjƒdt|jjƒdt|jjƒdt|jjƒ}|d 
|¡}td|ƒt |d¡�f}|jd|jd �|jd |jd �|jd |jd �|jd |jd �|jd |jd �|jd|j d �W5QRXdS)NrGz ../results/rEú/z{}.h5z File path: Úwr,)ÚdataÚrs_test_acc_stdÚ rs_test_aucÚ rs_train_lossr r )!rKrUrrLrMrNrOrPrQrHrIrRrSr8r,ÚgoalrVrrWrrrXrr r Úh5pyÚFileÚcreate_datasetrarbrcr r )rÚalgoÚ result_pathÚ file_pathÚhfrrrr-ZsL   l zLocalPT.save_resultsNc Cs|| ¡}| ¡}t|dƒdt|dƒ}t|dƒdt|dƒ}t|dƒdt|dƒ}t|dƒdt|dƒ}dd„t|d|dƒDƒ} dd„t|d|dƒDƒ} |dkrÎ|j |¡n | |¡|j |¡|j t  | ¡¡|dk�r|j  |¡n | |¡t d   |¡ƒt d   |¡ƒt d   |¡ƒt d   |¡ƒt d   t  | ¡¡ƒt d  t  | ¡¡ƒdS)Néçğ?réécSsg|]\}}||‘qSrr©Ú.0ÚaÚnrrrÚ <listcomp>vsz$LocalPT.evaluate.<locals>.<listcomp>cSsg|]\}}||‘qSrrrprrrrtwszAveraged Train Loss: {:.4f}zAveraged Test Accurancy: {:.4f}z$Averaged Test oral Accurancy: {:.4f}zAveraged Test AUC: {:.4f}zStd Test Accurancy: {:.4f}zStd Test AUC: {:.4f}) Ú test_metricsÚ train_metricsr*r"r,r!rbraÚnpÚstdrcr r ) rrÚlossÚstatsÚ stats_trainÚtest_accÚ test_acc2Útest_aucÚ train_lossÚaccsÚaucsrrrrms,    zLocalPT.evaluatec Cs~g}g}g}g}|jD]H}| ¡\}}}} | |d¡| |d¡| | |¡| |¡qdd„|jDƒ} | ||||fS)NrmcSsg|] }|j‘qSr)r>)rqr\rrrrt™sz(LocalPT.test_metrics.<locals>.<listcomp>)r#rur!) rÚ num_samplesÚ tot_correctÚ tot_correct2Útot_aucr\ÚctÚct2ÚnsÚaucÚidsrrrruŒs  zLocalPT.test_metrics)NN) Ú__name__Ú __module__Ú __qualname__rrrCr/r-rruÚ __classcell__rrrrr s , r) Zflcore.clients.clientavgptrÚflcore.servers.serverbaserÚ threadingrr'rHrer?ÚnumpyrwrrrrrÚ<module>s   
5,768
Python
.py
51
111.823529
495
0.428821
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,783
serverdyn.cpython-37.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverdyn.cpython-37.pyc
B ¿:ccã@sPddlZddlZddlmZddlmZddlmZddlZGdd„deƒZ dS)éN)Ú clientDyn)ÚServer)ÚThreadcs<eZdZ‡fdd„Zdd„Zdd„Zdd„Zd d „Z‡ZS) ÚFedDyncs„tƒ ||¡| ¡| |t¡td|j›d|j›�ƒtdƒg|_|j |_ t   |j ¡|_ x |j  ¡D]}t |j¡|_qjWdS)Nz Join ratio / total clients: z / z%Finished creating server and clients.)ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚprintÚ join_ratioÚ num_clientsÚBudgetÚalphaÚcopyÚdeepcopyÚmodelÚ server_stateÚ parametersÚtorchÚ zeros_likeÚdata)ÚselfÚargsÚtimesÚparam)Ú __class__©úF/root/autodl-tmp/PFL-Non-IID-master/system/flcore/servers/serverdyn.pyr s zFedDyn.__init__cCspg}d|_d}xòt|jdƒD]à}t ¡}| ¡|_| ¡||jdkrjtd|›d�ƒtdƒ|  ¡x|jD] }|  ¡qrW||jdkr¤tdƒ|j |d�|  ¡|  ¡|  ¡|j t ¡|¡td |jd ƒ|j|jg|jd �|_|d7}qWtd ƒtt|jƒƒtd ƒtt|ƒƒtdƒtt|jdd…ƒt|jdd…ƒƒ| ¡| ¡dS)NFréz -------------Round number: z -------------z Evaluate global modelz Evaluate local model)Úaccz2--------------------------------------------------éÿÿÿÿ)Úacc_lssÚtop_cntz Best global accuracy.z Best local accuracy.z Averaged time per iteration.)ÚdoneÚrangeÚ global_roundsÚtimeÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gapr ÚevaluateÚtrainÚreceive_modelsÚupdate_server_stateÚaggregate_parametersr ÚappendÚ check_doneÚ rs_test_accr"ÚmaxÚsumÚlenÚ save_resultsÚsave_global_model)rZ local_accÚiÚs_tÚclientrrrr,s>      (z FedDyn.traincCs@x:t|j ¡| ¡ƒD]"\}}|j|j ¡|j7_qWdS)N)ÚzipÚ global_modelrrÚcloneÚ join_clients)rÚ client_modelÚ server_paramÚ client_paramrrrÚadd_parametersOszFedDyn.add_parameterscCs t|jƒdkst‚t |jd¡|_x |j ¡D]}t |j ¡|_ q0Wx|jD]}|  |¡qNWx:t |j ¡|j  ¡ƒD] \}}|j d|j |8_ qxWdS)Nrr)r5Úuploaded_modelsÚAssertionErrorrrr<rrrrrBr;rr)rrr?r@Ú state_paramrrrr/Ss  zFedDyn.aggregate_parameterscCsÎt|jƒdkst‚t |jd¡}x| ¡D]}t |j¡|_q,WxP|jD]F}x@t |j  ¡| ¡| ¡ƒD]"\}}}|j|||j 7_qjWqJWx4t |j  ¡| ¡ƒD]\}}|j|j |8_qªWdS)Nr)r5rCrDrrrrrrr;r<r rr)rZ model_deltarr?r@rAZ delta_paramrErrrr.`s & zFedDyn.update_server_state) Ú__name__Ú __module__Ú __qualname__rr,rBr/r.Ú __classcell__rr)rrr s  1 r) rrZsystem.flcore.clients.clientdynrÚ 
system.flcore.servers.serverbaserÚ threadingrr&rrrrrÚ<module>s    
3,307
Python
.py
25
130.92
435
0.439842
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,784
serverditto.cpython-38.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverditto.cpython-38.pyc
U ”jfcüã@sHddlZddlmZddlmZddlmZddlZGdd„deƒZdS)éN)Ú clientDitto)ÚServer)ÚThreadcs$eZdZ‡fdd„Zdd„Z‡ZS)ÚDittocsLtƒ ||¡| ¡| |t¡td|j›d|j›�ƒtdƒg|_dS)Nz Join ratio / total clients: z / z%Finished creating server and clients.) ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚprintÚ join_ratioÚ num_clientsÚBudget)ÚselfÚargsÚtimes©Ú __class__©õMD:\京东\promot\cifar\cifar\Cifar10_iid\system\flcore\servers\serverditto.pyr s  zDitto.__init__cCs t|jdƒD] }t ¡}| ¡|_| ¡||jdkrZtd|›d�ƒtdƒ| ¡|jD]}|  ¡|  ¡q`|  ¡|  ¡|j  t ¡|¡tddd|j dƒqtd ƒtt|jƒƒtd ƒtt|j dd…ƒt|j dd…ƒƒ| ¡| ¡dS) Nérz -------------Round number: z -------------z Evaluate global modelz-------------------------z time costéÿÿÿÿz Best global accuracy.z Average time cost per round.)ÚrangeÚ global_roundsÚtimeÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gapr ÚevaluateZptrainÚtrainÚreceive_modelsÚaggregate_parametersr ÚappendÚmaxÚ rs_test_accÚsumÚlenÚ save_resultsÚsave_global_model)rÚiÚs_tÚclientrrrrs*   (z Ditto.train)Ú__name__Ú __module__Ú __qualname__rrÚ __classcell__rrrrrs r) ÚcopyZflcore.clients.clientdittorÚflcore.servers.serverbaserÚ threadingrrrrrrrÚ<module>s    
1,741
Python
.py
17
101.117647
343
0.478841
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,785
serverprox.cpython-37.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverprox.cpython-37.pyc
B ¿:cc²ã@s8ddlmZddlmZddlmZGdd„deƒZdS)é)Ú clientProx)ÚServer)ÚThreadcs$eZdZ‡fdd„Zdd„Z‡ZS)ÚFedProxcsFtƒ ||¡| ¡| |t¡td|j›d|j›�ƒtdƒdS)Nz Join ratio / total clients: z / z%Finished creating server and clients.)ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚprintÚ join_ratioÚ num_clients)ÚselfÚargsÚtimes)Ú __class__©úG/root/autodl-tmp/PFL-Non-IID-master/system/flcore/servers/serverprox.pyrs  zFedProx.__init__cCsªx~t|jdƒD]l}| ¡|_| ¡||jdkrTtd|›d�ƒtdƒ| ¡x|jD] }| ¡q\W|  ¡|  ¡qWtdƒtt |j ƒƒ|  ¡| ¡dS)Nérz -------------Round number: z -------------z Evaluate global modelz Best global accuracy.)ÚrangeÚ global_roundsÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gapr ÚevaluateÚtrainÚreceive_modelsÚaggregate_parametersÚmaxÚ rs_test_accÚ save_resultsÚsave_global_model)r ÚiÚclientrrrrs    z FedProx.train)Ú__name__Ú __module__Ú __qualname__rrÚ __classcell__rr)rrrs rN)Zflcore.clients.clientproxrÚflcore.servers.serverbaserÚ threadingrrrrrrÚ<module>s   
1,451
Python
.py
13
110.307692
227
0.496178
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,786
serverrep.cpython-39.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverrep.cpython-39.pyc
a ï`c ã@sHddlmZddlmZddlmZddlZddlZGdd„deƒZdS)é)Ú clientRep)ÚServer)ÚThreadNcs,eZdZ‡fdd„Zdd„Zdd„Z‡ZS)ÚFedRepcsLtƒ ||¡| ¡| |t¡td|j›d|j›�ƒtdƒg|_dS)Nz Join ratio / total clients: z / z%Finished creating server and clients.) ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚprintÚ join_ratioÚ num_clientsÚBudget)ÚselfÚargsÚtimes©Ú __class__©úr/media/sim812/391e55df-b6f2-4fe9-a920-53434a8506fa/lgh/pdept/PFL-Non-IID-master/system/flcore/servers/serverrep.pyr s  zFedRep.__init__cCst|jdƒD]˜}t ¡}| ¡|_| ¡||jdkrZtd|›d�ƒtdƒ| ¡|jD] }|  ¡q`|  ¡|  ¡|j   t ¡|¡tddd|j dƒqtd ƒtt|jƒƒtd ƒtt|j dd…ƒt|j dd…ƒƒ| ¡| ¡dS) Nérz -------------Round number: z -------------z Evaluate global modelz-------------------------z time costéÿÿÿÿz Best global accuracy.z Average time cost per round.)ÚrangeÚ global_roundsÚtimeÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gapr ÚevaluateÚtrainÚreceive_modelsÚaggregate_parametersr ÚappendÚmaxÚ rs_test_accÚsumÚlenÚ save_resultsÚsave_global_model)rÚiÚs_tÚclientrrrrs(   (z FedRep.traincCs„t|jƒdksJ‚d}|jD]}||j7}qg|_g|_g|_|jD]:}|j |j|¡|j |j¡|j t  |j j ¡¡qDdS)Nr) r&rÚ train_samplesÚuploaded_weightsÚ uploaded_idsÚuploaded_modelsr"ÚidÚcopyÚdeepcopyÚmodelÚbase)rZactive_train_samplesr+rrrr :s   zFedRep.receive_models)Ú__name__Ú __module__Ú __qualname__rrr Ú __classcell__rrrrrs #r) Zsystem.flcore.clients.clientreprÚ system.flcore.servers.serverbaserÚ threadingrrr1rrrrrÚ<module>s   
2,176
Python
.py
19
113.263158
369
0.477294
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,787
serverbnpt.cpython-38.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverbnpt.cpython-38.pyc
U €fc" ã@slddlmZddlmZddlmZddlmZddlZddl Z ddl Z ddl Z ddl Z Gdd„deƒZ dS)é)Ú clientBNPT)ÚServer)Úread_client_data)ÚThreadNcs,eZdZ‡fdd„Zdd„Zdd„Z‡ZS)ÚFedBNPTcsRtƒ ||¡| ¡| |t¡g|_g|_td|j›d|j ›�ƒtdƒdS)Nz Join ratio / total clients: z / z%Finished creating server and clients.) ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚdiff_proÚclients_divergeÚprintÚ join_ratioÚ num_clients)ÚselfÚargsÚtimes©Ú __class__©õLD:\京东\promot\cifar\cifar\Cifar10_iid\system\flcore\servers\serverbnpt.pyr s zFedBNPT.__init__c CsVt|jdƒD�]}| ¡|_| ¡||jdkrTtd|›d�ƒtdƒ| ¡d}|jD]}| ¡}||  ¡}q^td  |¡ƒ|j   |¡d}t |jdjj ¡|jdjj ¡ƒD]:\}}||}t |dk|t |¡|¡}|t |¡}q¼td  |  ¡¡ƒ|j  |  ¡¡| ¡| ¡qtdƒtt|jƒƒ| ¡| ¡dS) Nérz -------------Round number: z -------------z Evaluate global modelz"Averaged prompr difference: {:.4f}z"0 and 1 clients difference: {:.4f}z Best global accuracy.)ÚrangeÚ global_roundsÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gapr ÚevaluateÚtrainÚitemÚformatr ÚappendÚzipÚclientsÚmodelÚ generatorÚ parametersÚtorchÚwhereÚ zeros_likeÚsumr Úreceive_modelsÚaggregate_parametersÚmaxÚ rs_test_accÚ save_resultsÚsave_global_model) rÚiÚ temp_diff_proÚclientÚtemp_diff_pro_clientÚdiverge_clentsÚ new_paramÚ old_paramr rrrrs8   ÿ  z FedBNPT.trainc CsÖ|jd|j}d}tj |¡s*t |¡t|jƒrÒ|d|jdt |j ƒ}|d  |¡}t d|ƒt  |d¡�V}|jd|jd�|jd|jd�|jd |jd�|jd |jd�|jd |jd�W5QRXdS) NÚ_z ../results/z{}.h5z File path: Úwr/)ÚdataÚrs_test_acc_stdÚ rs_test_aucÚ rs_train_lossr )ÚdatasetÚ algorithmÚosÚpathÚexistsÚmakedirsÚlenr/ÚgoalÚstrrr!r Úh5pyÚFileÚcreate_datasetr<r=r>r )rÚalgoÚ result_pathÚ file_pathÚhfrrrr0Cs    zFedBNPT.save_results)Ú__name__Ú __module__Ú __qualname__rrr0Ú __classcell__rrrrr s (r)Zflcore.clients.clientbnptrÚflcore.servers.serverbaserÚutils.data_utilsrÚ threadingrÚtimerHÚcopyrAr(rrrrrÚ<module>s    
2,725
Python
.py
23
117.26087
357
0.483907
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,788
serverper.cpython-38.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverper.cpython-38.pyc
U ºĞıc ã@s`ddlmZddlmZddlmZddlZddlZddlZddl Z ddl Z Gdd„deƒZ dS)é)Ú clientPer)ÚServer)ÚThreadNcs,eZdZ‡fdd„Zdd„Zdd„Z‡ZS)ÚFedPercsFtƒ ||¡| ¡| |t¡td|j›d|j›�ƒtdƒdS)Nz Join ratio / total clients: z / z%Finished creating server and clients.)ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚprintÚ join_ratioÚ num_clients)ÚselfÚargsÚtimes©Ú __class__©õWD:\京东\promot\第二次投稿\å®�验\native - pro\system\flcore\servers\serverper.pyr s  zFedPer.__init__cCsÜg}t|jdƒD]Š}| ¡|_| ¡||jdkrVtd|›d�ƒtdƒ| ¡|jD] }| ¡q\||jdkrŒtdƒ|j|d�|  ¡|  ¡qtdƒtt |j ƒƒtd ƒtt |ƒƒ|  ¡| ¡dS) Nérz -------------Round number: z -------------z Evaluate global modelz Evaluate local model)Úaccz Best global accuracy.z Best local accuracy.)ÚrangeÚ global_roundsÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gapr ÚevaluateÚtrainÚreceive_modelsÚaggregate_parametersÚmaxÚ rs_test_accÚ save_resultsÚsave_global_model)r Ú local_accÚiÚclientrrrrs*      z FedPer.traincCs�t|jƒdkst‚g|_d}g|_g|_|jD]:}|j |j¡||j7}|j |j¡|j |j j ¡q.t |jƒD]\}}|||j|<qtdS)Nr) ÚlenrÚAssertionErrorÚuploaded_weightsÚ uploaded_idsÚuploaded_modelsÚappendÚ train_samplesÚidÚmodelÚbaseÚ enumerate)r Ú tot_samplesr&r%Úwrrrr:s  zFedPer.receive_models)Ú__name__Ú __module__Ú __qualname__rrrÚ __classcell__rrrrr s "r) Zflcore.clients.clientperrÚflcore.servers.serverbaserÚ threadingrÚh5pyÚcopyÚosÚtorchÚnumpyÚnprrrrrÚ<module>s   
2,123
Python
.py
23
91.130435
254
0.482151
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,789
serverprox.cpython-38.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverprox.cpython-38.pyc
U ºĞıc�ã@s8ddlmZddlmZddlmZGdd„deƒZdS)é)Ú clientProx)ÚServer)ÚThreadcs$eZdZ‡fdd„Zdd„Z‡ZS)ÚFedProxcsFtƒ ||¡| ¡| |t¡td|j›d|j›�ƒtdƒdS)Nz Join ratio / total clients: z / z%Finished creating server and clients.)ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚprintÚ join_ratioÚ num_clients)ÚselfÚargsÚtimes©Ú __class__©õXD:\京东\promot\第二次投稿\å®�验\native - pro\system\flcore\servers\serverprox.pyrs  zFedProx.__init__cCsÜg}t|jdƒD]Š}| ¡|_| ¡||jdkrVtd|›d�ƒtdƒ| ¡|jD] }| ¡q\||jdkrŒtdƒ|j|d�|  ¡|  ¡qtdƒtt |j ƒƒtd ƒtt |ƒƒ|  ¡| ¡dS) Nérz -------------Round number: z -------------z Evaluate global modelz Evaluate local model)Úaccz Best global accuracy.z Best local accuracy.)ÚrangeÚ global_roundsÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gapr ÚevaluateÚtrainÚreceive_modelsÚaggregate_parametersÚmaxÚ rs_test_accÚ save_resultsÚsave_global_model)r Ú local_accÚiÚclientrrrrs*      z FedProx.train)Ú__name__Ú __module__Ú __qualname__rrÚ __classcell__rrrrrs rN)Zflcore.clients.clientproxrÚflcore.servers.serverbaserÚ threadingrrrrrrÚ<module>s   
1,611
Python
.py
19
83.578947
225
0.491525
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,790
serverlocal.cpython-39.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverlocal.cpython-39.pyc
a f¾`c…ã@s8ddlmZddlmZddlmZGdd„deƒZdS)é)Ú clientAVG)ÚServer)ÚThreadcs$eZdZ‡fdd„Zdd„Z‡ZS)ÚLocalcsFtƒ ||¡| ¡| |t¡td|j›d|j›�ƒtdƒdS)Nz Join ratio / total clients: z / z%Finished creating server and clients.)ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚprintÚ join_ratioÚ num_clients)ÚselfÚargsÚtimes©Ú __class__©út/media/sim812/391e55df-b6f2-4fe9-a920-53434a8506fa/lgh/pdept/PFL-Non-IID-master/system/flcore/servers/serverlocal.pyrs  zLocal.__init__cCs”t|jdƒD]Z}| ¡|_||jdkrJtd|›d�ƒtdƒ| ¡| ¡|_|jD] }| ¡qZqtdƒtt|j ƒƒ|  ¡|  ¡dS)Nérz -------------Round number: z -------------z Evaluate global modelz Best global accuracy.) ÚrangeÚ global_roundsÚselect_clientsÚselected_clientsÚeval_gapr ÚevaluateÚtrainÚmaxÚ rs_test_accÚ save_resultsÚsave_global_model)r ÚiÚclientrrrrs    z Local.train)Ú__name__Ú __module__Ú __qualname__rrÚ __classcell__rrrrrs rN)Zflcore.clients.clientavgrÚflcore.servers.serverbaserÚ threadingrrrrrrÚ<module>s   
1,425
Python
.py
14
100.5
318
0.492918
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,791
serveramppt.cpython-38.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serveramppt.cpython-38.pyc
U şc£!ã@sŒddlZddlZddlZddlZddlZddlmZmZddl m Z ddl m Z ddl Z ddlZddlZddlZddlZGdd„de ƒZdS)éN)Ú clientAMPPTÚweight_flatten)ÚServer)ÚThreadcs^eZdZ‡fdd„Zdd„Zdd„Zdd„Zd d „Zdd d „Zdd„Z dd„Z dd„Z ‡Z S)ÚFedAMPPTcshtƒ ||¡||_| ¡| |t¡g|_g|_|j|_|j |_ t d|j ›d|j ›�ƒt dƒdS)Nz Join ratio / total clients: z / z%Finished creating server and clients.) ÚsuperÚ__init__ÚargsÚset_slow_clientsÚ set_clientsrÚclients_divergeÚdiff_proÚalphaKÚsigmaÚprintÚ join_ratioÚ num_clients)Úselfr Útimes©Ú __class__©õcD:\京东\promot\第二次投稿\å®�验\æœ�务器\native - pro\system\flcore\servers\serveramppt.pyrs zFedAMPPT.__init__c Cs’g}t|jdƒD�]6}| ¡|_| ¡||jdkrXtd|›d�ƒtdƒ| ¡d}|jD]}| ¡}||  ¡}qbtd  |¡ƒ|j   |¡d}t |jdjj ¡|jdjj ¡ƒD]:\}}||} t | dk| t | ¡| ¡} |t | ¡}qÀtd  |  ¡¡ƒ|j  |  ¡¡||jdk�rBtdƒ|j|d �| ¡qtd ƒtt|jƒƒtd ƒtt|ƒƒ| ¡| ¡| ¡dS) Nérz -------------Round number: z -------------z Evaluate global modelz"Averaged prompr difference: {:.4f}z"0 and 1 clients difference: {:.4f}z Evaluate local model)Úaccz Best global accuracy.z Best local accuracy.)ÚrangeÚ global_roundsÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gaprÚevaluateÚtrainÚitemÚformatr ÚappendÚzipÚclientsÚmodelÚ generatorÚ parametersÚtorchÚwhereÚ zeros_likeÚsumr Úreceive_modelsÚmaxÚ rs_test_accÚ save_resultsÚsave_global_modelÚsave_client_model) rÚ local_accÚiÚ temp_diff_proÚclientÚtemp_diff_pro_clientÚdiverge_clentsÚ new_paramÚ old_paramr rrrr"sD   ÿ    zFedAMPPT.traincCs‚t|jƒdkst‚d}|jD]}||j7}qg|_g|_g|_|jD]8}|j |j|¡|j |j¡|j t   |j ¡¡qDdS)Nr) ÚlenrÚAssertionErrorÚ train_samplesÚuploaded_weightsÚ uploaded_idsÚuploaded_modelsr%ÚidÚcopyÚdeepcopyr()rÚactive_train_samplesr8rrrr/Ks   zFedAMPPT.receive_modelscCstj d|jd|jjd|jjd|jjd|jjd|jj d|jj ¡}tj  |¡sjt  |¡t |jƒD]†\}}tj ||jdt|ƒdt|jjƒdt|jjƒdt|jjƒdt|jjƒdt|jjƒd¡}t |j|¡qtdS)NÚmodelsr8Ú*Ú_clientÚ_z.pt)ÚosÚpathÚjoinÚdatasetr Úarv1Úarv2Úarv3Úarv4Úarv5Úarv6ÚexistsÚmakedirsÚ enumerater'Ú algorithmÚstrÚ num_promptrrÚ plocal_stepsrr+Úsaver()rÚ model_pathÚc_idxÚcÚmodel_path_saverrrr4Ys T  pzFedAMPPT.save_client_modelc 
Cs€|jd|j}d|jjd|jjd|jjd|jjd|jjd|jjd}t j   |¡srt   |¡t |jƒ�r||d|jdt|jƒdt|jjƒdt|jjƒdt|jjƒdt|jjƒdt|jjƒ}|d |¡}td|ƒt |d¡�f}|jd|jd �|jd |jd �|jd |jd �|jd |jd �|jd |jd �|jd|j d �W5QRXdS)NrJz ../results/rHú/z{}.h5z File path: Úwr1)ÚdataÚrs_test_acc_stdÚ rs_test_aucÚ rs_train_lossr r )!rNrXr rOrPrQrRrSrTrKrLrUrVr=r1ÚgoalrYrrZrrr[rr$rÚh5pyÚFileÚcreate_datasetrdrerfr r )rÚalgoÚ result_pathÚ file_pathÚhfrrrr2asL   l zFedAMPPT.save_resultsNc Cs|| ¡}| ¡}t|dƒdt|dƒ}t|dƒdt|dƒ}t|dƒdt|dƒ}t|dƒdt|dƒ}dd„t|d|dƒDƒ} dd„t|d|dƒDƒ} |dkrÎ|j |¡n | |¡|j |¡|j t  | ¡¡|dk�r|j  |¡n | |¡t d   |¡ƒt d   |¡ƒt d   |¡ƒt d   |¡ƒt d   t  | ¡¡ƒt d  t  | ¡¡ƒdS)Néçğ?réécSsg|]\}}||‘qSrr©Ú.0ÚaÚnrrrÚ <listcomp>}sz%FedAMPPT.evaluate.<locals>.<listcomp>cSsg|]\}}||‘qSrrrsrrrrw~szAveraged Train Loss: {:.4f}zAveraged Test Accurancy: {:.4f}z$Averaged Test oral Accurancy: {:.4f}zAveraged Test AUC: {:.4f}zStd Test Accurancy: {:.4f}zStd Test AUC: {:.4f}) Ú test_metricsÚ train_metricsr.r&r1r%rerdÚnpÚstdrfrr$) rrÚlossÚstatsÚ stats_trainÚtest_accÚ test_acc2Útest_aucÚ train_lossÚaccsÚaucsrrrr!ts,    zFedAMPPT.evaluatec Cs~g}g}g}g}|jD]H}| ¡\}}}} | |d¡| |d¡| | |¡| |¡qdd„|jDƒ} | ||||fS)NrpcSsg|] }|j‘qSr)rC)rtr_rrrrw sz)FedAMPPT.test_metrics.<locals>.<listcomp>)r'rxr%) rÚ num_samplesÚ tot_correctÚ tot_correct2Útot_aucr_ÚctÚct2ÚnsÚaucÚidsrrrrx“s  zFedAMPPT.test_metricsc CsŒt|jƒdkst‚t|jƒdk�rˆ|jD�]\}t |j¡}| ¡D]}|j   ¡qBt   |j ¡}t|jƒD]b\}}|j|j|krÂt|jƒ}t|ƒ}|| d¡} t  | | ¡} |j| | ¡||<qhd||<qhdt  |¡} t|jƒD]:\}}t| ¡| ¡ƒD]\}} |j ||| 7_ qşqät ¡} |j�rJt dt tj ¡¡¡|  || ¡|j!dd7<|j!ddt ¡| 7<q(dS)Nréÿÿÿÿrgš™™™™™¹?Ú num_roundsÚ total_costro)"r=rr>rBr'rDrEÚ global_modelr*rcÚzero_r+ÚzerosÚ join_clientsrWrCrArr(ÚviewÚdotrÚer.r&ÚtimeÚ send_slowÚsleeprzÚabsÚrandomÚrandÚset_parametersÚsend_time_cost) rr_ÚmuÚparamÚcoefÚjÚmwÚ weights_iÚ weights_jÚsubÚ coef_selfÚparam_jÚ start_timerrrr¥s2         zFedAMPPT.send_modelscCst | |j¡|jS)N)ÚmathÚexpr)rÚxrrrr—Ész FedAMPPT.e)NN) Ú__name__Ú __module__Ú __qualname__rr"r/r4r2r!rxrr—Ú __classcell__rrrrrs - 
$r)r+rDr˜Únumpyrzr«Zflcore.clients.clientampptrrÚflcore.servers.serverbaserÚ threadingrrhrKrrrrrÚ<module>s  
6,980
Python
.py
62
111.33871
496
0.421882
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,792
serverpfedpt.cpython-37.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverpfedpt.cpython-37.pyc
B �ÑecŞã@s`ddlmZddlmZddlmZddlZddlZddlZddl Z ddl Z Gdd„deƒZ dS)é)ÚclientPT)ÚServer)ÚThreadNcs<eZdZ‡fdd„Zdd„Zdd„Zdd„Zd d „Z‡ZS) ÚPFedPTcsntƒ ||¡| ¡||_| |t¡t |jj ¡|_ g|_ g|_ t d|j›d|j›�ƒt dƒg|_dS)Nz Join ratio / total clients: z / z%Finished creating server and clients.)ÚsuperÚ__init__Úset_slow_clientsÚargsÚ set_clientsrÚcopyÚdeepcopyÚmodelÚbaseÚ global_modelÚdiff_proÚclients_divergeÚprintÚ join_ratioÚ num_clientsÚBudget)Úselfr Útimes)Ú __class__©úI/root/autodl-tmp/PFL-Non-IID-master/system/flcore/servers/serverpfedpt.pyr s zPFedPT.__init__c CsÌ�xft|jdƒD�]R}t ¡}| ¡|_| ¡||jdkr`td|›d�ƒtdƒ| ¡d}x"|jD]}|  ¡}||  ¡}qlWtd  |¡ƒ|j   |¡d}xdt|jdjj ¡|jdjj ¡ƒD]:\}}||} t | dk| t | ¡| ¡} |t | ¡}qÎWtd  |  ¡¡ƒ|j  |  ¡¡| ¡| ¡|j  t ¡|¡tdd d|jd ƒqWtd ƒtt|jƒƒtd ƒtt|jdd…ƒt|jdd…ƒƒ| ¡| ¡|  ¡dS) Nérz -------------Round number: z -------------z Evaluate global modelz"Averaged prompr difference: {:.4f}z"0 and 1 clients difference: {:.4f}z-------------------------z time costéÿÿÿÿz Best global accuracy.z Average time cost per round.)!ÚrangeÚ global_roundsÚtimeÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gaprÚevaluateÚtrainÚitemÚformatrÚappendÚzipÚclientsr Ú generatorÚ parametersÚtorchÚwhereÚ zeros_likeÚsumrÚreceive_modelsÚaggregate_parametersrÚmaxÚ rs_test_accÚlenÚ save_resultsÚsave_global_modelÚsave_client_model) rÚiÚs_tZ temp_diff_proÚclientZtemp_diff_pro_clientZdiverge_clentsÚ new_paramÚ old_paramrrrrr%s@   0(z PFedPT.traincCsŒt|jƒdkst‚d}x|jD]}||j7}qWg|_g|_g|_xD|jD]:}|j |j|¡|j |j¡|j t   |j j ¡¡qJWdS)Nr) r5r!ÚAssertionErrorÚ train_samplesÚuploaded_weightsÚ uploaded_idsÚuploaded_modelsr(Úidr r r r)rÚactive_train_samplesr;rrrr1Os  zPFedPT.receive_modelscCsÂtj d|jd¡}tj |¡s(t |¡x”t|jƒD]†\}}tj ||jdt |ƒdt |j j ƒdt |j j ƒdt |j j ƒdt |j jƒdt |j jƒd¡}t |j|¡q4WdS)NÚmodelsr;Z_clientÚ_z.pt)ÚosÚpathÚjoinÚdatasetÚexistsÚmakedirsÚ enumerater*Ú algorithmÚstrr Ú num_promptrrÚ plocal_stepsrr-Úsaver )rÚ model_pathÚc_idxÚcZmodel_path_saverrrr8]s   pzPFedPT.save_client_modelc Cs8|jd|j}d}tj |¡s*t 
|¡t|jƒ�r4|d|jdt |j ƒdt |j j ƒdt |j j ƒdt |j jƒdt |j jƒdt |j jƒ}|d |¡}td|ƒt |d¡�f}|jd|jd�|jd|jd�|jd |jd�|jd |jd�|jd |jd�|jd |jd�WdQRXdS) NrFz ../results/z{}.h5z File path: Úwr4)ÚdataÚrs_test_acc_stdÚ rs_test_aucÚ rs_train_lossrr)rJrNrGrHrKrLr5r4ÚgoalrOrr rPrrrQrr'rÚh5pyÚFileÚcreate_datasetrXrYrZrr)rÚalgoÚ result_pathÚ file_pathÚhfrrrr6es   l zPFedPT.save_results) Ú__name__Ú __module__Ú __qualname__rr%r1r8r6Ú __classcell__rr)rrr s  1r) Zsystem.flcore.clients.clientptrÚ system.flcore.servers.serverbaserÚ threadingrrr-rGr\r rrrrrÚ<module>s   
4,038
Python
.py
36
110.916667
397
0.459405
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,793
serverproto.cpython-37.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverproto.cpython-37.pyc
B ¾:ccÜã@shddlmZddlmZddlmZddlmZddlZddl Z ddl m Z Gdd„deƒZ d d „ZdS) é)Ú clientProto)ÚServer)Úread_client_data)ÚThreadN)Ú defaultdictcs>eZdZ‡fdd„Zdd„Zdd„Zdd„Zd d d „Z‡ZS) ÚFedProtocsjtƒ ||¡| ¡| |t¡td|j›d|j›�ƒtdƒg|_|j |_ dd„t |j ƒDƒ|_ dS)Nz Join ratio / total clients: z / z%Finished creating server and clients.cSsg|]}d‘qS)N©)Ú.0Ú_rrúH/root/autodl-tmp/PFL-Non-IID-master/system/flcore/servers/serverproto.pyú <listcomp>sz%FedProto.__init__.<locals>.<listcomp>) ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚprintÚ join_ratioÚ num_clientsÚBudgetÚ num_classesÚrangeÚ global_protos)ÚselfÚargsÚtimes)Ú __class__rr r s zFedProto.__init__cCs&d|_d}xĞ|jsÚt ¡}| ¡|_||jdkrZ|dkrZtd|›d�ƒtdƒ| ¡x|jD] }| ¡qbW| ¡t |j ƒ|_ |  ¡|j  t ¡|¡td|j dƒ|dkrĞ|j|jg|jd�|_|d 7}q Wtd ƒtt|jƒƒtt|j d d…ƒt|j d d…ƒƒ| ¡dS) NFrz -------------Round number: z -------------z Evaluate global modelz2--------------------------------------------------éÿÿÿÿ)Úacc_lssÚtop_cntéz Best global accuracy.)ÚdoneÚtimeÚselect_clientsÚselected_clientsÚeval_gaprÚevaluateÚtrainÚreceive_protosÚproto_aggregationÚuploaded_protosrÚ send_protosrÚappendÚ check_doneÚ rs_test_accrÚmaxÚsumÚlenÚ save_results)rÚiÚs_tÚclientrrr r&s.     
(zFedProto.traincCs2t|jƒdkst‚x|jD]}| |j¡qWdS)Nr)r0r#ÚAssertionErrorZ set_protosr)rr4rrr r*Cs zFedProto.send_protoscCsNt|jƒdkst‚g|_g|_x*|jD] }|j |j¡|j |j¡q&WdS)Nr)r0r#r5Ú uploaded_idsr)r+ÚidÚprotos)rr4rrr r'Is  zFedProto.receive_protosNcCsØ| ¡}| ¡}t|dƒdt|dƒ}t|dƒdt|dƒ}dd„t|d|dƒDƒ}|dkrz|j |¡n | |¡|dkrš|j |¡n | |¡td |¡ƒtd |¡ƒtd t   |¡¡ƒdS) Négğ?rcSsg|]\}}||‘qSrr)r ÚaÚnrrr r Xsz%FedProto.evaluate.<locals>.<listcomp>zAveraged Train Loss: {:.4f}zAveraged Test Accurancy: {:.4f}zStd Test Accurancy: {:.4f}) Ú test_metricsÚ train_metricsr/Úzipr-r+Ú rs_train_lossrÚformatÚnpÚstd)rÚaccÚlossÚstatsÚ stats_trainÚtest_accÚ train_lossÚaccsrrr r%Rs  zFedProto.evaluate)NN) Ú__name__Ú __module__Ú __qualname__rr&r*r'r%Ú __classcell__rr)rr r s  ( rcCs¦ttƒ}x0|D](}x"| ¡D]}|| ||¡qWqWxf| ¡D]Z\}}t|ƒdkr�d|dj}x|D]}||j7}qlW|t|ƒ||<qD|dj||<qDW|S)Nrr)rÚlistÚkeysr+Úitemsr0Údata)Zlocal_protos_listZagg_protos_labelZ local_protosÚlabelZ proto_listÚprotor2rrr r(ks   r()Zflcore.clients.clientprotorÚflcore.servers.serverbaserÚutils.data_utilsrÚ threadingrr!ÚnumpyrAÚ collectionsrrr(rrrr Ú<module>s     a
3,696
Python
.py
29
126.275862
475
0.439204
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,794
serverphp.cpython-37.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverphp.cpython-37.pyc
B ¿:cc”ã@sHddlmZddlmZddlmZddlZddlZGdd„deƒZdS)é)Ú clientPHP)ÚServer)ÚThreadNcs,eZdZ‡fdd„Zdd„Zdd„Z‡ZS)ÚFedPHPcsLtƒ ||¡| ¡| |t¡td|j›d|j›�ƒtdƒg|_dS)Nz Join ratio / total clients: z / z%Finished creating server and clients.) ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚprintÚ join_ratioÚ num_clientsÚBudget)ÚselfÚargsÚtimes)Ú __class__©úF/root/autodl-tmp/PFL-Non-IID-master/system/flcore/servers/serverphp.pyr s  zFedPHP.__init__cCs¬x€t|jdƒD]n}| ¡|_| |¡||jdkrVtd|›d�ƒtdƒ| ¡x|jD] }| ¡q^W|  ¡|  ¡qWtdƒtt |j ƒƒ|  ¡| ¡dS)Nérz -------------Round number: z -------------z Evaluate global modelz Best global accuracy.)ÚrangeÚ global_roundsÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gapr ÚevaluateÚtrainÚreceive_modelsÚaggregate_parametersÚmaxÚ rs_test_accÚ save_resultsÚsave_global_model)rÚiÚclientrrrrs     z FedPHP.traincCs4t|jƒdkst‚x|jD]}| |j|¡qWdS)Nr)ÚlenrÚAssertionErrorÚset_parametersÚ global_model)rÚRr$rrrr5s zFedPHP.send_models)Ú__name__Ú __module__Ú __qualname__rrrÚ __classcell__rr)rrrs r) Zflcore.clients.clientphprÚflcore.servers.serverbaserÚ threadingrÚtimeÚcopyrrrrrÚ<module>s   
1,724
Python
.py
13
131.307692
338
0.484813
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,795
servermoon.cpython-39.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/servermoon.cpython-39.pyc
a f¾`c{ã@sLddlmZddlmZddlmZddlmZddlZGdd„deƒZ dS)é)Ú clientMOON)ÚServer)Úread_client_data)ÚThreadNcs$eZdZ‡fdd„Zdd„Z‡ZS)ÚMOONcsLtƒ ||¡| ¡| |t¡td|j›d|j›�ƒtdƒg|_dS)Nz Join ratio / total clients: z / z%Finished creating server and clients.) ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚprintÚ join_ratioÚ num_clientsÚBudget)ÚselfÚargsÚtimes©Ú __class__©ús/media/sim812/391e55df-b6f2-4fe9-a920-53434a8506fa/lgh/pdept/PFL-Non-IID-master/system/flcore/servers/servermoon.pyr s  z MOON.__init__cCsTg}d|_d}|jsæt ¡}| ¡|_| ¡||jdkr\td|›d�ƒtdƒ| ¡|jD] }| ¡qb||jdkr’tdƒ|j|d�|  ¡|  ¡|j   t ¡|¡td|j d ƒ|j |jg|jd �|_|d 7}qtd ƒtt|jƒƒtd ƒtt|ƒƒtdƒtt|j d d…ƒt|j d d…ƒƒ| ¡| ¡dS)NFrz -------------Round number: z -------------z Evaluate global modelz Evaluate local model)Úaccz2--------------------------------------------------éÿÿÿÿ)Úacc_lssÚtop_cntéz Best global accuracy.z Best local accuracy.z Averaged time per iteration.)ÚdoneÚtimeÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gapr ÚevaluateÚtrainÚreceive_modelsÚaggregate_parametersrÚappendÚ check_doneÚ rs_test_accrÚmaxÚsumÚlenÚ save_resultsÚsave_global_model)rÚ local_accÚiÚs_tÚclientrrrr"s<      (z MOON.train)Ú__name__Ú __module__Ú __qualname__rr"Ú __classcell__rrrrrs r) Zflcore.clients.clientmoonrÚflcore.servers.serverbaserÚutils.data_utilsrÚ threadingrrrrrrrÚ<module>s    
1,996
Python
.py
22
89.5
412
0.482025
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,796
serverbabupt.cpython-38.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverbabupt.cpython-38.pyc
U şc ã@s`ddlmZddlmZddlmZddlZddlZddlZddl Z ddl Z Gdd„deƒZ dS)é)Ú clientBABUPT)ÚServer)ÚThreadNcsNeZdZ‡fdd„Zdd„Zdd„Zdd„Zdd d „Zd d „Zdd„Z ‡Z S)Ú FedBABUPTcsXtƒ ||¡||_| ¡| |t¡g|_g|_td|j ›d|j ›�ƒtdƒdS)Nz Join ratio / total clients: z / z%Finished creating server and clients.) ÚsuperÚ__init__ÚargsÚset_slow_clientsÚ set_clientsrÚclients_divergeÚdiff_proÚprintÚ join_ratioÚ num_clients)ÚselfrÚtimes©Ú __class__©õdD:\京东\promot\第二次投稿\å®�验\æœ�务器\native - pro\system\flcore\servers\serverbabupt.pyr s zFedBABUPT.__init__c CsÀg}t|jdƒD�]>}| ¡|_| ¡||jdkrXtd|›d�ƒtdƒ| ¡d}|jD]}| ¡}||  ¡}qbtd  |¡ƒ|j   |¡d}t |jdjj ¡|jdjj ¡ƒD]:\}}||} t | dk| t | ¡| ¡} |t | ¡}qÀtd  |  ¡¡ƒ|j  |  ¡¡||jdk�rBtdƒ|j|d �| ¡| ¡qtd ƒtt|jƒƒtd ƒtt|ƒƒ|jD]}| ¡�q„td ƒ| ¡| ¡| ¡| ¡dS) Nérz -------------Round number: z -------------z Evaluate global modelz"Averaged prompr difference: {:.4f}z"0 and 1 clients difference: {:.4f}z Evaluate local model)Úaccz Best global accuracy.z Best local accuracy.z4 -------------Evaluate fine-tuned model-------------)ÚrangeÚ global_roundsÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gapr ÚevaluateÚtrainÚitemÚformatr ÚappendÚzipÚclientsÚmodelÚ generatorÚ parametersÚtorchÚwhereÚ zeros_likeÚsumr Úreceive_modelsÚaggregate_parametersÚmaxÚ rs_test_accÚ fine_tuneÚ save_resultsÚsave_global_modelÚsave_client_model) rÚ local_accÚiÚ temp_diff_proÚclientÚtemp_diff_pro_clientÚdiverge_clentsÚ new_paramÚ old_paramr rrrrsN   ÿ      zFedBABUPT.traincCstj d|jd|jjd|jjd|jjd|jjd|jj d|jj ¡}tj  |¡sjt  |¡t |jƒD]†\}}tj ||jdt|ƒdt|jjƒdt|jjƒdt|jjƒdt|jjƒdt|jjƒd¡}t |j|¡qtdS)NÚmodelsr7Ú*Ú_clientÚ_z.pt)ÚosÚpathÚjoinÚdatasetrÚarv1Úarv2Úarv3Úarv4Úarv5Úarv6ÚexistsÚmakedirsÚ enumerater$Ú algorithmÚstrÚ num_promptrrÚ plocal_stepsrr(Úsaver%)rÚ model_pathÚc_idxÚcÚmodel_path_saverrrr3Ns T  pzFedBABUPT.save_client_modelc Cs€|jd|j}d|jjd|jjd|jjd|jjd|jjd|jjd}t j   |¡srt   |¡t |jƒ�r||d|jdt|jƒdt|jjƒdt|jjƒdt|jjƒdt|jjƒdt|jjƒ}|d |¡}td|ƒt |d¡�f}|jd|jd �|jd |jd �|jd |jd �|jd |jd �|jd |jd �|jd|j d �W5QRXdS)Nr?z ../results/r=ú/z{}.h5z 
File path: Úwr/)ÚdataÚrs_test_acc_stdÚ rs_test_aucÚ rs_train_lossr r )!rCrMrrDrErFrGrHrIr@rArJrKÚlenr/ÚgoalrNrrOrrrPrr!r Úh5pyÚFileÚcreate_datasetrYrZr[r r )rÚalgoÚ result_pathÚ file_pathÚhfrrrr1VsL   l zFedBABUPT.save_resultsNc Cs|| ¡}| ¡}t|dƒdt|dƒ}t|dƒdt|dƒ}t|dƒdt|dƒ}t|dƒdt|dƒ}dd„t|d|dƒDƒ} dd„t|d|dƒDƒ} |dkrÎ|j |¡n | |¡|j |¡|j t  | ¡¡|dk�r|j  |¡n | |¡t d   |¡ƒt d   |¡ƒt d   |¡ƒt d   |¡ƒt d   t  | ¡¡ƒt d  t  | ¡¡ƒdS)Néçğ?réécSsg|]\}}||‘qSrr©Ú.0ÚaÚnrrrÚ <listcomp>rsz&FedBABUPT.evaluate.<locals>.<listcomp>cSsg|]\}}||‘qSrrrirrrrmsszAveraged Train Loss: {:.4f}zAveraged Test Accurancy: {:.4f}z$Averaged Test oral Accurancy: {:.4f}zAveraged Test AUC: {:.4f}zStd Test Accurancy: {:.4f}zStd Test AUC: {:.4f}) Ú test_metricsÚ train_metricsr+r#r/r"rZrYÚnpÚstdr[r r!) rrÚlossÚstatsÚ stats_trainÚtest_accÚ test_acc2Útest_aucÚ train_lossÚaccsÚaucsrrrris,    zFedBABUPT.evaluatec Cs~g}g}g}g}|jD]H}| ¡\}}}} | |d¡| |d¡| | |¡| |¡qdd„|jDƒ} | ||||fS)NrfcSsg|] }|j‘qSr)Úid)rjrTrrrrm•sz*FedBABUPT.test_metrics.<locals>.<listcomp>)r$rnr") rÚ num_samplesÚ tot_correctÚ tot_correct2Útot_aucrTÚctÚct2ÚnsÚaucÚidsrrrrnˆs  zFedBABUPT.test_metricscCs�t|jƒdkst‚g|_d}g|_g|_|jD]8}|j |j¡||j7}|j |j¡|j |j ¡q.t |jƒD]\}}|||j|<qrdS)Nr) r\rÚAssertionErrorÚuploaded_weightsÚ uploaded_idsÚuploaded_modelsr"Ú train_samplesr{r%rL)rÚ tot_samplesr7r5rWrrrr,šs  zFedBABUPT.receive_models)NN) Ú__name__Ú __module__Ú __qualname__rrr3r1rrnr,Ú __classcell__rrrrr s 4 r) Zflcore.clients.clientbabuptrÚflcore.servers.serverbaserÚ threadingrr^Úcopyr@r(ÚnumpyrprrrrrÚ<module>s   
5,957
Python
.py
53
111.188679
497
0.427942
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,797
serverproto.cpython-38.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverproto.cpython-38.pyc
U •icÊã@shddlmZddlmZddlmZddlmZddlZddl Z ddl m Z Gdd„deƒZ d d „ZdS) é)Ú clientProto)ÚServer)Úread_client_data)ÚThreadN)Ú defaultdictcs>eZdZ‡fdd„Zdd„Zdd„Zdd„Zd d d „Z‡ZS) ÚFedProtocsjtƒ ||¡| ¡| |t¡td|j›d|j›�ƒtdƒg|_|j |_ dd„t |j ƒDƒ|_ dS)Nz Join ratio / total clients: z / z%Finished creating server and clients.cSsg|]}d‘qS)N©)Ú.0Ú_rrõFD:\京东\promot\cifar\cifar\tiny\system\flcore\servers\serverproto.pyÚ <listcomp>sz%FedProto.__init__.<locals>.<listcomp>) ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚprintÚ join_ratioÚ num_clientsÚBudgetÚ num_classesÚrangeÚ global_protos)ÚselfÚargsÚtimes©Ú __class__rr r s zFedProto.__init__cCsd|_d}|jsÔt ¡}| ¡|_||jdkrX|dkrXtd|›d�ƒtdƒ| ¡|jD] }| ¡q^| ¡t |j ƒ|_ |  ¡|j  t ¡|¡td|j dƒ|dkrÊ|j|jg|jd�|_|d 7}q td ƒtt|jƒƒtt|j d d…ƒt|j d d…ƒƒ| ¡dS) NFrz -------------Round number: z -------------z Evaluate global modelz2--------------------------------------------------éÿÿÿÿ)Úacc_lssÚtop_cntéz Best global accuracy.)ÚdoneÚtimeÚselect_clientsÚselected_clientsÚeval_gaprÚevaluateÚtrainÚreceive_protosÚproto_aggregationÚuploaded_protosrÚ send_protosrÚappendÚ check_doneÚ rs_test_accrÚmaxÚsumÚlenÚ save_results)rÚiÚs_tÚclientrrr r's.     
(zFedProto.traincCs.t|jƒdkst‚|jD]}| |j¡qdS©Nr)r1ÚclientsÚAssertionErrorZ set_protosr©rr5rrr r+Cs zFedProto.send_protoscCsJt|jƒdkst‚g|_g|_|jD] }|j |j¡|j |j¡q$dSr6)r1r$r8Ú uploaded_idsr*r,ÚidÚprotosr9rrr r(Is  zFedProto.receive_protosNcCsØ| ¡}| ¡}t|dƒdt|dƒ}t|dƒdt|dƒ}dd„t|d|dƒDƒ}|dkrz|j |¡n | |¡|dkrš|j |¡n | |¡td |¡ƒtd |¡ƒtd t   |¡¡ƒdS) Négğ?r cSsg|]\}}||‘qSrr)r ÚaÚnrrr r Xsz%FedProto.evaluate.<locals>.<listcomp>zAveraged Train Loss: {:.4f}zAveraged Test Accurancy: {:.4f}zStd Test Accurancy: {:.4f}) Ú test_metricsÚ train_metricsr0Úzipr.r,Ú rs_train_lossrÚformatÚnpÚstd)rÚaccÚlossÚstatsÚ stats_trainÚtest_accÚ train_lossÚaccsrrr r&Rs  zFedProto.evaluate)NN) Ú__name__Ú __module__Ú __qualname__rr'r+r(r&Ú __classcell__rrrr r s  ( rcCs–ttƒ}|D]$}| ¡D]}|| ||¡qq | ¡D]V\}}t|ƒdkr‚d|dj}|D]}||j7}q`|t|ƒ||<q:|dj||<q:|S)Nr r)rÚlistÚkeysr,Úitemsr1Údata)Zlocal_protos_listZagg_protos_labelZ local_protosÚlabelZ proto_listÚprotor3rrr r)ks   r))Zflcore.clients.clientprotorÚflcore.servers.serverbaserÚutils.data_utilsrÚ threadingrr"ÚnumpyrEÚ collectionsrrr)rrrr Ú<module>s     a
3,701
Python
.py
33
111
412
0.433361
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,798
serverrod.cpython-38.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serverrod.cpython-38.pyc
U ºĞıcÄã@sLddlmZddlmZddlmZddlmZddlZGdd„deƒZ dS)é)Ú clientROD)ÚServer)Úread_client_data)ÚThreadNcs$eZdZ‡fdd„Zdd„Z‡ZS)ÚFedRODcsFtƒ ||¡| ¡| |t¡td|j›d|j›�ƒtdƒdS)Nz Join ratio / total clients: z / z%Finished creating server and clients.)ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚprintÚ join_ratioÚ num_clients)ÚselfÚargsÚtimes©Ú __class__©õWD:\京东\promot\第二次投稿\å®�验\native - pro\system\flcore\servers\serverrod.pyr s  zFedROD.__init__cCsÜg}t|jdƒD]Š}| ¡|_| ¡||jdkrVtd|›d�ƒtdƒ| ¡|jD] }| ¡q\||jdkrŒtdƒ|j|d�|  ¡|  ¡qtdƒtt |j ƒƒtd ƒtt |ƒƒ|  ¡| ¡dS) Nérz -------------Round number: z -------------z Evaluate global modelz Evaluate local model)Úaccz Best global accuracy.z Best local accuracy.)ÚrangeÚ global_roundsÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gapr ÚevaluateÚtrainÚreceive_modelsÚaggregate_parametersÚmaxÚ rs_test_accÚ save_resultsÚsave_global_model)rÚ local_accÚiÚclientrrrrs*      z FedROD.train)Ú__name__Ú __module__Ú __qualname__rrÚ __classcell__rrrrrs r) Zflcore.clients.clientrodrÚflcore.servers.serverbaserÚutils.data_utilsrÚ threadingrÚtimerrrrrÚ<module>s    
1,678
Python
.py
19
87.105263
360
0.495783
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)
2,286,799
serveramp.cpython-39.pyc
hkgdifyu_pFedPT/system/flcore/servers/__pycache__/serveramp.cpython-39.pyc
a f¾`cÍ ã@sdddlZddlZddlZddlZddlZddlmZmZddl m Z ddl m Z Gdd„de ƒZ dS)éN)Ú clientAMPÚweight_flatten)ÚServer)ÚThreadcs4eZdZ‡fdd„Zdd„Zdd„Zdd„Z‡ZS) ÚFedAMPcsVtƒ ||¡| ¡| |t¡|j|_|j|_td|j›d|j ›�ƒtdƒdS)Nz Join ratio / total clients: z / z%Finished creating server and clients.) ÚsuperÚ__init__Úset_slow_clientsÚ set_clientsrÚalphaKÚsigmaÚprintÚ join_ratioÚ num_clients)ÚselfÚargsÚtimes©Ú __class__©úr/media/sim812/391e55df-b6f2-4fe9-a920-53434a8506fa/lgh/pdept/PFL-Non-IID-master/system/flcore/servers/serveramp.pyr s zFedAMP.__init__cCsšt|jdƒD]`}| ¡|_| ¡||jdkrRtd|›d�ƒtdƒ| ¡|jD] }| ¡qX|  ¡qtdƒtt |j ƒƒ|  ¡|  ¡dS)Nérz -------------Round number: z -------------z Evaluate global modelz Best global accuracy.)ÚrangeÚ global_roundsÚselect_clientsÚselected_clientsÚ send_modelsÚeval_gapr ÚevaluateÚtrainÚreceive_modelsÚmaxÚ rs_test_accÚ save_resultsÚsave_global_model)rÚiÚclientrrrrs    z FedAMP.trainc CsŒt|jƒdksJ‚t|jƒdk�rˆ|jD�]\}t |j¡}| ¡D]}|j ¡qBt   |j ¡}t |jƒD]b\}}|j |j|krÂt|jƒ}t|ƒ}|| d¡} t  | | ¡} |j| | ¡||<qhd||<qhdt  |¡} t |jƒD]:\}}t| ¡| ¡ƒD]\}} |j||| 7_qşqät ¡} |j�rJt dt tj ¡¡¡| || ¡|jdd7<|jddt ¡| 7<q(dS)Nréÿÿÿÿrgš™™™™™¹?Ú num_roundsÚ total_costé) ÚlenrÚuploaded_modelsÚcopyÚdeepcopyÚ global_modelÚ parametersÚdataÚzero_ÚtorchÚzerosÚ join_clientsÚ enumerateÚidÚ uploaded_idsrÚmodelÚviewÚdotr ÚeÚsumÚzipÚtimeÚ send_slowÚsleepÚnpÚabsÚrandomÚrandÚset_parametersÚsend_time_cost) rÚcÚmuÚparamÚcoefÚjÚmwZ weights_iZ weights_jÚsubZ coef_selfZparam_jÚ start_timerrrr6s2         zFedAMP.send_modelscCst | |j¡|jS)N)ÚmathÚexpr )rÚxrrrr<ZszFedAMP.e)Ú__name__Ú __module__Ú __qualname__rrrr<Ú __classcell__rrrrr s $r)r3r-r?ÚnumpyrBrPZflcore.clients.clientamprrÚflcore.servers.serverbaserÚ threadingrrrrrrÚ<module>s  
2,696
Python
.py
26
102.5
373
0.469861
hkgdifyu/pFedPT
8
3
0
GPL-2.0
9/5/2024, 10:48:09 PM (Europe/Amsterdam)